diff --git a/mps/.github/workflows/build-and-test.yml b/mps/.github/workflows/build-and-test.yml new file mode 100644 index 00000000000..0f0473aca36 --- /dev/null +++ b/mps/.github/workflows/build-and-test.yml @@ -0,0 +1,114 @@ +# .github/workflows/build-and-test.yml -- GitHub CI build and test configuration for the MPS +# +# Copyright (c) 2019-2022 `GitHub contributors`_ (MIT License). +# Copyright (c) 2023 Ravenbrook Limited. See end of file for license. +# +# See design.mps.test.ci. +# +# TODO: Exclude certain branches. +# +# TODO: Regular builds of version branches. See +# . + +name: build and test + +on: + - push + - pull_request + # Also run when triggered manually, e.g. by tool/github-ci-kick + # + - workflow_dispatch + +jobs: + + posix: + + # The build matrix for GitHub CI on Posix platforms + # + # See design.mps.tests.ci.github.platforms. + # + # FreeBSD and ARM64 targets are in Travis CI, configured by + # .travis.yml. + # + # See . + + strategy: + fail-fast: false # don't cancel all builds when one build fails + matrix: + os: [ubuntu-latest, macos-latest] + compiler: [clang, gcc] + exclude: + - os: macos-latest + compiler: gcc + + runs-on: ${{ matrix.os }} + + # See design.mps.tests.ci.run.posix. + steps: + - uses: actions/checkout@v4 # see + - run: CC=${{ matrix.compiler }} ./configure + - run: make + - run: make test + + windows: + + runs-on: windows-latest + + # See design.mps.tests.ci.run.windows. + # + # The path to Visual Studio is documented at + # . + + steps: + - uses: actions/checkout@v4 # see + - run: | + call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64 + cd code + nmake /f w3i6mv.nmk all testci testansi testpollnone + shell: cmd + + +# A. REFERENCES +# +# [GitHub CI] "About continuous integration"; . +# +# +# B. DOCUMENT HISTORY +# +# 2023-01-11 RB Adapted from . +# 2023-01-15 RB Added licence and document history. +# +# +# C. 
COPYRIGHT AND LICENSE +# +# NOTE: This is the `MIT Licence `_ +# inherited from +# and not the usual licence for the MPS. +# +# Copyright (c) 2019-2022 `GitHub contributors`_. +# Copyright (c) 2023 Ravenbrook Limited . +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation files +# (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, +# publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +# .. _GitHub contributors: https://github.com/actions/starter-workflows/commits/1d9d6d7fb0a8a27ef98efbbfa9689cd14c906383/ci/c-cpp.yml +# +# +# $Id$ diff --git a/mps/.github/workflows/fixme-check.yml b/mps/.github/workflows/fixme-check.yml new file mode 100644 index 00000000000..e42f7fa3bcc --- /dev/null +++ b/mps/.github/workflows/fixme-check.yml @@ -0,0 +1,21 @@ +# .github/workflows/fixme-check.yml -- check for FIXME task labels +# +# This is a GitHub CI workflow +# +# to check for FIXME and similar task labels left unresolved in the +# MPS source tree. + +name: FIXME check + +on: + # Run as part of CI checks on branch push and on merged pull request. 
+ - push + - pull_request + - workflow_dispatch # allow manual triggering + +jobs: + check-fixme: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 # see + - run: tool/check-fixme diff --git a/mps/.github/workflows/rst-check.yml b/mps/.github/workflows/rst-check.yml new file mode 100644 index 00000000000..1f643c57427 --- /dev/null +++ b/mps/.github/workflows/rst-check.yml @@ -0,0 +1,23 @@ +# .github/workflows/rst-check.yml -- check syntax of reStructuredText files +# +# This is a GitHub CI workflow +# +# to check the syntax of reStructuredText files. + +name: reStructuredText syntax check + +on: + # Run as part of CI checks on branch push and on merged pull request. + - push + - pull_request + - workflow_dispatch # allow manual triggering + +jobs: + check-rst: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 # see + - name: Install docutils + run: sudo apt-get install -y docutils + - name: Check reStructuredText syntax + run: tool/check-rst diff --git a/mps/.github/workflows/shell-script-check.yml b/mps/.github/workflows/shell-script-check.yml new file mode 100644 index 00000000000..e8e9e2ae513 --- /dev/null +++ b/mps/.github/workflows/shell-script-check.yml @@ -0,0 +1,23 @@ +# .github/workflows/shell-script-check.yml -- check shell scripts +# +# This is a GitHub CI workflow +# +# to check shell scripts. + +name: shell script check + +on: + # Run as part of CI checks on branch push and on merged pull request. 
+ - push + - pull_request + - workflow_dispatch # allow manual triggering + +jobs: + check-shell-scripts: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 # see + - name: Install shellcheck + run: sudo apt-get install -y shellcheck + - name: Check shell scripts + run: tool/check-shell-scripts diff --git a/mps/.gitignore b/mps/.gitignore new file mode 100644 index 00000000000..89c70e46dc5 --- /dev/null +++ b/mps/.gitignore @@ -0,0 +1,32 @@ +# .p4ignore -- Perforce files to ignore from MPS Project +# $Id$ +# Don't forget to consider files in more specific directories. +# Personal Perforce configurations +.p4config +# Mac OS X Finder turds +.DS_Store +# Patch results +*.orig +*.rej +# Autoconf and Automake output +Makefile +autom4te.cache +config.log +config.status +.deps +.dirstamp +bin +lib +# Misc +TAGS +*.dSYM +*.pyc +test/obj +test/test/log +test/test/obj +....gcda +....gcno +\#*# +*~ +.#.* +core diff --git a/mps/.p4ignore b/mps/.p4ignore new file mode 120000 index 00000000000..3e4e48b0b5f --- /dev/null +++ b/mps/.p4ignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/mps/.readthedocs.yaml b/mps/.readthedocs.yaml new file mode 100644 index 00000000000..8ae20323cd0 --- /dev/null +++ b/mps/.readthedocs.yaml @@ -0,0 +1,70 @@ +# .readthedocs.yaml -- Build configuration for MPS manual on Read The Docs +# +# Copyright (c) 2023 Ravenbrook Limited. See end of file for license. +# +# This file controls how Read the Docs builds and publishes the MPS +# manual at . +# +# See for +# the file format. +# +# Project configuration is at +# . The GitHub +# Ravenbot user is an administrator +# . + +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: '3' + +python: + install: + - requirements: manual/requirements.pip + +sphinx: + configuration: manual/source/conf.py + fail_on_warning: true + +# A. REFERENCES +# +# [Readthedocs] "Read the Docs: Documentation Simplified"; +# . +# +# +# B. 
DOCUMENT HISTORY +# +# 2023-02-02 RB Created as part of MPS GitHub migration. +# +# +# C. COPYRIGHT AND LICENSE +# +# Copyright © 2023 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# +# $Id$ diff --git a/mps/.travis.yml b/mps/.travis.yml new file mode 100644 index 00000000000..3a9d340e5a6 --- /dev/null +++ b/mps/.travis.yml @@ -0,0 +1,105 @@ +# .travis.yml -- Travis CI configuration for the MPS +# +# Copyright (c) 2013-2023 Ravenbrook Limited. See end of file for license. +# +# See design.mps.test.ci. + +# Some branches don't need builds. Add them here to avoid using build +# resources and unnecessary build messages. 
+branches: + except: + - branch/2023-01-07/pull-request-merge-procedure + - branch/2023-01-11/github-ci + +language: c # see . + +# The build matrix for Travis CI +# +# See design.mps.tests.ci.travis.platforms. +# +# Most x86_64/amd64 builds are in GitHub CI, configured by +# .github/workflows/build-and-test.yml. + +os: + - freebsd + - linux +arch: + - arm64 +compiler: + - clang + - gcc + +script: # see design.mps.test.ci.run.posix +- ./configure --prefix=$PWD/prefix && make install && make test + +matrix: + + # Extra build jobs to add to the matrix + include: + + # GitHub CI does not provide FreeBSD + # + # on any architecture, so we add it here for amd64. See also + # design.mps.tests.ci.travis.platforms. + + - os: freebsd + arch: amd64 + compiler: clang + - os: freebsd + arch: amd64 + compiler: gcc + + # Specific combinations to exclude from the matrix + exclude: + - os: osx + compiler: gcc + +notifications: + email: + - mps-travis@ravenbrook.com + +# This shows how you can ask Travis to install or update packages. +#before_install: +# - if test "$TRAVIS_OS_NAME" = "linux"; then sudo apt-get -qq update; fi +# - if test "$TRAVIS_OS_NAME" = "linux"; then sudo apt-get install -y gcc-4.7; fi + + +# A. REFERENCES +# +# +# B. DOCUMENT HISTORY +# +# 2013-05-19 RB Created. +# 2023-01-15 RB Added licence and (note) document history. +# +# +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2013-2023 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# +# $Id$ diff --git a/mps/CONTRIBUTING b/mps/CONTRIBUTING new file mode 120000 index 00000000000..b765af98457 --- /dev/null +++ b/mps/CONTRIBUTING @@ -0,0 +1 @@ +contributing.rst \ No newline at end of file diff --git a/mps/INSTALL b/mps/INSTALL new file mode 120000 index 00000000000..e989720e22b --- /dev/null +++ b/mps/INSTALL @@ -0,0 +1 @@ +manual/build.txt \ No newline at end of file diff --git a/mps/Makefile.in b/mps/Makefile.in new file mode 100644 index 00000000000..a1e87594061 --- /dev/null +++ b/mps/Makefile.in @@ -0,0 +1,115 @@ +# Makefile.in -- source for autoconf Makefile +# +# $Id$ +# Copyright (C) 2012-2023 Ravenbrook Limited. See end of file for license. +# +# YOU DON'T NEED AUTOCONF TO BUILD THE MPS +# This is just here for people who want or expect a configure script. +# See [Building the Memory Pool System](manual/build.txt) for how best +# to build and integrate the MPS. +# +# THIS IS NOT A GNU MAKEFILE +# This makefile must be compatible with the default make on every (Posix) +# target platform, e.g. BSD make. 
+# + +INSTALL=@INSTALL@ +INSTALL_DATA=@INSTALL_DATA@ +INSTALL_PROGRAM=@INSTALL_PROGRAM@ +MAKE=@MAKE@ +MPS_OS_NAME=@MPS_OS_NAME@ +MPS_ARCH_NAME=@MPS_ARCH_NAME@ +MPS_BUILD_NAME=@MPS_BUILD_NAME@ +MPS_TARGET_NAME=$(MPS_OS_NAME)$(MPS_ARCH_NAME)$(MPS_BUILD_NAME) +EXTRA_TARGETS=@EXTRA_TARGETS@ +prefix=$(DESTDIR)@prefix@ +TARGET_OPTS=-C code -f $(MPS_TARGET_NAME).gmk EXTRA_TARGETS="$(EXTRA_TARGETS)" +XCODEBUILD=xcrun xcodebuild -project code/mps.xcodeproj + +all: @BUILD_TARGET@ + +build-via-make: + $(MAKE) $(TARGET_OPTS) + +clean-make-build: + $(MAKE) $(TARGET_OPTS) clean + +install-make-build: make-install-dirs build-via-make + $(INSTALL_DATA) code/mps*.h $(prefix)/include/ + $(INSTALL_DATA) code/$(MPS_TARGET_NAME)/cool/mps.a $(prefix)/lib/libmps-debug.a + $(INSTALL_DATA) code/$(MPS_TARGET_NAME)/hot/mps.a $(prefix)/lib/libmps.a + for PROGRAM in $(EXTRA_TARGETS); do $(INSTALL_PROGRAM) code/$(MPS_TARGET_NAME)/hot/$$PROGRAM $(prefix)/bin/$$PROGRAM; done + +build-via-xcode: + $(XCODEBUILD) -config Debug + $(XCODEBUILD) -config Release + +clean-xcode-build: + $(XCODEBUILD) -config Debug clean + $(XCODEBUILD) -config Release clean + +install-xcode-build: make-install-dirs build-via-xcode + $(INSTALL_DATA) code/mps*.h $(prefix)/include/ + $(INSTALL_DATA) code/xc/Debug/libmps.a $(prefix)/lib/libmps-debug.a + $(INSTALL_DATA) code/xc/Release/libmps.a $(prefix)/lib/libmps.a + for PROGRAM in $(EXTRA_TARGETS); do $(INSTALL_PROGRAM) code/xc/Release/$$PROGRAM $(prefix)/bin/$$PROGRAM; done + +Makefile: Makefile.in config.status + ./config.status Makefile + +clean: @CLEAN_TARGET@ + +config.status: configure + +configure: configure.ac + autoreconf -vif + +distclean: clean + rm -rf autom4te.cache/ config.log config.status Makefile + +make-install-dirs: + mkdir -p $(prefix)/bin + mkdir -p $(prefix)/lib + mkdir -p $(prefix)/include + +install: @INSTALL_TARGET@ + +test-make-build: + $(MAKE) $(TARGET_OPTS) testci testratio testscheme + $(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool 
clean testansi + $(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool CFLAGS="-DCONFIG_POLL_NONE" clean testpollnone + +test-xcode-build: + $(XCODEBUILD) -config Debug -target testci + $(XCODEBUILD) -config Release -target testci + +test: @TEST_TARGET@ + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2012-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/NEWS b/mps/NEWS new file mode 120000 index 00000000000..6bf8922f3b5 --- /dev/null +++ b/mps/NEWS @@ -0,0 +1 @@ +manual/source/release.rst \ No newline at end of file diff --git a/mps/README b/mps/README new file mode 120000 index 00000000000..0d79d56d9fb --- /dev/null +++ b/mps/README @@ -0,0 +1 @@ +readme.txt \ No newline at end of file diff --git a/mps/code/.gitignore b/mps/code/.gitignore new file mode 100644 index 00000000000..ff483930b8a --- /dev/null +++ b/mps/code/.gitignore @@ -0,0 +1,63 @@ +# code/.p4ignore -- Perforce files to ignore list +# $Id$ +# Make output +anangc +ananll +ananmv +fri3gc +fri3ll +fri6gc +fri6ll +lia6gc +lia6ll +lii3gc +lii6gc +lii6ll +w3i3mv +w3i6mv +xca6ll +xci3gc +xci3ll +xci6gc +xci6ll +# Visual Studio junk +Debug +Release +*.filters +*.user +*.suo +# Telemetry event logs +mpsio*.log +mpsio*.sql +mpsio*.txt +# Build products +*.o +*.obj +*.a +*.so +*.lib +*.exe +a.out +core +# Xcode junk +xc +mps.xcodeproj/xcuserdata +mps.xcodeproj/project.xcworkspace +tags +# Temporary files +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +# Mac OS X Finder turds +.DS_Store +# Emacs backups +*~ +# GNU make dependencies +*/*/*.d diff --git a/mps/code/.p4ignore b/mps/code/.p4ignore new file mode 120000 index 00000000000..3e4e48b0b5f --- /dev/null +++ b/mps/code/.p4ignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/mps/code/abq.c b/mps/code/abq.c new file mode 100644 index 00000000000..8ef18995ea3 --- /dev/null +++ b/mps/code/abq.c @@ -0,0 +1,330 @@ +/* abq.c: QUEUE IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: A fixed-length FIFO queue. 
+ * + * .design: + */ + +#include "meter.h" +#include "abq.h" +#include "mpm.h" + +SRCID(abq, "$Id$"); + + +/* Private prototypes */ + +static Size ABQQueueSize(Count elements, Size elementSize); +static Index ABQNextIndex(ABQ abq, Index index); +static void *ABQElement(ABQ abq, Index index); + + +/* Methods */ + +/* ABQInit -- Initialize an ABQ + * + * elements is the number of elements the queue can hold + */ +Res ABQInit(Arena arena, ABQ abq, void *owner, Count elements, Size elementSize) +{ + void *p; + Res res; + + AVERT(Arena, arena); + AVER(abq != NULL); + AVER(elements > 0); + + /* Allocate a dummy extra element in order to be able to distinguish + "empty" from "full" */ + elements = elements + 1; + + res = ControlAlloc(&p, arena, ABQQueueSize(elements, elementSize)); + if (res != ResOK) + return res; + + abq->elements = elements; + abq->elementSize = elementSize; + abq->in = 0; + abq->out = 0; + abq->queue = p; + + METER_INIT(abq->push, "push", owner); + METER_INIT(abq->pop, "pop", owner); + METER_INIT(abq->peek, "peek", owner); + METER_INIT(abq->delete, "delete", owner); + + abq->sig = ABQSig; + + AVERT(ABQ, abq); + return ResOK; +} + + +/* ABQCheck -- validate an ABQ */ +Bool ABQCheck(ABQ abq) +{ + CHECKS(ABQ, abq); + CHECKL(abq->elements > 0); + CHECKL(abq->elementSize > 0); + CHECKL(abq->in < abq->elements); + CHECKL(abq->out < abq->elements); + CHECKL(abq->queue != NULL); + + return TRUE; +} + + +/* ABQFinish -- finish an ABQ */ +void ABQFinish(Arena arena, ABQ abq) +{ + AVERT(Arena, arena); + AVERT(ABQ, abq); + + METER_EMIT(&abq->push); + METER_EMIT(&abq->pop); + METER_EMIT(&abq->peek); + METER_EMIT(&abq->delete); + ControlFree(arena, abq->queue, ABQQueueSize(abq->elements, abq->elementSize)); + + abq->elements = 0; + abq->queue = NULL; + + abq->sig = SigInvalid; +} + + +/* ABQPush -- push an element onto the tail of the ABQ */ +Bool ABQPush(ABQ abq, void *element) +{ + AVERT(ABQ, abq); + + METER_ACC(abq->push, ABQDepth(abq)); + + if (ABQIsFull(abq)) 
+ return FALSE; + + (void)mps_lib_memcpy(ABQElement(abq, abq->in), element, abq->elementSize); + abq->in = ABQNextIndex(abq, abq->in); + + AVERT(ABQ, abq); + return TRUE; +} + + +/* ABQPop -- pop an element from the head of the ABQ */ +Bool ABQPop(ABQ abq, void *elementReturn) +{ + AVER(elementReturn != NULL); + AVERT(ABQ, abq); + + METER_ACC(abq->pop, ABQDepth(abq)); + + if (ABQIsEmpty(abq)) + return FALSE; + + (void)mps_lib_memcpy(elementReturn, ABQElement(abq, abq->out), abq->elementSize); + + abq->out = ABQNextIndex(abq, abq->out); + + AVERT(ABQ, abq); + return TRUE; +} + + +/* ABQPeek -- peek at the head of the ABQ */ +Bool ABQPeek(ABQ abq, void *elementReturn) +{ + AVER(elementReturn != NULL); + AVERT(ABQ, abq); + + METER_ACC(abq->peek, ABQDepth(abq)); + + if (ABQIsEmpty(abq)) + return FALSE; + + (void)mps_lib_memcpy(elementReturn, ABQElement(abq, abq->out), abq->elementSize); + + /* Identical to pop, but don't increment out */ + + AVERT(ABQ, abq); + return TRUE; +} + + +/* ABQDescribe -- Describe an ABQ */ +Res ABQDescribe(ABQ abq, ABQDescribeElement describeElement, mps_lib_FILE *stream, Count depth) +{ + Res res; + Index index; + + if (!TESTT(ABQ, abq)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "ABQ $P {\n", (WriteFP)abq, + " elements $U\n", (WriteFU)abq->elements, + " elementSize $W\n", (WriteFW)abq->elementSize, + " in $U\n", (WriteFU)abq->in, + " out $U\n", (WriteFU)abq->out, + " queue:\n", + NULL); + if(res != ResOK) + return res; + + for (index = abq->out; index != abq->in; ) { + res = (*describeElement)(ABQElement(abq, index), stream, depth + 2); + if(res != ResOK) + return res; + index = ABQNextIndex(abq, index); + } + + METER_WRITE(abq->push, stream, depth + 2); + METER_WRITE(abq->pop, stream, depth + 2); + METER_WRITE(abq->peek, stream, depth + 2); + METER_WRITE(abq->delete, stream, depth + 2); + + res = WriteF(stream, depth, "} ABQ $P\n", (WriteFP)abq, NULL); + if(res != ResOK) + return res; + + 
return ResOK; +} + + +/* ABQIsEmpty -- Is an ABQ empty? */ +Bool ABQIsEmpty(ABQ abq) +{ + AVERT(ABQ, abq); + + return abq->out == abq->in; +} + + +/* ABQIsFull -- Is an ABQ full? */ +Bool ABQIsFull(ABQ abq) +{ + AVERT(ABQ, abq); + + return ABQNextIndex(abq, abq->in) == abq->out; +} + + +/* ABQDepth -- return the number of elements in an ABQ */ +Count ABQDepth(ABQ abq) +{ + Index out, in; + + AVERT(ABQ, abq); + out = abq->out; + in = abq->in; + + if (in >= out) + return in - out; + else + return in + abq->elements - out; +} + + +/* ABQIterate -- call 'visitor' for each element in an ABQ */ +void ABQIterate(ABQ abq, ABQVisitor visitor, void *closure) +{ + Index copy, index, in; + + AVERT(ABQ, abq); + AVER(FUNCHECK(visitor)); + + copy = abq->out; + index = abq->out; + in = abq->in; + + while (index != in) { + void *element = ABQElement(abq, index); + Bool delete = FALSE; + Bool cont; + cont = (*visitor)(&delete, element, closure); + AVERT(Bool, cont); + AVERT(Bool, delete); + if (!delete) { + if (copy != index) + (void)mps_lib_memcpy(ABQElement(abq, copy), element, abq->elementSize); + copy = ABQNextIndex(abq, copy); + } + index = ABQNextIndex(abq, index); + if (!cont) + break; + } + + /* If any elements were deleted, need to copy remainder of queue. 
*/ + if (copy != index) { + while (index != in) { + (void)mps_lib_memcpy(ABQElement(abq, copy), ABQElement(abq, index), + abq->elementSize); + copy = ABQNextIndex(abq, copy); + index = ABQNextIndex(abq, index); + } + abq->in = copy; + } + + AVERT(ABQ, abq); +} + + +/* ABQQueueSize -- calculate the storage required for the vector to + store the elements */ +static Size ABQQueueSize(Count elements, Size elementSize) +{ + return (Size)(elements * elementSize); +} + + +/* ABQNextIndex -- calculate the next index into the queue vector from + the current one */ +static Index ABQNextIndex(ABQ abq, Index index) +{ + Index next = index + 1; + if (next == abq->elements) + next = 0; + return next; +} + +/* ABQElement -- return pointer to the index'th element in the queue + vector. */ +static void *ABQElement(ABQ abq, Index index) +{ + return PointerAdd(abq->queue, index * abq->elementSize); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/abq.h b/mps/code/abq.h new file mode 100644 index 00000000000..5e7a5d2f698 --- /dev/null +++ b/mps/code/abq.h @@ -0,0 +1,90 @@ +/* abq.h: QUEUE INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: A fixed-length FIFO queue. + * + * .design: + */ + +#ifndef abq_h +#define abq_h + +#include "meter.h" +#include "mpm.h" + + +/* Signatures */ + +#define ABQSig ((Sig)0x519AB099) /* SIGnature ABQ */ + + +/* Prototypes */ + +typedef struct ABQStruct *ABQ; +typedef Res (*ABQDescribeElement)(void *element, mps_lib_FILE *stream, Count depth); +typedef Bool (*ABQVisitor)(Bool *deleteReturn, void *element, void *closure); + +extern Res ABQInit(Arena arena, ABQ abq, void *owner, Count elements, Size elementSize); +extern Bool ABQCheck(ABQ abq); +extern void ABQFinish(Arena arena, ABQ abq); +extern Bool ABQPush(ABQ abq, void *element); +extern Bool ABQPop(ABQ abq, void *elementReturn); +extern Bool ABQPeek(ABQ abq, void *elementReturn); +extern Res ABQDescribe(ABQ abq, ABQDescribeElement describeElement, mps_lib_FILE *stream, Count depth); +extern Bool ABQIsEmpty(ABQ abq); +extern Bool ABQIsFull(ABQ abq); +extern Count ABQDepth(ABQ abq); +extern void ABQIterate(ABQ abq, ABQVisitor visitor, void *closure); + + +/* Types */ + +typedef struct ABQStruct +{ + Sig sig; /* design.mps.sig.field */ + Count elements; + Size elementSize; + Index in; + Index out; + 
void *queue; + + /* Meter queue depth at each operation */ + METER_DECL(push) + METER_DECL(pop) + METER_DECL(peek) + METER_DECL(delete) +} ABQStruct; + +#endif /* abq_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/abqtest.c b/mps/code/abqtest.c new file mode 100644 index 00000000000..8ebb7813546 --- /dev/null +++ b/mps/code/abqtest.c @@ -0,0 +1,210 @@ +/* abqtest.c: AVAILABLE BLOCK QUEUE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "abq.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpscmfs.h" +#include "mpstd.h" +#include "testlib.h" + +#include /* printf */ + + +SRCID(abqtest, "$Id$"); + +static mps_pool_t pool; +static ABQStruct abq; /* the ABQ which we will use */ +static Size abqSize; /* the size of the current ABQ */ + +#define ABQ_SIZE 10 +#define TEST_ITER 10000 + + +static unsigned long abqRnd(unsigned long n) +{ + return rnd()%n; +} + + +static unsigned pushee = 1; +static unsigned popee = 1; +static unsigned deleted = 0; + + +typedef struct TestBlockStruct *TestBlock; + +typedef struct TestBlockStruct +{ + TestBlock next; + unsigned id; + Addr base; + Addr limit; +} TestBlockStruct; + + +static TestBlock testBlocks = NULL; + + +static TestBlock CreateTestBlock(unsigned no) +{ + TestBlock b; + mps_addr_t p; + + die(mps_alloc(&p, pool, sizeof(TestBlockStruct)), "alloc"); + + b = p; + b->next = testBlocks; + b->id = no; + b->base = 0; + b->limit = 0; + + testBlocks = b; + + return b; +} + + +static void DestroyTestBlock(TestBlock b) +{ + if (b == testBlocks) + testBlocks = b->next; + else { + TestBlock prev; + + for (prev = testBlocks; prev != 0; prev = prev->next) + if (prev->next == b) { + prev->next = b->next; + break; + } + } + + mps_free(pool, b, sizeof(TestBlockStruct)); +} + +typedef struct TestClosureStruct *TestClosure; +typedef struct TestClosureStruct { + TestBlock b; + Res res; +} TestClosureStruct; + +static Bool TestDeleteCallback(Bool *deleteReturn, void *element, + void *closure) +{ + TestBlock *a = (TestBlock *)element; + TestClosure cl = (TestClosure)closure; + if (*a == cl->b) { + *deleteReturn = TRUE; + cl->res = ResOK; + } else { + *deleteReturn = FALSE; + } + return TRUE; +} + + +static void step(void) +{ + TestBlock a; + + switch (abqRnd(9)) { + case 0: case 1: case 2: case 3: + push: + a = CreateTestBlock(pushee); + if (!ABQPush(&abq, &a)) { + goto pop; + } + pushee++; + break; + case 5: case 6: case 7: case 8: + pop: + if (!ABQPop(&abq, 
&a)) { + goto push; + } + if (popee == deleted) { + popee++; + deleted = 0; + } + cdie(a->id == popee, "pop"); + popee++; + DestroyTestBlock(a); + break; + default: + if (!deleted && (pushee > popee)) { + TestBlock b; + TestClosureStruct cl; + deleted = (unsigned)abqRnd (pushee - popee) + popee; + for (b = testBlocks; b != NULL; b = b->next) + if (b->id == deleted) + break; + cdie(b != NULL, "found to delete"); + cl.b = b; + cl.res = ResFAIL; + ABQIterate(&abq, TestDeleteCallback, &cl); + cdie(cl.res == ResOK, "ABQIterate"); + } + } +} + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + int i; + + testlib_init(argc, argv); + + abqSize = 0; + + die(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none), + "mps_arena_create"); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, sizeof(TestBlockStruct)); + die(mps_pool_create_k(&pool, arena, mps_class_mfs(), args), "pool_create"); + } MPS_ARGS_END(args); + + die(ABQInit((Arena)arena, &abq, NULL, ABQ_SIZE, sizeof(TestBlock)), + "ABQInit"); + + abqSize = ABQ_SIZE; + + for (i = 0; i < TEST_ITER; i++) { + step(); + } + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/mps/code/addrobj.c b/mps/code/addrobj.c
new file mode 100644
index 00000000000..7c98128a8d4
--- /dev/null
+++ b/mps/code/addrobj.c
@@ -0,0 +1,240 @@
+/* addrobj.c: BASE ADDRESS FROM INTERIOR POINTER TEST
+ *
+ * Copyright (c) 2023 Ravenbrook Limited. See end of file for license.
+ *
+ * .overview This test is for mps_addr_object(). Its intention is to
+ * verify that the function returns the appropriate base pointer to an
+ * object when provided with an interior pointer. It also tests that the
+ * function fails appropriately when provided with a pointer to
+ * unmanaged memory, or to an object in a pool that doesn't support this
+ * feature.
+ *
+ * .limitations Objects that have been moved should cause the function to
+ * fail with MPS_RES_FAIL, however this is not tested. It could be tested if
+ * a testbench deliberately created a forwarding object, however this might
+ * confuse a pool that does automatic garbage collection such as AMC or AMCZ,
+ * so any such test would need to be designed to handle that.
+ * This test only examines behaviour in AMCZ and MVFF pools, i.e.
 A pool (AMCZ)
+ * which currently implements mps_addr_object() and one (MVFF) that doesn't.
+ */
+
+#include "mps.h"
+#include "testlib.h"
+#include "fmtdy.h"
+#include "fmtdytst.h"
+#include "mpsavm.h"
+#include "mpscamc.h"
+#include "mpscmvff.h"
+#include "stdio.h"
+#include <stdlib.h>
+
+/* Define an object size to allocate. The size chosen doesn't matter much, except that this testbench assumes
+   that the object is large enough that a pointer could point to the interior of the object, without also
+   pointing to the base pointer of the object at the same time. For char pointers, this is probably 2 bytes.
+   Since we are using the Dylan library, we define the size of the object in terms of Dylan slots. See
+   fmtdytst.c for details of the Dylan object structure.*/
+#define N_SLOT_TESTOBJ 100
+
+static void test_main(void)
+{
+  mps_arena_t arena;
+  mps_pool_t amcz_pool, mvff_pool;
+  mps_ap_t obj_ap;
+  mps_fmt_t obj_fmt;
+  mps_root_t testobj_root;
+  mps_res_t res;
+  /* In another testbench (extcon.c) we observed unreliable failures to do with registering the cold end
+     of the stack. See GitHub issue #210
+     <https://github.com/Ravenbrook/mps/issues/210>. For now, we
+     declare this as a separate root. */
+  static mps_addr_t testobj;
+  mps_addr_t out, in;
+
+  /* Create arena */
+  die(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none), "mps_arena_create_k");
+
+
+  /* INTRO TO TESTS: There are several tests. They test the expected "normal" operation of the
+     function, using an interior pointer, also corner cases where the interior pointer equals the
+     base pointer, where it equals the limit pointer. We also test asking about an address in unmanaged
+     memory, and about an address in a pool which currently does not support mps_addr_object. If you write
+     more tests, describe them here.*/
+
+
+  /* TEST 1: Test using an interior pointer in an object in an AMCZ pool.
+     At the time of writing this test, the AMCZ pool is the only pool where
+     there exists a requirement to provide base addresses from interior pointers.
+ Currently, the AMCZ pool (and by extension, the AMC pool which shares the same + module as AMCZ) is the only pool for which mps_addr_object is implemented */ + + /* Use the dylan format for convenience */ + die(dylan_fmt(&obj_fmt, arena), "dylan_fmt"); + + /* Create the pool */ + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, obj_fmt); + die(mps_pool_create_k(&amcz_pool, arena, mps_class_amcz(), args), "mps_pool_create_k amcz"); + } MPS_ARGS_END(args); + + /* Create an area of ambiguous pointers to keep the object alive and in place, in this case + the area only contains room for a single reference since we are only using one object to test */ + die(mps_root_create_area(&testobj_root, arena, + mps_rank_ambig(), (mps_rm_t)0, + &testobj, &testobj+1, + mps_scan_area, NULL), + "mps_root_create_area"); + + /* Create the allocation point */ + die(mps_ap_create_k(&obj_ap, amcz_pool, mps_args_none), "mps_ap_create_k"); + + /* Make a Dylan object, size = (N_SLOT_TESTOBJ+2) * sizeof(mps_word_t). + (See fmtdytst.c for size calculation) */ + { + /* Because make_dylan_vector returns its pointer-to-object as an mps_word_t rather than an + mps_addr_t, and commits the object, we need to somehow safely allocate our object without + type punning and without risking that our object be destroyed. + Rather than redefine our reference table with type mps_word_t, which hides the intention of the table, + park the arena to disable garbage collection. Allocate our dylan object on the (unregistered) stack + storing its address in an mps_word_t. Then store this mps_word_t as an mps_addr_t in our reference + table, and release the arena since our object is now safely pinned. + Another approach would be to create another static registered root for ambiguous references of type + mps_word_t and then copy to the mps_addr_t root, which would avoid needing to park the arena. 
+ */ + mps_word_t p_word; + mps_arena_park(arena); + die(make_dylan_vector(&p_word, obj_ap, N_SLOT_TESTOBJ), "make_dylan_vector"); + /* If we hadn't parked the arena, our vector might have been GC'd here */ + testobj = (mps_addr_t)p_word; + mps_arena_release(arena); + } + + /* Construct a pointer to roughly halfway inside the object */ + in = (mps_addr_t)((char *)testobj + (N_SLOT_TESTOBJ/2) * sizeof(mps_word_t)); + + /* Ensure that this is an interior pointer, and not the base pointer, + since we want to make sure we are testing with a true interior pointer and not + one that also happens to be the base pointer. This Insist is intended to protect + against the testbench losing its ability to test "true" interior pointers (i.e. ones + which don't match the base pointer) if the test object sizes were changed to be very + small. Note that we don't currently consider the "limit" of the object as a corner case + (so we don't Insist(in != limit) ) but we do consider limit+1, i.e. the pointer to the + next object to be a corner case. This test could be updated to consider in == limit as a + corner case. 
*/ + Insist(in > testobj); + + /* Do Test */ + res = mps_addr_object(&out, arena, in); + Insist(out == testobj); + Insist(res == MPS_RES_OK); + printf("Interior pointer input: passed\n"); + + + /* TEST 2: Test using the base pointer itself as an input*/ + + in = testobj; + + /* Do Test */ + res = mps_addr_object(&out, arena, in); + Insist(out == testobj); + Insist(res == MPS_RES_OK); + printf("Base pointer input: passed\n"); + + + + /* TEST 3: Test using a pointer one-off-the-end of the object*/ + + in = (mps_addr_t)((char *)testobj + (N_SLOT_TESTOBJ + 2) * sizeof(mps_word_t)); + + /* Do Test */ + res = mps_addr_object(&out, arena, in); + Insist(res == MPS_RES_FAIL); + printf("Pointer to next object input: passed\n"); + + + /* Clean up from above tests */ + mps_root_destroy(testobj_root); + mps_ap_destroy(obj_ap); + mps_pool_destroy(amcz_pool); + mps_fmt_destroy(obj_fmt); + + + /* TEST 4: Test using a pointer in unmanaged memory */ + + /* Use malloc to allocate non-mps-managed memory on the heap */ + in = malloc(sizeof(mps_word_t)); + Insist(NULL != in); + + /* Do the test */ + res = mps_addr_object(&out, arena, in); + + /* Expect MPS to fail to find a base pointer for addresses not in managed memory */ + Insist(res == MPS_RES_FAIL); + printf("Pointer to unmanaged memory input: passed\n"); + + /* clean up from this test */ + if (NULL != in) + free(in); + + + /* TEST 5: Test using a pointer in a pool which currently doesn't implement mps_addr_object */ + + /* Create mvff pool for which mps_addr_object is not implemented */ + die(mps_pool_create_k(&mvff_pool, arena, mps_class_mvff(), mps_args_none), "mps_pool_create_k mvff"); + + /* allocate an object (just some memory) in this pool */ + die(mps_alloc(&in, mvff_pool, sizeof(mps_word_t)), "mps_alloc"); + + /* Do the test */ + res = mps_addr_object(&out, arena, in); + + Insist(res == MPS_RES_UNIMPL); + printf("Pointer to object in pool where mps_addr_object not implemented: passed\n"); + + + /* If more tests are added 
here, briefly describe them above under "INTRO TO TESTS" comment */ + + /* Final clean up */ + mps_free(mvff_pool, in, sizeof(mps_word_t)); + mps_pool_destroy(mvff_pool); + mps_arena_destroy(arena); +} + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + + test_main(); + + printf("%s: Conclusion, failed to find any defects.\n", argv[0]); + + return 0; +} + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2022-2023 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/airtest.c b/mps/code/airtest.c new file mode 100644 index 00000000000..90dc766c7c2 --- /dev/null +++ b/mps/code/airtest.c @@ -0,0 +1,191 @@ +/* airtest.c: AMBIGUOUS INTERIOR REFERENCE TEST + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * .overview: This test case creates a bunch of vectors, registers + * them for finalization, and then discards the base pointers to those + * objects, keeping only ambiguous interior references to the vector + * entries in the stack-allocated table s. + * + * .options: The test has two options: + * + * 'interior' is the value passed as MPS_KEY_INTERIOR when creating + * the AMC pool. If TRUE, interior pointers must keep objects alive, + * and so if any of these objects are finalized, the test fails. If + * FALSE, interior pointers do not keep objects alive, so it is likely + * that all the objects will be finalized. + * + * 'stack' is TRUE if the C stack is registered as a root. (If FALSE, + * we register the table of interior pointers as an ambiguous root.) + * + * .fail.lii6ll: The test case passes on most platforms with + * interior=FALSE and stack=TRUE (that is, all vectors get finalized), + * but fails on lii6ll in variety HOT. Rather than struggle to defeat + * the Clang optimizer, we choose not to test in this configuration. + * In any case, the MPS does not guarantee anything about timely + * finalization . 
+ */ + +#include "mps.h" +#include "mpsavm.h" +#include "mpscamc.h" +#include "mpslib.h" +#include "testlib.h" +#include "fmtscheme.h" + +#define OBJ_LEN (1u << 4) +#define OBJ_COUNT 10 + +static void test_air(int interior, int stack) +{ + size_t n_finalized = 0; + size_t i, j; + obj_t *s[OBJ_COUNT] = {0}; + mps_root_t root = NULL; + if (!stack) { + mps_addr_t *p = (void *)s; + die(mps_root_create_table(&root, scheme_arena, mps_rank_ambig(), 0, p, + OBJ_COUNT), "mps_root_create_table"); + } + mps_message_type_enable(scheme_arena, mps_message_type_finalization()); + for (j = 0; j < OBJ_COUNT; ++j) { + obj_t n = scheme_make_integer(obj_ap, (long)j); + obj_t obj = scheme_make_vector(obj_ap, OBJ_LEN, n); + mps_addr_t ref = obj; + mps_finalize(scheme_arena, &ref); + s[j] = obj->vector.vector; + } + for (i = 1; i < OBJ_LEN; ++i) { + obj_t n = scheme_make_integer(obj_ap, (long)i); + mps_message_t msg; + for (j = 0; j + 1 < OBJ_COUNT; ++j) { + *++s[j] = n; + } + mps_arena_collect(scheme_arena); + mps_arena_release(scheme_arena); + if (mps_message_get(&msg, scheme_arena, mps_message_type_finalization())) { + mps_addr_t ref; + mps_message_finalization_ref(&ref, scheme_arena, msg); + ++ n_finalized; + if (interior) { + obj_t o; + o = ref; + error("wrongly finalized vector %ld at %p", + o->vector.vector[0]->integer.integer, (void *)o); + } + } + } + if (!interior && n_finalized < OBJ_COUNT) { + error("only finalized %"PRIuLONGEST" out of %"PRIuLONGEST" vectors.", + (ulongest_t)n_finalized, (ulongest_t)OBJ_COUNT); + } + if (!stack) { + mps_root_destroy(root); + } +} + +static mps_gen_param_s obj_gen_params[] = { + { 150, 0.85 }, + { 170, 0.45 } +}; + +static void test_main(void *marker, int interior, int stack) +{ + mps_res_t res; + mps_chain_t obj_chain; + mps_fmt_t obj_fmt; + mps_thr_t thread; + mps_root_t reg_root = NULL; + + res = mps_arena_create_k(&scheme_arena, mps_arena_class_vm(), mps_args_none); + if (res != MPS_RES_OK) + error("Couldn't create arena"); + + res = 
mps_chain_create(&obj_chain, scheme_arena,
+                         sizeof(obj_gen_params) / sizeof(*obj_gen_params),
+                         obj_gen_params);
+  if (res != MPS_RES_OK)
+    error("Couldn't create obj chain");
+
+  scheme_fmt(&obj_fmt);
+
+  MPS_ARGS_BEGIN(args) {
+    MPS_ARGS_ADD(args, MPS_KEY_CHAIN, obj_chain);
+    MPS_ARGS_ADD(args, MPS_KEY_FORMAT, obj_fmt);
+    MPS_ARGS_ADD(args, MPS_KEY_INTERIOR, interior);
+    die(mps_pool_create_k(&obj_pool, scheme_arena, mps_class_amc(), args),
+        "mps_pool_create_k");
+  } MPS_ARGS_END(args);
+
+  res = mps_ap_create_k(&obj_ap, obj_pool, mps_args_none);
+  if (res != MPS_RES_OK)
+    error("Couldn't create obj allocation point");
+
+  res = mps_thread_reg(&thread, scheme_arena);
+  if (res != MPS_RES_OK)
+    error("Couldn't register thread");
+
+  if (stack) {
+    res = mps_root_create_thread(&reg_root, scheme_arena, thread, marker);
+    if (res != MPS_RES_OK)
+      error("Couldn't create root");
+  }
+
+  test_air(interior, stack);
+
+  mps_arena_park(scheme_arena);
+  if (stack)
+    mps_root_destroy(reg_root);
+  mps_thread_dereg(thread);
+  mps_ap_destroy(obj_ap);
+  mps_pool_destroy(obj_pool);
+  mps_chain_destroy(obj_chain);
+  mps_fmt_destroy(obj_fmt);
+  mps_arena_destroy(scheme_arena);
+}
+
+int main(int argc, char *argv[])
+{
+  void *marker = &marker;
+
+  testlib_init(argc, argv);
+
+  test_main(marker, TRUE, TRUE);
+  test_main(marker, TRUE, FALSE);
+  /* not test_main(marker, FALSE, TRUE) -- see .fail.lii6ll. */
+  test_main(marker, FALSE, FALSE);
+
+  printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
+  return 0;
+}
+
+
+/* C. COPYRIGHT AND LICENSE
+ *
+ * Copyright (C) 2014-2020 Ravenbrook Limited <https://www.ravenbrook.com/>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * 2. 
Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/mps/code/amcss.c b/mps/code/amcss.c
new file mode 100644
index 00000000000..018211054bc
--- /dev/null
+++ b/mps/code/amcss.c
@@ -0,0 +1,385 @@
+/* amcss.c: POOL CLASS AMC STRESS TEST
+ *
+ * $Id$
+ * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license.
+ * Portions copyright (C) 2002 Global Graphics Software.
+ */
+
+#include "fmtdy.h"
+#include "fmtdytst.h"
+#include "testlib.h"
+#include "mpm.h"
+#include "mpslib.h"
+#include "mpscamc.h"
+#include "mpsavm.h"
+#include "mpstd.h"
+#include "mps.h"
+#include "mpslib.h"
+
+#include <stdio.h> /* fflush, printf, putchar */
+
+
+/* These values have been tuned in the hope of getting one dynamic collection.
*/ +#define testArenaSIZE ((size_t)1000*1024) +#define gen1SIZE ((size_t)20) +#define gen2SIZE ((size_t)85) +#define avLEN 3 +#define exactRootsCOUNT 180 +#define ambigRootsCOUNT 50 +#define genCOUNT 2 +#define collectionsCOUNT 37 +#define rampSIZE 9 +#define initTestFREQ 6000 + +/* testChain -- generation parameters for the test */ + +static mps_gen_param_s testChain[genCOUNT] = { + { gen1SIZE, 0.85 }, { gen2SIZE, 0.45 } }; + + +/* objNULL needs to be odd so that it's ignored in exactRoots. */ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) + + +static mps_arena_t arena; +static mps_ap_t ap; +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; +static size_t scale; /* Overall scale factor. */ +static unsigned long nCollsStart; +static unsigned long nCollsDone; + + +/* report -- report statistics from any messages */ + +static void report(void) +{ + mps_message_type_t type; + + while(mps_message_queue_type(&type, arena)) { + mps_message_t message; + + cdie(mps_message_get(&message, arena, type), "message get"); + + if (type == mps_message_type_gc_start()) { + nCollsStart += 1; + printf("\n{\n Collection %lu started. 
Because:\n", nCollsStart); + printf(" %s\n", mps_message_gc_start_why(arena, message)); + printf(" clock: %"PRIuLONGEST"\n", (ulongest_t)mps_message_clock(arena, message)); + + } else if (type == mps_message_type_gc()) { + size_t live, condemned, not_condemned; + + nCollsDone += 1; + live = mps_message_gc_live_size(arena, message); + condemned = mps_message_gc_condemned_size(arena, message); + not_condemned = mps_message_gc_not_condemned_size(arena, message); + + printf("\n Collection %lu finished:\n", nCollsDone); + printf(" live %"PRIuLONGEST"\n", (ulongest_t)live); + printf(" condemned %"PRIuLONGEST"\n", (ulongest_t)condemned); + printf(" not_condemned %"PRIuLONGEST"\n", (ulongest_t)not_condemned); + printf(" clock: %"PRIuLONGEST"\n", (ulongest_t)mps_message_clock(arena, message)); + printf("}\n"); + } else { + cdie(0, "unknown message type"); + break; + } + + mps_message_discard(arena, message); + } +} + + +/* make -- create one new object */ + +static mps_addr_t make(size_t rootsCount) +{ + /* The calls variable is useful when debugging to stop the debugging + after a certain number of allocations using a debugger + "watchpoint". Allocations are a good "clock tick" for this test. + The test itself doesn't use the variable, so we declare it + volatile, which also forces updates into memory. 
*/ + static volatile unsigned long calls = 0; + size_t length = rnd() % (scale * avLEN); + size_t size = (length+2) * sizeof(mps_word_t); + mps_addr_t p; + mps_res_t res; + ++ calls; + + do { + MPS_RESERVE_BLOCK(res, p, ap, size); + if (res) { + ArenaDescribe(arena, mps_lib_get_stderr(), 4); + die(res, "MPS_RESERVE_BLOCK"); + } + res = dylan_init(p, size, exactRoots, rootsCount); + if (res) + die(res, "dylan_init"); + } while(!mps_commit(ap, p, size)); + + return p; +} + + +/* test_stepper -- stepping function for walk */ + +static void test_stepper(mps_addr_t object, mps_fmt_t fmt, mps_pool_t pool, + void *p, size_t s) +{ + testlib_unused(object); testlib_unused(fmt); testlib_unused(pool); + testlib_unused(s); + (*(unsigned long *)p)++; +} + + +/* area_scan -- area scanning function for mps_pool_walk */ + +static mps_res_t area_scan(mps_ss_t ss, void *base, void *limit, void *closure) +{ + unsigned long *count = closure; + mps_res_t res; + while (base < limit) { + mps_addr_t prev = base; + ++ *count; + res = dylan_scan1(ss, &base); + if (res != MPS_RES_OK) return res; + Insist(prev < base); + } + Insist(base == limit); + return MPS_RES_OK; +} + + +/* test -- the body of the test */ + +static void test(mps_pool_class_t pool_class, size_t roots_count) +{ + mps_fmt_t format; + mps_chain_t chain; + mps_root_t exactRoot, ambigRoot; + unsigned long objs; size_t i; + mps_word_t collections, rampSwitch; + mps_alloc_pattern_t ramp = mps_alloc_pattern_ramp(); + int ramping; + mps_ap_t busy_ap; + mps_addr_t busy_init; + mps_pool_t pool; + int described = 0; + + die(dylan_fmt(&format, arena), "fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + + die(mps_pool_create(&pool, arena, pool_class, format, chain), + "pool_create(amc)"); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + die(mps_ap_create(&busy_ap, pool, mps_rank_exact()), "BufferCreate 2"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = 
objNULL; + for(i = 0; i < ambigRootsCOUNT; ++i) + ambigRoots[i] = rnd_addr(); + + die(mps_root_create_table_masked(&exactRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + die(mps_root_create_table(&ambigRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], ambigRootsCOUNT), + "root_create_table(ambig)"); + + /* create an ap, and leave it busy */ + die(mps_reserve(&busy_init, busy_ap, 64), "mps_reserve busy"); + + nCollsStart = 0; + nCollsDone = 0; + collections = 0; + rampSwitch = rampSIZE; + die(mps_ap_alloc_pattern_begin(ap, ramp), "pattern begin (ap)"); + die(mps_ap_alloc_pattern_begin(busy_ap, ramp), "pattern begin (busy_ap)"); + ramping = 1; + objs = 0; + while (collections < collectionsCOUNT) { + size_t r; + + report(); + if (collections != nCollsStart) { + if (!described) { + die(ArenaDescribe(arena, mps_lib_get_stdout(), 0), "ArenaDescribe"); + described = TRUE; + } + collections = nCollsStart; + + printf("%lu objects (nCollsStart=%"PRIuLONGEST")\n", objs, + (ulongest_t)collections); + + /* test mps_arena_has_addr */ + { + size_t hitRatio; + unsigned hitsWanted = 4; /* aim for 4 hits (on average) */ + /* [Note: The for-loop condition used to be "i < 4 * hitRatio", + * with "4" an unexplained naked constant. I have now labelled + * it "hitsWanted", as I think that is the intent. RHSK] + */ + + /* how many random addrs must we try, to hit the arena once? */ + hitRatio = (0xfffffffful / mps_arena_committed(arena)); + for (i = 0; i < hitsWanted * hitRatio ; i++) { + /* An exact root maybe in the arena, so add a random 32-bit + * offset to it. We may get no hits if it is objNULL. 
+ */ + mps_addr_t p = (char *)exactRoots[rnd() % exactRootsCOUNT] + + rnd()-0x80000000ul; + if (mps_arena_has_addr(arena, p)) { + printf("%p is in the arena\n", p); + } + } + } + + for (i = 0; i < exactRootsCOUNT; ++i) + cdie(exactRoots[i] == objNULL + || (dylan_check(exactRoots[i]) + && mps_arena_has_addr(arena, exactRoots[i])), + "all roots check"); + cdie(!mps_arena_has_addr(arena, NULL), + "NULL in arena"); + + if (collections == collectionsCOUNT / 2) { + unsigned long count1 = 0, count2 = 0; + mps_arena_park(arena); + mps_arena_formatted_objects_walk(arena, test_stepper, &count1, 0); + die(mps_pool_walk(pool, area_scan, &count2), "mps_pool_walk"); + mps_arena_release(arena); + printf("stepped on %lu objects.\n", count1); + printf("walked %lu objects.\n", count2); + Insist(count1 == count2); + } + if (collections == rampSwitch) { + int begin_ramp = !ramping + || /* Every other time, switch back immediately. */ (collections & 1); + + rampSwitch += rampSIZE; + if (ramping) { + die(mps_ap_alloc_pattern_end(ap, ramp), "pattern end (ap)"); + die(mps_ap_alloc_pattern_end(busy_ap, ramp), "pattern end (busy_ap)"); + ramping = 0; + /* kill half of the roots */ + for(i = 0; i < exactRootsCOUNT; i += 2) { + if (exactRoots[i] != objNULL) { + cdie(dylan_check(exactRoots[i]), "ramp kill check"); + exactRoots[i] = objNULL; + } + } + } + if (begin_ramp) { + die(mps_ap_alloc_pattern_begin(ap, ramp), + "pattern rebegin (ap)"); + die(mps_ap_alloc_pattern_begin(busy_ap, ramp), + "pattern rebegin (busy_ap)"); + ramping = 1; + } + } + } + + r = (size_t)rnd(); + if (r & 1) { + i = (r >> 1) % exactRootsCOUNT; + if (exactRoots[i] != objNULL) + cdie(dylan_check(exactRoots[i]), "dying root check"); + exactRoots[i] = make(roots_count); + if (exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + } else { + i = (r >> 1) % ambigRootsCOUNT; + ambigRoots[(ambigRootsCOUNT-1) - i] = make(roots_count); + /* Create 
random interior pointers */ + ambigRoots[i] = (mps_addr_t)((char *)(ambigRoots[i/2]) + 1); + } + + if (r % initTestFREQ == 0) + *(int*)busy_init = -1; /* check that the buffer is still there */ + + if (objs % 1024 == 0) { + report(); + putchar('.'); + (void)fflush(stdout); + } + + ++objs; + } + + (void)mps_commit(busy_ap, busy_init, 64); + mps_arena_park(arena); + mps_ap_destroy(busy_ap); + mps_ap_destroy(ap); + mps_root_destroy(exactRoot); + mps_root_destroy(ambigRoot); + mps_pool_destroy(pool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); + mps_arena_release(arena); +} + +int main(int argc, char *argv[]) +{ + size_t i, grainSize; + mps_thr_t thread; + + testlib_init(argc, argv); + + scale = (size_t)1 << (rnd() % 6); + for (i = 0; i < genCOUNT; ++i) testChain[i].mps_capacity *= scale; + grainSize = rnd_grain(scale * testArenaSIZE); + printf("Picked scale=%lu grainSize=%lu\n", (unsigned long)scale, (unsigned long)grainSize); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, scale * testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, grainSize); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), "arena_create"); + } MPS_ARGS_END(args); + mps_message_type_enable(arena, mps_message_type_gc()); + mps_message_type_enable(arena, mps_message_type_gc_start()); + die(mps_thread_reg(&thread, arena), "thread_reg"); + test(mps_class_amc(), exactRootsCOUNT); + test(mps_class_amcz(), 0); + mps_thread_dereg(thread); + report(); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/amcsshe.c b/mps/code/amcsshe.c new file mode 100644 index 00000000000..25bbf4a4ee8 --- /dev/null +++ b/mps/code/amcsshe.c @@ -0,0 +1,295 @@ +/* amcsshe.c: POOL CLASS AMC STRESS TEST WITH HEADER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + */ + +#include "fmthe.h" +#include "fmtdytst.h" +#include "testlib.h" +#include "mpslib.h" +#include "mpscamc.h" +#include "mpsavm.h" +#include "mpstd.h" +#include "mps.h" + +#include /* fflush, printf, putchar */ + + +/* These values have been tuned in the hope of getting one dynamic collection. */ +#define headerFACTOR ((float)(20 + headerSIZE) / 20) +/* headerFACTOR measures how much larger objects are compared to fmtdy. 
*/ +#define testArenaSIZE ((size_t)(2000*headerFACTOR)*1024) +#define gen1SIZE ((size_t)(150*headerFACTOR)) +#define gen2SIZE ((size_t)(170*headerFACTOR)) +#define avLEN 3 +#define exactRootsCOUNT 200 +#define ambigRootsCOUNT 50 +#define bogusRootsCOUNT 4096 +#define collectionsCOUNT 37 +#define rampSIZE 9 +#define initTestFREQ 6000 +#define genCOUNT 2 + +/* testChain -- generation parameters for the test */ + +static mps_gen_param_s testChain[genCOUNT] = { + { gen1SIZE, 0.85 }, { gen2SIZE, 0.45 } }; + + +/* objNULL needs to be odd so that it's ignored in exactRoots. */ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) + + +static mps_pool_t pool; +static mps_ap_t ap; +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; +static mps_addr_t bogusRoots[bogusRootsCOUNT]; + +static mps_addr_t make(size_t roots_count) +{ + size_t length = rnd() % (2*avLEN); + size_t size = (length+2) * sizeof(mps_word_t); + mps_addr_t p, userP; + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, p, ap, size + headerSIZE); + if (res) + die(res, "MPS_RESERVE_BLOCK"); + userP = (mps_addr_t)((char*)p + headerSIZE); + res = dylan_init(userP, size, exactRoots, roots_count); + if (res) + die(res, "dylan_init"); + ((int*)p)[0] = realHeader; + ((int*)p)[1] = 0xED0ED; + } while(!mps_commit(ap, p, size + headerSIZE)); + + return userP; +} + + +/* report - report statistics from any terminated GCs */ + +static void report(mps_arena_t arena) +{ + mps_message_t message; + static int nCollections = 0; + + while (mps_message_get(&message, arena, mps_message_type_gc())) { + size_t live, condemned, not_condemned; + + live = mps_message_gc_live_size(arena, message); + condemned = mps_message_gc_condemned_size(arena, message); + not_condemned = mps_message_gc_not_condemned_size(arena, message); + + printf("\nCollection %d finished:\n", ++nCollections); + printf("live %"PRIuLONGEST"\n", (ulongest_t)live); + printf("condemned %"PRIuLONGEST"\n", 
(ulongest_t)condemned); + printf("not_condemned %"PRIuLONGEST"\n", (ulongest_t)not_condemned); + + mps_message_discard(arena, message); + } +} + + +/* test -- the body of the test */ + +static void *test(mps_arena_t arena, mps_pool_class_t pool_class, + size_t roots_count) +{ + mps_fmt_t format; + mps_chain_t chain; + mps_root_t exactRoot, ambigRoot, bogusRoot; + unsigned long objs; size_t i; + mps_word_t collections, rampSwitch; + mps_alloc_pattern_t ramp = mps_alloc_pattern_ramp(); + int ramping; + mps_ap_t busy_ap; + mps_addr_t busy_init; + + die(EnsureHeaderFormat(&format, arena), "fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + + die(mps_pool_create(&pool, arena, pool_class, format, chain), + "pool_create(amc)"); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + die(mps_ap_create(&busy_ap, pool, mps_rank_exact()), "BufferCreate 2"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = objNULL; + for(i = 0; i < ambigRootsCOUNT; ++i) + ambigRoots[i] = rnd_addr(); + + die(mps_root_create_table_masked(&exactRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + die(mps_root_create_table(&ambigRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], ambigRootsCOUNT), + "root_create_table(ambig)"); + die(mps_root_create_table(&bogusRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &bogusRoots[0], bogusRootsCOUNT), + "root_create_table(bogus)"); + + /* create an ap, and leave it busy */ + die(mps_reserve(&busy_init, busy_ap, 64), "mps_reserve busy"); + + collections = 0; + rampSwitch = rampSIZE; + die(mps_ap_alloc_pattern_begin(ap, ramp), "pattern begin (ap)"); + die(mps_ap_alloc_pattern_begin(busy_ap, ramp), "pattern begin (busy_ap)"); + ramping = 1; + objs = 0; + while (collections < collectionsCOUNT) { + mps_word_t c; + size_t r; + + c = mps_collections(arena); + + if (collections != c) { + collections = c; + 
printf("\nCollection %"PRIuLONGEST", %lu objects.\n", + (ulongest_t)c, objs); + report(arena); + for (r = 0; r < exactRootsCOUNT; ++r) { + if (exactRoots[r] != objNULL) + die(HeaderFormatCheck(exactRoots[r]), "wrapper check"); + } + if (collections == rampSwitch) { + int begin_ramp = !ramping + || /* Every other time, switch back immediately. */ (collections & 1); + + rampSwitch += rampSIZE; + if (ramping) { + die(mps_ap_alloc_pattern_end(ap, ramp), "pattern end (ap)"); + die(mps_ap_alloc_pattern_end(busy_ap, ramp), "pattern end (busy_ap)"); + ramping = 0; + /* kill half of the roots */ + for(i = 0; i < exactRootsCOUNT; i += 2) { + if (exactRoots[i] != objNULL) { + die(HeaderFormatCheck(exactRoots[i]), "ramp kill check"); + exactRoots[i] = objNULL; + } + } + } + if (begin_ramp) { + die(mps_ap_alloc_pattern_begin(ap, ramp), + "pattern rebegin (ap)"); + die(mps_ap_alloc_pattern_begin(busy_ap, ramp), + "pattern rebegin (busy_ap)"); + ramping = 1; + } + } + /* fill bogusRoots with variations of a real pointer */ + r = rnd() % exactRootsCOUNT; + if (exactRoots[r] != objNULL) { + char *p = (char*)exactRoots[r]; + + for(i = 0; i < bogusRootsCOUNT; ++i) + bogusRoots[i] = (mps_addr_t)(p + i); + } + } + + r = (size_t)rnd(); + if (r & 1) { + i = (r >> 1) % exactRootsCOUNT; + if (exactRoots[i] != objNULL) + die(HeaderFormatCheck(exactRoots[i]), "wrapper check"); + exactRoots[i] = make(roots_count); + if (exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + } else { + i = (r >> 1) % ambigRootsCOUNT; + ambigRoots[(ambigRootsCOUNT-1) - i] = make(roots_count); + /* Create random interior pointers */ + ambigRoots[i] = (mps_addr_t)((char *)(ambigRoots[i/2]) + 1); + } + + if (r % initTestFREQ == 0) + *(int*)busy_init = -1; /* check that the buffer is still there */ + + if (objs % 1024 == 0) { + report(arena); + putchar('.'); + (void)fflush(stdout); + } + + ++objs; + } + + (void)mps_commit(busy_ap, 
busy_init, 64); + mps_arena_park(arena); + mps_ap_destroy(busy_ap); + mps_ap_destroy(ap); + mps_root_destroy(exactRoot); + mps_root_destroy(ambigRoot); + mps_root_destroy(bogusRoot); + mps_pool_destroy(pool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); + mps_arena_release(arena); + + return NULL; +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_thr_t thread; + + testlib_init(argc, argv); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE)); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), "arena_create"); + } MPS_ARGS_END(args); + mps_message_type_enable(arena, mps_message_type_gc()); + die(mps_thread_reg(&thread, arena), "thread_reg"); + test(arena, mps_class_amc(), exactRootsCOUNT); + test(arena, mps_class_amcz(), 0); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/amcssth.c b/mps/code/amcssth.c new file mode 100644 index 00000000000..35bd6a14ed5 --- /dev/null +++ b/mps/code/amcssth.c @@ -0,0 +1,387 @@ +/* amcssth.c: POOL CLASS AMC STRESS TEST WITH TWO THREADS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * The main thread parks the arena half way through the test case and + * runs mps_pool_walk() and mps_arena_formatted_objects_walk(). This + * checks that walking works while the other threads continue to + * allocate in the background. + */ + +#include "fmtdy.h" +#include "fmtdytst.h" +#include "testlib.h" +#include "testthr.h" +#include "mpslib.h" +#include "mpscamc.h" +#include "mpsavm.h" + +#include /* fflush, printf, putchar */ + + +/* These values have been tuned in the hope of getting one dynamic collection. */ +#define testArenaSIZE ((size_t)1000*1024) +#define gen1SIZE ((size_t)150) +#define gen2SIZE ((size_t)170) +#define avLEN 3 +#define exactRootsCOUNT 180 +#define ambigRootsCOUNT 50 +#define genCOUNT 2 +#define collectionsCOUNT 37 +#define rampSIZE 9 +#define initTestFREQ 6000 + +/* testChain -- generation parameters for the test */ + +static mps_gen_param_s testChain[genCOUNT] = { + { gen1SIZE, 0.85 }, { gen2SIZE, 0.45 } }; + + +/* objNULL needs to be odd so that it's ignored in exactRoots. 
*/ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) + + +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; + + +static mps_word_t collections; +static mps_arena_t arena; +static mps_root_t exactRoot, ambigRoot; +static unsigned long objs = 0; + + +/* make -- create one new object */ + +static mps_addr_t make(mps_ap_t ap, size_t roots_count) +{ + size_t length = rnd() % (2*avLEN); + size_t size = (length+2) * sizeof(mps_word_t); + mps_addr_t p; + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, p, ap, size); + if (res) + die(res, "MPS_RESERVE_BLOCK"); + res = dylan_init(p, size, exactRoots, roots_count); + if (res) + die(res, "dylan_init"); + } while(!mps_commit(ap, p, size)); + + return p; +} + + +/* test_stepper -- stepping function for walk */ + +static void test_stepper(mps_addr_t object, mps_fmt_t fmt, mps_pool_t pool, + void *p, size_t s) +{ + testlib_unused(object); testlib_unused(fmt); testlib_unused(pool); + testlib_unused(s); + (*(unsigned long *)p)++; +} + + +/* area_scan -- area scanning function for mps_pool_walk */ + +static mps_res_t area_scan(mps_ss_t ss, void *base, void *limit, void *closure) +{ + unsigned long *count = closure; + mps_res_t res; + while (base < limit) { + mps_addr_t prev = base; + ++ *count; + res = dylan_scan1(ss, &base); + if (res != MPS_RES_OK) return res; + Insist(prev < base); + } + Insist(base == limit); + return MPS_RES_OK; +} + + +/* churn -- create an object and install into roots */ + +static void churn(mps_ap_t ap, size_t roots_count) +{ + size_t i; + size_t r; + + ++objs; + r = (size_t)rnd(); + if (r & 1) { + i = (r >> 1) % exactRootsCOUNT; + if (exactRoots[i] != objNULL) + cdie(dylan_check(exactRoots[i]), "dying root check"); + exactRoots[i] = make(ap, roots_count); + if (exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + } else { + i = (r >> 1) % ambigRootsCOUNT; + 
ambigRoots[(ambigRootsCOUNT-1) - i] = make(ap, roots_count); + /* Create random interior pointers */ + ambigRoots[i] = (mps_addr_t)((char *)(ambigRoots[i/2]) + 1); + } +} + + +typedef struct closure_s { + mps_pool_t pool; + size_t roots_count; +} closure_s, *closure_t; + +static void *kid_thread(void *arg) +{ + void *marker = ▮ + mps_thr_t thread1, thread2; + mps_root_t reg_root; + mps_ap_t ap; + closure_t cl = arg; + + /* Register the thread twice to check this is supported -- see + * + */ + die(mps_thread_reg(&thread1, arena), "thread_reg"); + die(mps_thread_reg(&thread2, arena), "thread_reg"); + die(mps_root_create_thread(®_root, arena, thread1, marker), + "root_create"); + + die(mps_ap_create(&ap, cl->pool, mps_rank_exact()), "BufferCreate(fooey)"); + while(mps_collections(arena) < collectionsCOUNT) { + churn(ap, cl->roots_count); + } + mps_ap_destroy(ap); + + mps_root_destroy(reg_root); + mps_thread_dereg(thread2); + mps_thread_dereg(thread1); + + return NULL; +} + + +/* test -- the body of the test */ + +static void test_pool(const char *name, mps_pool_t pool, size_t roots_count) +{ + size_t i; + mps_word_t rampSwitch; + mps_alloc_pattern_t ramp = mps_alloc_pattern_ramp(); + int ramping; + mps_ap_t ap, busy_ap; + mps_addr_t busy_init; + testthr_t kids[10]; + closure_s cl; + int walked = FALSE, ramped = FALSE; + + printf("\n------ pool: %s-------\n", name); + + cl.pool = pool; + cl.roots_count = roots_count; + collections = 0; + + for (i = 0; i < NELEMS(kids); ++i) + testthr_create(&kids[i], kid_thread, &cl); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + die(mps_ap_create(&busy_ap, pool, mps_rank_exact()), "BufferCreate 2"); + + /* create an ap, and leave it busy */ + die(mps_reserve(&busy_init, busy_ap, 64), "mps_reserve busy"); + + rampSwitch = rampSIZE; + die(mps_ap_alloc_pattern_begin(ap, ramp), "pattern begin (ap)"); + die(mps_ap_alloc_pattern_begin(busy_ap, ramp), "pattern begin (busy_ap)"); + ramping = 1; + while (collections < 
collectionsCOUNT) { + mps_message_type_t type; + + if (mps_message_queue_type(&type, arena)) { + mps_message_t msg; + mps_bool_t b = mps_message_get(&msg, arena, type); + Insist(b); /* we just checked there was one */ + + if (type == mps_message_type_gc()) { + size_t live = mps_message_gc_live_size(arena, msg); + size_t condemned = mps_message_gc_condemned_size(arena, msg); + size_t not_condemned = mps_message_gc_not_condemned_size(arena, msg); + + printf("\nCollection %lu finished:\n", (unsigned long)collections++); + printf("live %"PRIuLONGEST"\n", (ulongest_t)live); + printf("condemned %"PRIuLONGEST"\n", (ulongest_t)condemned); + printf("not_condemned %"PRIuLONGEST"\n", (ulongest_t)not_condemned); + + } else if (type == mps_message_type_gc_start()) { + printf("\nCollection %lu started, %lu objects, committed=%lu.\n", + (unsigned long)collections, objs, + (unsigned long)mps_arena_committed(arena)); + + for (i = 0; i < exactRootsCOUNT; ++i) + cdie(exactRoots[i] == objNULL || dylan_check(exactRoots[i]), + "all roots check"); + + if (collections >= collectionsCOUNT / 2 && !walked) + { + unsigned long count1 = 0, count2 = 0; + mps_arena_park(arena); + mps_arena_formatted_objects_walk(arena, test_stepper, &count1, 0); + die(mps_pool_walk(pool, area_scan, &count2), "mps_pool_walk"); + mps_arena_release(arena); + printf("stepped on %lu objects.\n", count1); + printf("walked %lu objects.\n", count2); + walked = TRUE; + } + if (collections >= rampSwitch && !ramped) { + /* Every other time, switch back immediately. 
*/ + int begin_ramp = !ramping || (collections & 1); + + rampSwitch += rampSIZE; + if (ramping) { + die(mps_ap_alloc_pattern_end(ap, ramp), "pattern end (ap)"); + die(mps_ap_alloc_pattern_end(busy_ap, ramp), + "pattern end (busy_ap)"); + ramping = 0; + /* kill half of the roots */ + for(i = 0; i < exactRootsCOUNT; i += 2) { + if (exactRoots[i] != objNULL) { + cdie(dylan_check(exactRoots[i]), "ramp kill check"); + exactRoots[i] = objNULL; + } + } + } + if (begin_ramp) { + die(mps_ap_alloc_pattern_begin(ap, ramp), + "pattern rebegin (ap)"); + die(mps_ap_alloc_pattern_begin(busy_ap, ramp), + "pattern rebegin (busy_ap)"); + ramping = 1; + } + } + ramped = TRUE; + } + + mps_message_discard(arena, msg); + } + + churn(ap, roots_count); + { + size_t r = (size_t)rnd(); + if (r % initTestFREQ == 0) + *(int*)busy_init = -1; /* check that the buffer is still there */ + } + if (objs % 1024 == 0) { + putchar('.'); + fflush(stdout); + } + } + + (void)mps_commit(busy_ap, busy_init, 64); + mps_ap_destroy(busy_ap); + mps_ap_destroy(ap); + + for (i = 0; i < NELEMS(kids); ++i) + testthr_join(&kids[i], NULL); +} + +static void test_arena(void) +{ + size_t i; + mps_fmt_t format; + mps_chain_t chain; + mps_thr_t thread; + mps_root_t reg_root; + mps_pool_t amc_pool, amcz_pool; + void *marker = ▮ + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE)); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), "arena_create"); + } MPS_ARGS_END(args); + mps_message_type_enable(arena, mps_message_type_gc()); + mps_message_type_enable(arena, mps_message_type_gc_start()); + + die(dylan_fmt(&format, arena), "fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = objNULL; + for(i = 0; i < ambigRootsCOUNT; ++i) + ambigRoots[i] = rnd_addr(); + + die(mps_root_create_table_masked(&exactRoot, arena, + 
mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + die(mps_root_create_table(&ambigRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], ambigRootsCOUNT), + "root_create_table(ambig)"); + die(mps_thread_reg(&thread, arena), "thread_reg"); + die(mps_root_create_thread(®_root, arena, thread, marker), + "root_create"); + + die(mps_pool_create(&amc_pool, arena, mps_class_amc(), format, chain), + "pool_create(amc)"); + die(mps_pool_create(&amcz_pool, arena, mps_class_amcz(), format, chain), + "pool_create(amcz)"); + + test_pool("AMC", amc_pool, exactRootsCOUNT); + test_pool("AMCZ", amcz_pool, 0); + + mps_arena_park(arena); + mps_pool_destroy(amc_pool); + mps_pool_destroy(amcz_pool); + mps_root_destroy(reg_root); + mps_thread_dereg(thread); + mps_root_destroy(exactRoot); + mps_root_destroy(ambigRoot); + mps_chain_destroy(chain); + mps_fmt_destroy(format); + mps_arena_destroy(arena); +} + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + test_arena(); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/amsss.c b/mps/code/amsss.c new file mode 100644 index 00000000000..f7ef981e108 --- /dev/null +++ b/mps/code/amsss.c @@ -0,0 +1,275 @@ +/* amsss.c: POOL CLASS AMS STRESS TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * .design: Adapted from amcss.c, but not counting collections, just + * total size of objects allocated (because epoch doesn't increment when + * AMS is collected). */ + +#include "fmtdy.h" +#include "fmtdytst.h" +#include "testlib.h" +#include "mpslib.h" +#include "mpscams.h" +#include "mpsavm.h" +#include "mpstd.h" +#include "mps.h" +#include "mpm.h" + +#include /* fflush, printf */ + + +#define exactRootsCOUNT 50 +#define ambigRootsCOUNT 100 +/* This is enough for three GCs. */ +#define totalSizeMAX 800 * (size_t)1024 +#define totalSizeSTEP 200 * (size_t)1024 +/* objNULL needs to be odd so that it's ignored in exactRoots. 
*/ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) +#define testArenaSIZE ((size_t)1<<20) +#define initTestFREQ 3000 +#define splatTestFREQ 6000 +static mps_gen_param_s testChain[1] = { { 160, 0.90 } }; + + +static mps_arena_t arena; +static mps_ap_t ap; +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; +static size_t totalSize = 0; + + +/* report - report statistics from any messages */ + +static void report(void) +{ + static int nStart = 0; + static int nComplete = 0; + mps_message_type_t type; + + while(mps_message_queue_type(&type, arena)) { + mps_message_t message; + + cdie(mps_message_get(&message, arena, type), "message get"); + + if (type == mps_message_type_gc_start()) { + printf("\nCollection start %d. Because:\n", ++nStart); + printf("%s\n", mps_message_gc_start_why(arena, message)); + + } else if (type == mps_message_type_gc()) { + size_t live, condemned, not_condemned; + + live = mps_message_gc_live_size(arena, message); + condemned = mps_message_gc_condemned_size(arena, message); + not_condemned = mps_message_gc_not_condemned_size(arena, message); + + printf("\nCollection complete %d:\n", ++nComplete); + printf("live %"PRIuLONGEST"\n", (ulongest_t)live); + printf("condemned %"PRIuLONGEST"\n", (ulongest_t)condemned); + printf("not_condemned %"PRIuLONGEST"\n", (ulongest_t)not_condemned); + + } else { + cdie(0, "unknown message type"); + } + + mps_message_discard(arena, message); + } +} + + +/* make -- object allocation and init */ + +static mps_addr_t make(void) +{ + size_t length = rnd() % 20, size = (length+2) * sizeof(mps_word_t); + mps_addr_t p; + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, p, ap, size); + if (res) + die(res, "MPS_RESERVE_BLOCK"); + res = dylan_init(p, size, exactRoots, exactRootsCOUNT); + if (res) + die(res, "dylan_init"); + } while(!mps_commit(ap, p, size)); + + totalSize += size; + return p; +} + + +/* test -- the actual stress test */ + +static mps_pool_debug_option_s 
freecheckOptions = + { NULL, 0, "Dead", 4 }; + +static void test_pool(mps_pool_class_t pool_class, mps_arg_s args[], + mps_bool_t haveAmbiguous) +{ + mps_pool_t pool; + mps_root_t exactRoot, ambigRoot = NULL; + size_t lastStep = 0, i, r; + unsigned long objs; + mps_ap_t busy_ap; + mps_addr_t busy_init; + + die(mps_pool_create_k(&pool, arena, pool_class, args), "pool_create"); + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + die(mps_ap_create(&busy_ap, pool, mps_rank_exact()), "BufferCreate 2"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = objNULL; + if (haveAmbiguous) + for(i = 0; i < ambigRootsCOUNT; ++i) + ambigRoots[i] = rnd_addr(); + + die(mps_root_create_table_masked(&exactRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + if (haveAmbiguous) + die(mps_root_create_table(&ambigRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], ambigRootsCOUNT), + "root_create_table(ambig)"); + + /* create an ap, and leave it busy */ + die(mps_reserve(&busy_init, busy_ap, 64), "mps_reserve busy"); + + die(PoolDescribe(pool, mps_lib_get_stdout(), 0), "PoolDescribe"); + + objs = 0; totalSize = 0; + while(totalSize < totalSizeMAX) { + if (totalSize > lastStep + totalSizeSTEP) { + lastStep = totalSize; + printf("\nSize %"PRIuLONGEST" bytes, %lu objects.\n", + (ulongest_t)totalSize, objs); + (void)fflush(stdout); + for(i = 0; i < exactRootsCOUNT; ++i) + cdie(exactRoots[i] == objNULL || dylan_check(exactRoots[i]), + "all roots check"); + } + + r = (size_t)rnd(); + if (!haveAmbiguous || (r & 1)) { + i = (r >> 1) % exactRootsCOUNT; + if (exactRoots[i] != objNULL) + cdie(dylan_check(exactRoots[i]), "dying root check"); + exactRoots[i] = make(); + if (exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + } else { + i = (r >> 1) % ambigRootsCOUNT; + ambigRoots[(ambigRootsCOUNT-1) - i] = 
make(); + /* Create random interior pointers */ + ambigRoots[i] = (mps_addr_t)((char *)(ambigRoots[i/2]) + 1); + } + + if (rnd() % initTestFREQ == 0) + *(int*)busy_init = -1; /* check that the buffer is still there */ + + if (rnd() % splatTestFREQ == 0) + mps_pool_check_free_space(pool); + + ++objs; + if (objs % 256 == 0) { + printf("."); + report(); + (void)fflush(stdout); + } + } + + (void)mps_commit(busy_ap, busy_init, 64); + mps_ap_destroy(busy_ap); + mps_ap_destroy(ap); + mps_root_destroy(exactRoot); + if (haveAmbiguous) + mps_root_destroy(ambigRoot); + + mps_pool_destroy(pool); +} + + +int main(int argc, char *argv[]) +{ + int i; + mps_thr_t thread; + mps_fmt_t format; + mps_chain_t chain; + + testlib_init(argc, argv); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE)); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), "arena_create"); + } MPS_ARGS_END(args); + + mps_message_type_enable(arena, mps_message_type_gc_start()); + mps_message_type_enable(arena, mps_message_type_gc()); + die(mps_thread_reg(&thread, arena), "thread_reg"); + die(mps_fmt_create_A(&format, arena, dylan_fmt_A()), "fmt_create"); + die(mps_chain_create(&chain, arena, 1, testChain), "chain_create"); + + for (i = 0; i < 8; i++) { + int debug = i % 2; + int ownChain = (i / 2) % 2; + int ambig = (i / 4) % 2; + printf("\n\n*** AMS%s with %sCHAIN and %sSUPPORT_AMBIGUOUS\n", + debug ? " Debug" : "", + ownChain ? "" : "!", + ambig ? "" : "!"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, format); + if (ownChain) + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, chain); + MPS_ARGS_ADD(args, MPS_KEY_AMS_SUPPORT_AMBIGUOUS, ambig); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, &freecheckOptions); + test_pool(debug ? 
mps_class_ams_debug() : mps_class_ams(), args, ambig); + } MPS_ARGS_END(args); + } + + mps_arena_park(arena); + mps_chain_destroy(chain); + mps_fmt_destroy(format); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/amssshe.c b/mps/code/amssshe.c new file mode 100644 index 00000000000..ad8c7b6f3a8 --- /dev/null +++ b/mps/code/amssshe.c @@ -0,0 +1,199 @@ +/* amssshe.c: POOL CLASS AMS STRESS TEST WITH HEADERS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .design: Adapted from amsss.c. + */ + +#include "fmthe.h" +#include "fmtdytst.h" +#include "testlib.h" +#include "mpslib.h" +#include "mpscams.h" +#include "mpsavm.h" +#include "mpstd.h" +#include "mps.h" + +#include /* fflush, printf */ + + +#define exactRootsCOUNT 50 +#define ambigRootsCOUNT 100 +/* This is enough for five GCs. */ +#define totalSizeMAX 800 * (size_t)1024 +#define totalSizeSTEP 200 * (size_t)1024 +/* objNULL needs to be odd so that it's ignored in exactRoots. */ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) +#define testArenaSIZE ((size_t)16<<20) +#define initTestFREQ 6000 +static mps_gen_param_s testChain[1] = { { 160, 0.90 } }; + + +static mps_pool_t pool; +static mps_ap_t ap; +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; +static size_t totalSize = 0; + + +static mps_addr_t make(void) +{ + size_t length = rnd() % 20, size = (length+2) * sizeof(mps_word_t); + mps_addr_t p, userP; + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, p, ap, size + headerSIZE); + if(res) + die(res, "MPS_RESERVE_BLOCK"); + userP = (mps_addr_t)((char*)p + headerSIZE); + res = dylan_init(userP, size, exactRoots, exactRootsCOUNT); + if(res) + die(res, "dylan_init"); + ((int*)p)[0] = realHeader; + ((int*)p)[1] = 0xED0ED; + } while(!mps_commit(ap, p, size + headerSIZE)); + + totalSize += size; + return userP; +} + + +static void test(mps_arena_t arena) +{ + mps_fmt_t format; + mps_chain_t chain; + mps_root_t exactRoot, ambigRoot; + size_t lastStep = 0, i, r; + unsigned long objs; + mps_ap_t busy_ap; + mps_addr_t busy_init; + + die(EnsureHeaderFormat(&format, arena), "make header format"); + 
die(mps_chain_create(&chain, arena, 1, testChain), "chain_create"); + die(mps_pool_create(&pool, arena, mps_class_ams(), format, chain, + TRUE), "pool_create(ams)"); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + die(mps_ap_create(&busy_ap, pool, mps_rank_exact()), "BufferCreate 2"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = objNULL; + for(i = 0; i < ambigRootsCOUNT; ++i) + ambigRoots[i] = rnd_addr(); + + die(mps_root_create_table_masked(&exactRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + die(mps_root_create_table(&ambigRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], ambigRootsCOUNT), + "root_create_table(ambig)"); + + /* create an ap, and leave it busy */ + die(mps_reserve(&busy_init, busy_ap, 64), "mps_reserve busy"); + + objs = 0; + while(totalSize < totalSizeMAX) { + if(totalSize > lastStep + totalSizeSTEP) { + lastStep = totalSize; + printf("\nSize %"PRIuLONGEST" bytes, %lu objects.\n", + (ulongest_t)totalSize, objs); + (void)fflush(stdout); + for(i = 0; i < exactRootsCOUNT; ++i) + cdie(exactRoots[i] == objNULL || dylan_check(exactRoots[i]), + "all roots check"); + } + + r = (size_t)rnd(); + if(r & 1) { + i = (r >> 1) % exactRootsCOUNT; + if(exactRoots[i] != objNULL) + cdie(dylan_check(exactRoots[i]), "dying root check"); + exactRoots[i] = make(); + if(exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + } else { + i = (r >> 1) % ambigRootsCOUNT; + ambigRoots[(ambigRootsCOUNT-1) - i] = make(); + /* Create random interior pointers */ + ambigRoots[i] = (mps_addr_t)((char *)(ambigRoots[i/2]) + 1); + } + + if(rnd() % initTestFREQ == 0) + *(int*)busy_init = -1; /* check that the buffer is still there */ + + ++objs; + if (objs % 256 == 0) { + printf("."); + (void)fflush(stdout); + } + } + + (void)mps_commit(busy_ap, busy_init, 64); + 
mps_arena_park(arena); + mps_ap_destroy(busy_ap); + mps_ap_destroy(ap); + mps_root_destroy(exactRoot); + mps_root_destroy(ambigRoot); + mps_pool_destroy(pool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); + mps_arena_release(arena); +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_thr_t thread; + + testlib_init(argc, argv); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE)); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), "arena_create"); + } MPS_ARGS_END(args); + die(mps_thread_reg(&thread, arena), "thread_reg"); + test(arena); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/anangc.gmk b/mps/code/anangc.gmk new file mode 100644 index 00000000000..b24aeaeb0a4 --- /dev/null +++ b/mps/code/anangc.gmk @@ -0,0 +1,55 @@ +# -*- makefile -*- +# +# anangc.gmk: BUILD FOR ANSI/ANSI/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = anangc + +MPMPF = \ + lockan.c \ + prmcan.c \ + prmcanan.c \ + protan.c \ + span.c \ + than.c \ + vman.c + +LIBS = -lm -lpthread + +include gc.gmk + +CFLAGSCOMPILER += -DCONFIG_PF_ANSI -DCONFIG_THREAD_SINGLE + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/ananll.gmk b/mps/code/ananll.gmk new file mode 100644 index 00000000000..3b8860e1e5b --- /dev/null +++ b/mps/code/ananll.gmk @@ -0,0 +1,56 @@ +# -*- makefile -*- +# +# ananll.gmk: BUILD FOR ANSI/ANSI/Clang PLATFORM +# +# $Id$ +# Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + +PFM = ananll + +MPMPF = \ + lockan.c \ + prmcan.c \ + prmcanan.c \ + protan.c \ + span.c \ + than.c \ + vman.c + +LIBS = -lm -lpthread + +include ll.gmk + +CFLAGSCOMPILER += -DCONFIG_PF_ANSI -DCONFIG_THREAD_SINGLE + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/mps/code/ananmv.nmk b/mps/code/ananmv.nmk new file mode 100644 index 00000000000..fd7af74532f --- /dev/null +++ b/mps/code/ananmv.nmk @@ -0,0 +1,51 @@ +# ananmv.nmk: ANSI/ANSI/MICROSOFT VISUAL C/C++ NMAKE FILE -*- makefile -*- +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = ananmv + +PFMDEFS = /DCONFIG_PF_ANSI /DCONFIG_THREAD_SINGLE + +MPMPF = \ + [lockan] \ + [prmcan] \ + [prmcanan] \ + [protan] \ + [span] \ + [than] \ + [vman] + +!INCLUDE commpre.nmk +!INCLUDE mv.nmk +!INCLUDE commpost.nmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/apss.c b/mps/code/apss.c new file mode 100644 index 00000000000..f98ca19ffb4 --- /dev/null +++ b/mps/code/apss.c @@ -0,0 +1,274 @@ +/* apss.c: AP MANUAL ALLOC STRESS TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + */ + + +#include "mpscmvff.h" +#include "mpscmvt.h" +#include "mpslib.h" +#include "mpsacl.h" +#include "mpsavm.h" + +#include "testlib.h" +#include "mpslib.h" + +#include /* printf */ +#include /* malloc */ + + +#define testArenaSIZE ((((size_t)3)<<24) - 4) +#define testSetSIZE 200 +#define testLOOPS 10 +#define MAX_ALIGN 64 /* TODO: Make this test work up to arena_grain_size? 
*/ + + +/* make -- allocate one object */ + +static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size) +{ + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, *p, ap, size); + if(res != MPS_RES_OK) + return res; + } while(!mps_commit(ap, *p, size)); + + return MPS_RES_OK; +} + + +/* check_allocated_size -- check the allocated size of the pool */ + +static void check_allocated_size(mps_pool_t pool, mps_ap_t ap, size_t allocated) +{ + size_t total_size = mps_pool_total_size(pool); + size_t free_size = mps_pool_free_size(pool); + size_t ap_free = (size_t)((char *)ap->limit - (char *)ap->init); + Insist(total_size - free_size == allocated + ap_free); +} + + +/* stress -- create a pool of the requested type and allocate in it */ + +static mps_res_t stress(mps_arena_t arena, mps_pool_debug_option_s *options, + mps_align_t align, + size_t (*size)(size_t i, mps_align_t align), + const char *name, mps_pool_class_t pool_class, + mps_arg_s args[]) +{ + mps_res_t res = MPS_RES_OK; + mps_pool_t pool; + mps_ap_t ap; + size_t i, k; + int *ps[testSetSIZE]; + size_t ss[testSetSIZE]; + size_t allocated = 0; /* Total allocated memory */ + size_t debugOverhead = options ? 2 * alignUp(options->fence_size, align) : 0; + + printf("stress %s\n", name); + + die(mps_pool_create_k(&pool, arena, pool_class, args), "pool_create"); + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + + /* allocate a load of objects */ + for (i=0; i= sizeof(ps[i])) + *ps[i] = 1; /* Write something, so it gets swap. 
*/ + check_allocated_size(pool, ap, allocated); + } + + /* Check introspection functions */ + for (i = 0; i < NELEMS(ps); ++i) { + mps_pool_t addr_pool = NULL; + Insist(mps_arena_has_addr(arena, ps[i])); + Insist(mps_addr_pool(&addr_pool, arena, ps[i])); + Insist(addr_pool == pool); + } + + mps_pool_check_fenceposts(pool); + + for (k=0; k> (i / 10)), 2) + 1, align); +} + + +static mps_pool_debug_option_s bothOptions = { + /* .fence_template = */ "post", + /* .fence_size = */ 4, + /* .free_template = */ "DEAD", + /* .free_size = */ 4 +}; + +static mps_pool_debug_option_s fenceOptions = { + /* .fence_template = */ "123456789abcdef", + /* .fence_size = */ 15, + /* .free_template = */ NULL, + /* .free_size = */ 0 +}; + + +/* test -- create arena using given class and arguments; test all the + * pool classes in this arena + */ + +static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[], + size_t arena_grain_size, + mps_pool_debug_option_s *options) +{ + mps_arena_t arena; + die(mps_arena_create_k(&arena, arena_class, arena_args), "mps_arena_create"); + + (void)arena_grain_size; /* TODO: test larger alignments up to this */ + + MPS_ARGS_BEGIN(args) { + mps_align_t align = rnd_align(sizeof(void *), MAX_ALIGN); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_SPARE, rnd_double()); + die(stress(arena, NULL, align, randomSizeAligned, "MVFF", + mps_class_mvff(), args), "stress MVFF"); + } MPS_ARGS_END(args); + + /* IWBN to test MVFFDebug, but the MPS doesn't support debugging + allocation points. See job003995. 
*/ + (void)options; + + MPS_ARGS_BEGIN(args) { + mps_align_t align = rnd_align(sizeof(void *), MAX_ALIGN); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + die(stress(arena, NULL, align, randomSizeAligned, "MVT", + mps_class_mvt(), args), "stress MVT"); + } MPS_ARGS_END(args); + + /* Manual allocation should not cause any garbage collections. */ + Insist(mps_collections(arena) == 0); + mps_arena_destroy(arena); +} + + +int main(int argc, char *argv[]) +{ + size_t arena_grain_size; + + testlib_init(argc, argv); + + arena_grain_size = rnd_grain(testArenaSIZE); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 2 * testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size); + MPS_ARGS_ADD(args, MPS_KEY_COMMIT_LIMIT, testArenaSIZE); + test(mps_arena_class_vm(), args, arena_grain_size, &fenceOptions); + } MPS_ARGS_END(args); + + arena_grain_size = rnd_grain(2 * testArenaSIZE); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 2 * testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size); + test(mps_arena_class_vm(), args, arena_grain_size, &bothOptions); + } MPS_ARGS_END(args); + + arena_grain_size = rnd_grain(testArenaSIZE); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_CL_BASE, malloc(testArenaSIZE)); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size); + test(mps_arena_class_cl(), args, arena_grain_size, &bothOptions); + } MPS_ARGS_END(args); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/arena.c b/mps/code/arena.c new file mode 100644 index 00000000000..9f29432eaa1 --- /dev/null +++ b/mps/code/arena.c @@ -0,0 +1,1411 @@ +/* arena.c: ARENA ALLOCATION FEATURES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .sources: is the main design document. 
*/ + +#include "tract.h" +#include "poolmvff.h" +#include "mpm.h" +#include "cbs.h" +#include "bt.h" +#include "poolmfs.h" +#include "mpscmfs.h" + + +SRCID(arena, "$Id$"); + + +#define ArenaControlPool(arena) MVFFPool(&(arena)->controlPoolStruct) +#define ArenaCBSBlockPool(arena) MFSPool(&(arena)->freeCBSBlockPoolStruct) +#define ArenaFreeLand(arena) CBSLand(&(arena)->freeLandStruct) + + +/* ArenaGrainSizeCheck -- check that size is a valid arena grain size */ + +Bool ArenaGrainSizeCheck(Size size) +{ + CHECKL(size > 0); + /* */ + CHECKL(SizeIsAligned(size, MPS_PF_ALIGN)); + /* Grain size must be a power of 2 for the tract lookup and the + * zones to work. */ + CHECKL(SizeIsP2(size)); + + return TRUE; +} + + +/* Forward declarations */ + +static void ArenaTrivCompact(Arena arena, Trace trace); +static void arenaFreePage(Arena arena, Addr base, Pool pool); +static void arenaFreeLandFinish(Arena arena); +static Res ArenaAbsInit(Arena arena, Size grainSize, ArgList args); +static void ArenaAbsFinish(Inst inst); +static Res ArenaAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth); + + +static void ArenaNoFree(Addr base, Size size, Pool pool) +{ + UNUSED(base); + UNUSED(size); + UNUSED(pool); + NOTREACHED; +} + +static Res ArenaNoChunkInit(Chunk chunk, BootBlock boot) +{ + UNUSED(chunk); + UNUSED(boot); + NOTREACHED; + return ResUNIMPL; +} + +static void ArenaNoChunkFinish(Chunk chunk) +{ + UNUSED(chunk); + NOTREACHED; +} + +static Res ArenaNoPagesMarkAllocated(Arena arena, Chunk chunk, + Index baseIndex, Count pages, + Pool pool) +{ + UNUSED(arena); + UNUSED(chunk); + UNUSED(baseIndex); + UNUSED(pages); + UNUSED(pool); + NOTREACHED; + return ResUNIMPL; +} + +static Bool ArenaNoChunkPageMapped(Chunk chunk, Index index) +{ + UNUSED(chunk); + UNUSED(index); + NOTREACHED; + return FALSE; +} + +static Res ArenaNoCreate(Arena *arenaReturn, ArgList args) +{ + UNUSED(arenaReturn); + UNUSED(args); + NOTREACHED; + return ResUNIMPL; +} + +static void ArenaNoDestroy(Arena 
arena) +{ + UNUSED(arena); + NOTREACHED; +} + +DEFINE_CLASS(Inst, ArenaClass, klass) +{ + INHERIT_CLASS(klass, ArenaClass, InstClass); + AVERT(InstClass, klass); +} + + +/* AbstractArenaClass -- The abstract arena class definition */ + +DEFINE_CLASS(Arena, AbstractArena, klass) +{ + INHERIT_CLASS(&klass->instClassStruct, AbstractArena, Inst); + klass->instClassStruct.finish = ArenaAbsFinish; + klass->instClassStruct.describe = ArenaAbsDescribe; + klass->size = sizeof(ArenaStruct); + klass->varargs = ArgTrivVarargs; + klass->init = ArenaAbsInit; + klass->create = ArenaNoCreate; + klass->destroy = ArenaNoDestroy; + klass->purgeSpare = ArenaNoPurgeSpare; + klass->extend = ArenaNoExtend; + klass->grow = ArenaNoGrow; + klass->free = ArenaNoFree; + klass->chunkInit = ArenaNoChunkInit; + klass->chunkFinish = ArenaNoChunkFinish; + klass->compact = ArenaTrivCompact; + klass->pagesMarkAllocated = ArenaNoPagesMarkAllocated; + klass->chunkPageMapped = ArenaNoChunkPageMapped; + klass->sig = ArenaClassSig; + AVERT(ArenaClass, klass); +} + + +/* ArenaClassCheck -- check the consistency of an arena class */ + +Bool ArenaClassCheck(ArenaClass klass) +{ + CHECKD(InstClass, &klass->instClassStruct); + CHECKL(klass->size >= sizeof(ArenaStruct)); + CHECKL(FUNCHECK(klass->varargs)); + CHECKL(FUNCHECK(klass->init)); + CHECKL(FUNCHECK(klass->create)); + CHECKL(FUNCHECK(klass->destroy)); + CHECKL(FUNCHECK(klass->purgeSpare)); + CHECKL(FUNCHECK(klass->extend)); + CHECKL(FUNCHECK(klass->grow)); + CHECKL(FUNCHECK(klass->free)); + CHECKL(FUNCHECK(klass->chunkInit)); + CHECKL(FUNCHECK(klass->chunkFinish)); + CHECKL(FUNCHECK(klass->compact)); + CHECKL(FUNCHECK(klass->pagesMarkAllocated)); + CHECKL(FUNCHECK(klass->chunkPageMapped)); + + /* Check that arena classes override sets of related methods. 
*/ + CHECKL((klass->init == ArenaAbsInit) + == (klass->instClassStruct.finish == ArenaAbsFinish)); + CHECKL((klass->create == ArenaNoCreate) + == (klass->destroy == ArenaNoDestroy)); + CHECKL((klass->chunkInit == ArenaNoChunkInit) + == (klass->chunkFinish == ArenaNoChunkFinish)); + + CHECKS(ArenaClass, klass); + return TRUE; +} + + +/* ArenaCheck -- check the arena */ + +Bool ArenaCheck(Arena arena) +{ + CHECKC(AbstractArena, arena); + CHECKD(Globals, ArenaGlobals(arena)); + + CHECKL(BoolCheck(arena->poolReady)); + if (arena->poolReady) { /* */ + CHECKD(MVFF, &arena->controlPoolStruct); + } + + /* .reserved.check: Would like to check that arena->committed <= + * arena->reserved, but that isn't always true in the VM arena. + * Memory is committed early on when VMChunkCreate calls vmArenaMap + * (to provide a place for the chunk struct) but is not recorded as + * reserved until ChunkInit calls ArenaChunkInsert. + */ + CHECKL(arena->committed <= arena->commitLimit); + CHECKL(arena->spareCommitted <= arena->committed); + CHECKL(0.0 <= arena->spare); + CHECKL(arena->spare <= 1.0); + CHECKL(0.0 <= arena->pauseTime); + + CHECKL(arena->zoneShift == ZoneShiftUNSET + || ShiftCheck(arena->zoneShift)); + CHECKL(ArenaGrainSizeCheck(arena->grainSize)); + + /* Stripes can't be smaller than grains. */ + CHECKL(arena->zoneShift == ZoneShiftUNSET + || ((Size)1 << arena->zoneShift) >= arena->grainSize); + + if (arena->lastTract == NULL) { + CHECKL(arena->lastTractBase == (Addr)0); + } else { + CHECKL(TractBase(arena->lastTract) == arena->lastTractBase); + } + + if (arena->primary != NULL) { + CHECKD(Chunk, arena->primary); + } + CHECKD_NOSIG(Ring, ArenaChunkRing(arena)); + /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. 
*/ + CHECKL(TreeCheck(ArenaChunkTree(arena))); + /* TODO: check that the chunkRing and chunkTree have identical members */ + /* nothing to check for chunkSerial */ + + CHECKL(LocusCheck(arena)); + + CHECKL(BoolCheck(arena->hasFreeLand)); + if (arena->hasFreeLand) + CHECKD(Land, ArenaFreeLand(arena)); + + CHECKL(BoolCheck(arena->zoned)); + + return TRUE; +} + + +/* ArenaAbsInit -- initialize the generic part of the arena */ + +static Res ArenaAbsInit(Arena arena, Size grainSize, ArgList args) +{ + Res res; + Bool zoned = ARENA_DEFAULT_ZONED; + Size commitLimit = ARENA_DEFAULT_COMMIT_LIMIT; + double spare = ARENA_SPARE_DEFAULT; + double pauseTime = ARENA_DEFAULT_PAUSE_TIME; + mps_arg_s arg; + + AVER(arena != NULL); + AVERT(ArenaGrainSize, grainSize); + + if (ArgPick(&arg, args, MPS_KEY_ARENA_ZONED)) + zoned = arg.val.b; + if (ArgPick(&arg, args, MPS_KEY_COMMIT_LIMIT)) + commitLimit = arg.val.size; + /* MPS_KEY_SPARE_COMMIT_LIMIT is deprecated */ + if (ArgPick(&arg, args, MPS_KEY_SPARE_COMMIT_LIMIT)) { + if (0 < commitLimit && commitLimit <= arg.val.size) + spare = (double)arg.val.size / (double)commitLimit; + else + spare = 1.0; + } + if (ArgPick(&arg, args, MPS_KEY_SPARE)) + spare = arg.val.d; + if (ArgPick(&arg, args, MPS_KEY_PAUSE_TIME)) + pauseTime = arg.val.d; + + /* Superclass init */ + InstInit(CouldBeA(Inst, arena)); + + arena->reserved = (Size)0; + arena->committed = (Size)0; + arena->commitLimit = commitLimit; + arena->spareCommitted = (Size)0; + arena->spare = spare; + arena->pauseTime = pauseTime; + arena->grainSize = grainSize; + /* zoneShift must be overridden by arena class init */ + arena->zoneShift = ZoneShiftUNSET; + arena->poolReady = FALSE; /* */ + arena->lastTract = NULL; + arena->lastTractBase = NULL; + arena->hasFreeLand = FALSE; + arena->freeZones = ZoneSetUNIV; + arena->zoned = zoned; + + arena->primary = NULL; + RingInit(ArenaChunkRing(arena)); + arena->chunkTree = TreeEMPTY; + arena->chunkSerial = (Serial)0; + + LocusInit(arena); + + res = 
GlobalsInit(ArenaGlobals(arena)); + if (res != ResOK) + goto failGlobalsInit; + + SetClassOfPoly(arena, CLASS(AbstractArena)); + arena->sig = ArenaSig; + AVERC(Arena, arena); + + /* Initialise a pool to hold the CBS blocks for the arena's free + * land. This pool can't be allowed to extend itself using + * ArenaAlloc because it is used to implement ArenaAlloc, so + * MFSExtendSelf is set to FALSE. Failures to extend are handled + * where the free land is used: see arenaFreeLandInsertExtend. */ + + MPS_ARGS_BEGIN(piArgs) { + MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(CBSZonedBlockStruct)); + MPS_ARGS_ADD(piArgs, MPS_KEY_EXTEND_BY, ArenaGrainSize(arena)); + MPS_ARGS_ADD(piArgs, MFSExtendSelf, FALSE); + res = PoolInit(ArenaCBSBlockPool(arena), arena, PoolClassMFS(), piArgs); + } MPS_ARGS_END(piArgs); + AVER(res == ResOK); /* no allocation, no failure expected */ + if (res != ResOK) + goto failMFSInit; + + EventLabelPointer(ArenaCBSBlockPool(arena), EventInternString("CBSBlock")); + return ResOK; + +failMFSInit: + GlobalsFinish(ArenaGlobals(arena)); +failGlobalsInit: + InstFinish(MustBeA(Inst, arena)); + return res; +} + + +/* VM keys are defined here even though the code they apply to might + * not be linked. For example, MPS_KEY_VMW3_TOP_DOWN only applies to + * vmw3.c. The reason is that we want these keywords to be optional + * even on the wrong platform, so that clients can write simple portable + * code. They should be free to pass MPS_KEY_VMW3_TOP_DOWN on other + * platforms, knowing that it has no effect. To do that, the key must + * exist on all platforms. 
*/ + +ARG_DEFINE_KEY(VMW3_TOP_DOWN, Bool); + + +/* ArenaCreate -- create the arena and call initializers */ + +ARG_DEFINE_KEY(ARENA_GRAIN_SIZE, Size); +ARG_DEFINE_KEY(ARENA_SIZE, Size); +ARG_DEFINE_KEY(ARENA_ZONED, Bool); +ARG_DEFINE_KEY(COMMIT_LIMIT, Size); +ARG_DEFINE_KEY(SPARE_COMMIT_LIMIT, Size); +ARG_DEFINE_KEY(PAUSE_TIME, double); + +static Res arenaFreeLandInit(Arena arena) +{ + Res res; + + AVERT(Arena, arena); + AVER(!arena->hasFreeLand); + AVER(arena->primary != NULL); + + /* Initialise the free land. */ + MPS_ARGS_BEGIN(liArgs) { + MPS_ARGS_ADD(liArgs, CBSBlockPool, ArenaCBSBlockPool(arena)); + res = LandInit(ArenaFreeLand(arena), CLASS(CBSZoned), arena, + ArenaGrainSize(arena), arena, liArgs); + } MPS_ARGS_END(liArgs); + AVER(res == ResOK); /* no allocation, no failure expected */ + if (res != ResOK) + goto failLandInit; + + /* With the primary chunk initialised we can add page memory to the + * free land that describes the free address space in the primary + * chunk. */ + res = ArenaFreeLandInsert(arena, + PageIndexBase(arena->primary, + arena->primary->allocBase), + arena->primary->limit); + if (res != ResOK) + goto failFreeLandInsert; + + arena->hasFreeLand = TRUE; + return ResOK; + +failFreeLandInsert: + LandFinish(ArenaFreeLand(arena)); +failLandInit: + return res; +} + +Res ArenaCreate(Arena *arenaReturn, ArenaClass klass, ArgList args) +{ + Arena arena; + Res res; + + AVER(arenaReturn != NULL); + AVERT(ArenaClass, klass); + AVERT(ArgList, args); + + /* We must initialise the event subsystem very early, because event logging + will start as soon as anything interesting happens and expect to write + to the EventLast pointers. */ + EventInit(); + + res = klass->create(&arena, args); + if (res != ResOK) + goto failInit; + + /* Zone shift must have been set up by klass->create() */ + AVER(ShiftCheck(arena->zoneShift)); + + /* TODO: Consider how each of the stages below could be incorporated + into arena initialization, rather than tacked on here. 
*/ + + /* Grain size must have been set up by klass->create() */ + if (ArenaGrainSize(arena) > ((Size)1 << arena->zoneShift)) { + res = ResMEMORY; /* size was too small */ + goto failStripeSize; + } + + res = arenaFreeLandInit(arena); + if (res != ResOK) + goto failFreeLandInit; + + res = ControlInit(arena); + if (res != ResOK) + goto failControlInit; + + res = GlobalsCompleteCreate(ArenaGlobals(arena)); + if (res != ResOK) + goto failGlobalsCompleteCreate; + + AVERT(Arena, arena); + *arenaReturn = arena; + return ResOK; + +failGlobalsCompleteCreate: + ControlFinish(arena); +failControlInit: + arenaFreeLandFinish(arena); +failFreeLandInit: +failStripeSize: + klass->destroy(arena); +failInit: + return res; +} + + +/* ArenaAbsFinish -- finish the generic part of the arena */ + +static void ArenaAbsFinish(Inst inst) +{ + Arena arena = MustBeA(AbstractArena, inst); + AVERC(Arena, arena); + PoolFinish(ArenaCBSBlockPool(arena)); + arena->sig = SigInvalid; + NextMethod(Inst, AbstractArena, finish)(inst); + GlobalsFinish(ArenaGlobals(arena)); + LocusFinish(arena); + RingFinish(ArenaChunkRing(arena)); + AVER(ArenaChunkTree(arena) == TreeEMPTY); +} + + +/* ArenaDestroy -- destroy the arena */ + +static void arenaMFSPageFreeVisitor(Pool pool, Addr base, Size size, + void *closure) +{ + AVERT(Pool, pool); + AVER(closure == UNUSED_POINTER); + UNUSED(closure); + UNUSED(size); + AVER(size == ArenaGrainSize(PoolArena(pool))); + arenaFreePage(PoolArena(pool), base, pool); +} + +static void arenaFreeLandFinish(Arena arena) +{ + AVERT(Arena, arena); + AVER(arena->hasFreeLand); + + /* We're about to free the memory occupied by the free land, which + contains a CBS. We want to make sure that LandFinish doesn't try + to check the CBS, so nuke it here. TODO: LandReset? */ + arena->freeLandStruct.splayTreeStruct.root = TreeEMPTY; + + /* The CBS block pool can't free its own memory via ArenaFree because + * that would use the free land. 
*/ + MFSFinishExtents(ArenaCBSBlockPool(arena), arenaMFSPageFreeVisitor, + UNUSED_POINTER); + + arena->hasFreeLand = FALSE; + LandFinish(ArenaFreeLand(arena)); +} + +void ArenaDestroy(Arena arena) +{ + AVERT(Arena, arena); + + GlobalsPrepareToDestroy(ArenaGlobals(arena)); + + ControlFinish(arena); + + /* We must tear down the free land before the chunks, because pages + * containing CBS blocks might be allocated in those chunks. */ + arenaFreeLandFinish(arena); + + /* Call class-specific destruction. This will call ArenaAbsFinish. */ + Method(Arena, arena, destroy)(arena); + + EventFinish(); +} + + +/* ControlInit -- initialize the control pool */ + +Res ControlInit(Arena arena) +{ + Res res; + + AVERT(Arena, arena); + AVER(!arena->poolReady); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, CONTROL_EXTEND_BY); + res = PoolInit(ArenaControlPool(arena), arena, + PoolClassMVFF(), args); + } MPS_ARGS_END(args); + if (res != ResOK) + return res; + arena->poolReady = TRUE; /* */ + EventLabelPointer(&arena->controlPoolStruct, EventInternString("Control")); + return ResOK; +} + + +/* ControlFinish -- finish the control pool */ + +void ControlFinish(Arena arena) +{ + AVERT(Arena, arena); + AVER(arena->poolReady); + arena->poolReady = FALSE; + PoolFinish(ArenaControlPool(arena)); +} + + +/* ArenaDescribe -- describe the arena */ + +static Res ArenaAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Arena arena = CouldBeA(AbstractArena, inst); + Res res; + + if (!TESTC(AbstractArena, arena)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = InstDescribe(CouldBeA(Inst, arena), stream, depth); + if (res != ResOK) + return res; + + if (arena->poolReady) { + res = WriteF(stream, depth + 2, + "controlPool $P\n", (WriteFP)&arena->controlPoolStruct, + NULL); + if (res != ResOK) + return res; + } + + res = WriteF(stream, depth + 2, + "reserved $W\n", (WriteFW)arena->reserved, + "committed $W\n", (WriteFW)arena->committed, + "commitLimit 
$W\n", (WriteFW)arena->commitLimit, + "spareCommitted $W\n", (WriteFW)arena->spareCommitted, + "spare $D\n", (WriteFD)arena->spare, + "zoneShift $U\n", (WriteFU)arena->zoneShift, + "grainSize $W\n", (WriteFW)arena->grainSize, + "lastTract $P\n", (WriteFP)arena->lastTract, + "lastTractBase $P\n", (WriteFP)arena->lastTractBase, + "primary $P\n", (WriteFP)arena->primary, + "hasFreeLand $S\n", WriteFYesNo(arena->hasFreeLand), + "freeZones $B\n", (WriteFB)arena->freeZones, + "zoned $S\n", WriteFYesNo(arena->zoned), + NULL); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "droppedMessages $U$S\n", (WriteFU)arena->droppedMessages, + (arena->droppedMessages == 0 ? "" : " -- MESSAGES DROPPED!"), + NULL); + if (res != ResOK) + return res; + + res = GlobalsDescribe(ArenaGlobals(arena), stream, depth + 2); + if (res != ResOK) + return res; + + return res; +} + +Res ArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth) +{ + return Method(Inst, arena, describe)(MustBeA(Inst, arena), stream, depth); +} + + +/* arenaDescribeTractsInChunk -- describe the tracts in a chunk */ + +static Res arenaDescribeTractsInChunk(Chunk chunk, mps_lib_FILE *stream, Count depth) +{ + Res res; + Index pi; + + if (!TESTT(Chunk, chunk)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, "Chunk [$P, $P) ($U) {\n", + (WriteFP)chunk->base, (WriteFP)chunk->limit, + (WriteFU)chunk->serial, + NULL); + if (res != ResOK) + return res; + + for (pi = chunk->allocBase; pi < chunk->pages; ++pi) { + if (BTGet(chunk->allocTable, pi)) { + Tract tract = PageTract(ChunkPage(chunk, pi)); + res = WriteF(stream, depth + 2, "[$P, $P)", + (WriteFP)TractBase(tract), + (WriteFP)TractLimit(tract, ChunkArena(chunk)), + NULL); + if (res != ResOK) + return res; + if (TractHasPool(tract)) { + Pool pool = TractPool(tract); + PoolClass poolClass = ClassOfPoly(Pool, pool); + res = WriteF(stream, 0, " $P $U ($S)", + (WriteFP)pool, + (WriteFU)(pool->serial), + 
(WriteFS)ClassName(poolClass), + NULL); + if (res != ResOK) + return res; + } + res = WriteF(stream, 0, "\n", NULL); + if (res != ResOK) + return res; + } + } + + res = WriteF(stream, depth, "} Chunk [$P, $P)\n", + (WriteFP)chunk->base, (WriteFP)chunk->limit, + NULL); + return res; +} + + +/* ArenaDescribeTracts -- describe all the tracts in the arena */ + +Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream, Count depth) +{ + Ring node, next; + Res res; + + if (!TESTT(Arena, arena)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + RING_FOR(node, ArenaChunkRing(arena), next) { + Chunk chunk = RING_ELT(Chunk, arenaRing, node); + res = arenaDescribeTractsInChunk(chunk, stream, depth); + if (res != ResOK) + return res; + } + + return ResOK; +} + + +/* ControlAlloc -- allocate a small block directly from the control pool + * + * .arena.control-pool: Actually the block will be allocated from the + * control pool, which is an MV pool embedded in the arena itself. + * + * .controlalloc.addr: In implementations where Addr is not compatible + * with void* , ControlAlloc must take care of + * allocating so that the block can be addressed with a void*. 
 */

Res ControlAlloc(void **baseReturn, Arena arena, size_t size)
{
  Addr base;
  Res res;

  AVERT(Arena, arena);
  AVER(baseReturn != NULL);
  AVER(size > 0);
  AVER(arena->poolReady); /* control pool was set up by ControlInit */

  /* Delegate to the control pool; any failure result is passed
     straight back to the caller. */
  res = PoolAlloc(&base, ArenaControlPool(arena), (Size)size);
  if (res != ResOK)
    return res;

  *baseReturn = (void *)base; /* see .controlalloc.addr */
  return ResOK;
}


/* ControlFree -- free a block allocated using ControlAlloc
 *
 * base and size must describe a block previously obtained from
 * ControlAlloc; PoolFree performs no checking of that here.
 */

void ControlFree(Arena arena, void* base, size_t size)
{
  Pool pool;

  AVERT(Arena, arena);
  AVER(base != NULL);
  AVER(size > 0);
  AVER(arena->poolReady); /* control pool was set up by ControlInit */

  pool = ArenaControlPool(arena);
  PoolFree(pool, (Addr)base, (Size)size);
}


/* ControlDescribe -- describe the arena's control pool
 *
 * Writes a description of the control pool to stream at the given
 * depth.  Uses TESTT rather than AVERT so that it is safe to call
 * from a debugger on a possibly-corrupt arena.
 */

Res ControlDescribe(Arena arena, mps_lib_FILE *stream, Count depth)
{
  Res res;

  if (!TESTT(Arena, arena))
    return ResFAIL;
  if (stream == NULL)
    return ResFAIL;

  res = PoolDescribe(ArenaControlPool(arena), stream, depth);

  return res;
}


/* ArenaChunkInsert -- insert chunk into arena's chunk tree and ring,
 * update the total reserved address space, and set the primary chunk
 * if not already set.
 */

void ArenaChunkInsert(Arena arena, Chunk chunk)
{
  Bool inserted;
  Tree tree, updatedTree = NULL;

  AVERT(Arena, arena);
  AVERT(Chunk, chunk);
  tree = &chunk->chunkTree;

  /* Insertion must succeed: the chunk cannot already be in the tree. */
  inserted = TreeInsert(&updatedTree, ArenaChunkTree(arena),
                        tree, ChunkKey(tree), ChunkCompare);
  AVER(inserted);
  AVER(updatedTree);
  /* Balance the updated tree before publishing it in the arena. */
  TreeBalance(&updatedTree);
  arena->chunkTree = updatedTree;
  RingAppend(ArenaChunkRing(arena), &chunk->arenaRing);

  arena->reserved += ChunkReserved(chunk);

  /* As part of the bootstrap, the first created chunk becomes the primary
     chunk. This step allows ArenaFreeLandInsert to allocate pages.
 */
  if (arena->primary == NULL)
    arena->primary = chunk;
}


/* ArenaChunkRemoved -- chunk was removed from the arena and is being
 * finished, so update the total reserved address space, and unset the
 * primary chunk if necessary.
 */

void ArenaChunkRemoved(Arena arena, Chunk chunk)
{
  Size size;

  AVERT(Arena, arena);
  AVERT(Chunk, chunk);

  size = ChunkReserved(chunk);
  AVER(arena->reserved >= size); /* accounting must balance with ArenaChunkInsert */
  arena->reserved -= size;

  if (chunk == arena->primary) {
    /* The primary chunk must be the last chunk to be removed. */
    AVER(RingIsSingle(ArenaChunkRing(arena)));
    AVER(arena->reserved == 0);
    arena->primary = NULL;
  }
}


/* arenaAllocPage -- allocate one page from the arena
 *
 * This is a primitive allocator used to allocate pages for the arena
 * Land. It is called rarely and can use a simple search. It may not
 * use the Land or any pool, because it is used as part of the
 * bootstrap. .
 */

static Res arenaAllocPageInChunk(Addr *baseReturn, Chunk chunk, Pool pool)
{
  Res res;
  Index basePageIndex, limitPageIndex;
  Arena arena;

  AVER(baseReturn != NULL);
  AVERT(Chunk, chunk);
  AVERT(Pool, pool);
  arena = ChunkArena(chunk);

  /* Search the chunk's allocation table for a single free page
     (the final argument 1 is the run length). */
  if (!BTFindShortResRange(&basePageIndex, &limitPageIndex,
                           chunk->allocTable,
                           chunk->allocBase, chunk->pages, 1))
    return ResRESOURCE;

  res = Method(Arena, arena, pagesMarkAllocated)(arena, chunk,
                                                 basePageIndex, 1,
                                                 pool);
  if (res != ResOK)
    return res;

  *baseReturn = PageIndexBase(chunk, basePageIndex);
  return ResOK;
}

static Res arenaAllocPage(Addr *baseReturn, Arena arena, Pool pool)
{
  Res res;

  AVER(baseReturn != NULL);
  AVERT(Arena, arena);
  AVERT(Pool, pool);

  /* Favour the primary chunk, because pages allocated this way aren't
     currently freed, and we don't want to prevent chunks being destroyed. */
  /* TODO: Consider how the ArenaCBSBlockPool might free pages.
 */
  res = arenaAllocPageInChunk(baseReturn, arena->primary, pool);
  if (res != ResOK) {
    /* Primary chunk is full; fall back to any other chunk. */
    Ring node, next;
    RING_FOR(node, ArenaChunkRing(arena), next) {
      Chunk chunk = RING_ELT(Chunk, arenaRing, node);
      if (chunk != arena->primary) {
        res = arenaAllocPageInChunk(baseReturn, chunk, pool);
        if (res == ResOK)
          break;
      }
    }
  }
  return res;
}


/* arenaFreePage -- free page allocated by arenaAllocPage */

static void arenaFreePage(Arena arena, Addr base, Pool pool)
{
  AVERT(Arena, arena);
  AVERT(Pool, pool);
  /* One page is exactly one arena grain; see arenaAllocPage. */
  Method(Arena, arena, free)(base, ArenaGrainSize(arena), pool);
}


/* arenaExtendCBSBlockPool -- add a page of memory to the CBS block pool
 *
 * IMPORTANT: Must be followed by arenaExcludePage to ensure that the
 * page doesn't get allocated by ArenaAlloc. See .insert.exclude.
 *
 * On success, *pageRangeReturn describes the page that was added.
 */

static Res arenaExtendCBSBlockPool(Range pageRangeReturn, Arena arena)
{
  Addr pageBase, pageLimit;
  Res res;

  res = arenaAllocPage(&pageBase, arena, ArenaCBSBlockPool(arena));
  if (res != ResOK)
    return res;
  pageLimit = AddrAdd(pageBase, ArenaGrainSize(arena));
  /* Donate the whole page to the MFS pool backing the CBS. */
  MFSExtend(ArenaCBSBlockPool(arena), pageBase, pageLimit);

  RangeInit(pageRangeReturn, pageBase, pageLimit);
  return ResOK;
}

/* arenaExcludePage -- exclude CBS block pool's page from free land
 *
 * Exclude the page we specially allocated for the CBS block pool
 * so that it doesn't get reallocated.
 */

static void arenaExcludePage(Arena arena, Range pageRange)
{
  RangeStruct oldRange;
  Res res;
  Land land = ArenaFreeLand(arena);

  res = LandDelete(&oldRange, land, pageRange);
  AVER(res == ResOK); /* we just gave memory to the Land */
}


/* arenaFreeLandInsertExtend -- add range to arena's free land, maybe
 * extending block pool
 *
 * The arena's free land can't get memory for its block pool in the
 * usual way (via ArenaAlloc), because it is the mechanism behind
 * ArenaAlloc!
 So we extend the block pool via a back door (see
 * arenaExtendCBSBlockPool). .
 *
 * Only fails if it can't get a page for the block pool.
 */

static Res arenaFreeLandInsertExtend(Range rangeReturn, Arena arena,
                                     Range range)
{
  Res res;
  Land land;

  AVER(rangeReturn != NULL);
  AVERT(Arena, arena);
  AVERT(Range, range);

  land = ArenaFreeLand(arena);
  res = LandInsert(rangeReturn, land, range);

  if (res == ResLIMIT) { /* CBS block pool ran out of blocks */
    RangeStruct pageRange;
    res = arenaExtendCBSBlockPool(&pageRange, arena);
    if (res != ResOK)
      return res;
    /* .insert.exclude: Must insert before exclude so that we can
       bootstrap when the zoned CBS is empty. */
    res = LandInsert(rangeReturn, land, range);
    AVER(res == ResOK); /* we just gave memory to the CBS block pool */
    arenaExcludePage(arena, &pageRange);
  }

  /* NOTE(review): any first-LandInsert result other than ResOK or
     ResLIMIT is discarded and ResOK returned -- presumably LandInsert
     can only fail with ResLIMIT here; confirm against the Land
     contract. */
  return ResOK;
}


/* ArenaFreeLandInsert -- add range to arena's free land, maybe extending
 * block pool
 *
 * The inserted block of address space may not abut any existing block.
 * This restriction ensures that we don't coalesce chunks and allocate
 * object across the boundary, preventing chunk deletion.
 */

Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit)
{
  RangeStruct range, oldRange;
  Res res;

  AVERT(Arena, arena);
  AVER(base < limit);

  RangeInit(&range, base, limit);
  res = arenaFreeLandInsertExtend(&oldRange, arena, &range);
  if (res != ResOK)
    return res;

  /* .chunk.no-coalesce: Make sure it didn't coalesce. We don't want
     chunks to coalesce so that there are no chunk-crossing
     allocations that would prevent chunks being destroyed. See
     for the mechanism that ensures that
     chunks never coalesce.
 */
  AVER(RangesEqual(&oldRange, &range));

  return ResOK;
}


/* ArenaFreeLandDelete -- remove range from arena's free land if
 * possible without extending the block pool
 *
 * Returns whatever LandDelete returns; the caller must cope with
 * failure.
 *
 * NOTE(review): unlike the sibling functions above, this performs no
 * AVERT/AVER checks on its arguments -- confirm whether that is
 * deliberate (e.g. callers have already checked) or an omission.
 */

Res ArenaFreeLandDelete(Arena arena, Addr base, Addr limit)
{
  RangeStruct range, oldRange;
  Res res;
  Land land;

  RangeInit(&range, base, limit);
  land = ArenaFreeLand(arena);
  res = LandDelete(&oldRange, land, &range);

  return res;
}


/* ArenaFreeLandAlloc -- allocate a contiguous range of tracts of
 * size bytes from the arena's free land.
 *
 * size, zones, and high are as for LandFindInZones.
 *
 * If successful, mark the allocated tracts as belonging to pool, set
 * *tractReturn to point to the first tract in the range, and return
 * ResOK.
 */

Res ArenaFreeLandAlloc(Tract *tractReturn, Arena arena, ZoneSet zones,
                       Bool high, Size size, Pool pool)
{
  RangeStruct range, oldRange;
  Chunk chunk = NULL; /* suppress uninit warning */
  Bool found, b;
  Index baseIndex;
  Count pages;
  Res res;
  Land land;

  AVER(tractReturn != NULL);
  AVERT(Arena, arena);
  /* ZoneSet is arbitrary */
  AVER(size > (Size)0);
  AVERT(Pool, pool);
  AVER(arena == PoolArena(pool));
  AVER(SizeIsArenaGrains(size, arena));

  /* Without zone support, ignore the zone constraint entirely. */
  if (!arena->zoned)
    zones = ZoneSetUNIV;

  /* Step 1. Find a range of address space.
 */

  land = ArenaFreeLand(arena);
  res = LandFindInZones(&found, &range, &oldRange, land, size, zones, high);

  if (res == ResLIMIT) { /* found block, but couldn't store info */
    /* Give the CBS block pool a page and retry; the retry must not
       hit the same limit again. */
    RangeStruct pageRange;
    res = arenaExtendCBSBlockPool(&pageRange, arena);
    if (res != ResOK) /* disastrously short on memory */
      return res;
    arenaExcludePage(arena, &pageRange);
    res = LandFindInZones(&found, &range, &oldRange, land, size, zones, high);
    AVER(res != ResLIMIT);
  }

  AVER(res == ResOK); /* unexpected error from ZoneCBS */
  if (res != ResOK) /* defensive return */
    return res;

  if (!found) /* out of address space */
    return ResRESOURCE;

  /* Step 2. Make memory available in the address space range. */

  b = ChunkOfAddr(&chunk, arena, RangeBase(&range));
  AVER(b); /* the range came from the free land, so it must be in a chunk */
  AVER(RangeIsAligned(&range, ChunkPageSize(chunk)));
  baseIndex = INDEX_OF_ADDR(chunk, RangeBase(&range));
  pages = ChunkSizeToPages(chunk, RangeSize(&range));

  res = Method(Arena, arena, pagesMarkAllocated)(arena, chunk, baseIndex, pages, pool);
  if (res != ResOK)
    goto failMark;

  /* The allocated range's zones are no longer wholly free. */
  arena->freeZones = ZoneSetDiff(arena->freeZones,
                                 ZoneSetOfRange(arena,
                                                RangeBase(&range),
                                                RangeLimit(&range)));

  *tractReturn = PageTract(ChunkPage(chunk, baseIndex));
  return ResOK;

failMark:
  {
    /* Undo the LandFindInZones deletion so the range stays free. */
    Res insertRes = arenaFreeLandInsertExtend(&oldRange, arena, &range);
    AVER(insertRes == ResOK); /* We only just deleted it. */
    /* If the insert does fail, we lose some address space permanently.
 */
  }
  return res;
}


/* ArenaAlloc -- allocate some tracts from the arena
 *
 * pref expresses the caller's locus preference; size must be a
 * multiple of the arena grain size.  On success sets *baseReturn to
 * the base of the allocated tracts.
 */

Res ArenaAlloc(Addr *baseReturn, LocusPref pref, Size size, Pool pool)
{
  Res res;
  Arena arena;
  Addr base;
  Tract tract;

  AVER(baseReturn != NULL);
  AVERT(LocusPref, pref);
  AVER(size > (Size)0);
  AVERT(Pool, pool);

  arena = PoolArena(pool);
  AVERT(Arena, arena);
  AVER(SizeIsArenaGrains(size, arena));

  /* Placement policy decides where the tracts come from. */
  res = PolicyAlloc(&tract, arena, pref, size, pool);
  if (res != ResOK)
    goto allocFail;

  base = TractBase(tract);

  /* cache the tract - */
  arena->lastTract = tract;
  arena->lastTractBase = base;

  EVENT5(ArenaAlloc, arena, tract, base, size, pool);

  *baseReturn = base;
  return ResOK;

allocFail:
  EVENT4(ArenaAllocFail, arena, size, pool, (unsigned)res);
  return res;
}


/* ArenaFree -- free some tracts to the arena
 *
 * base and size must describe tracts previously allocated to pool by
 * ArenaAlloc (grain-aligned, grain-multiple size).
 */

void ArenaFree(Addr base, Size size, Pool pool)
{
  Arena arena;
  RangeStruct range, oldRange;
  Res res;

  AVERT(Pool, pool);
  AVER(base != NULL);
  AVER(size > (Size)0);
  arena = PoolArena(pool);
  AVERT(Arena, arena);
  AVER(AddrIsArenaGrain(base, arena));
  AVER(SizeIsArenaGrains(size, arena));

  RangeInitSize(&range, base, size);

  /* uncache the tract if in range - */
  if (base <= arena->lastTractBase && arena->lastTractBase < RangeLimit(&range))
  {
    arena->lastTract = NULL;
    arena->lastTractBase = (Addr)0;
  }

  /* Return the range to the free land; if even the back-door page
     allocation fails, fall back to stealing blocks from the range
     being freed. */
  res = arenaFreeLandInsertExtend(&oldRange, arena, &range);
  if (res != ResOK) {
    Land land = ArenaFreeLand(arena);
    res = LandInsertSteal(&oldRange, land, &range); /* may update range */
    AVER(res == ResOK);
    if (RangeIsEmpty(&range))
      goto done; /* the whole range was stolen; nothing left to free */
  }
  Method(Arena, arena, free)(RangeBase(&range), RangeSize(&range), pool);

done:
  /* Freeing memory might create spare pages, but not more than this.
 */
  AVER(arena->spareCommitted <= ArenaSpareCommitLimit(arena));

  EVENT4(ArenaFree, arena, base, size, pool);
}


/* Simple accessors for the arena's accounting fields. */

Size ArenaReserved(Arena arena)
{
  AVERT(Arena, arena);
  return arena->reserved;
}

Size ArenaCommitted(Arena arena)
{
  AVERT(Arena, arena);
  return arena->committed;
}

Size ArenaSpareCommitted(Arena arena)
{
  AVERT(Arena, arena);
  return arena->spareCommitted;
}

double ArenaSpare(Arena arena)
{
  AVERT(Arena, arena);
  return arena->spare;
}

/* ArenaSetSpare -- set the spare commit fraction (0.0 to 1.0) and
 * immediately purge any spare committed memory over the new limit. */

void ArenaSetSpare(Arena arena, double spare)
{
  Size spareMax;

  AVERT(Arena, arena);
  AVER(0.0 <= spare);
  AVER(spare <= 1.0);

  arena->spare = spare;
  EVENT2(ArenaSetSpare, arena, spare);

  spareMax = ArenaSpareCommitLimit(arena);
  if (arena->spareCommitted > spareMax) {
    Size excess = arena->spareCommitted - spareMax;
    /* Best effort: the amount actually purged is ignored. */
    (void)Method(Arena, arena, purgeSpare)(arena, excess);
  }
}

double ArenaPauseTime(Arena arena)
{
  AVERT(Arena, arena);
  return arena->pauseTime;
}

void ArenaSetPauseTime(Arena arena, double pauseTime)
{
  AVERT(Arena, arena);
  AVER(0.0 <= pauseTime);
  arena->pauseTime = pauseTime;
  EVENT2(PauseTimeSet, arena, pauseTime);
}

/* Used by arenas which don't use spare committed memory */
Size ArenaNoPurgeSpare(Arena arena, Size size)
{
  AVERT(Arena, arena);
  UNUSED(size);
  return 0;
}


/* ArenaNoGrow -- grow method for arenas that cannot grow */

Res ArenaNoGrow(Arena arena, LocusPref pref, Size size)
{
  AVERT(Arena, arena);
  AVERT(LocusPref, pref);
  UNUSED(size);
  return ResRESOURCE;
}


Size ArenaCommitLimit(Arena arena)
{
  AVERT(Arena, arena);
  return arena->commitLimit;
}

/* ArenaSetCommitLimit -- set the commit limit, purging spare pages if
 * that is enough to get under the new limit; fails with ResFAIL if
 * the limit would fall below non-spare committed memory. */

Res ArenaSetCommitLimit(Arena arena, Size limit)
{
  Size committed;
  Res res;

  AVERT(Arena, arena);
  AVER(ArenaCommitted(arena) <= arena->commitLimit);

  committed = ArenaCommitted(arena);
  if (limit < committed) {
    /* Attempt to set the limit below current committed */
    if (limit >= committed - arena->spareCommitted) {
      Size excess = committed - limit;
      (void)Method(Arena, arena,
purgeSpare)(arena, excess); + AVER(limit >= ArenaCommitted(arena)); + arena->commitLimit = limit; + res = ResOK; + } else { + res = ResFAIL; + } + } else { + arena->commitLimit = limit; + res = ResOK; + } + EVENT3(CommitLimitSet, arena, limit, (unsigned)res); + return res; +} + + +/* ArenaAvail -- return available memory in the arena */ + +Size ArenaAvail(Arena arena) +{ + Size sSwap; + + sSwap = ArenaReserved(arena); + if (sSwap > arena->commitLimit) + sSwap = arena->commitLimit; + + /* TODO: sSwap should take into account the amount of backing store + available to supply the arena with memory. This would be the amount + available in the paging file, which is possibly the amount of free + disk space in some circumstances. We'd have to see whether we can get + this information from the operating system. It also depends on the + arena class, of course. */ + + AVER(sSwap >= arena->committed); + return sSwap - arena->committed + arena->spareCommitted; +} + + +/* ArenaCollectable -- return estimate of collectable memory in arena */ + +Size ArenaCollectable(Arena arena) +{ + /* Conservative estimate -- see job003929. 
*/ + Size committed = ArenaCommitted(arena); + Size spareCommitted = ArenaSpareCommitted(arena); + AVER(committed >= spareCommitted); + return committed - spareCommitted; +} + + +/* ArenaAccumulateTime -- accumulate time spent tracing */ + +void ArenaAccumulateTime(Arena arena, Clock start, Clock end) +{ + AVERT(Arena, arena); + AVER(start <= end); + arena->tracedTime += (double)(end - start) / (double)ClocksPerSec(); +} + + +/* ArenaExtend -- Add a new chunk in the arena */ + +Res ArenaExtend(Arena arena, Addr base, Size size) +{ + Res res; + + AVERT(Arena, arena); + AVER(base != (Addr)0); + AVER(size > 0); + + res = Method(Arena, arena, extend)(arena, base, size); + if (res != ResOK) + return res; + + EVENT3(ArenaExtend, arena, base, size); + return ResOK; +} + + +/* ArenaNoExtend -- fail to extend the arena by a chunk */ + +Res ArenaNoExtend(Arena arena, Addr base, Size size) +{ + AVERT(Arena, arena); + AVER(base != (Addr)0); + AVER(size > (Size)0); + + NOTREACHED; + return ResUNIMPL; +} + + +/* ArenaCompact -- respond (or not) to trace reclaim */ + +void ArenaCompact(Arena arena, Trace trace) +{ + AVERT(Arena, arena); + AVERT(Trace, trace); + Method(Arena, arena, compact)(arena, trace); +} + +static void ArenaTrivCompact(Arena arena, Trace trace) +{ + UNUSED(arena); + UNUSED(trace); +} + + +/* Has Addr */ + +Bool ArenaHasAddr(Arena arena, Addr addr) +{ + Tract tract; + + AVERT(Arena, arena); + return TractOfAddr(&tract, arena, addr); +} + +/* ArenaAddrObject -- return base pointer of managed object */ +Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr) +{ + Tract tract; + + AVER(pReturn != NULL); + AVERT(Arena, arena); + + if (!TractOfAddr(&tract, arena, addr)) { + /* address does not belong to the arena */ + return ResFAIL; + } + + return PoolAddrObject(pReturn, TractPool(tract), addr); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c new file mode 100644 index 00000000000..09915eab903 --- /dev/null +++ b/mps/code/arenacl.c @@ -0,0 +1,502 @@ +/* arenacl.c: ARENA CLASS USING CLIENT MEMORY + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .design: . + * + * .improve.remember: One possible performance improvement is to + * remember (a conservative approximation to) the indices of the first + * and last free pages in each chunk, and start searching from these + * in ChunkAlloc. See request.epcore.170534_. + * + * .. 
_request.epcore.170534: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/epcore/170534 + */ + +#include "boot.h" +#include "tract.h" +#include "bt.h" +#include "mpm.h" +#include "mpsacl.h" + +SRCID(arenacl, "$Id$"); + +DECLARE_CLASS(Arena, ClientArena, AbstractArena); + + +/* ClientArenaStruct -- Client Arena Structure */ + +#define ClientArenaSig ((Sig)0x519A6EC7) /* SIGnature AREna CLient */ + +typedef struct ClientArenaStruct { + ArenaStruct arenaStruct; /* generic arena structure */ + Sig sig; /* design.mps.sig.field.end.outer */ +} ClientArenaStruct; +typedef struct ClientArenaStruct *ClientArena; + + +/* CLChunk -- chunk structure */ + +typedef struct ClientChunkStruct *ClientChunk; + +#define ClientChunkSig ((Sig)0x519A6C2C) /* SIGnature ARena CLient Chunk */ + +typedef struct ClientChunkStruct { + ChunkStruct chunkStruct; /* generic chunk */ + Size freePages; /* number of free pages in chunk */ + Addr pageBase; /* base of first managed page in chunk */ + Sig sig; /* design.mps.sig.field.end.outer */ +} ClientChunkStruct; + +#define ClientChunk2Chunk(clchunk) (&(clchunk)->chunkStruct) +#define Chunk2ClientChunk(chunk) PARENT(ClientChunkStruct, chunkStruct, chunk) + + +/* ClientChunkClientArena -- get the client arena from a client chunk */ + +#define ClientChunkClientArena(clchunk) \ + Arena2ClientArena(ChunkArena(ClientChunk2Chunk(clchunk))) + + +/* ClientChunkCheck -- check the consistency of a client chunk */ + +ATTRIBUTE_UNUSED +static Bool ClientChunkCheck(ClientChunk clChunk) +{ + Chunk chunk; + + CHECKS(ClientChunk, clChunk); + chunk = ClientChunk2Chunk(clChunk); + CHECKD(Chunk, chunk); + CHECKL(clChunk->freePages <= chunk->pages); + /* check they don't overlap (knowing the order) */ + CHECKL((Addr)(chunk + 1) < (Addr)chunk->allocTable); + return TRUE; +} + + +/* ClientArenaCheck -- check the consistency of a client arena */ + +ATTRIBUTE_UNUSED +static Bool ClientArenaCheck(ClientArena clientArena) +{ + Arena arena = 
MustBeA(AbstractArena, clientArena); + + /* See */ + CHECKL(arena->committed <= arena->reserved); + CHECKL(arena->spareCommitted == 0); + + return TRUE; +} + + +/* clientChunkCreate -- create a ClientChunk */ + +static Res clientChunkCreate(Chunk *chunkReturn, ClientArena clientArena, + Addr base, Addr limit) +{ + Arena arena = MustBeA(AbstractArena, clientArena); + ClientChunk clChunk; + Chunk chunk; + Addr alignedBase; + BootBlockStruct bootStruct; + BootBlock boot = &bootStruct; + Res res; + void *p; + + AVER(chunkReturn != NULL); + AVER(base != (Addr)0); + AVER(limit != (Addr)0); + AVER(limit > base); + + /* Initialize boot block. */ + /* Chunk has to be page-aligned, and the boot allocs must be within it. */ + alignedBase = AddrAlignUp(base, ArenaGrainSize(arena)); + AVER(alignedBase < limit); + res = BootBlockInit(boot, (void *)alignedBase, (void *)limit); + if (res != ResOK) + goto failBootInit; + + /* Allocate the chunk. */ + /* TODO: Add reference to design. */ + res = BootAlloc(&p, boot, sizeof(ClientChunkStruct), MPS_PF_ALIGN); + if (res != ResOK) + goto failChunkAlloc; + clChunk = p; + chunk = ClientChunk2Chunk(clChunk); + + res = ChunkInit(chunk, arena, alignedBase, + AddrAlignDown(limit, ArenaGrainSize(arena)), + AddrOffset(base, limit), boot); + if (res != ResOK) + goto failChunkInit; + + arena->committed += ChunkPagesToSize(chunk, chunk->allocBase); + + BootBlockFinish(boot); + + clChunk->sig = ClientChunkSig; + AVERT(ClientChunk, clChunk); + *chunkReturn = chunk; + return ResOK; + +failChunkInit: +failChunkAlloc: +failBootInit: + return res; +} + + +/* ClientChunkInit -- initialize a ClientChunk */ + +static Res ClientChunkInit(Chunk chunk, BootBlock boot) +{ + Res res; + ClientChunk clChunk; + void *p; + + /* chunk is supposed to be uninitialized, so don't check it. */ + clChunk = Chunk2ClientChunk(chunk); + AVERT(BootBlock, boot); + + /* TODO: An old comment claimed this is too large. + Does it fail to exclude the page table or something? 
*/ + clChunk->freePages = chunk->pages; + + /* Put the page table as late as possible, as in VM systems we don't want */ + /* to map it. */ + res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift, chunk->pageSize); + if (res != ResOK) + return res; + chunk->pageTable = p; + + return ResOK; +} + + +/* clientChunkDestroy -- destroy a ClientChunk */ + +static Bool clientChunkDestroy(Tree tree, void *closure) +{ + Arena arena; + Chunk chunk; + ClientChunk clChunk; + Size size; + + AVERT(Tree, tree); + AVER(closure == UNUSED_POINTER); + UNUSED(closure); + + chunk = ChunkOfTree(tree); + AVERT(Chunk, chunk); + arena = ChunkArena(chunk); + AVERT(Arena, arena); + clChunk = Chunk2ClientChunk(chunk); + AVERT(ClientChunk, clChunk); + AVER(chunk->pages == clChunk->freePages); + + size = ChunkPagesToSize(chunk, chunk->allocBase); + AVER(arena->committed >= size); + arena->committed -= size; + + clChunk->sig = SigInvalid; + ChunkFinish(chunk); + + return TRUE; +} + + +/* ClientChunkFinish -- finish a ClientChunk */ + +static void ClientChunkFinish(Chunk chunk) +{ + /* Can't check chunk as it's not valid anymore. */ + UNUSED(chunk); +} + + +/* ClientArenaVarargs -- parse obsolete varargs */ + +static void ClientArenaVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_ARENA_SIZE; + args[0].val.size = va_arg(varargs, Size); + args[1].key = MPS_KEY_ARENA_CL_BASE; + args[1].val.addr = va_arg(varargs, Addr); + args[2].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + +/* ClientArenaInit -- create and initialize the client arena + * + * .init.memory: Creates the arena structure in the chuck given, and + * makes the first chunk from the memory left over. + * .arena.init: Once the arena has been allocated, we call ArenaInit + * to do the generic part of init. 
+ */ + +ARG_DEFINE_KEY(ARENA_CL_BASE, Addr); + +static Res ClientArenaCreate(Arena *arenaReturn, ArgList args) +{ + Arena arena; + ClientArena clientArena; + Size size; + Size clArenaSize; /* aligned size of ClientArenaStruct */ + Addr base, limit, chunkBase; + Align grainSize = 1; + Res res; + Chunk chunk; + mps_arg_s arg; + + AVER(arenaReturn != NULL); + AVERT(ArgList, args); + + ArgRequire(&arg, args, MPS_KEY_ARENA_SIZE); + size = arg.val.size; + ArgRequire(&arg, args, MPS_KEY_ARENA_CL_BASE); + base = arg.val.addr; + if (ArgPick(&arg, args, MPS_KEY_ARENA_GRAIN_SIZE)) + grainSize = arg.val.size; + grainSize = SizeAlignUp(grainSize, ARENA_CLIENT_GRAIN_SIZE); + grainSize = SizeAlignUp(grainSize, ProtGranularity()); + + AVER(base != (Addr)0); + AVERT(ArenaGrainSize, grainSize); + + if (size < grainSize * MPS_WORD_WIDTH) + /* Not enough room for a full complement of zones. */ + return ResMEMORY; + + clArenaSize = SizeAlignUp(sizeof(ClientArenaStruct), MPS_PF_ALIGN); + if (size < clArenaSize) + return ResMEMORY; + + limit = AddrAdd(base, size); + + /* allocate the arena */ + base = AddrAlignUp(base, MPS_PF_ALIGN); + clientArena = (ClientArena)base; + chunkBase = AddrAlignUp(AddrAdd(base, clArenaSize), MPS_PF_ALIGN); + if (chunkBase > limit) + return ResMEMORY; + + arena = CouldBeA(AbstractArena, clientArena); + + res = NextMethod(Arena, ClientArena, init)(arena, grainSize, args); + if (res != ResOK) + goto failSuperInit; + SetClassOfPoly(arena, CLASS(ClientArena)); + AVER(clientArena == MustBeA(ClientArena, arena)); + + /* have to have a valid arena before calling ChunkCreate */ + clientArena->sig = ClientArenaSig; + + res = clientChunkCreate(&chunk, clientArena, chunkBase, limit); + if (res != ResOK) + goto failChunkCreate; + arena->primary = chunk; + + /* Set the zone shift to divide the initial chunk into the same */ + /* number of zones as will fit into a reference set (the number of */ + /* bits in a word). 
Note that some zones are discontiguous in the */ + /* arena if the size is not a power of 2. */ + arena->zoneShift = SizeFloorLog2(size >> MPS_WORD_SHIFT); + AVER(ArenaGrainSize(arena) == ChunkPageSize(arena->primary)); + + EVENT7(ArenaCreateCL, arena, size, base, grainSize, + ClassOfPoly(Arena, arena), ArenaGlobals(arena)->systemPools, + arena->serial); + AVERT(ClientArena, clientArena); + *arenaReturn = arena; + return ResOK; + +failChunkCreate: + NextMethod(Inst, ClientArena, finish)(MustBeA(Inst, arena)); +failSuperInit: + AVER(res != ResOK); + return res; +} + + +/* ClientArenaDestroy -- destroy the arena */ + +static void ClientArenaDestroy(Arena arena) +{ + ClientArena clientArena = MustBeA(ClientArena, arena); + + /* Destroy all chunks, including the primary. See + * */ + arena->primary = NULL; + TreeTraverseAndDelete(&arena->chunkTree, clientChunkDestroy, + UNUSED_POINTER); + + clientArena->sig = SigInvalid; + + /* Destroying the chunks should leave nothing behind. */ + AVER(arena->reserved == 0); + AVER(arena->committed == 0); + + NextMethod(Inst, ClientArena, finish)(MustBeA(Inst, arena)); +} + + +/* ClientArenaExtend -- extend the arena */ + +static Res ClientArenaExtend(Arena arena, Addr base, Size size) +{ + ClientArena clientArena = MustBeA(ClientArena, arena); + Chunk chunk; + + AVER(base != (Addr)0); + AVER(size > 0); + + return clientChunkCreate(&chunk, clientArena, base, AddrAdd(base, size)); +} + + +/* ClientArenaPagesMarkAllocated -- Mark the pages allocated */ + +static Res ClientArenaPagesMarkAllocated(Arena arena, Chunk chunk, + Index baseIndex, Count pages, + Pool pool) +{ + Index i; + ClientChunk clChunk; + + AVERT(Arena, arena); + AVERT(Chunk, chunk); + clChunk = Chunk2ClientChunk(chunk); + AVERT(ClientChunk, clChunk); + AVER(chunk->allocBase <= baseIndex); + AVER(pages > 0); + AVER(baseIndex + pages <= chunk->pages); + AVERT(Pool, pool); + + for (i = 0; i < pages; ++i) + PageAlloc(chunk, baseIndex + i, pool); + + arena->committed += 
ChunkPagesToSize(chunk, pages); + AVER(clChunk->freePages >= pages); + clChunk->freePages -= pages; + + return ResOK; +} + + +/* ClientChunkPageMapped -- determine if a page is mapped */ + +static Bool ClientChunkPageMapped(Chunk chunk, Index index) +{ + UNUSED(chunk); + UNUSED(index); + + AVERT(Chunk, chunk); + AVER(index < chunk->pages); + + return TRUE; +} + + +/* ClientArenaFree - free a region in the arena */ + +static void ClientArenaFree(Addr base, Size size, Pool pool) +{ + Arena arena; + Chunk chunk = NULL; /* suppress "may be used uninitialized" */ + Size pages; + Index pi, baseIndex, limitIndex; + Bool foundChunk; + ClientChunk clChunk; + + AVER(base != NULL); + AVER(size > (Size)0); + AVERT(Pool, pool); + arena = PoolArena(pool); + AVERC(ClientArena, arena); + AVER(SizeIsAligned(size, ChunkPageSize(arena->primary))); + AVER(AddrIsAligned(base, ChunkPageSize(arena->primary))); + + foundChunk = ChunkOfAddr(&chunk, arena, base); + AVER(foundChunk); + clChunk = Chunk2ClientChunk(chunk); + AVERT(ClientChunk, clChunk); + + pages = ChunkSizeToPages(chunk, size); + baseIndex = INDEX_OF_ADDR(chunk, base); + limitIndex = baseIndex + pages; + AVER(baseIndex < limitIndex); + AVER(limitIndex <= chunk->pages); + + for(pi = baseIndex; pi < limitIndex; pi++) { + Tract tract = PageTract(ChunkPage(chunk, pi)); + + AVER(TractPool(tract) == pool); + TractFinish(tract); + } + + AVER(BTIsSetRange(chunk->allocTable, baseIndex, limitIndex)); + BTResRange(chunk->allocTable, baseIndex, limitIndex); + + AVER(arena->committed >= size); + arena->committed -= size; + clChunk->freePages += pages; +} + + +/* ClientArenaClass -- The Client arena class definition */ + +DEFINE_CLASS(Arena, ClientArena, klass) +{ + INHERIT_CLASS(klass, ClientArena, AbstractArena); + klass->size = sizeof(ClientArenaStruct); + klass->varargs = ClientArenaVarargs; + klass->create = ClientArenaCreate; + klass->destroy = ClientArenaDestroy; + klass->extend = ClientArenaExtend; + klass->pagesMarkAllocated = 
ClientArenaPagesMarkAllocated; + klass->free = ClientArenaFree; + klass->chunkInit = ClientChunkInit; + klass->chunkFinish = ClientChunkFinish; + klass->chunkPageMapped = ClientChunkPageMapped; + AVERT(ArenaClass, klass); +} + + +/* mps_arena_class_cl -- return the arena class CL */ + +mps_arena_class_t mps_arena_class_cl(void) +{ + return (mps_arena_class_t)CLASS(ClientArena); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/arenacv.c b/mps/code/arenacv.c new file mode 100644 index 00000000000..1ff7b7822d9 --- /dev/null +++ b/mps/code/arenacv.c @@ -0,0 +1,517 @@ +/* arenacv.c: ARENA COVERAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .coverage: At the moment, we're only trying to cover the new code + * (partial mapping of the page table and vm overflow). + * + * .note.tract-size: If the page size is divisible by sizeof(TractStruct), many + * test cases end up being essentially identical -- there just aren't that + * many different cases then. + * + * .improve.gap-below: Could test different-sized gaps below the tract + * being allocated; this requires using two adjacent zones. + */ + +#include "mpm.h" +#include "poolmvff.h" +#include "testlib.h" +#include "mpslib.h" +#include "mpsavm.h" +#include "mpsacl.h" + +#include /* printf */ +#include /* malloc */ + + +#define tractsSIZE 500 + + +/* testAllocAndIterate -- Test arena allocation and iteration + * + * .tract-seg: Test allocation and iteration, using both low-level + * tracts and higher-level segments. To do this, contrive a set of + * allocation and iteration functions which are interchangeable. 
+ */ + +/* Type definitions for the interchangability interface */ + + +/* AllocInfo -- interchangeable info about allocated regions */ + +typedef struct AllocInfoStruct *AllocInfo; + +typedef struct AllocInfoStruct { + union { + struct { + Addr base; + Size size; + Pool pool; + } tractData; + struct { + Seg seg; + } segData; + } the; +} AllocInfoStruct; + +typedef Res (*AllocFun)(AllocInfoStruct *aiReturn, LocusPref pref, + Size size, Pool pool); + +typedef void (*FreeFun)(AllocInfo ai); + +typedef Bool (*FirstFun)(AllocInfoStruct *aiReturn, Arena arena); + +typedef Bool (*NextFun)(AllocInfoStruct *nextReturn, AllocInfo ai, + Arena arena); + +typedef Count (*UnitsFun)(Count pages); + +typedef void (*TestFun)(AllocInfo ai, Arena arena); + +typedef void (*CopyFun)(AllocInfoStruct *toReturn, AllocInfo from); + + +/* AllocatorClass -- encapsulates an allocation mechanism */ + +typedef struct AllocatorClassStruct *AllocatorClass; + +typedef struct AllocatorClassStruct { + AllocFun alloc; /* allocation method */ + FreeFun free; /* deallocation method */ + FirstFun first; /* find first block for iteration */ + NextFun next; /* find next block for iteration */ + UnitsFun units; /* number of iteration objects for pages */ + TestFun test; /* consistency check a region */ + CopyFun copy; /* copy an AllocationInfo object */ +} AllocatorClassStruct; + + +/* tractSearchInChunk -- find a tract in a chunk + * + * .tract-search: Searches for a tract in the chunk starting at page + * index i, return FALSE if there is none. 
+ */ + +static Bool tractSearchInChunk(Tract *tractReturn, Chunk chunk, Index i) +{ + AVER_CRITICAL(chunk->allocBase <= i); + AVER_CRITICAL(i <= chunk->pages); + + while (i < chunk->pages + && !(BTGet(chunk->allocTable, i) + && PageIsAllocated(ChunkPage(chunk, i)))) { + ++i; + } + if (i == chunk->pages) + return FALSE; + AVER(i < chunk->pages); + *tractReturn = PageTract(ChunkPage(chunk, i)); + return TRUE; +} + + +/* tractSearch -- find next tract above address + * + * Searches for the next tract in increasing address order. + * The tract returned is the next one along from addr (i.e., + * it has a base address bigger than addr and no other tract + * with a base address bigger than addr has a smaller base address). + * + * Returns FALSE if there is no tract to find (end of the arena). + */ + +static Bool tractSearch(Tract *tractReturn, Arena arena, Addr addr) +{ + Bool b; + Chunk chunk; + Tree tree; + + b = ChunkOfAddr(&chunk, arena, addr); + if (b) { + Index i; + + i = INDEX_OF_ADDR(chunk, addr); + /* There are fewer pages than addresses, therefore the */ + /* page index can never wrap around */ + AVER_CRITICAL(i+1 != 0); + + if (tractSearchInChunk(tractReturn, chunk, i+1)) { + return TRUE; + } + } + while (TreeFindNext(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr), + ChunkCompare)) + { + chunk = ChunkOfTree(tree); + addr = chunk->base; + /* Start from allocBase to skip the tables. 
*/ + if (tractSearchInChunk(tractReturn, chunk, chunk->allocBase)) { + return TRUE; + } + } + return FALSE; +} + + +/* Implementation of the tract-based interchangability interface */ + +static Res allocAsTract(AllocInfoStruct *aiReturn, LocusPref pref, + Size size, Pool pool) +{ + Res res; + Addr base; + res = ArenaAlloc(&base, pref, size, pool); + if (res == ResOK) { + aiReturn->the.tractData.base = base; + aiReturn->the.tractData.size = size; + aiReturn->the.tractData.pool = pool; + } + return res; +} + +static void freeAsTract(AllocInfo ai) +{ + ArenaFree(ai->the.tractData.base, + ai->the.tractData.size, + ai->the.tractData.pool); +} + +static Bool firstAsTract(AllocInfoStruct *aiReturn, Arena arena) +{ + Bool res; + Tract tract; + res = tractSearch(&tract, arena, 0); + if (res) { + aiReturn->the.tractData.base = TractBase(tract); + aiReturn->the.tractData.size = ArenaGrainSize(arena);; + aiReturn->the.tractData.pool = TractPool(tract); + } + return res; +} + +static Bool nextAsTract(AllocInfoStruct *nextReturn, AllocInfo ai, + Arena arena) +{ + Bool res; + Tract tract; + res = tractSearch(&tract, arena, ai->the.tractData.base); + if (res) { + nextReturn->the.tractData.base = TractBase(tract); + nextReturn->the.tractData.size = ArenaGrainSize(arena);; + nextReturn->the.tractData.pool = TractPool(tract); + } + return res; +} + +static Count unitsAsTract(Count pages) +{ + return pages; /* one tract for each page */ +} + + +static void testAsTract(AllocInfo ai, Arena arena) +{ + /* Test TractOfAddr */ + Tract tract; + Addr base; + Bool found; + + found = TractOfAddr(&tract, arena, ai->the.tractData.base); + cdie(found, "TractOfAddr"); + base = TractBase(tract); + cdie(base == ai->the.tractData.base, "base"); + +} + +static void copyAsTract(AllocInfoStruct *toReturn, AllocInfo from) +{ + toReturn->the.tractData.base = from->the.tractData.base; + toReturn->the.tractData.size = from->the.tractData.size; + toReturn->the.tractData.pool = from->the.tractData.pool; +} + 
+static AllocatorClassStruct allocatorTractStruct = { + allocAsTract, + freeAsTract, + firstAsTract, + nextAsTract, + unitsAsTract, + testAsTract, + copyAsTract +}; + + +/* Implementation of the segment-based interchangability interface */ + +static Res allocAsSeg(AllocInfoStruct *aiReturn, LocusPref pref, + Size size, Pool pool) +{ + Res res; + Seg seg; + res = SegAlloc(&seg, CLASS(Seg), pref, size, pool, argsNone); + if (res == ResOK) { + aiReturn->the.segData.seg = seg; + } + return res; +} + +static void freeAsSeg(AllocInfo ai) +{ + SegFree(ai->the.segData.seg); +} + +static Bool firstAsSeg(AllocInfoStruct *aiReturn, Arena arena) +{ + Bool res; + Seg seg; + res = SegFirst(&seg, arena); + if (res) { + aiReturn->the.segData.seg = seg; + } + return res; +} + +static Bool nextAsSeg(AllocInfoStruct *nextReturn, AllocInfo ai, + Arena arena) +{ + Bool res; + Seg seg; + res = SegNext(&seg, arena, ai->the.segData.seg); + if (res) { + nextReturn->the.segData.seg = seg; + } + return res; +} + +static Count unitsAsSeg(Count pages) +{ + if (0 == pages) + return 0; /* can't have a zero length seg */ + else + return 1; /* one seg no matter how many pages */ +} + +static void testAsSeg(AllocInfo ai, Arena arena) +{ + /* Test size functions */ + Seg seg = ai->the.segData.seg; + Addr base, limit; + Size size; + + UNUSED(arena); + base = SegBase(seg); + limit = SegLimit(seg); + size = SegSize(seg); + cdie(size == AddrOffset(base, limit), "size"); +} + +static void copyAsSeg(AllocInfoStruct *toReturn, AllocInfo from) +{ + toReturn->the.segData.seg = from->the.segData.seg; +} + +static AllocatorClassStruct allocatorSegStruct = { + allocAsSeg, + freeAsSeg, + firstAsSeg, + nextAsSeg, + unitsAsSeg, + testAsSeg, + copyAsSeg +}; + + +/* The main function can use either tracts or segs */ + +static void testAllocAndIterate(Arena arena, Pool pool, + Size pageSize, Count numPerPage, + AllocatorClass allocator) +{ + AllocInfoStruct offsetRegion, gapRegion, newRegion, topRegion; + 
  LocusPrefStruct pref;
  Count offset, gap, new;
  ZoneSet zone = (ZoneSet)2;
  int i;

  LocusPrefInit(&pref);

  /* Testing the behaviour with various sizes of gaps in the page table. */

  /* Assume the allocation strategy is first-fit. The idea of the tests is */
  /* to allocate a region of memory, then deallocate a gap in the middle, */
  /* then allocate a new region that fits in the gap with various amounts */
  /* left over. Like this: */
  /* |-offsetRegion-||----gapRegion----||-topRegion-| */
  /* |-offsetRegion-||-newRegion-| |-topRegion-| */
  /* This is done with three different sizes of offsetRegion, in two */
  /* different zones to ensure that all page boundary cases are tested. */
  for(i = 0; i < 2; ++i) { /* zone loop: second pass adds a zone preference */
    /* offset takes the values 0, numPerPage, 2*numPerPage pages */
    for(offset = 0; offset <= 2*numPerPage; offset += numPerPage) {
      if(offset != 0)
        die(allocator->alloc(&offsetRegion, &pref, offset * pageSize, pool),
            "offsetRegion");
      for(gap = numPerPage+1; gap <= 3 * (numPerPage+1);
          gap += (numPerPage+1)) {
        die(allocator->alloc(&gapRegion, &pref, gap * pageSize, pool),
            "gapRegion");
        die(allocator->alloc(&topRegion, &pref, pageSize, pool),
            "topRegion");
        /* Free the middle region, leaving a gap between offsetRegion
           and topRegion for newRegion to be allocated into. */
        allocator->free(&gapRegion);
        for(new = 1; new <= gap; new += numPerPage) {
          AllocInfoStruct thisRegion, nextRegion;
          Count regionNum, expected;
          Res enoughRegions;

          die(allocator->alloc(&newRegion, &pref, new * pageSize, pool),
              "newRegion");

          /* Test iterators */
          cdie(allocator->first(&thisRegion, arena), "first");
          regionNum = 1;
          while (allocator->next(&nextRegion, &thisRegion, arena)) {
            regionNum++;
            allocator->copy(&thisRegion, &nextRegion);
          }

          /* Should be able to iterate over at least offset, new, top */
          expected =
            allocator->units(offset) +
            allocator->units(new) +
            allocator->units(1);

          if (regionNum >= expected)
            enoughRegions = ResOK;
          else
            enoughRegions = ResFAIL;

          die(enoughRegions, "Not enough regions");

          allocator->free(&newRegion);
        }

allocator->free(&topRegion); + } + if(offset != 0) { + allocator->test(&offsetRegion, arena); + allocator->free(&offsetRegion); + } + } + LocusPrefExpress(&pref, LocusPrefZONESET, &zone); + } +} + + +static void testPageTable(ArenaClass klass, Size size, Addr addr, Bool zoned) +{ + Arena arena; Pool pool; + Size pageSize; + Count tractsPerPage; + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, size); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_CL_BASE, addr); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, zoned); + die(ArenaCreate(&arena, klass, args), "ArenaCreate"); + } MPS_ARGS_END(args); + + die(PoolCreate(&pool, arena, PoolClassMVFF(), argsNone), "PoolCreate"); + + pageSize = ArenaGrainSize(arena); + tractsPerPage = pageSize / sizeof(TractStruct); + printf("%ld tracts per page in the page table.\n", (long)tractsPerPage); + + /* test tract allocation and iteration */ + testAllocAndIterate(arena, pool, pageSize, tractsPerPage, + &allocatorTractStruct); + + /* test segment allocation and iteration */ + testAllocAndIterate(arena, pool, pageSize, tractsPerPage, + &allocatorSegStruct); + + die(ArenaDescribe(arena, mps_lib_get_stdout(), 0), "ArenaDescribe"); + die(ArenaDescribeTracts(arena, mps_lib_get_stdout(), 0), + "ArenaDescribeTracts"); + + PoolDestroy(pool); + ArenaDestroy(arena); +} + + +/* testSize -- test arena size overflow + * + * Just try allocating larger arenas, doubling the size each time, until + * it fails, then check the error code. + */ + +static void testSize(Size size) +{ + ArenaClass klass = (ArenaClass)mps_arena_class_vm(); + Arena arena; + Res res; + + do { + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, size); + res = ArenaCreate(&arena, klass, args); + } MPS_ARGS_END(args); + if (res == ResOK) + ArenaDestroy(arena); + else + die((res == ResRESOURCE) ? 
ResOK : res, "right error code"); + size *= 2; + } while (size == 0); +} + + +#define TEST_ARENA_SIZE ((Size)16<<22) + + +int main(int argc, char *argv[]) +{ + void *block; + + testlib_init(argc, argv); + + testPageTable((ArenaClass)mps_arena_class_vm(), TEST_ARENA_SIZE, 0, TRUE); + testPageTable((ArenaClass)mps_arena_class_vm(), TEST_ARENA_SIZE, 0, FALSE); + + block = malloc(TEST_ARENA_SIZE); + cdie(block != NULL, "malloc"); + testPageTable((ArenaClass)mps_arena_class_cl(), TEST_ARENA_SIZE, block, FALSE); + + testSize(TEST_ARENA_SIZE); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c new file mode 100644 index 00000000000..50708e95638 --- /dev/null +++ b/mps/code/arenavm.c @@ -0,0 +1,1360 @@ +/* arenavm.c: VIRTUAL MEMORY ARENA CLASS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * + * DESIGN + * + * .design: , and + * + * .vm.addr-is-star: In this file, Addr is compatible with C + * pointers, and Count with size_t (Index), because all refer to the + * virtual address space. + * + * + * IMPROVEMENTS + * + * .improve.table.zone-zero: It would be better to make sure that the + * page tables are in zone zero, since that zone is least useful for + * GC. (But it would change how pagesFindFreeInZones avoids allocating + * over the tables, see .alloc.skip.) 
+ */ + +#include "boot.h" +#include "bt.h" +#include "cbs.h" +#include "mpm.h" +#include "mpsavm.h" +#include "poolmfs.h" +#include "sa.h" +#include "tract.h" +#include "vm.h" + +SRCID(arenavm, "$Id$"); + + +/* VMChunk -- chunks for VM arenas */ + +typedef struct VMChunkStruct *VMChunk; + +#define VMChunkSig ((Sig)0x519A6B3C) /* SIGnature ARena VM Chunk */ + +typedef struct VMChunkStruct { + ChunkStruct chunkStruct; /* generic chunk */ + VMStruct vmStruct; /* virtual memory descriptor */ + Addr overheadMappedLimit; /* limit of pages mapped for overhead */ + SparseArrayStruct pages; /* to manage backing store of page table */ + Sig sig; /* design.mps.sig.field.end.outer */ +} VMChunkStruct; + +#define VMChunk2Chunk(vmchunk) (&(vmchunk)->chunkStruct) +#define VMChunkVM(vmchunk) (&(vmchunk)->vmStruct) +#define Chunk2VMChunk(chunk) PARENT(VMChunkStruct, chunkStruct, chunk) + + +/* VMChunkVMArena -- get the VM arena from a VM chunk */ + +#define VMChunkVMArena(vmchunk) \ + MustBeA(VMArena, ChunkArena(VMChunk2Chunk(vmchunk))) + + +/* VMArena + * + * for description. 
+ */ + +typedef struct VMArenaStruct *VMArena; + +#define VMArenaSig ((Sig)0x519A6EB3) /* SIGnature AREna VM */ + +typedef struct VMArenaStruct { /* VM arena structure */ + ArenaStruct arenaStruct; + VMStruct vmStruct; /* VM descriptor for VM containing arena */ + char vmParams[VMParamSize]; /* VM parameter block */ + Size extendBy; /* desired arena increment */ + Size extendMin; /* minimum arena increment */ + ArenaVMExtendedCallback extended; + ArenaVMContractedCallback contracted; + MFSStruct cbsBlockPoolStruct; /* stores blocks for CBSs */ + CBSStruct spareLandStruct; /* spare memory */ + Sig sig; /* design.mps.sig.field.end.outer */ +} VMArenaStruct; + +#define VMArenaVM(vmarena) (&(vmarena)->vmStruct) +#define VMArenaCBSBlockPool(vmarena) MFSPool(&(vmarena)->cbsBlockPoolStruct) +#define VMArenaSpareLand(vmarena) CBSLand(&(vmarena)->spareLandStruct) + + +/* Forward declarations */ + +static void VMFree(Addr base, Size size, Pool pool); +static Size VMPurgeSpare(Arena arena, Size size); +static Size vmArenaUnmapSpare(Arena arena, Size size, Chunk filter); +DECLARE_CLASS(Arena, VMArena, AbstractArena); +static void VMCompact(Arena arena, Trace trace); +static void pageDescUnmap(VMChunk vmChunk, Index basePI, Index limitPI); + + +/* VMChunkCheck -- check the consistency of a VM chunk */ + +ATTRIBUTE_UNUSED +static Bool VMChunkCheck(VMChunk vmchunk) +{ + Chunk chunk; + + CHECKS(VMChunk, vmchunk); + chunk = VMChunk2Chunk(vmchunk); + CHECKD(Chunk, chunk); + CHECKD(VM, VMChunkVM(vmchunk)); + CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize(VMChunkVM(vmchunk)))); + CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable); + CHECKD(SparseArray, &vmchunk->pages); + /* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck + makes sure they're where they're expected to be (in the chunk). 
*/ + CHECKL(chunk->base < (Addr)vmchunk->pages.mapped); + CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages)) <= + vmchunk->overheadMappedLimit); + CHECKL(chunk->base < (Addr)vmchunk->pages.pages); + CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages)) <= + vmchunk->overheadMappedLimit); + /* .improve.check-table: Could check the consistency of the tables. */ + + return TRUE; +} + + +/* addrOfPageDesc -- address of the page descriptor (as an Addr) */ + +#define addrOfPageDesc(chunk, index) \ + ((Addr)&(chunk)->pageTable[index]) + + +/* PageTablePageIndex + * + * Maps from a page base address for a page occupied by the page table + * to the index of that page in the range of pages occupied by the + * page table. So that + * PageTablePageIndex(chunk, (Addr)chunk->pageTable) == 0 + * and + * PageTablePageIndex(chunk, + * AddrAlignUp(addrOfPageDesc(chunk->pages), pageSize) + * == chunk->pageTablePages + */ +#define PageTablePageIndex(chunk, pageAddr) \ + ChunkSizeToPages(chunk, AddrOffset((Addr)(chunk)->pageTable, pageAddr)) + + +/* TablePageIndexBase + * + * Takes a page table page index (i.e., the index of a page occupied + * by the page table, where the page occupied by chunk->pageTable is + * index 0) and returns the base address of that page. + * (Reverse of mapping defined by PageTablePageIndex.) 
+ */ +#define TablePageIndexBase(chunk, index) \ + AddrAdd((Addr)(chunk)->pageTable, ChunkPagesToSize(chunk, index)) + + +/* VMArenaCheck -- check the consistency of an arena structure */ + +ATTRIBUTE_UNUSED +static Bool VMArenaCheck(VMArena vmArena) +{ + Arena arena; + VMChunk primary; + + CHECKS(VMArena, vmArena); + arena = MustBeA(AbstractArena, vmArena); + CHECKD(Arena, arena); + + CHECKL(vmArena->extendBy > 0); + CHECKL(vmArena->extendMin <= vmArena->extendBy); + + if (arena->primary != NULL) { + primary = Chunk2VMChunk(arena->primary); + CHECKD(VMChunk, primary); + /* We could iterate over all chunks accumulating an accurate */ + /* count of committed, but we don't have all day. */ + CHECKL(VMMapped(VMChunkVM(primary)) <= arena->committed); + } + + CHECKD(Pool, VMArenaCBSBlockPool(vmArena)); + CHECKD(Land, VMArenaSpareLand(vmArena)); + CHECKL((LandSize)(VMArenaSpareLand(vmArena)) == arena->spareCommitted); + + /* TODO: Add sig to VMParamsStruct so it can be checked. */ + + return TRUE; +} + + +/* VMArenaDescribe -- describe the VMArena + */ +static Res VMArenaDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Arena arena = CouldBeA(AbstractArena, inst); + VMArena vmArena = CouldBeA(VMArena, arena); + Res res; + + if (!TESTC(VMArena, vmArena)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, VMArena, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "extendBy: $U\n", (WriteFU)vmArena->extendBy, + "extendMin: $U\n", (WriteFU)vmArena->extendMin, + NULL); + if(res != ResOK) + return res; + + res = LandDescribe(VMArenaSpareLand(vmArena), stream, depth + 2); + if (res != ResOK) + return res; + + /* TODO: incomplete -- some fields are not Described */ + + return ResOK; +} + + + +/* VM indirect functions + * + * These functions should be used to map and unmap within the arena. 
+ * They are responsible for maintaining vmArena->committed, and for + * checking that the commit limit does not get exceeded. + */ +static Res vmArenaMap(VMArena vmArena, VM vm, Addr base, Addr limit) +{ + Arena arena = MustBeA(AbstractArena, vmArena); + Size size = AddrOffset(base, limit); + Res res; + + /* no checking as function is local to module */ + + /* committed can't overflow (since we can't commit more memory than */ + /* address space), but we're paranoid. */ + AVER(arena->committed < arena->committed + size); + /* check against commit limit */ + if (arena->commitLimit < arena->committed + size) + return ResCOMMIT_LIMIT; + + res = VMMap(vm, base, limit); + if (res != ResOK) + return res; + arena->committed += size; + return ResOK; +} + + +static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit) +{ + Arena arena = MustBeA(AbstractArena, vmArena); + Size size = AddrOffset(base, limit); + + /* no checking as function is local to module */ + AVER(size <= arena->committed); + + VMUnmap(vm, base, limit); + arena->committed -= size; +} + + +/* chunkUnmapRange -- unmap range of addresses in a chunk */ + +static void chunkUnmapRange(Chunk chunk, Addr base, Addr limit) +{ + VMArena vmArena; + VMChunk vmChunk; + Index basePI, limitPI, i; + + AVERT(Chunk, chunk); + AVER(base < limit); + + vmArena = MustBeA(VMArena, ChunkArena(chunk)); + vmChunk = Chunk2VMChunk(chunk); + basePI = INDEX_OF_ADDR(chunk, base); + limitPI = INDEX_OF_ADDR(chunk, limit); + + for (i = basePI; i < limitPI; ++i) + PageInit(chunk, i); + vmArenaUnmap(vmArena, VMChunkVM(vmChunk), base, limit); + pageDescUnmap(vmChunk, basePI, limitPI); +} + + +/* VMChunkCreate -- create a chunk + * + * chunkReturn, return parameter for the created chunk. + * vmArena, the parent VMArena. + * size, approximate amount of virtual address that the chunk should reserve. 
+ */ +static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size) +{ + Arena arena = MustBeA(AbstractArena, vmArena); + Res res; + Addr base, limit, chunkStructLimit; + VMStruct vmStruct; + VM vm = &vmStruct; + BootBlockStruct bootStruct; + BootBlock boot = &bootStruct; + VMChunk vmChunk; + void *p; + + AVER(chunkReturn != NULL); + AVERT(VMArena, vmArena); + AVER(size > 0); + + res = VMInit(vm, size, ArenaGrainSize(arena), vmArena->vmParams); + if (res != ResOK) + goto failVMInit; + + base = VMBase(vm); + limit = VMLimit(vm); + + res = BootBlockInit(boot, (void *)base, (void *)limit); + if (res != ResOK) + goto failBootInit; + + /* .overhead.chunk-struct: Allocate and map the chunk structure. */ + res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN); + if (res != ResOK) + goto failChunkAlloc; + vmChunk = p; + /* Calculate the limit of the grain where the chunkStruct resides. */ + chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), ArenaGrainSize(arena)); + res = vmArenaMap(vmArena, vm, base, chunkStructLimit); + if (res != ResOK) + goto failChunkMap; + vmChunk->overheadMappedLimit = chunkStructLimit; + + /* Copy VM descriptor into its place in the chunk. */ + VMCopy(VMChunkVM(vmChunk), vm); + res = ChunkInit(VMChunk2Chunk(vmChunk), arena, base, limit, + VMReserved(VMChunkVM(vmChunk)), boot); + if (res != ResOK) + goto failChunkInit; + + BootBlockFinish(boot); + + vmChunk->sig = VMChunkSig; + AVERT(VMChunk, vmChunk); + + *chunkReturn = VMChunk2Chunk(vmChunk); + return ResOK; + +failChunkInit: + VMUnmap(vm, VMBase(vm), chunkStructLimit); +failChunkMap: +failChunkAlloc: +failBootInit: + VMFinish(vm); +failVMInit: + return res; +} + + +/* VMChunkInit -- initialize a VMChunk */ + +static Res VMChunkInit(Chunk chunk, BootBlock boot) +{ + VMChunk vmChunk; + Addr overheadLimit; + void *p; + Res res; + BT saMapped, saPages; + + /* chunk is supposed to be uninitialized, so don't check it. 
*/ + vmChunk = Chunk2VMChunk(chunk); + AVERT(BootBlock, boot); + + /* .overhead.sa-mapped: Chunk overhead for sparse array 'mapped' table. */ + res = BootAlloc(&p, boot, BTSize(chunk->pages), MPS_PF_ALIGN); + if (res != ResOK) + goto failSaMapped; + saMapped = p; + + /* .overhead.sa-pages: Chunk overhead for sparse array 'pages' table. */ + res = BootAlloc(&p, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN); + if (res != ResOK) + goto failSaPages; + saPages = p; + + overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot)); + + /* .overhead.page-table: Put the page table as late as possible, as + * in VM systems we don't want to map it. */ + res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift, chunk->pageSize); + if (res != ResOK) + goto failAllocPageTable; + chunk->pageTable = p; + + /* Map memory for the bit tables. */ + if (vmChunk->overheadMappedLimit < overheadLimit) { + overheadLimit = AddrAlignUp(overheadLimit, ChunkPageSize(chunk)); + res = vmArenaMap(VMChunkVMArena(vmChunk), VMChunkVM(vmChunk), + vmChunk->overheadMappedLimit, overheadLimit); + if (res != ResOK) + goto failTableMap; + vmChunk->overheadMappedLimit = overheadLimit; + } + + SparseArrayInit(&vmChunk->pages, + chunk->pageTable, + sizeof(PageUnion), + chunk->pages, + saMapped, saPages, VMChunkVM(vmChunk)); + + return ResOK; + + /* .no-clean: No clean-ups needed for boot, as we will discard the chunk. 
*/ +failTableMap: +failSaPages: +failAllocPageTable: +failSaMapped: + return res; +} + + +/* vmChunkDestroy -- destroy a VMChunk */ + +static Bool vmChunkDestroy(Tree tree, void *closure) +{ + Chunk chunk; + VMChunk vmChunk; + Arena arena; + Addr base; + Size size; + VMArena vmArena; + + AVERT(Tree, tree); + AVER(closure == UNUSED_POINTER); + UNUSED(closure); + + chunk = ChunkOfTree(tree); + AVERT(Chunk, chunk); + vmChunk = Chunk2VMChunk(chunk); + AVERT(VMChunk, vmChunk); + arena = ChunkArena(chunk); + vmArena = MustBeA(VMArena, arena); + base = chunk->base; + size = ChunkSize(chunk); + + (*vmArena->contracted)(arena, base, size); + + (void)vmArenaUnmapSpare(arena, size, chunk); + + SparseArrayFinish(&vmChunk->pages); + + vmChunk->sig = SigInvalid; + ChunkFinish(chunk); + + return TRUE; +} + + +/* VMChunkFinish -- finish a VMChunk */ + +static void VMChunkFinish(Chunk chunk) +{ + VMStruct vmStruct; + VM vm = &vmStruct; + VMChunk vmChunk = Chunk2VMChunk(chunk); + + /* Copy VM descriptor to stack-local storage so that we can continue + * using the descriptor after the VM has been unmapped. */ + VMCopy(vm, VMChunkVM(vmChunk)); + + vmArenaUnmap(VMChunkVMArena(vmChunk), vm, + VMBase(vm), vmChunk->overheadMappedLimit); + + /* No point in finishing the other fields, since they are unmapped. 
*/ + + VMFinish(vm); +} + + +/* VMArenaVarargs -- parse obsolete varargs */ + +static void VMArenaVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_ARENA_SIZE; + args[0].val.size = va_arg(varargs, Size); + args[1].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + +/* VMArenaTrivExtended -- trivial callback for VM arena extension */ + +static void vmArenaTrivExtended(Arena arena, Addr base, Size size) +{ + AVERT(Arena, arena); + AVER(base != 0); + AVER(size > 0); + UNUSED(arena); + UNUSED(base); + UNUSED(size); +} + +/* VMArenaTrivContracted -- trivial callback for VM arena contraction */ + +static void vmArenaTrivContracted(Arena arena, Addr base, Size size) +{ + AVERT(Arena, arena); + AVER(base != 0); + AVER(size > 0); + UNUSED(arena); + UNUSED(base); + UNUSED(size); +} + + +/* vmArenaChunkSize -- compute chunk size + * + * Compute the size of the smallest chunk that has size bytes of usable + * address space (that is, after all overheads are accounted for). + * + * If successful, update *chunkSizeReturn with the computed chunk size + * and return ResOK. If size is too large for a chunk, leave + * *chunkSizeReturn unchanged and return ResRESOURCE. + */ +static Res vmArenaChunkSize(Size *chunkSizeReturn, VMArena vmArena, Size size) +{ + Size grainSize; /* Arena grain size. */ + Shift grainShift; /* The corresponding Shift. */ + Count pages; /* Number of usable pages in chunk. */ + Size pageTableSize; /* Size of the page table. */ + Count pageTablePages; /* Number of pages in the page table. */ + Size chunkSize; /* Size of the chunk. */ + Size overhead; /* Total overheads for the chunk. */ + + AVER(chunkSizeReturn != NULL); + AVERT(VMArena, vmArena); + AVER(size > 0); + + grainSize = ArenaGrainSize(MustBeA(AbstractArena, vmArena)); + grainShift = SizeLog2(grainSize); + + overhead = 0; + do { + chunkSize = size + overhead; + AVER(SizeIsAligned(chunkSize, grainSize)); + + /* See .overhead.chunk-struct. 
*/ + overhead = SizeAlignUp(sizeof(VMChunkStruct), MPS_PF_ALIGN); + + /* See , */ + pages = chunkSize >> grainShift; + overhead += SizeAlignUp(BTSize(pages), MPS_PF_ALIGN); + + /* See .overhead.sa-mapped. */ + overhead += SizeAlignUp(BTSize(pages), MPS_PF_ALIGN); + + /* See .overhead.sa-pages. */ + pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), grainSize); + pageTablePages = pageTableSize >> grainShift; + overhead += SizeAlignUp(BTSize(pageTablePages), MPS_PF_ALIGN); + + /* See .overhead.page-table. */ + overhead = SizeAlignUp(overhead, grainSize); + overhead += SizeAlignUp(pageTableSize, grainSize); + + if (SizeMAX - overhead < size) + return ResRESOURCE; + } while (chunkSize < size + overhead); + + *chunkSizeReturn = chunkSize; + return ResOK; +} + + +/* VMArenaCreate -- create and initialize the VM arena + * + * .arena.init: Once the arena has been allocated, we call ArenaInit + * to do the generic part of init. + */ + +ARG_DEFINE_KEY(arena_extended, Fun); +#define vmKeyArenaExtended (&_mps_key_arena_extended) +ARG_DEFINE_KEY(arena_contracted, Fun); +#define vmKeyArenaContracted (&_mps_key_arena_contracted) + +static Res VMArenaCreate(Arena *arenaReturn, ArgList args) +{ + Size size = VM_ARENA_SIZE_DEFAULT; /* initial arena size */ + Align grainSize = MPS_PF_ALIGN; /* arena grain size */ + Size pageSize = PageSize(); /* operating system page size */ + Size chunkSize; /* size actually created */ + Size vmArenaSize; /* aligned size of VMArenaStruct */ + Res res; + VMArena vmArena; + Arena arena; + VMStruct vmStruct; + VM vm = &vmStruct; + Chunk chunk; + mps_arg_s arg; + char vmParams[VMParamSize]; + + AVER(arenaReturn != NULL); + AVERT(ArgList, args); + + if (ArgPick(&arg, args, MPS_KEY_ARENA_GRAIN_SIZE)) + grainSize = arg.val.size; + if (grainSize < pageSize) + /* Make it easier to write portable programs by rounding up. 
*/ + grainSize = pageSize; + AVERT(ArenaGrainSize, grainSize); + + if (ArgPick(&arg, args, MPS_KEY_ARENA_SIZE)) + size = arg.val.size; + if (size < grainSize * MPS_WORD_WIDTH) + /* There has to be enough room in the chunk for a full complement of + zones. Make it easier to write portable programs by rounding up. */ + size = grainSize * MPS_WORD_WIDTH; + + /* Parse remaining arguments, if any, into VM parameters. We must do + this into some stack-allocated memory for the moment, since we + don't have anywhere else to put it. It gets copied later. */ + res = VMParamFromArgs(vmParams, sizeof(vmParams), args); + if (res != ResOK) + goto failVMInit; + + /* Create a VM to hold the arena and map it. Store descriptor on the + stack until we have the arena to put it in. */ + vmArenaSize = SizeAlignUp(sizeof(VMArenaStruct), MPS_PF_ALIGN); + res = VMInit(vm, vmArenaSize, grainSize, vmParams); + if (res != ResOK) + goto failVMInit; + res = VMMap(vm, VMBase(vm), VMLimit(vm)); + if (res != ResOK) + goto failVMMap; + vmArena = (VMArena)VMBase(vm); + + arena = CouldBeA(AbstractArena, vmArena); + + res = NextMethod(Arena, VMArena, init)(arena, grainSize, args); + if (res != ResOK) + goto failArenaInit; + SetClassOfPoly(arena, CLASS(VMArena)); + AVER(vmArena == MustBeA(VMArena, arena)); + + arena->reserved = VMReserved(vm); + arena->committed = VMMapped(vm); + + /* Initialize a pool to hold the CBS blocks for the spare memory + land. This pool can't be allowed to extend itself using + ArenaAlloc because it is needed to implement ArenaAlloc (in the + case where allocation hits the commit limit and so spare memory + needs to be purged), so MFSExtendSelf is set to FALSE. Failures + to extend are handled where the spare memory land is used. 
*/ + MPS_ARGS_BEGIN(piArgs) { + MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(RangeTreeStruct)); + MPS_ARGS_ADD(piArgs, MPS_KEY_EXTEND_BY, grainSize); + MPS_ARGS_ADD(piArgs, MFSExtendSelf, FALSE); + res = PoolInit(VMArenaCBSBlockPool(vmArena), arena, PoolClassMFS(), piArgs); + } MPS_ARGS_END(piArgs); + AVER(res == ResOK); /* no allocation, no failure expected */ + if (res != ResOK) + goto failMFSInit; + + /* Initialise spare land. */ + MPS_ARGS_BEGIN(liArgs) { + MPS_ARGS_ADD(liArgs, CBSBlockPool, VMArenaCBSBlockPool(vmArena)); + res = LandInit(VMArenaSpareLand(vmArena), CLASS(CBS), arena, + grainSize, arena, liArgs); + } MPS_ARGS_END(liArgs); + AVER(res == ResOK); /* no allocation, no failure expected */ + if (res != ResOK) + goto failLandInit; + ++ ArenaGlobals(arena)->systemPools; + + /* Copy VM descriptor into its place in the arena. */ + VMCopy(VMArenaVM(vmArena), vm); + + /* Copy the stack-allocated VM parameters into their home in the VMArena. */ + AVER(sizeof(vmArena->vmParams) == sizeof(vmParams)); + (void)mps_lib_memcpy(vmArena->vmParams, vmParams, sizeof(vmArena->vmParams)); + + /* */ + vmArena->extendBy = size; + vmArena->extendMin = 0; + + vmArena->extended = vmArenaTrivExtended; + if (ArgPick(&arg, args, vmKeyArenaExtended)) + vmArena->extended = (ArenaVMExtendedCallback)arg.val.fun; + + vmArena->contracted = vmArenaTrivContracted; + if (ArgPick(&arg, args, vmKeyArenaContracted)) + vmArena->contracted = (ArenaVMContractedCallback)arg.val.fun; + + /* have to have a valid arena before calling ChunkCreate */ + vmArena->sig = VMArenaSig; + res = VMChunkCreate(&chunk, vmArena, size); + if (res != ResOK) + goto failChunkCreate; + +#if defined(AVER_AND_CHECK_ALL) + /* Check the computation of the chunk size in vmArenaChunkSize, now + * that we have the actual chunk for comparison. Note that + * vmArenaChunkSize computes the smallest size with a given number + * of usable bytes -- the actual chunk may be one grain larger. 
*/ + { + Size usableSize, computedChunkSize; + usableSize = AddrOffset(PageIndexBase(chunk, chunk->allocBase), + chunk->limit); + res = vmArenaChunkSize(&computedChunkSize, vmArena, usableSize); + AVER(res == ResOK); + AVER(computedChunkSize == ChunkSize(chunk) + || computedChunkSize + grainSize == ChunkSize(chunk)); + } +#endif + + /* .zoneshift: Set the zone shift to divide the chunk into the same */ + /* number of stripes as will fit into a reference set (the number of */ + /* bits in a word). Fail if the chunk is so small stripes are smaller */ + /* than pages. Note that some zones are discontiguous in the chunk if */ + /* the size is not a power of 2. . */ + chunkSize = ChunkSize(chunk); + arena->zoneShift = SizeFloorLog2(chunkSize >> MPS_WORD_SHIFT); + AVER(ChunkPageSize(chunk) == ArenaGrainSize(arena)); + + AVERT(VMArena, vmArena); + EVENT7(ArenaCreateVM, arena, size, chunkSize, grainSize, + ClassOfPoly(Arena, arena), ArenaGlobals(arena)->systemPools, + arena->serial); + + vmArena->extended(arena, chunk->base, chunkSize); + + *arenaReturn = arena; + return ResOK; + +failChunkCreate: + LandFinish(VMArenaSpareLand(vmArena)); +failLandInit: + PoolFinish(VMArenaCBSBlockPool(vmArena)); +failMFSInit: + NextMethod(Inst, VMArena, finish)(MustBeA(Inst, arena)); +failArenaInit: + VMUnmap(vm, VMBase(vm), VMLimit(vm)); +failVMMap: + VMFinish(vm); +failVMInit: + return res; +} + + +static void vmArenaMFSFreeExtent(Pool pool, Addr base, Size size, void *closure) +{ + Chunk chunk = NULL; /* suppress "may be used uninitialized" */ + Bool foundChunk; + + AVERT(Pool, pool); + AVER(closure == UNUSED_POINTER); + UNUSED(closure); + + foundChunk = ChunkOfAddr(&chunk, PoolArena(pool), base); + AVER(foundChunk); + chunkUnmapRange(chunk, base, AddrAdd(base, size)); +} + + +static void VMArenaDestroy(Arena arena) +{ + VMArena vmArena = MustBeA(VMArena, arena); + Land spareLand = VMArenaSpareLand(vmArena); + VMStruct vmStruct; + VM vm = &vmStruct; + + /* Unmap all remaining spare 
memory. */ + VMPurgeSpare(arena, LandSize(spareLand)); + AVER(LandSize(spareLand) == 0); + AVER(arena->spareCommitted == 0); + + /* The CBS block pool can't free its own memory via ArenaFree + because that would attempt to insert the freed memory into the + spare memory land, which uses blocks from the block pool. */ + MFSFinishExtents(VMArenaCBSBlockPool(vmArena), vmArenaMFSFreeExtent, + UNUSED_POINTER); + PoolFinish(VMArenaCBSBlockPool(vmArena)); + + /* Destroy all chunks, including the primary. See + * */ + arena->primary = NULL; + TreeTraverseAndDelete(&arena->chunkTree, vmChunkDestroy, UNUSED_POINTER); + AVER(arena->chunkTree == TreeEMPTY); + + /* Must wait until the chunks are destroyed, since vmChunkDestroy + calls vmArenaUnmapSpare which uses the spare land. */ + LandFinish(VMArenaSpareLand(vmArena)); + + /* Destroying the chunks must leave only the arena's own VM. */ + AVER(arena->reserved == VMReserved(VMArenaVM(vmArena))); + AVER(arena->committed == VMMapped(VMArenaVM(vmArena))); + + vmArena->sig = SigInvalid; + + NextMethod(Inst, VMArena, finish)(MustBeA(Inst, arena)); + + /* Copy VM descriptor to stack-local storage so that we can continue + * using the descriptor after the VM has been unmapped. */ + VMCopy(vm, VMArenaVM(vmArena)); + VMUnmap(vm, VMBase(vm), VMLimit(vm)); + VMFinish(vm); + + EVENT1(ArenaDestroy, vmArena); +} + + +/* VMArenaGrow -- Extend the arena by making a new chunk + * + * size specifies how much we wish to allocate after the extension. + * pref specifies the preference for the location of the allocation. + */ +static Res VMArenaGrow(Arena arena, LocusPref pref, Size size) +{ + VMArena vmArena = MustBeA(VMArena, arena); + Chunk newChunk; + Size chunkSize; + Size chunkMin; + Res res; + + /* TODO: Ensure that extended arena will be able to satisfy pref. 
*/ + AVERT(LocusPref, pref); + UNUSED(pref); + + res = vmArenaChunkSize(&chunkMin, vmArena, size); + if (res != ResOK) + return res; + chunkSize = vmArena->extendBy; + + EVENT3(VMArenaExtendStart, size, chunkSize, ArenaReserved(arena)); + + /* .chunk-create.fail: If we fail, try again with a smaller size */ + { + unsigned fidelity = 8; /* max fraction of addr-space we may 'waste' */ + Size chunkHalf; + Size sliceSize; + + if (vmArena->extendMin > chunkMin) + chunkMin = vmArena->extendMin; + if (chunkSize < chunkMin) + chunkSize = chunkMin; + + res = ResRESOURCE; + for(;; chunkSize = chunkHalf) { + chunkHalf = chunkSize / 2; + sliceSize = chunkHalf / fidelity; + AVER(sliceSize > 0); + + /* remove slices, down to chunkHalf but no further */ + for(; chunkSize > chunkHalf; chunkSize -= sliceSize) { + if(chunkSize < chunkMin) { + EVENT2(VMArenaExtendFail, chunkMin, ArenaReserved(arena)); + return res; + } + res = VMChunkCreate(&newChunk, vmArena, chunkSize); + if(res == ResOK) + goto vmArenaGrow_Done; + } + } + } + +vmArenaGrow_Done: + EVENT2(VMArenaExtendDone, chunkSize, ArenaReserved(arena)); + vmArena->extended(arena, + newChunk->base, + AddrOffset(newChunk->base, newChunk->limit)); + + return res; +} + + +/* spareRangeRelease -- release a range of spare memory in a chunk + * + * Temporarily leaves data structures in an inconsistent state (the + * spare memory is still marked as SPARE in the chunk's page table, + * but it is no longer in the spare memory land). The caller must + * either allocate the memory or unmap it. 
+ */ + +static void spareRangeRelease(VMChunk vmChunk, Index piBase, Index piLimit) +{ + Chunk chunk = VMChunk2Chunk(vmChunk); + Arena arena = ChunkArena(chunk); + VMArena vmArena = VMChunkVMArena(vmChunk); + Land spareLand = VMArenaSpareLand(vmArena); + RangeStruct range, containingRange; + Res res; + + AVER(piBase < piLimit); + RangeInit(&range, PageIndexBase(chunk, piBase), + PageIndexBase(chunk, piLimit)); + + res = LandDelete(&containingRange, spareLand, &range); + if (res != ResOK) { + /* Range could not be deleted from the spare memory land because + it splits the containing range and so needs to allocate a + block but the block pool is full. Use the first grain of the + containing range to extend the block pool. */ + Addr extendBase = RangeBase(&containingRange); + Index extendBasePI = INDEX_OF_ADDR(chunk, extendBase); + Addr extendLimit = AddrAdd(extendBase, ArenaGrainSize(arena)); + RangeStruct extendRange; + AVER(res == ResLIMIT); + RangeInit(&extendRange, extendBase, extendLimit); + AVER(!RangesOverlap(&extendRange, &range)); + res = LandDelete(&containingRange, spareLand, &extendRange); + AVER(res == ResOK); + AVER(arena->spareCommitted >= RangeSize(&extendRange)); + arena->spareCommitted -= RangeSize(&extendRange); + PageAlloc(chunk, extendBasePI, VMArenaCBSBlockPool(vmArena)); + MFSExtend(VMArenaCBSBlockPool(vmArena), extendBase, extendLimit); + res = LandDelete(&containingRange, spareLand, &range); + AVER(res == ResOK); + } + AVER(arena->spareCommitted >= RangeSize(&range)); + arena->spareCommitted -= RangeSize(&range); +} + + +static Res pageDescMap(VMChunk vmChunk, Index basePI, Index limitPI) +{ + Size before = VMMapped(VMChunkVM(vmChunk)); + Arena arena = MustBeA(AbstractArena, VMChunkVMArena(vmChunk)); + Res res = SparseArrayMap(&vmChunk->pages, basePI, limitPI); + Size after = VMMapped(VMChunkVM(vmChunk)); + AVER(before <= after); + arena->committed += after - before; + return res; +} + +static void pageDescUnmap(VMChunk vmChunk, Index basePI, 
Index limitPI) +{ + Size size, after; + Size before = VMMapped(VMChunkVM(vmChunk)); + Arena arena = MustBeA(AbstractArena, VMChunkVMArena(vmChunk)); + SparseArrayUnmap(&vmChunk->pages, basePI, limitPI); + after = VMMapped(VMChunkVM(vmChunk)); + AVER(after <= before); + size = before - after; + AVER(arena->committed >= size); + arena->committed -= size; +} + + +/* pagesMarkAllocated -- Mark the pages allocated */ + +static Res pagesMarkAllocated(VMArena vmArena, VMChunk vmChunk, + Index basePI, Count pages, Pool pool) +{ + Index cursor, i, j, k; + Index limitPI; + Chunk chunk = VMChunk2Chunk(vmChunk); + Res res; + + limitPI = basePI + pages; + AVER(limitPI <= chunk->pages); + + /* NOTE: We could find a reset bit range in vmChunk->pages.pages in order + to skip across hundreds of pages at once. That could speed up really + big block allocations (hundreds of pages long). */ + + cursor = basePI; + while (BTFindLongResRange(&j, &k, vmChunk->pages.mapped, cursor, limitPI, 1)) { + if (cursor < j) + spareRangeRelease(vmChunk, cursor, j); + for (i = cursor; i < j; ++i) + PageAlloc(chunk, i, pool); + res = pageDescMap(vmChunk, j, k); + if (res != ResOK) + goto failSAMap; + res = vmArenaMap(vmArena, VMChunkVM(vmChunk), + PageIndexBase(chunk, j), PageIndexBase(chunk, k)); + if (res != ResOK) + goto failVMMap; + for (i = j; i < k; ++i) { + PageInit(chunk, i); + PageAlloc(chunk, i, pool); + } + cursor = k; + if (cursor == limitPI) + return ResOK; + } + if (cursor < limitPI) + spareRangeRelease(vmChunk, cursor, limitPI); + for (i = cursor; i < limitPI; ++i) + PageAlloc(chunk, i, pool); + return ResOK; + +failVMMap: + pageDescUnmap(vmChunk, j, k); +failSAMap: + /* Region from basePI to j was allocated but can't be used */ + if (basePI < j) { + VMFree(PageIndexBase(chunk, basePI), + ChunkPagesToSize(chunk, j - basePI), + pool); + } + return res; +} + +static Res VMPagesMarkAllocated(Arena arena, Chunk chunk, + Index baseIndex, Count pages, Pool pool) +{ + Res res; + VMArena vmArena 
= MustBeA(VMArena, arena); + + AVERT(Arena, arena); + AVERT(Chunk, chunk); + AVER(chunk->allocBase <= baseIndex); + AVER(pages > 0); + AVER(baseIndex + pages <= chunk->pages); + AVERT(Pool, pool); + + res = pagesMarkAllocated(vmArena, + Chunk2VMChunk(chunk), + baseIndex, + pages, + pool); + /* TODO: Could this loop be pushed down into vmArenaMap? */ + while (res != ResOK) { + /* Try purging spare pages in the hope that the OS will give them back + at the new address. Will eventually run out of spare pages, so this + loop will terminate. */ + /* TODO: Investigate implementing VMRemap so that we can guarantee + success if we have enough spare pages. */ + if (VMPurgeSpare(arena, pages * ChunkPageSize(chunk)) == 0) + break; + res = pagesMarkAllocated(vmArena, + Chunk2VMChunk(chunk), + baseIndex, + pages, + pool); + } + return res; +} + + +static Bool VMChunkPageMapped(Chunk chunk, Index index) +{ + VMChunk vmChunk; + AVERT(Chunk, chunk); + AVER(index < chunk->pages); + vmChunk = Chunk2VMChunk(chunk); + return BTGet(vmChunk->pages.mapped, index); +} + + +/* vmArenaUnmapSpare -- unmap spare memory + * + * The size is the desired amount to unmap, and the amount that was + * unmapped is returned. If filter is not NULL, then only memory + * within that chunk is unmapped. 
+ */ + +typedef struct VMArenaUnmapSpareClosureStruct { + Arena arena; /* arena owning the spare memory */ + Size size; /* desired amount of spare memory to unmap */ + Chunk filter; /* NULL or chunk to unmap from */ + Size unmapped; /* actual amount unmapped */ +} VMArenaUnmapSpareClosureStruct, *VMArenaUnmapSpareClosure; + +static Bool vmArenaUnmapSpareRange(Bool *deleteReturn, Land land, Range range, + void *p) +{ + VMArenaUnmapSpareClosure closure = p; + Arena arena; + Chunk chunk = NULL; /* suppress "may be used uninitialized" */ + Bool foundChunk; + + AVER(deleteReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + AVER(p != NULL); + + arena = closure->arena; + foundChunk = ChunkOfAddr(&chunk, arena, RangeBase(range)); + AVER(foundChunk); + + if (closure->filter == NULL || closure->filter == chunk) { + Size size = RangeSize(range); + chunkUnmapRange(chunk, RangeBase(range), RangeLimit(range)); + AVER(arena->spareCommitted >= size); + arena->spareCommitted -= size; + closure->unmapped += size; + *deleteReturn = TRUE; + } + + return closure->unmapped < closure->size; +} + +static Size vmArenaUnmapSpare(Arena arena, Size size, Chunk filter) +{ + VMArena vmArena = MustBeA(VMArena, arena); + Land spareLand = VMArenaSpareLand(vmArena); + VMArenaUnmapSpareClosureStruct closure; + + if (filter != NULL) + AVERT(Chunk, filter); + + closure.arena = arena; + closure.size = size; + closure.filter = filter; + closure.unmapped = 0; + (void)LandIterateAndDelete(spareLand, vmArenaUnmapSpareRange, &closure); + + AVER(LandSize(spareLand) == arena->spareCommitted); + + return closure.unmapped; +} + +static Size VMPurgeSpare(Arena arena, Size size) +{ + return vmArenaUnmapSpare(arena, size, NULL); +} + + +/* VMFree -- free a region in the arena */ + +static void VMFree(Addr base, Size size, Pool pool) +{ + Arena arena; + VMArena vmArena; + Land spareLand; + Chunk chunk = NULL; /* suppress "may be used uninitialized" */ + Count pages; + Index pi, piBase, piLimit; + Bool 
foundChunk; + Size spareCommitted; + RangeStruct range, containingRange; + Res res; + + AVER(base != NULL); + AVER(size > (Size)0); + AVERT(Pool, pool); + arena = PoolArena(pool); + vmArena = MustBeA(VMArena, arena); + spareLand = VMArenaSpareLand(vmArena); + + /* All chunks have same pageSize. */ + AVER(SizeIsAligned(size, ChunkPageSize(arena->primary))); + AVER(AddrIsAligned(base, ChunkPageSize(arena->primary))); + + foundChunk = ChunkOfAddr(&chunk, arena, base); + AVER(foundChunk); + + /* Calculate the number of pages in the region */ + pages = ChunkSizeToPages(chunk, size); + piBase = INDEX_OF_ADDR(chunk, base); + piLimit = piBase + pages; + AVER(piBase < piLimit); + AVER(piLimit <= chunk->pages); + + /* Finish each Tract in the region. */ + for(pi = piBase; pi < piLimit; ++pi) { + Page page = ChunkPage(chunk, pi); + Tract tract = PageTract(page); + AVER(TractPool(tract) == pool); + + TractFinish(tract); + } + BTResRange(chunk->allocTable, piBase, piLimit); + + /* Freed range is now spare memory, so add it to spare memory land. */ + RangeInitSize(&range, base, size); + res = LandInsert(&containingRange, spareLand, &range); + if (res != ResOK) { + /* The freed range could not be inserted into the spare memory + land because the block pool is full. Allocate the first grain + of the freed range and use it to extend the block pool. */ + Addr extendLimit = AddrAdd(base, ArenaGrainSize(arena)); + res = ArenaFreeLandDelete(arena, base, extendLimit); + if (res != ResOK) { + /* Give up and unmap the memory immediately. */ + chunkUnmapRange(chunk, RangeBase(&range), RangeLimit(&range)); + return; + } + PageAlloc(chunk, INDEX_OF_ADDR(chunk, base), VMArenaCBSBlockPool(vmArena)); + MFSExtend(VMArenaCBSBlockPool(vmArena), base, extendLimit); + + /* Adjust the freed range and try again. This time the insertion + must succeed since we just extended the block pool. 
*/ + RangeSetBase(&range, extendLimit); + AVERT(Range, &range); + if (!RangeIsEmpty(&range)) { + res = LandInsert(&containingRange, spareLand, &range); + AVER(res == ResOK); + } + } + arena->spareCommitted += RangeSize(&range); + + /* Consider returning memory to the OS. */ + /* Purging spare memory can cause page descriptors to be unmapped, + causing ArenaCommitted to fall, so we can't be sure to unmap + enough in one pass. This somewhat contradicts the goal of having + spare committed memory, which is to reduce the amount of mapping + and unmapping, but we need to do this in order to be able to + check the spare committed invariant. */ + spareCommitted = ArenaSpareCommitted(arena); + while (spareCommitted > ArenaSpareCommitLimit(arena)) { + Size toPurge = spareCommitted - ArenaSpareCommitLimit(arena); + /* Purge at least half of the spare memory, not just the extra + sliver, so that we return a reasonable amount of memory in one + go, and avoid lots of small unmappings, each of which has an + overhead. */ + /* TODO: Consider making this time-based. */ + /* TODO: Consider making this smarter about the overheads tradeoff. */ + Size minPurge = ArenaSpareCommitted(arena) / 2; + Size newSpareCommitted; + if (toPurge < minPurge) + toPurge = minPurge; + VMPurgeSpare(arena, toPurge); + newSpareCommitted = ArenaSpareCommitted(arena); + AVER(newSpareCommitted < spareCommitted); + spareCommitted = newSpareCommitted; + } + AVER(ArenaCurrentSpare(arena) <= ArenaSpare(arena)); + + /* TODO: Chunks are only destroyed when ArenaCompact is called, and + that is only called from traceReclaim. Should consider destroying + chunks here. See job003815. 
*/ +} + + +/* vmChunkCompact -- delete chunk if empty and not primary */ + +static Bool vmChunkCompact(Tree tree, void *closure) +{ + Chunk chunk; + Arena arena = closure; + + AVERT(Tree, tree); + + chunk = ChunkOfTree(tree); + AVERT(Chunk, chunk); + if(chunk != arena->primary + && BTIsResRange(chunk->allocTable, 0, chunk->pages)) + { + vmChunkDestroy(tree, UNUSED_POINTER); + return TRUE; + } else { + /* Keep this chunk. */ + return FALSE; + } +} + + +static void VMCompact(Arena arena, Trace trace) +{ + STATISTIC_DECL(Size vmem1) + + AVERT(Trace, trace); + + STATISTIC(vmem1 = ArenaReserved(arena)); + + /* Destroy chunks that are completely free, but not the primary + * chunk. + * TODO: add hysteresis here. See job003815. */ + TreeTraverseAndDelete(&arena->chunkTree, vmChunkCompact, arena); + + STATISTIC({ + Size vmem0 = trace->preTraceArenaReserved; + Size vmem2 = ArenaReserved(arena); + + /* VMCompact event: emit for collections where chunks were gained + * or lost during the collection. */ + if (vmem0 != vmem1 || vmem1 != vmem2) + EVENT3(VMCompact, vmem0, vmem1, vmem2); + }); +} + +mps_res_t mps_arena_vm_growth(mps_arena_t mps_arena, + size_t mps_desired, size_t mps_minimum) +{ + Arena arena = (Arena)mps_arena; + Size desired = (Size)mps_desired; + Size minimum = (Size)mps_minimum; + VMArena vmArena; + + ArenaEnter(arena); + + AVERT(Arena, arena); + vmArena = MustBeA(VMArena, arena); + + /* Must desire at least the minimum increment! 
*/ + AVER(desired >= minimum); + + vmArena->extendBy = desired; + vmArena->extendMin = minimum; + + ArenaLeave(arena); + + return MPS_RES_OK; +} + + +/* VMArenaClass -- The VM arena class definition */ + +DEFINE_CLASS(Arena, VMArena, klass) +{ + INHERIT_CLASS(klass, VMArena, AbstractArena); + klass->instClassStruct.describe = VMArenaDescribe; + klass->size = sizeof(VMArenaStruct); + klass->varargs = VMArenaVarargs; + klass->create = VMArenaCreate; + klass->destroy = VMArenaDestroy; + klass->purgeSpare = VMPurgeSpare; + klass->grow = VMArenaGrow; + klass->free = VMFree; + klass->chunkInit = VMChunkInit; + klass->chunkFinish = VMChunkFinish; + klass->compact = VMCompact; + klass->pagesMarkAllocated = VMPagesMarkAllocated; + klass->chunkPageMapped = VMChunkPageMapped; + AVERT(ArenaClass, klass); +} + + +/* mps_arena_class_vm -- return the arena class VM */ + +mps_arena_class_t mps_arena_class_vm(void) +{ + return (mps_arena_class_t)CLASS(VMArena); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/arg.c b/mps/code/arg.c new file mode 100644 index 00000000000..43fd4c9c415 --- /dev/null +++ b/mps/code/arg.c @@ -0,0 +1,240 @@ +/* arg.c: ARGUMENT LISTS + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * .source: . + */ + +#include "config.h" +#include "check.h" +#include "mpm.h" +#include "dbgpool.h" + +SRCID(arg, "$Id$"); + + +/* ArgCheckCant -- default argument checker + * + * This is a default value for the KeyStruct check field for keywords + * that don't have any meaningful checking they can do. + */ + +Bool ArgCheckCant(Arg arg) +{ + UNUSED(arg); + return TRUE; +} + +static Bool ArgCheckShouldnt(Arg arg) +{ + UNUSED(arg); + NOTREACHED; + return FALSE; +} + +Bool ArgCheckFormat(Arg arg) +{ + CHECKD(Format, arg->val.format); + return TRUE; +} + +Bool ArgCheckChain(Arg arg) +{ + CHECKD(Chain, arg->val.chain); + return TRUE; +} + +Bool ArgCheckSize(Arg arg) +{ + UNUSED(arg); /* TODO: Add and call SizeCheck */ + return TRUE; +} + +Bool ArgCheckAddr(Arg arg) +{ + UNUSED(arg); /* TODO: Add and call AddrCheck */ + return TRUE; +} + +Bool ArgCheckPoolDebugOptions(Arg arg) +{ + CHECKD_NOSIG(PoolDebugOptions, (PoolDebugOptions)arg->val.pool_debug_options); + return TRUE; +} + +Bool ArgCheckFun(Arg arg) +{ + /* TODO: Fix potential pun here on Harvard architectures where + function pointers are not compatible with other pointers. 
*/ + CHECKL(FUNCHECK(arg->val.addr_method)); + return TRUE; +} + +Bool ArgCheckAlign(Arg arg) +{ + CHECKL(AlignCheck(arg->val.align)); + return TRUE; +} + +Bool ArgCheckBool(Arg arg) +{ + CHECKL(BoolCheck(arg->val.b)); + return TRUE; +} + +Bool ArgCheckCount(Arg arg) +{ + UNUSED(arg); /* TODO: Add and call CountCheck */ + return TRUE; +} + +Bool ArgCheckPointer(Arg arg) +{ + CHECKL(arg != NULL); + return TRUE; +} + +Bool ArgCheckRankSet(Arg arg) +{ + CHECKL(COMPATTYPE(RankSet, unsigned)); + CHECKL(RankSetCheck(arg->val.u)); + return TRUE; +} + +Bool ArgCheckRank(Arg arg) +{ + CHECKL(RankCheck(arg->val.rank)); + return TRUE; +} + +Bool ArgCheckdouble(Arg arg) +{ + /* Don't call isfinite() here because it's not in C89, and because + infinity is a valid value for MPS_KEY_PAUSE_TIME. */ + UNUSED(arg); + return TRUE; +} + +Bool ArgCheckPool(Arg arg) +{ + CHECKD(Pool, arg->val.pool); + return TRUE; +} + + +ARG_DEFINE_KEY(ARGS_END, Shouldnt); + +ArgStruct mps_args_none[] = {{MPS_KEY_ARGS_END, {0}}}; + + +/* KeyCheck -- check the validity of an argument key */ + +Bool KeyCheck(Key key) +{ + CHECKS(Key, key); + CHECKL(key->name != NULL); + CHECKL(FUNCHECK(key->check)); + return TRUE; +} + + +Bool ArgCheck(Arg arg) +{ + CHECKL(arg != NULL); + CHECKD(Key, arg->key); + CHECKL(arg->key->check(arg)); + return TRUE; +} + + +/* ArgCheck -- check the validity of an argument list */ + +Bool ArgListCheck(ArgList args) +{ + Index i; + CHECKL(args != NULL); + for (i = 0; args[i].key != MPS_KEY_ARGS_END; ++i) { + CHECKL(i < MPS_ARGS_MAX); + CHECKD_NOSIG(Arg, &args[i]); + } + return TRUE; +} + + +/* ArgPick -- try to pick an argument out of the argument list by keyword */ + +Bool ArgPick(ArgStruct *argOut, ArgList args, Key key) +{ + Index i; + + AVER(argOut != NULL); + AVERT(ArgList, args); + AVERT(Key, key); + + for (i = 0; args[i].key != MPS_KEY_ARGS_END; ++i) + if (args[i].key == key) + goto found; + return FALSE; + +found: + AVERT(Arg, &args[i]); + *argOut = args[i]; + for(;;) { + 
args[i] = args[i + 1]; + if (args[i].key == MPS_KEY_ARGS_END) + break; + ++i; + } + return TRUE; +} + + +/* ArgRequire -- take a required argument out of the argument list by keyword */ + +void ArgRequire(ArgStruct *argOut, ArgList args, Key key) +{ + Bool b = ArgPick(argOut, args, key); + ASSERT(b, key->name); +} + + +/* ArgTrivVarargs -- class method to ignore deprecated varargs */ + +void ArgTrivVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + UNUSED(varargs); + args[0].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/arg.h b/mps/code/arg.h new file mode 100644 index 00000000000..b764cb86f4b --- /dev/null +++ b/mps/code/arg.h @@ -0,0 +1,91 @@ +/* arg.h: Keyword argument lists + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * .source: . + */ + +#ifndef arg_h +#define arg_h + +#include "mpmtypes.h" + + +/* KeyStruct -- keyword argument structure + * + * NOTE: Whether or not to have an argument checking field and functions + * could be conditional on build variety. Checking arguments isn't on + * the critical path, but this might save space if the MPS is being + * deployed in a tight memory situation. + */ + +#define KeySig ((Sig)0x519CE111) /* SIGnature KEYyy */ +typedef struct mps_key_s { + Sig sig; /* design.mps.sig.field */ + const char *name; + Bool (*check)(Arg arg); +} KeyStruct; + +#define ARG_DEFINE_KEY(id, type) \ + extern const KeyStruct _mps_key_##id; \ + const KeyStruct _mps_key_##id = {KeySig, "MPS_KEY_" #id, ArgCheck##type} + +#define argsNone mps_args_none + +extern Bool KeyCheck(Key key); +extern Bool ArgCheck(Arg arg); +extern Bool ArgListCheck(ArgList args); + +extern Bool ArgPick(ArgStruct *argOut, ArgList args, Key key); +extern void ArgRequire(ArgStruct *argOut, ArgList args, Key key); +extern void ArgTrivVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs); + +extern Bool ArgCheckCant(Arg arg); +extern Bool ArgCheckFormat(Arg arg); +extern Bool ArgCheckChain(Arg arg); +extern Bool ArgCheckSize(Arg arg); +extern Bool ArgCheckAddr(Arg arg); +extern Bool ArgCheckPoolDebugOptions(Arg arg); +extern Bool ArgCheckFun(Arg arg); +extern Bool ArgCheckAlign(Arg arg); +extern Bool ArgCheckBool(Arg arg); +extern Bool ArgCheckCount(Arg arg); +extern Bool ArgCheckPointer(Arg arg); +extern Bool ArgCheckRankSet(Arg arg); +extern Bool ArgCheckRank(Arg arg); +extern Bool ArgCheckdouble(Arg arg); +extern Bool ArgCheckPool(Arg arg); + + +#endif /* arg_h */ + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/awlut.c b/mps/code/awlut.c new file mode 100644 index 00000000000..30631f02eb7 --- /dev/null +++ b/mps/code/awlut.c @@ -0,0 +1,396 @@ +/* awlut.c: POOL CLASS AWL UNIT TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ * + * DESIGN + * + * .design: see + */ + +#include "mpscawl.h" +#include "mpsclo.h" +#include "mpsavm.h" +#include "fmtdy.h" +#include "testlib.h" +#include "testthr.h" +#include "mpslib.h" +#include "mps.h" +#include "mpstd.h" + +#include /* printf */ +#include /* strlen */ + + +#define testArenaSIZE ((size_t)64<<20) +#define TABLE_SLOTS 49 +#define ITERATIONS 5000 +#define CHATTER 100 + + +static mps_word_t bogus_class; + +#define UNINIT 0x041412ED + +#define DYLAN_ALIGN 4 /* depends on value defined in fmtdy.c */ + + +/* size_tAlignUp -- align w up to alignment a */ + +#define size_tAlignUp(w, a) (((w) + (a) - 1) & ~((size_t)(a) - 1)) + + +static mps_word_t wrapper_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* Extra word */ + (mps_word_t)4<<2|2, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8), /* V */ + (mps_word_t)1<<2|1, /* VL */ + 1 /* patterns */ +}; + + +static mps_word_t string_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* extra word */ + 0, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8)|(mps_word_t)3<<3|4, /* V */ + 1 /* VL */ +}; + +static mps_word_t table_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* extra word */ + (mps_word_t)1<<2|1, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8)|2, /* V */ + 1 /* VL */ +}; + + +static void initialise_wrapper(mps_word_t *wrapper) +{ + wrapper[0] = (mps_word_t)&wrapper_wrapper; + wrapper[1] = (mps_word_t)&bogus_class; +} + + +/* alloc_string - create a dylan string object + * + * create a dylan string object (byte vector) whose contents + * are the string s (including the terminating NUL) + * .assume.dylan-obj + */ + +static mps_word_t *alloc_string(const char *s, mps_ap_t ap) +{ + size_t l; + size_t objsize; + void *p; + mps_word_t *object; + + l = strlen(s)+1; + /* number of words * sizeof word */ + objsize = (2 + (l+sizeof(mps_word_t)-1)/sizeof(mps_word_t)) + * sizeof(mps_word_t); + objsize = size_tAlignUp(objsize, DYLAN_ALIGN); + do { + size_t i; + 
char *s2; + + die(mps_reserve(&p, ap, objsize), "Reserve Leaf\n"); + object = p; + object[0] = (mps_word_t)string_wrapper; + object[1] = l << 2 | 1; + s2 = (char *)&object[2]; + for(i = 0; i < l; ++i) { + s2[i] = s[i]; + } + } while(!mps_commit(ap, p, objsize)); + return object; +} + + +/* alloc_table -- create a table with n variable slots + * + * .assume.dylan-obj + */ + +static mps_word_t *alloc_table(size_t n, mps_ap_t ap) +{ + size_t objsize; + void *p; + mps_word_t *object; + + objsize = (3 + n) * sizeof(mps_word_t); + objsize = size_tAlignUp(objsize, MPS_PF_ALIGN); + do { + size_t i; + + die(mps_reserve(&p, ap, objsize), "Reserve Table\n"); + object = p; + object[0] = (mps_word_t)table_wrapper; + object[1] = 0; + object[2] = n << 2 | 1; + for(i = 0; i < n; ++i) { + object[3+i] = 0; + } + } while(!mps_commit(ap, p, objsize)); + return object; +} + + +/* gets the nth slot from a table + * .assume.dylan-obj + */ +static mps_word_t *table_slot(mps_word_t *table, size_t n) +{ + return (mps_word_t *)table[3+n]; +} + + +/* sets the nth slot in a table + * .assume.dylan-obj + */ +static void set_table_slot(mps_word_t *table, size_t n, mps_word_t *p) +{ + cdie(table[0] == (mps_word_t)table_wrapper, "set_table_slot"); + table[3+n] = (mps_word_t)p; +} + + +/* links two tables together via their link slot + * (1st fixed part slot) + */ +static void table_link(mps_word_t *t1, mps_word_t *t2) +{ + cdie(t1[0] == (mps_word_t)table_wrapper, "table_link 1"); + cdie(t2[0] == (mps_word_t)table_wrapper, "table_link 2"); + t1[1] = (mps_word_t)t2; + t2[1] = (mps_word_t)t1; +} + + +typedef struct tables_s { + mps_arena_t arena; + mps_word_t *weaktable; + mps_word_t *exacttable; + mps_word_t *preserve[TABLE_SLOTS]; /* preserves objects in the weak */ + /* table by referring to them */ + mps_ap_t weakap, exactap, bogusap, leafap; +} tables_s, *tables_t; + + +/* populate -- populate the weak table in a thread + * + * We use a thread to populate the table to avoid leaving any + * 
references to objects in the table in registers, so that we can + * test their weakness properly. + */ + +static void *populate(void *state) +{ + tables_t tables = state; + size_t i; + mps_thr_t me; + mps_root_t root; + + die(mps_thread_reg(&me, tables->arena), "mps_thread_reg(populate)"); + die(mps_root_create_thread(&root, tables->arena, me, &state), "mps_root_create_thread(populate)"); + + tables->exacttable = alloc_table(TABLE_SLOTS, tables->exactap); + tables->weaktable = alloc_table(TABLE_SLOTS, tables->weakap); + table_link(tables->exacttable, tables->weaktable); + + for(i = 0; i < TABLE_SLOTS; ++i) { + mps_word_t *string; + if (rnd() % 2 == 0) { + string = alloc_string("iamalive", tables->leafap); + tables->preserve[i] = string; + } else { + string = alloc_string("iamdead", tables->leafap); + tables->preserve[i] = 0; + } + set_table_slot(tables->weaktable, i, string); + string = alloc_string("iamexact", tables->leafap); + set_table_slot(tables->exacttable, i, string); + } + + mps_root_destroy(root); + mps_thread_dereg(me); + + return NULL; +} + +static void test(mps_arena_t arena, + mps_ap_t leafap, mps_ap_t exactap, mps_ap_t weakap, + mps_ap_t bogusap) +{ + tables_s tables; + size_t i, j; + testthr_t thr; + void *p; + + /* Leave bogusap between reserve and commit for the duration */ + die(mps_reserve(&p, bogusap, 64), "Reserve bogus"); + + tables.arena = arena; + tables.exactap = exactap; + tables.weakap = weakap; + tables.leafap = leafap; + tables.bogusap = bogusap; + + /* We using a thread for its pararallel execution, so just create + and wait for it to finish. 
*/ + testthr_create(&thr, populate, &tables); + testthr_join(&thr, NULL); + + for(j = 0; j < ITERATIONS; ++j) { + for(i = 0; i < TABLE_SLOTS; ++i) { + (void)alloc_string("spong", leafap); + } + } + + die(mps_arena_collect(arena), "mps_arena_collect"); + mps_arena_release(arena); + + for(i = 0; i < TABLE_SLOTS; ++i) { + if (tables.preserve[i] == 0) { + if (table_slot(tables.weaktable, i)) { + error("Strongly unreachable weak table entry found, " + "slot %"PRIuLONGEST".\n", (ulongest_t)i); + } else { + if (table_slot(tables.exacttable, i) != 0) { + error("Weak table entry deleted, but corresponding " + "exact table entry not deleted, slot %"PRIuLONGEST".\n", + (ulongest_t)i); + } + } + } + } + + (void)mps_commit(bogusap, p, 64); +} + + +/* setup -- set up pools for the test + * + * guff serves two purposes: + * - a pseudo stack base for the stack root. + * - pointer to a guff structure, which packages some values needed + * (arena and thr mostly) + */ + +struct guff_s { + mps_arena_t arena; + mps_thr_t thr; +}; + +ATTRIBUTE_NOINLINE +static void setup(struct guff_s *guff) +{ + mps_arena_t arena; + mps_pool_t leafpool; + mps_pool_t tablepool; + mps_fmt_t dylanfmt; + mps_fmt_t dylanweakfmt; + mps_ap_t leafap, exactap, weakap, bogusap; + mps_root_t stack; + mps_thr_t thr; + + arena = guff->arena; + thr = guff->thr; + + die(mps_root_create_thread(&stack, arena, thr, guff), + "Root Create\n"); + die(mps_fmt_create_A(&dylanfmt, arena, dylan_fmt_A()), + "Format Create\n"); + die(mps_fmt_create_A(&dylanweakfmt, arena, dylan_fmt_A_weak()), + "Format Create (weak)\n"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, dylanfmt); + die(mps_pool_create_k(&leafpool, arena, mps_class_lo(), args), + "Leaf Pool Create\n"); + } MPS_ARGS_END(args); + die(mps_pool_create(&tablepool, arena, mps_class_awl(), dylanweakfmt, + dylan_weak_dependent), + "Table Pool Create\n"); + die(mps_ap_create(&leafap, leafpool, mps_rank_exact()), + "Leaf AP Create\n"); + 
die(mps_ap_create(&exactap, tablepool, mps_rank_exact()), + "Exact AP Create\n"); + die(mps_ap_create(&weakap, tablepool, mps_rank_weak()), + "Weak AP Create\n"); + die(mps_ap_create(&bogusap, tablepool, mps_rank_exact()), + "Bogus AP Create\n"); + + test(arena, leafap, exactap, weakap, bogusap); + + mps_ap_destroy(bogusap); + mps_ap_destroy(weakap); + mps_ap_destroy(exactap); + mps_ap_destroy(leafap); + mps_pool_destroy(tablepool); + mps_pool_destroy(leafpool); + mps_fmt_destroy(dylanweakfmt); + mps_fmt_destroy(dylanfmt); + mps_root_destroy(stack); +} + + +int main(int argc, char *argv[]) +{ + struct guff_s guff; + mps_arena_t arena; + mps_thr_t thread; + + testlib_init(argc, argv); + + initialise_wrapper(wrapper_wrapper); + initialise_wrapper(string_wrapper); + initialise_wrapper(table_wrapper); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "arena_create\n"); + die(mps_thread_reg(&thread, arena), "thread_reg"); + guff.arena = arena; + guff.thr = thread; + setup(&guff); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/awluthe.c b/mps/code/awluthe.c new file mode 100644 index 00000000000..0cd848006f6 --- /dev/null +++ b/mps/code/awluthe.c @@ -0,0 +1,398 @@ +/* awluthe.c: POOL CLASS AWL UNIT TEST WITH OBJECT HEADERS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ * + * DESIGN + * + * .design: see + */ + +#include "mpscawl.h" +#include "mpsclo.h" +#include "mpsavm.h" +#include "fmthe.h" +#include "fmtdy.h" +#include "testlib.h" +#include "testthr.h" +#include "mpslib.h" +#include "mps.h" +#include "mpstd.h" + +#include /* strlen */ +#include /* printf */ + + +#define testArenaSIZE ((size_t)64<<20) +#define TABLE_SLOTS 49 +#define ITERATIONS 5000 +#define CHATTER 100 + + +static mps_word_t bogus_class; + +#define UNINIT 0x041412ED + +#define DYLAN_ALIGN 4 /* depends on value defined in fmtdy.c */ + + +/* size_tAlignUp -- align w up to alignment a */ + +#define size_tAlignUp(w, a) (((w) + (a) - 1) & ~((size_t)(a) - 1)) + + +static mps_word_t wrapper_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* Extra word */ + (mps_word_t)4<<2|2, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8), /* V */ + (mps_word_t)1<<2|1, /* VL */ + 1 /* patterns */ +}; + + +static mps_word_t string_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* extra word */ + 0, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8)|(mps_word_t)3<<3|4, /* V */ + 1 /* VL */ +}; + +static mps_word_t table_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* extra word */ + (mps_word_t)1<<2|1, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8)|2, /* V */ + 1 /* VL */ +}; + + +static void initialise_wrapper(mps_word_t *wrapper) +{ + wrapper[0] = (mps_word_t)&wrapper_wrapper; + wrapper[1] = (mps_word_t)&bogus_class; +} + + +/* alloc_string - create a dylan string object + * + * create a dylan string object (byte vector) whose contents + * are the string s (including the terminating NUL) + * .assume.dylan-obj + */ + +static mps_word_t *alloc_string(const char *s, mps_ap_t ap) +{ + size_t l; + size_t objsize; + void *p; + mps_word_t *object; + + l = strlen(s)+1; + /* number of words * sizeof word */ + objsize = (2 + (l+sizeof(mps_word_t)-1)/sizeof(mps_word_t)) + * sizeof(mps_word_t); + objsize = size_tAlignUp(objsize, DYLAN_ALIGN); + do 
{ + size_t i; + char *s2; + + die(mps_reserve(&p, ap, objsize + headerSIZE), "Reserve Leaf\n"); + object = (mps_word_t *)((char *)p + headerSIZE); + object[0] = (mps_word_t)string_wrapper; + object[1] = l << 2 | 1; + s2 = (char *)&object[2]; + for(i = 0; i < l; ++i) { + s2[i] = s[i]; + } + ((int*)p)[0] = realHeader; + ((int*)p)[1] = 0xED0ED; + } while(!mps_commit(ap, p, objsize + headerSIZE)); + return object; +} + + +/* alloc_table -- create a table with n variable slots + * + * .assume.dylan-obj + */ + +static mps_word_t *alloc_table(size_t n, mps_ap_t ap) +{ + size_t objsize; + void *p; + mps_word_t *object; + + objsize = (3 + n) * sizeof(mps_word_t); + objsize = size_tAlignUp(objsize, MPS_PF_ALIGN); + do { + size_t i; + + die(mps_reserve(&p, ap, objsize + headerSIZE), "Reserve Table\n"); + object = (mps_word_t *)((char *)p + headerSIZE); + object[0] = (mps_word_t)table_wrapper; + object[1] = 0; + object[2] = n << 2 | 1; + for(i = 0; i < n; ++i) { + object[3+i] = 0; + } + ((int*)p)[0] = realHeader; + ((int*)p)[1] = 0xED0ED; + } while(!mps_commit(ap, p, objsize + headerSIZE)); + return object; +} + + +/* gets the nth slot from a table + * .assume.dylan-obj + */ +static mps_word_t *table_slot(mps_word_t *table, size_t n) +{ + return (mps_word_t *)table[3+n]; +} + + +/* sets the nth slot in a table + * .assume.dylan-obj + */ +static void set_table_slot(mps_word_t *table, size_t n, mps_word_t *p) +{ + cdie(table[0] == (mps_word_t)table_wrapper, "set_table_slot"); + table[3+n] = (mps_word_t)p; +} + + +/* links two tables together via their link slot + * (1st fixed part slot) + */ +static void table_link(mps_word_t *t1, mps_word_t *t2) +{ + cdie(t1[0] == (mps_word_t)table_wrapper, "table_link 1"); + cdie(t2[0] == (mps_word_t)table_wrapper, "table_link 2"); + t1[1] = (mps_word_t)t2; + t2[1] = (mps_word_t)t1; +} + + +typedef struct tables_s { + mps_arena_t arena; + mps_word_t *weaktable; + mps_word_t *exacttable; + mps_word_t *preserve[TABLE_SLOTS]; /* preserves objects 
in the weak */ + /* table by referring to them */ + mps_ap_t weakap, exactap, bogusap, leafap; +} tables_s, *tables_t; + +/* populate -- populate the weak table in a thread + * + * We use a thread to populate the table to avoid leaving any + * references to objects in the table in registers, so that we can + * test their weakness properly. + */ + +static void *populate(void *state) +{ + tables_t tables = state; + size_t i; + mps_thr_t me; + mps_root_t root; + + die(mps_thread_reg(&me, tables->arena), "mps_thread_reg(populate)"); + die(mps_root_create_thread(&root, tables->arena, me, &state), "mps_root_create_thread(populate)"); + + tables->exacttable = alloc_table(TABLE_SLOTS, tables->exactap); + tables->weaktable = alloc_table(TABLE_SLOTS, tables->weakap); + table_link(tables->exacttable, tables->weaktable); + + for(i = 0; i < TABLE_SLOTS; ++i) { + mps_word_t *string; + if (rnd() % 2 == 0) { + string = alloc_string("iamalive", tables->leafap); + tables->preserve[i] = string; + } else { + string = alloc_string("iamdead", tables->leafap); + tables->preserve[i] = 0; + } + set_table_slot(tables->weaktable, i, string); + string = alloc_string("iamexact", tables->leafap); + set_table_slot(tables->exacttable, i, string); + } + + mps_root_destroy(root); + mps_thread_dereg(me); + + return NULL; +} + +static void test(mps_arena_t arena, + mps_ap_t leafap, mps_ap_t exactap, mps_ap_t weakap, + mps_ap_t bogusap) +{ + tables_s tables; + size_t i, j; + testthr_t thr; + void *p; + + /* Leave bogusap between reserve and commit for the duration */ + die(mps_reserve(&p, bogusap, 64), "Reserve bogus"); + + tables.arena = arena; + tables.exactap = exactap; + tables.weakap = weakap; + tables.leafap = leafap; + tables.bogusap = bogusap; + + /* We using a thread for its pararallel execution, so just create + and wait for it to finish. 
*/ + testthr_create(&thr, populate, &tables); + testthr_join(&thr, NULL); + + for(j = 0; j < ITERATIONS; ++j) { + for(i = 0; i < TABLE_SLOTS; ++i) { + (void)alloc_string("spong", leafap); + } + } + + die(mps_arena_collect(arena), "mps_arena_collect"); + mps_arena_release(arena); + + for(i = 0; i < TABLE_SLOTS; ++i) { + if (tables.preserve[i] == 0) { + if (table_slot(tables.weaktable, i)) { + error("Strongly unreachable weak table entry found, " + "slot %"PRIuLONGEST".\n", (ulongest_t)i); + } else { + if (table_slot(tables.exacttable, i) != 0) { + error("Weak table entry deleted, but corresponding " + "exact table entry not deleted, slot %"PRIuLONGEST".\n", + (ulongest_t)i); + } + } + } + } + + (void)mps_commit(bogusap, p, 64); +} + + +/* setup -- set up pools for the test + * + * guff serves two purposes: + * - a pseudo stack base for the stack root. + * - pointer to a guff structure, which packages some values needed + * (arena and thr mostly) + */ + +struct guff_s { + mps_arena_t arena; + mps_thr_t thr; +}; + +ATTRIBUTE_NOINLINE +static void setup(struct guff_s *guff) +{ + mps_arena_t arena; + mps_pool_t leafpool; + mps_pool_t tablepool; + mps_fmt_t dylanfmt; + mps_fmt_t dylanweakfmt; + mps_ap_t leafap, exactap, weakap, bogusap; + mps_root_t stack; + mps_thr_t thr; + + arena = guff->arena; + thr = guff->thr; + + die(mps_root_create_thread(&stack, arena, thr, guff), + "Root Create\n"); + die(EnsureHeaderFormat(&dylanfmt, arena), "EnsureHeaderFormat"); + die(EnsureHeaderWeakFormat(&dylanweakfmt, arena), "EnsureHeaderWeakFormat"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, dylanfmt); + die(mps_pool_create_k(&leafpool, arena, mps_class_lo(), args), + "Leaf Pool Create\n"); + } MPS_ARGS_END(args); + die(mps_pool_create(&tablepool, arena, mps_class_awl(), dylanweakfmt, + dylan_weak_dependent), + "Table Pool Create\n"); + die(mps_ap_create(&leafap, leafpool, mps_rank_exact()), + "Leaf AP Create\n"); + die(mps_ap_create(&exactap, tablepool, 
mps_rank_exact()), + "Exact AP Create\n"); + die(mps_ap_create(&weakap, tablepool, mps_rank_weak()), + "Weak AP Create\n"); + die(mps_ap_create(&bogusap, tablepool, mps_rank_exact()), + "Bogus AP Create\n"); + + test(arena, leafap, exactap, weakap, bogusap); + + mps_ap_destroy(bogusap); + mps_ap_destroy(weakap); + mps_ap_destroy(exactap); + mps_ap_destroy(leafap); + mps_pool_destroy(tablepool); + mps_pool_destroy(leafpool); + mps_fmt_destroy(dylanweakfmt); + mps_fmt_destroy(dylanfmt); + mps_root_destroy(stack); +} + + +int main(int argc, char *argv[]) +{ + struct guff_s guff; + mps_arena_t arena; + mps_thr_t thread; + + testlib_init(argc, argv); + + initialise_wrapper(wrapper_wrapper); + initialise_wrapper(string_wrapper); + initialise_wrapper(table_wrapper); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "arena_create\n"); + die(mps_thread_reg(&thread, arena), "thread_reg"); + guff.arena = arena; + guff.thr = thread; + setup(&guff); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/awlutth.c b/mps/code/awlutth.c new file mode 100644 index 00000000000..ac74335af67 --- /dev/null +++ b/mps/code/awlutth.c @@ -0,0 +1,355 @@ +/* awlutth.c: THREADING UNIT TEST USING POOL CLASS AWL + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * DESIGN + * + * .design: see + */ + +#include "mpscawl.h" +#include "mpsclo.h" +#include "mpsavm.h" +#include "fmtdy.h" +#include "testlib.h" +#include "testthr.h" +#include "mpslib.h" +#include "mps.h" +#include "mpstd.h" + +#include /* printf, puts */ +#include /* strlen */ + + +#define testArenaSIZE ((size_t)64<<20) +#define TABLE_SLOTS 50 +#define ITERATIONS 5000 +#define CHATTER 100 +/* The number that a half of all numbers generated from rnd are less + * than. 
Hence, probability a-half, or P a-half */ +/* see */ +#define P_A_HALF (1024uL*1024uL*1024uL - 1) /* 2^30 - 1 */ + + +static mps_word_t bogus_class; + +#define UNINIT 0x041412ED + +#define DYLAN_ALIGN 4 /* depends on value defined in fmtdy.c */ + + +static mps_word_t wrapper_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* Extra word */ + (mps_word_t)4<<2|2, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8), /* V (version 2) */ + (mps_word_t)1<<2|1, /* VL */ + 1 /* patterns */ +}; + + +static mps_word_t string_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* extra word */ + 0, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8)|(mps_word_t)3<<3|4, /* V */ + 1 /* VL */ +}; + +static mps_word_t table_wrapper[] = { + UNINIT, /* wrapper */ + UNINIT, /* class */ + 0, /* extra word */ + (mps_word_t)1<<2|1, /* F */ + (mps_word_t)2<<(MPS_WORD_WIDTH - 8)|2, /* V */ + 1 /* VL */ +}; + + +static void initialise_wrapper(mps_word_t *wrapper) +{ + wrapper[0] = (mps_word_t)&wrapper_wrapper; + wrapper[1] = (mps_word_t)&bogus_class; +} + + +/* create a dylan string object (byte vector) whose contents + * are the string s (including the terminating NUL) + * .assume.dylan-obj */ +static mps_word_t *alloc_string(const char *s, mps_ap_t ap) +{ + size_t l; + size_t objsize; + void *p; + mps_word_t *object; + + l = strlen(s)+1; + /* number of words * sizeof word */ + objsize = (2 + (l+sizeof(mps_word_t)-1)/sizeof(mps_word_t)) * + sizeof(mps_word_t); + objsize = (objsize + DYLAN_ALIGN-1)/DYLAN_ALIGN*DYLAN_ALIGN; + do { + size_t i; + char *s2; + + die(mps_reserve(&p, ap, objsize), "Reserve Leaf\n"); + object = p; + object[0] = (mps_word_t)string_wrapper; + object[1] = l << 2 | 1; + s2 = (char *)&object[2]; + for(i = 0; i < l; ++i) { + s2[i] = s[i]; + } + } while(!mps_commit(ap, p, objsize)); + return object; +} + + +/* alloc_table -- create a table with n variable slots + * + * .assume.dylan-obj + */ +static mps_word_t *alloc_table(unsigned long n, mps_ap_t ap) 
+{ + size_t objsize; + void *p; + mps_word_t *object; + objsize = (3 + n) * sizeof(mps_word_t); + objsize = (objsize + MPS_PF_ALIGN-1)/MPS_PF_ALIGN*MPS_PF_ALIGN; + do { + unsigned long i; + + die(mps_reserve(&p, ap, objsize), "Reserve Table\n"); + object = p; + object[0] = (mps_word_t)table_wrapper; + object[1] = 0; + object[2] = n << 2 | 1; + for(i = 0; i < n; ++i) { + object[3+i] = 0; + } + } while(!mps_commit(ap, p, objsize)); + return object; +} + + +/* gets the nth slot from a table + * .assume.dylan-obj + */ +static mps_word_t *table_slot(mps_word_t *table, unsigned long n) +{ + return (mps_word_t *)table[3+n]; +} + +/* sets the nth slot in a table + * .assume.dylan-obj + */ +static void set_table_slot(mps_word_t *table, + unsigned long n, mps_word_t *p) +{ + cdie(table[0] == (mps_word_t)table_wrapper, "set_table_slot"); + table[3+n] = (mps_word_t)p; +} + +/* links two tables together via their link slot + * (1st fixed part slot) + */ +static void table_link(mps_word_t *t1, mps_word_t *t2) +{ + cdie(t1[0] == (mps_word_t)table_wrapper, "table_link 1"); + cdie(t2[0] == (mps_word_t)table_wrapper, "table_link 2"); + t1[1] = (mps_word_t)t2; + t2[1] = (mps_word_t)t1; +} + + +static void test(mps_ap_t leafap, mps_ap_t exactap, mps_ap_t weakap, + mps_ap_t bogusap) +{ + mps_word_t *weaktable; + mps_word_t *exacttable; + mps_word_t *preserve[TABLE_SLOTS]; /* preserves objects in the weak */ + /* table by referring to them */ + unsigned long i, j; + void *p; + + exacttable = alloc_table(TABLE_SLOTS, exactap); + weaktable = alloc_table(TABLE_SLOTS, weakap); + table_link(exacttable, weaktable); + + /* Leave bogusap between reserve and commit for the duration */ + die(mps_reserve(&p, bogusap, 64), "Reserve bogus"); + + for(i = 0; i < TABLE_SLOTS; ++i) { + mps_word_t *string; + if(rnd() < P_A_HALF) { + string = alloc_string("iamalive", leafap); + preserve[i] = string; + } else { + string = alloc_string("iamdead", leafap); + preserve[i] = 0; + } + set_table_slot(weaktable, 
i, string); + string = alloc_string("iamexact", leafap); + set_table_slot(exacttable, i, string); + } + + for(j = 0; j < ITERATIONS; ++j) { + for(i = 0; i < TABLE_SLOTS; ++i) { + mps_word_t *string; + + string = alloc_string("spong", leafap); + UNUSED(string); + } + } + + for(i = 0; i < TABLE_SLOTS; ++i) { + if(preserve[i] == 0) { + if(table_slot(weaktable, i)) { + fprintf(stdout, + "Strongly unreachable weak table entry found, " + "slot %lu.\n", + i); + } else { + if(table_slot(exacttable, i) != 0) { + fprintf(stdout, + "Weak table entry deleted, but corresponding " + "exact table entry not deleted, slot %lu.\n", + i); + } + } + } + } + + (void)mps_commit(bogusap, p, 64); + puts("A okay\n"); +} + + +struct guff_s { + mps_arena_t arena; + mps_thr_t thr; +}; + +/* guff serves two purposes: + * A pseudo stack base for the stack root. + * Pointer to a guff structure, which packages some values needed + * (arena and thr mostly) */ +ATTRIBUTE_NOINLINE +static void setup(struct guff_s *guff) +{ + mps_arena_t arena; + mps_pool_t leafpool; + mps_pool_t tablepool; + mps_fmt_t dylanfmt; + mps_fmt_t dylanweakfmt; + mps_ap_t leafap, exactap, weakap, bogusap; + mps_root_t stack; + mps_thr_t thr; + + arena = guff->arena; + thr = guff->thr; + + die(mps_root_create_thread(&stack, arena, thr, guff), + "Root Create\n"); + die(mps_fmt_create_A(&dylanfmt, arena, dylan_fmt_A()), + "Format Create\n"); + die(mps_fmt_create_A(&dylanweakfmt, arena, dylan_fmt_A_weak()), + "Format Create (weak)\n"); + die(mps_pool_create(&leafpool, arena, mps_class_lo(), dylanfmt), + "Leaf Pool Create\n"); + die(mps_pool_create(&tablepool, arena, mps_class_awl(), dylanweakfmt, + dylan_weak_dependent), + "Table Pool Create\n"); + die(mps_ap_create(&leafap, leafpool, mps_rank_exact()), + "Leaf AP Create\n"); + die(mps_ap_create(&exactap, tablepool, mps_rank_exact()), + "Exact AP Create\n"); + die(mps_ap_create(&weakap, tablepool, mps_rank_weak()), + "Weak AP Create\n"); + die(mps_ap_create(&bogusap, tablepool, 
mps_rank_exact()), + "Bogus AP Create\n"); + + test(leafap, exactap, weakap, bogusap); + + mps_ap_destroy(bogusap); + mps_ap_destroy(weakap); + mps_ap_destroy(exactap); + mps_ap_destroy(leafap); + mps_pool_destroy(tablepool); + mps_pool_destroy(leafpool); + mps_fmt_destroy(dylanweakfmt); + mps_fmt_destroy(dylanfmt); + mps_root_destroy(stack); +} + + +static void *setup_thr(void *v) +{ + struct guff_s guff; + mps_arena_t arena = v; + mps_thr_t thread; + + die(mps_thread_reg(&thread, arena), "thread_reg"); + guff.arena = arena; + guff.thr = thread; + setup(&guff); + mps_thread_dereg(thread); + + return NULL; +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + testthr_t thread1; + + testlib_init(argc, argv); + + initialise_wrapper(wrapper_wrapper); + initialise_wrapper(string_wrapper); + initialise_wrapper(table_wrapper); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "arena_create\n"); + testthr_create(&thread1, setup_thr, arena); + setup_thr(arena); + testthr_join(&thread1, NULL); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/boot.c b/mps/code/boot.c new file mode 100644 index 00000000000..143e84c70d8 --- /dev/null +++ b/mps/code/boot.c @@ -0,0 +1,155 @@ +/* boot.c: BOOTSTRAP ALLOCATOR + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .overview: A structure and protocols for allocating memory from a + * given block. Very simple, it basically just increments a pointer. + * + * .boot.c: The Bootstrap Allocator is used to allocate C structures + * for use in the implementation, not client objects. Therefore, + * we use "C types" (void *, size_t) not "client types" (Addr, Size). 
+ */ + +#include "boot.h" +#include "mpm.h" + +SRCID(boot, "$Id$"); + + +#define BootBlockSig ((Sig)0x519B002B) /* SIGnature BOOT Block */ + + +/* BootBlockCheck -- check a BootBlock structure */ + +Bool BootBlockCheck(BootBlock boot) +{ + CHECKS(BootBlock, boot); + CHECKL(boot->base != NULL); + CHECKL(boot->alloc != NULL); + CHECKL(boot->limit != NULL); + CHECKL(boot->base <= boot->alloc); + CHECKL(boot->alloc <= boot->limit); + CHECKL(boot->base < boot->limit); + + return TRUE; +} + + +/* BootBlockInit -- initialize a BootBlock + * + * boot: a pointer to the structure to be initialized + * (must have been allocated by the caller, probably on the stack). + * base: a pointer to the base of the memory to be allocated from + * from (the memory need not be committed) + * limit: a pointer to the limit of the memory to be allocated from + */ + +Res BootBlockInit(BootBlockStruct *boot, void *base, void *limit) +{ + /* Can't check boot as we are supposed to be initializing it */ + AVER(boot != NULL); + AVER(base != NULL); + AVER(limit != NULL); + AVER(base < limit); + + boot->base = base; + boot->alloc = base; + boot->limit = limit; + boot->sig = BootBlockSig; + + AVERT(BootBlock, boot); + return ResOK; +} + + +/* BootBlockFinish -- finish a BootBlock structure */ + +void BootBlockFinish(BootBlock boot) +{ + AVERT(BootBlock, boot); + + boot->base = boot->alloc = boot->limit = NULL; + boot->sig = SigInvalid; +} + + +/* BootAllocated + * + * Returns the total amount allocated using this descriptor + */ +size_t BootAllocated(BootBlock boot) +{ + AVERT(BootBlock, boot); + + return PointerOffset(boot->base, boot->alloc); +} + + +/* BootAlloc -- allocate from BootBlock structure + * + * preturn: The returned pointer, see .boot.c. + * boot: must have been initialized with BootBlockInit(). + * size: size of requested object, see .boot.c. + * align: required alignment of object, see .boot.c. 
+ */ + +Res BootAlloc(void **pReturn, BootBlock boot, size_t size, size_t align) +{ + void *blockBase, *blockLimit; /* base, limit of candidate block */ + + AVER(pReturn != NULL); + AVERT(BootBlock, boot); + AVER(size > 0); + AVERT(Align, (Align)align); + + /* Align alloc pointer up and bounds check. */ + blockBase = PointerAlignUp(boot->alloc, align); + if(boot->limit <= blockBase || blockBase < boot->alloc) { + return ResMEMORY; + } + blockLimit = PointerAdd(blockBase, size); + /* Following checks that the ordering constraint holds: */ + /* boot->alloc <= blockBase < blockLimit <= boot->limit */ + /* (if it doesn't hold then something overallocated/wrapped round) */ + if(blockBase < boot->alloc || + blockLimit <= blockBase || + boot->limit < blockLimit) { + return ResMEMORY; + } + + /* Fits! So allocate it */ + boot->alloc = blockLimit; + *pReturn = blockBase; + return ResOK; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/boot.h b/mps/code/boot.h new file mode 100644 index 00000000000..eb8532cb742 --- /dev/null +++ b/mps/code/boot.h @@ -0,0 +1,64 @@ +/* boot.h: BOOTSTRAP ALLOCATOR INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .overview: A protocol for allocating memory from a given block. + */ + +#ifndef boot_h +#define boot_h + +#include "mpmtypes.h" + + +/* BootBlockStruct -- descriptor of the block to allocate from */ + +typedef struct BootBlockStruct +{ + Sig sig; /* design.mps.sig.field */ + void *base; + void *alloc; + void *limit; +} BootBlockStruct; + + +extern Res BootBlockInit(BootBlockStruct *boot, void *base, void *limit); +extern void BootBlockFinish(BootBlock boot); +extern Res BootAlloc(void **pReturn, BootBlock boot, size_t size, + size_t align); +extern size_t BootAllocated(BootBlock boot); +extern Bool BootBlockCheck(BootBlock boot); + + +#endif /* boot_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/bt.c b/mps/code/bt.c new file mode 100644 index 00000000000..7cc9cbd41b0 --- /dev/null +++ b/mps/code/bt.c @@ -0,0 +1,1064 @@ +/* bt.c: BIT TABLES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * READERSHIP + * + * .readership: Any MPS developer + * + * DESIGN + * + * .design: see + * + * .aver.critical: The function BTIsResRange (and anything it calls) + * is on the critical path because it is + * called by NailboardIsResRange, which is called for every object in + * a nailboarded segment when the segment is scanned or reclaimed; see + * . 
+ */ + +#include "bt.h" +#include "config.h" +#include "check.h" +#include "mpm.h" + +SRCID(bt, "$Id$"); + + +/* BTIndexAlignUp, BTIndexAlignDown -- Align bit-table indices + * + * Align bit-table indices up and down to word boundaries + */ + +#define BTIndexAlignUp(index) (IndexAlignUp((index), MPS_WORD_WIDTH)) +#define BTIndexAlignDown(index) (IndexAlignDown((index), MPS_WORD_WIDTH)) + + +/* BTMask -- generate sub-word masks + * + * Create a mask with only specified bits set + */ + +/* Return a word mask of bits set only from base and above */ +#define BTMaskLow(base) (~(Word)0 << (base)) + +/* Return a word mask of bits set only below limit */ +#define BTMaskHigh(limit) (~(Word)0 >> (MPS_WORD_WIDTH - (limit))) + +/* Return a word mask of bits set only in requested range */ +#define BTMask(base,limit) (BTMaskHigh((limit)) & BTMaskLow((base))) + + +/* BTWordIndex, BTBitIndex -- Decode BT indexes + * + * Return word and bit indexes from index + */ + +#define BTWordIndex(index) ((index) >> MPS_WORD_SHIFT) +#define BTBitIndex(index) ((index) & (MPS_WORD_WIDTH - 1)) + + +/* BTIsSmallRange -- test range size + * + * Predicate to determine whether a range is sufficiently small + * that it's not worth trying to separate words and odd bits. + * The choice of what counts as "sufficiently small" is made + * for efficiency reasons. Empirical evidence indicates that + * a good choice is ranges of size 6 or less. + */ + +#define BTIsSmallRange(base,limit) ((base) + 6 >= (limit)) + + +/* ACT_ON_RANGE -- macro to act on a base-limit range + * + * Three actions should be provided: + * - single_action(btIndex) - operates on a single bit + * - bits_action(wordIndex, base, limit) -- operates on part-words + * - word_action(wordIndex) -- Operates on full words in range + * WORD_ACTIONs should not use break or continue. + * + * If the range is small enough it will be processed a single + * bit at a time. 
Larger ranges are processed as words where + * possible, and part-words for boundary bits. + */ + +#define ACT_ON_RANGE(base,limit,single_action, \ + bits_action,word_action) \ + BEGIN \ + if (BTIsSmallRange((base), (limit))) { \ + /* Small ranges are processed most efficiently bit-by-bit */ \ + Index actBit; \ + for (actBit = (base); actBit < (limit); ++actBit) { \ + single_action(actBit); \ + } \ + } else { \ + Index actInnerBase = BTIndexAlignUp((base)); \ + if (actInnerBase > (limit)) { /* no inner range */ \ + /* Must have base < limit otherwise caught by small range case */ \ + /* And see .aver.critical. */ \ + AVER_CRITICAL((base) < (limit)); \ + bits_action(BTWordIndex((base)), \ + BTBitIndex((base)), \ + BTBitIndex((limit))); \ + } else { \ + Index actInnerLimit = BTIndexAlignDown((limit)); \ + Index actWordIndex, actWordBase, actWordLimit; \ +\ + actWordBase = BTWordIndex(actInnerBase); \ + actWordLimit = BTWordIndex(actInnerLimit); \ +\ + if ((base) < actInnerBase) { \ + bits_action(actWordBase-1, \ + BTBitIndex((base)), \ + MPS_WORD_WIDTH); \ + } \ +\ + for (actWordIndex = actWordBase; actWordIndex < actWordLimit; \ + ++actWordIndex) { \ + word_action(actWordIndex); \ + } \ +\ + if ((limit) > actInnerLimit) { \ + bits_action(actWordLimit, 0, BTBitIndex((limit))); \ + } \ + } \ + } \ + END + + +/* ACT_ON_RANGE_HIGH -- macro to act on a base-limit range + * + * in reverse order. 
Usage as for ACT_ON_RANGE + */ + +#define ACT_ON_RANGE_HIGH(base,limit,single_action, \ + bits_action,word_action) \ + BEGIN \ + if (BTIsSmallRange((base), (limit))) { \ + /* Small ranges are processed most efficiently bit-by-bit */ \ + Index actBit; \ + for (actBit = (limit); actBit > (base); --actBit) { \ + single_action(actBit - 1); \ + } \ + } else { \ + Index actInnerBase = BTIndexAlignUp((base)); \ + if (actInnerBase > (limit)) { /* no inner range */ \ + AVER((base) < (limit)); /* caught by small range case */ \ + bits_action(BTWordIndex((base)), \ + BTBitIndex((base)), \ + BTBitIndex((limit))); \ + } else { \ + Index actInnerLimit = BTIndexAlignDown((limit)); \ + Index actWordIndex, actWordBase, actWordLimit; \ +\ + actWordBase = BTWordIndex(actInnerBase); \ + actWordLimit = BTWordIndex(actInnerLimit); \ +\ + if ((limit) > actInnerLimit) { \ + bits_action(actWordLimit, 0, BTBitIndex((limit))); \ + } \ +\ + for (actWordIndex = actWordLimit; actWordIndex > actWordBase; \ + --actWordIndex) { \ + word_action(actWordIndex-1); \ + } \ +\ + if ((base) < actInnerBase) { \ + bits_action(actWordBase-1, \ + BTBitIndex((base)), \ + MPS_WORD_WIDTH); \ + } \ + } \ + } \ + END + + + +/* BTCreate -- allocate a BT from the control pool + * + * + */ + +Res BTCreate(BT *btReturn, Arena arena, Count length) +{ + Res res; + BT bt; + void *p; + + AVER(btReturn != NULL); + AVERT(Arena, arena); + AVER(length > 0); + + res = ControlAlloc(&p, arena, BTSize(length)); + if (res != ResOK) + return res; + bt = (BT)p; + + *btReturn = bt; + return ResOK; +} + + +/* BTDestroy -- free a BT to the control pool. + * + * + */ + +void BTDestroy(BT bt, Arena arena, Count length) +{ + AVER(bt != NULL); + AVERT(Arena, arena); + AVER(length > 0); + + ControlFree(arena, bt, BTSize(length)); +} + + +/* BTCheck -- check the validity of a bit table + * + * There's not much that can be checked at present. This is + * discussed in review.impl.c.bt.4. 
+ */ + +Bool BTCheck(BT bt) +{ + AVER(bt != NULL); + AVER(AddrIsAligned((Addr)bt, sizeof(Word))); + return TRUE; +} + + +/* BTSize -- return the size of a BT + * + * + */ + +Size (BTSize)(Count n) +{ + /* check that the expression used in rounding up doesn't overflow */ + AVER(n+MPS_WORD_WIDTH-1 > n); + + return BTSize(n); +} + + +/* BTGet -- get a bit from a BT + * + * + */ + +Bool (BTGet)(BT t, Index i) +{ + AVERT(BT, t); + /* Can't check i */ + + /* see macro in */ + return BTGet(t, i); +} + + +/* BTSet -- set a bit in a BT + * + * + */ + +void (BTSet)(BT t, Index i) +{ + AVERT(BT, t); + /* Can't check i */ + + /* see macro in */ + BTSet(t, i); +} + + +/* BTRes -- reset a bit in a BT + * + * + */ + +void (BTRes)(BT t, Index i) +{ + AVERT(BT, t); + /* Can't check i */ + + /* see macro in */ + BTRes(t, i); +} + + +/* BTSetRange -- set a range of bits in a BT + * + * + */ + +void BTSetRange(BT t, Index base, Index limit) +{ + AVERT(BT, t); + AVER(base < limit); + +#define SINGLE_SET_RANGE(i) \ + BTSet(t, (i)) +#define BITS_SET_RANGE(i,base,limit) \ + t[(i)] |= BTMask((base),(limit)) +#define WORD_SET_RANGE(i) \ + t[(i)] = ~(Word)(0) + + ACT_ON_RANGE(base, limit, SINGLE_SET_RANGE, + BITS_SET_RANGE, WORD_SET_RANGE); +} + + +/* BTIsResRange -- test whether a range of bits is all reset + * + * . + */ + +Bool BTIsResRange(BT bt, Index base, Index limit) +{ + AVERT_CRITICAL(BT, bt); /* See .aver.critical */ + AVER_CRITICAL(base < limit); + /* Can't check range of base or limit */ + +#define SINGLE_IS_RES_RANGE(i) \ + if (BTGet(bt, (i))) return FALSE +#define BITS_IS_RES_RANGE(i,base,limit) \ + if ((bt[(i)] & BTMask((base),(limit))) != (Word)0) return FALSE +#define WORD_IS_RES_RANGE(i) \ + if (bt[(i)] != (Word)0) return FALSE + + ACT_ON_RANGE(base, limit, SINGLE_IS_RES_RANGE, + BITS_IS_RES_RANGE, WORD_IS_RES_RANGE); + return TRUE; +} + + +/* BTIsSetRange -- test whether a range of bits is all set + * + * . 
+ */ + +Bool BTIsSetRange(BT bt, Index base, Index limit) +{ + AVERT(BT, bt); + AVER(base < limit); + /* Can't check range of base or limit */ + +#define SINGLE_IS_SET_RANGE(i) \ + if (!BTGet(bt, (i))) return FALSE +#define BITS_IS_SET_RANGE(i,base,limit) \ + BEGIN \ + Word bactMask = BTMask((base),(limit)); \ + if ((bt[(i)] & bactMask) != bactMask) \ + return FALSE; \ + END +#define WORD_IS_SET_RANGE(i) \ + if (bt[(i)] != ~(Word)0) return FALSE + + ACT_ON_RANGE(base, limit, SINGLE_IS_SET_RANGE, + BITS_IS_SET_RANGE, WORD_IS_SET_RANGE); + return TRUE; +} + + +/* BTResRange -- reset a range of bits in a BT + * + * + */ + +void BTResRange(BT t, Index base, Index limit) +{ + AVERT(BT, t); + AVER(base < limit); + +#define SINGLE_RES_RANGE(i) \ + BTRes(t, (i)) +#define BITS_RES_RANGE(i,base,limit) \ + t[(i)] &= ~(BTMask((base),(limit))) +#define WORD_RES_RANGE(i) t[(i)] = (Word)(0) + + ACT_ON_RANGE(base, limit, SINGLE_RES_RANGE, + BITS_RES_RANGE, WORD_RES_RANGE); +} + + +/* BTFindSet -- find the lowest set bit in a range in a bit table. + * + * Sets foundReturn to false if the range is entirely reset; + * in this case indexReturn is unset. Sets foundReturn to true + * otherwise. + * + * Implemented as a macro for efficiency reasons. + * The macro internally uses the label btFindSetLabel. + * If the macro must be used more than once within a function + * this label must be redefined to avoid a nameclash. E.g. + * #define btFindSetLabel uniqueLabel + * BTFindSet(...) 
+ * #undef btFindSetLabel + */ + +#define BTFindSet(foundReturn,indexReturn,bt,base,limit)\ + BEGIN \ + Bool *bfsFoundReturn = (foundReturn); \ + Index *bfsIndexReturn = (indexReturn); \ + BT bfsBt = (bt); \ + ACT_ON_RANGE((base), (limit), SINGLE_FIND_SET, \ + BITS_FIND_SET, WORD_FIND_SET); \ + *bfsFoundReturn = FALSE; \ +btFindSetLabel:; \ + END + +#define SINGLE_FIND_SET(i) \ + if (BTGet(bfsBt, (i))) { \ + *bfsIndexReturn = (i); \ + *bfsFoundReturn = TRUE; \ + goto btFindSetLabel; \ + } +#define BITS_FIND_SET(wi,base,limit) \ + BEGIN \ + Index bactWi = (wi); \ + ACTION_FIND_SET(bactWi, bfsBt[bactWi], (base), (limit)); \ + END +#define WORD_FIND_SET(wi) \ + BEGIN \ + Index wactWi = (wi); \ + ACTION_FIND_SET(wactWi, bfsBt[wactWi], 0, MPS_WORD_WIDTH); \ + END +#define ACTION_FIND_SET(wi,word,base,limit) \ + ACTION_FIND_SET_BIT((wi),(word),(base),(limit),btFindSetLabel) + + +/* ACTION_FIND_SET_BIT -- Find first set bit in a range + * + * Helper macro to find the low bit in a range of a word. + * Works by first shifting the base of the range to the low + * bits of the word. Then loops performing a binary chop + * over the data looking to see if a bit is set in the lower + * half. If not, it must be in the upper half which is then + * shifted down. The loop completes after using a chop unit + * of a single single bit. 
+ */ + +#define ACTION_FIND_SET_BIT(wi,word,base,limit,label) \ + BEGIN \ + /* no need to mask the low bits which are shifted */ \ + Index actionIndex = (base); \ + Word actionWord = ((word) & BTMaskHigh((limit))) >> actionIndex; \ + Count actionMaskWidth = (MPS_WORD_WIDTH >> 1); \ + Word actionMask = ~(Word)0 >> (MPS_WORD_WIDTH-actionMaskWidth); \ + if (actionWord != (Word)0) { \ + while (actionMaskWidth != (Count)0) { \ + if ((actionWord & actionMask) == (Word)0) { \ + actionIndex += actionMaskWidth; \ + actionWord >>= actionMaskWidth; \ + } \ + actionMaskWidth >>= 1; \ + actionMask >>= actionMaskWidth; \ + } \ + *bfsIndexReturn = ((wi) << MPS_WORD_SHIFT) | actionIndex; \ + *bfsFoundReturn = TRUE; \ + goto label; \ + } \ + END + + +/* BTFindRes -- find the lowest reset bit in a range in a bit table. + * + * Usage as for BTFindSet + * + * Internally uses the label btFindResLabel + * which must be redefined to avoid a nameclash if the macro is + * used twice in a function scope. + */ + +#define BTFindRes(foundReturn,indexReturn,bt,base,limit)\ + BEGIN \ + Bool *bfsFoundReturn = (foundReturn); \ + Index *bfsIndexReturn = (indexReturn); \ + BT bfsBt = (bt); \ + ACT_ON_RANGE((base), (limit), SINGLE_FIND_RES, \ + BITS_FIND_RES, WORD_FIND_RES); \ + *bfsFoundReturn = FALSE; \ +btFindResLabel:; \ + END + +#define SINGLE_FIND_RES(i) \ + if (!BTGet(bfsBt, (i))) { \ + *bfsIndexReturn = (i); \ + *bfsFoundReturn = TRUE; \ + goto btFindResLabel; \ + } +#define BITS_FIND_RES(wi,base,limit) \ + BEGIN \ + Index bactWi = (wi); \ + ACTION_FIND_RES(bactWi,bfsBt[bactWi], (base), (limit)); \ + END +#define WORD_FIND_RES(wi) \ + BEGIN \ + Index wactWi = (wi); \ + ACTION_FIND_RES(wactWi, bfsBt[wactWi], 0, MPS_WORD_WIDTH); \ + END +#define ACTION_FIND_RES(wi,word,base,limit) \ + ACTION_FIND_SET_BIT((wi),~(word),(base),(limit),btFindResLabel) + + +/* BTFindSetHigh -- find the highest set bit in a range in a bit table. 
+ * + * Usage as for BTFindSet + * + * Internally uses the label btFindSetHighLabel + * which must be redefined to avoid a nameclash if the macro is + * used twice in a function scope. + */ + +#define BTFindSetHigh(foundReturn,indexReturn,bt,base,limit)\ + BEGIN \ + Bool *bfsFoundReturn = (foundReturn); \ + Index *bfsIndexReturn = (indexReturn); \ + BT bfsBt = (bt); \ + ACT_ON_RANGE_HIGH((base), (limit), SINGLE_FIND_SET_HIGH, \ + BITS_FIND_SET_HIGH, WORD_FIND_SET_HIGH); \ + *bfsFoundReturn = FALSE; \ +btFindSetHighLabel:; \ + END + +#define SINGLE_FIND_SET_HIGH(i) \ + if (BTGet(bfsBt, (i))) { \ + *bfsIndexReturn = (i); \ + *bfsFoundReturn = TRUE; \ + goto btFindSetHighLabel; \ + } +#define BITS_FIND_SET_HIGH(wi,base,limit) \ + BEGIN \ + Index bactWi = (wi); \ + ACTION_FIND_SET_HIGH(bactWi, bfsBt[bactWi], (base), (limit)); \ + END +#define WORD_FIND_SET_HIGH(wi) \ + BEGIN \ + Index wactWi = (wi); \ + ACTION_FIND_SET_HIGH(wactWi, (bfsBt[wactWi]), 0, MPS_WORD_WIDTH); \ + END +#define ACTION_FIND_SET_HIGH(wi,word,base,limit) \ + ACTION_FIND_SET_BIT_HIGH((wi),(word),(base),(limit),btFindSetHighLabel) + + +/* ACTION_FIND_SET_BIT_HIGH -- Find highest set bit in a range + * + * Helper macro to find the high bit in a range of a word. 
+ * Essentially a mirror image of ACTION_FIND_SET + */ + +#define ACTION_FIND_SET_BIT_HIGH(wi,word,base,limit,label) \ + BEGIN \ + /* no need to mask the high bits which are shifted */ \ + Index actionShift = MPS_WORD_WIDTH - (limit); \ + Index actionIndex = MPS_WORD_WIDTH - 1 - actionShift; \ + Word actionWord = ((word) & BTMaskLow((base))) << actionShift; \ + Count actionMaskWidth = (MPS_WORD_WIDTH >> 1); \ + Word actionMask = ~(Word)0 << (MPS_WORD_WIDTH-actionMaskWidth); \ + if (actionWord != (Word)0) { \ + while (actionMaskWidth != (Count)0) { \ + if ((actionWord & actionMask) == (Word)0) { \ + actionIndex -= actionMaskWidth; \ + actionWord <<= actionMaskWidth; \ + } \ + actionMaskWidth >>= 1; \ + actionMask <<= actionMaskWidth; \ + } \ + *bfsIndexReturn = ((wi) << MPS_WORD_SHIFT) | actionIndex; \ + *bfsFoundReturn = TRUE; \ + goto label; \ + } \ + END + + +/* BTFindResHigh -- find the highest reset bit in a range + * + * Usage as for BTFindSet + * + * Internally uses the label btFindSetHighLabel + * which must be redefined to avoid a nameclash if the macro is + * used twice in a function scope. 
+ */ + +#define BTFindResHigh(foundReturn,indexReturn,bt,base,limit)\ + BEGIN \ + Bool *bfsFoundReturn = (foundReturn); \ + Index *bfsIndexReturn = (indexReturn); \ + BT bfsBt = (bt); \ + ACT_ON_RANGE_HIGH((base), (limit), SINGLE_FIND_RES_HIGH, \ + BITS_FIND_RES_HIGH, WORD_FIND_RES_HIGH); \ + *bfsFoundReturn = FALSE; \ +btFindResHighLabel:; \ + END + +#define SINGLE_FIND_RES_HIGH(i) \ + if (!BTGet(bfsBt, (i))) { \ + *bfsIndexReturn = (i); \ + *bfsFoundReturn = TRUE; \ + goto btFindResHighLabel; \ + } +#define BITS_FIND_RES_HIGH(wi,base,limit) \ + BEGIN \ + Index bactWi = (wi); \ + ACTION_FIND_RES_HIGH(bactWi, bfsBt[bactWi], (base), (limit)); \ + END +#define WORD_FIND_RES_HIGH(wi) \ + BEGIN \ + Index wactWi = (wi); \ + ACTION_FIND_RES_HIGH(wactWi, (bfsBt[wactWi]), 0, MPS_WORD_WIDTH); \ + END +#define ACTION_FIND_RES_HIGH(wi,word,base,limit) \ + ACTION_FIND_SET_BIT_HIGH((wi),~(word),(base),(limit),btFindResHighLabel) + + +/* BTFindResRange -- find a reset range of bits in a bit table + * + * Starts searching at the low end of the search range. + * + * . 
+ */ + +static Bool BTFindResRange(Index *baseReturn, Index *limitReturn, + BT bt, + Index searchBase, Index searchLimit, + Count minLength, Count maxLength) +{ + Bool foundRes; /* true if a reset bit is found */ + Index resBase; /* base of a candidate reset range */ + Index unseenBase; /* base of testing so far */ + Index minLimit; /* limit of minimal acceptable range */ + Index resLimit; /* limit of search for a candidate range */ + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(BT, bt); + AVER(searchBase < searchLimit); + AVER(minLength > 0); + AVER(minLength <= maxLength); + AVER(maxLength <= searchLimit - searchBase); + + foundRes = FALSE; /* don't know first reset bit */ + minLimit = 0; /* avoid spurious compiler warning */ + resBase = searchBase; /* haven't seen anything yet */ + unseenBase = searchBase; /* haven't seen anything yet */ + resLimit = searchLimit - minLength + 1; + + while (resBase < resLimit) { + Index setIndex; /* index of last set bit found */ + Bool foundSet = FALSE; /* true if a set bit is found */ + + /* Find the first reset bit if it's not already known */ + if (!foundRes) { + BTFindRes(&foundRes, &resBase, bt, unseenBase, resLimit); + if (!foundRes) { + /* failure */ + return FALSE; + } + unseenBase = resBase + 1; + minLimit = resBase + minLength; + } + + /* Look to see if there is any set bit in the minimum range */ + BTFindSetHigh(&foundSet, &setIndex, bt, unseenBase, minLimit); + if (!foundSet) { + /* Found minimum range. Extend it. 
*/ + Index setBase; /* base of search for set bit */ + Index setLimit; /* limit search for set bit */ + foundSet = FALSE; + setBase = minLimit; + setLimit = resBase + maxLength; + if (setLimit > searchLimit) + setLimit = searchLimit; + if (setLimit > setBase) + BTFindSet(&foundSet, &setIndex, bt, setBase, setLimit); + if (!foundSet) + setIndex = setLimit; + + AVER(setIndex - resBase >= minLength); + AVER(setIndex - resBase <= maxLength); + *baseReturn = resBase; + *limitReturn = setIndex; + return TRUE; + + } else { + /* Range was too small. Try again */ + unseenBase = minLimit; + resBase = setIndex + 1; + if (resBase != minLimit) { + /* Already found the start of next candidate range */ + minLimit = resBase + minLength; + /* minLimit might just have gone out of bounds, but in that + * case resBase >= resLimit and so the loop will exit. */ + AVER(minLimit <= searchLimit || resBase >= resLimit); + } else { + foundRes = FALSE; + } + } + } + + /* failure */ + return FALSE; +} + + +/* BTFindResRangeHigh -- find a reset range of bits in a bit table + * + * Starts searching at the high end of the search range. + * + * . 
+ */ + +static Bool BTFindResRangeHigh(Index *baseReturn, Index *limitReturn, + BT bt, + Index searchBase, Index searchLimit, + Count minLength, + Count maxLength) +{ + Bool foundRes; /* true if a reset bit is found */ + Index resLimit; /* limit of a candidate reset range */ + Index resIndex; /* index of highest reset bit found */ + Index unseenLimit; /* limit of testing so far */ + Index minBase; /* base of minimal acceptable range */ + Index resBase; /* base of search for a candidate range */ + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(BT, bt); + AVER(searchBase < searchLimit); + AVER(minLength > 0); + AVER(minLength <= maxLength); + AVER(maxLength <= searchLimit - searchBase); + + foundRes = FALSE; /* don't know first reset bit */ + minBase = 0; /* avoid spurious compiler warning */ + resLimit = searchLimit; /* haven't seen anything yet */ + unseenLimit = searchLimit; /* haven't seen anything yet */ + resBase = searchBase + minLength -1; + + while (resLimit > resBase) { + Index setIndex; /* index of first set bit found */ + Bool foundSet = FALSE; /* true if a set bit is found */ + + /* Find the first reset bit if it's not already known */ + if (!foundRes) { + /* Look for the limit of a range */ + BTFindResHigh(&foundRes, &resIndex, bt, resBase, unseenLimit); + if (!foundRes) { + /* failure */ + return FALSE; + } + resLimit = resIndex + 1; + unseenLimit = resIndex; + minBase = resLimit - minLength; + } + + /* Look to see if there is any set bit in the minimum range */ + BTFindSet(&foundSet, &setIndex, bt, minBase, unseenLimit); + if (!foundSet) { + /* Found minimum range. Extend it. 
*/ + Index setBase; /* base of search for set bit */ + Index setLimit; /* limit search for set bit */ + Index baseIndex; /* base of reset range found */ + foundSet = FALSE; + setLimit = minBase; + if ((searchBase + maxLength) > resLimit) + setBase = searchBase; + else + setBase = resLimit - maxLength; + if (setLimit > setBase) + BTFindSetHigh(&foundSet, &setIndex, bt, setBase, setLimit); + if (foundSet) + baseIndex = setIndex+1; + else + baseIndex = setBase; + + AVER(resLimit - baseIndex >= minLength); + AVER(resLimit - baseIndex <= maxLength); + *baseReturn = baseIndex; + *limitReturn = resLimit; + return TRUE; + + } else { + /* Range was too small. Try again */ + unseenLimit = minBase; + resLimit = setIndex; + if (resLimit != minBase) { + /* Already found the start of next candidate range. This wraps + * round if minLength > resLimit (all the variables are + * unsigned so this behaviour is defined), but that means that + * resLimit <= resBase and so the loop will exit. */ + AVER(resLimit >= minLength || resLimit <= resBase); + minBase = resLimit - minLength; + } else { + foundRes = FALSE; + } + } + } + + /* failure */ + return FALSE; +} + + +/* BTFindLongResRange -- find long range of reset bits in a bit table + * + * . + */ + +Bool BTFindLongResRange(Index *baseReturn, Index *limitReturn, + BT bt, + Index searchBase, Index searchLimit, + Count length) +{ + /* All parameters are checked by BTFindResRange. */ + return BTFindResRange(baseReturn, limitReturn, + bt, + searchBase, searchLimit, + length, searchLimit - searchBase); +} + + +/* BTFindLongResRangeHigh -- find long range of reset bits in a bit table + * + * . + */ + +Bool BTFindLongResRangeHigh(Index *baseReturn, Index *limitReturn, + BT bt, + Index searchBase, Index searchLimit, + Count length) +{ + /* All parameters are checked by BTFindResRangeHigh. 
*/ + return BTFindResRangeHigh(baseReturn, limitReturn, + bt, + searchBase, searchLimit, + length, searchLimit - searchBase); +} + + +/* BTFindShortResRange -- find short range of reset bits in a bit table + * + * . + */ + +Bool BTFindShortResRange(Index *baseReturn, Index *limitReturn, + BT bt, + Index searchBase, Index searchLimit, + Count length) +{ + /* All parameters are checked by BTFindResRange. */ + return BTFindResRange(baseReturn, limitReturn, + bt, + searchBase, searchLimit, + length, length); +} + +/* BTFindShortResRangeHigh -- find short range of reset bits in a bit table + * + * Starts looking from the top of the search range. + * + * . + */ + +Bool BTFindShortResRangeHigh(Index *baseReturn, Index *limitReturn, + BT bt, + Index searchBase, Index searchLimit, + Count length) +{ + /* All parameters are checked by BTFindResRangeHigh. */ + return BTFindResRangeHigh(baseReturn, limitReturn, + bt, + searchBase, searchLimit, + length, length); +} + + +/* BTRangesSame -- check that a range of bits in two BTs are the same. + * + * + */ + +Bool BTRangesSame(BT comparand, BT comparator, Index base, Index limit) +{ + AVERT(BT, comparand); + AVERT(BT, comparator); + AVER(base < limit); + +#define SINGLE_RANGES_SAME(i) \ + if (BTGet(comparand, (i)) != BTGet(comparator, (i))) \ + return FALSE +#define BITS_RANGES_SAME(i,base,limit) \ + BEGIN \ + Index bactI = (i); \ + Word bactMask = BTMask((base),(limit)); \ + if ((comparand[bactI] & (bactMask)) != \ + (comparator[bactI] & (bactMask))) \ + return FALSE; \ + END +#define WORD_RANGES_SAME(i) \ + BEGIN \ + Index wactI = (i); \ + if ((comparand[wactI]) != (comparator[wactI])) \ + return FALSE; \ + END + + ACT_ON_RANGE(base, limit, SINGLE_RANGES_SAME, + BITS_RANGES_SAME, WORD_RANGES_SAME); + return TRUE; +} + + +/* BTCopyInvertRange -- copy a range of bits from one BT to another, + * inverting them as you go. 
+ * + * + */ + +void BTCopyInvertRange(BT fromBT, BT toBT, Index base, Index limit) +{ + AVERT(BT, fromBT); + AVERT(BT, toBT); + AVER(fromBT != toBT); + AVER(base < limit); + +#define SINGLE_COPY_INVERT_RANGE(i) \ + if (BTGet(fromBT, (i))) \ + BTRes(toBT, (i)); \ + else \ + BTSet(toBT, (i)) +#define BITS_COPY_INVERT_RANGE(i,base,limit) \ + BEGIN \ + Index bactI = (i); \ + Word bactMask = BTMask((base),(limit)); \ + toBT[bactI] = \ + (toBT[bactI] & ~bactMask) | (~fromBT[bactI] & bactMask); \ + END +#define WORD_COPY_INVERT_RANGE(i) \ + BEGIN \ + Index wactI = (i); \ + toBT[wactI] = ~fromBT[wactI]; \ + END + + ACT_ON_RANGE(base, limit, SINGLE_COPY_INVERT_RANGE, + BITS_COPY_INVERT_RANGE, WORD_COPY_INVERT_RANGE); +} + + +/* BTCopyRange -- copy a range of bits from one BT to another + * + * + */ + +void BTCopyRange(BT fromBT, BT toBT, Index base, Index limit) +{ + AVERT(BT, fromBT); + AVERT(BT, toBT); + AVER(fromBT != toBT); + AVER(base < limit); + +#define SINGLE_COPY_RANGE(i) \ + if (BTGet(fromBT, (i))) \ + BTSet(toBT, (i)); \ + else \ + BTRes(toBT, (i)) +#define BITS_COPY_RANGE(i,base,limit) \ + BEGIN \ + Index bactI = (i); \ + Word bactMask = BTMask((base),(limit)); \ + toBT[bactI] = \ + (toBT[bactI] & ~bactMask) | (fromBT[bactI] & bactMask); \ + END +#define WORD_COPY_RANGE(i) \ + BEGIN \ + Index wactI = (i); \ + toBT[wactI] = fromBT[wactI]; \ + END + + ACT_ON_RANGE(base, limit, SINGLE_COPY_RANGE, + BITS_COPY_RANGE, WORD_COPY_RANGE); +} + + +/* BTCopyOffsetRange -- copy a range of bits from one BT to an + * offset range in another BT + * + * .slow: Can't always use ACT_ON_RANGE because word alignment + * may differ for each range. We could try to be smart about + * detecting similar alignment - but we don't. 
+ * + * + */ + +void BTCopyOffsetRange(BT fromBT, BT toBT, + Index fromBase, Index fromLimit, + Index toBase, Index toLimit) +{ + Index fromBit, toBit; + + AVERT(BT, fromBT); + AVERT(BT, toBT); + AVER(fromBT != toBT); + AVER(fromBase < fromLimit); + AVER(toBase < toLimit); + AVER((fromLimit - fromBase) == (toLimit - toBase)); + + for (ITER_PARALLEL(fromBit = fromBase, toBit = toBase); + fromBit < fromLimit; + ITER_PARALLEL(++fromBit, ++toBit)) + { + if (BTGet(fromBT, fromBit)) + BTSet(toBT, toBit); + else + BTRes(toBT, toBit); + } +} + + +/* BTCountResRange -- count number of reset bits in a range */ + +Count BTCountResRange(BT bt, Index base, Index limit) +{ + Count c = 0; + Index bit; + + AVERT(BT, bt); + AVER(base < limit); + + for (bit = base; bit < limit; ++bit) + if (!BTGet(bt, bit)) + ++c; + return c; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/bt.h b/mps/code/bt.h new file mode 100644 index 00000000000..6af2f7e63f8 --- /dev/null +++ b/mps/code/bt.h @@ -0,0 +1,105 @@ +/* bt.h: Bit Table Interface + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .source: + */ + +#ifndef bt_h +#define bt_h + +#include "mpmtypes.h" + + +/* */ +extern Size (BTSize)(Count length); +#define BTSize(n) (((n) + MPS_WORD_WIDTH-1) / MPS_WORD_WIDTH * sizeof(Word)) + +/* */ +extern Bool (BTGet)(BT bt, Index index); +#define BTGet(a, i) \ + ((Bool)(((a)[((i) >> MPS_WORD_SHIFT)] \ + >> ((i) & ~((Word)-1 << MPS_WORD_SHIFT))) \ + & (Word)1)) + +/* */ +extern void (BTSet)(BT bt, Index index); +#define BTSet(a, i) \ + BEGIN \ + (a)[((i)>>MPS_WORD_SHIFT)] |= (Word)1<<((i)&~((Word)-1< */ +extern void (BTRes)(BT bt, Index index); +#define BTRes(a, i) \ + BEGIN \ + (a)[((i)>>MPS_WORD_SHIFT)] &= \ + ~((Word)1 << ((i) & ~((Word)-1<. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/btcv.c b/mps/code/btcv.c new file mode 100644 index 00000000000..8b91bdc0ae7 --- /dev/null +++ b/mps/code/btcv.c @@ -0,0 +1,599 @@ +/* btss.c: BIT TABLE COVERAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers + * + * .coverage: Direct coverage of BTFind*ResRange*, BTRangesSame, + * BTISResRange, BTIsSetRange, BTCopyRange, BTCopyOffsetRange. + * Reasonable coverage of BTCopyInvertRange, BTResRange, + * BTSetRange, BTRes, BTSet, BTCreate, BTDestroy. + */ + + +#include "mpm.h" +#include "mpsavm.h" +#include "mps.h" +#include "testlib.h" +#include "mpslib.h" + +#include /* printf */ + +SRCID(btcv, "$Id$"); + + +/* bt*Symmetric -- Symmetric operations on bit tables + * + * The operations take 2 bit tables, btlo & bthi. + * They perform the equivalent BT* operation on btlo, and + * a reflected operation on the bits of bthi from the opposite + * direction. 
+ */ + +#define btReflectIndex(btSize, i) (btSize - (i) - 1) +#define btReflectLimit(btSize, i) (btSize - (i)) + + +static void btSetSymmetric(BT btlo, BT bthi, Count btSize, Index i) +{ + BTSet(btlo, i); + BTSet(bthi, btReflectIndex(btSize, i)); +} + +static void btResSymmetric(BT btlo, BT bthi, Count btSize, Index i) +{ + BTRes(btlo, i); + BTRes(bthi, btReflectIndex(btSize, i)); +} + +static void btSetRangeSymmetric(BT btlo, BT bthi, Count btSize, + Index base, Index limit) +{ + BTSetRange(btlo, base, limit); + BTSetRange(bthi, btReflectLimit(btSize, limit), btReflectLimit(btSize, base)); +} + +static void btResRangeSymmetric(BT btlo, BT bthi, Count btSize, + Index base, Index limit) +{ + BTResRange(btlo, base, limit); + BTResRange(bthi, btReflectLimit(btSize, limit), btReflectLimit(btSize, base)); +} + + +typedef Bool (*BTFinderFn)(Index *foundBase_o, Index *foundLimit_o, + BT bt, Index base, Index limit, Count length); + + +/* btTestSingleRange -- Test expectations for calls to BTFind*ResRange* + * + */ + +static void btTestSingleRange(BTFinderFn finder, BT bt, + Index base, Index limit, + Count length, + Bool expect, + Index expectBase, Index expectLimit) +{ + Bool found; + Index foundBase, foundLimit; + + found = finder(&foundBase, &foundLimit, bt, base, limit, length); + cdie(found == expect, "FindResRange result"); + if (expect) { + cdie(foundBase == expectBase, "FindResRange base"); + cdie(foundLimit == expectLimit, "FindResRange limit"); + } +} + + +/* btTestResRange -- Test expectations for calls to BTFindShortResRange + * + * Symmetrically call BTFindShortResRange / BTFindShortResRangeHigh + * and test the expected results + */ + +static void btTestResRange(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Count length, + Bool expect, + Index expectBase, Index expectLimit) +{ + btTestSingleRange(BTFindShortResRange, btlo, + base, limit, + length, expect, + expectBase, expectLimit); + + btTestSingleRange(BTFindShortResRangeHigh, bthi, + 
btReflectLimit(btSize, limit), + btReflectLimit(btSize, base), + length, expect, + btReflectLimit(btSize, expectLimit), + btReflectLimit(btSize, expectBase)); +} + + +/* btTestLongResRange -- Test expectations for calls to BTFindLongResRange + * + * Symmetrically call BTFindLongResRange / BTFindLongResRangeHigh + * and test the expected results + */ + +static void btTestLongResRange(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Count length, + Bool expect, + Index expectBase, Index expectLimit) +{ + btTestSingleRange(BTFindLongResRange, btlo, + base, limit, + length, expect, + expectBase, expectLimit); + + btTestSingleRange(BTFindLongResRangeHigh, bthi, + btReflectLimit(btSize, limit), + btReflectLimit(btSize, base), + length, expect, + btReflectLimit(btSize, expectLimit), + btReflectLimit(btSize, expectBase)); +} + + +/* btAllResTest -- tests with only a reset range + * + * Test finding reset ranges in an all-reset table. + */ + +static void btAllResTest(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Count length) +{ + btResRangeSymmetric(btlo, bthi, btSize, 0, btSize); + btTestResRange(btlo, bthi, btSize, base, limit, length, + TRUE, base, base + length); + btTestLongResRange(btlo, bthi, btSize, base, limit, length, + TRUE, base, limit); +} + + +/* btNoResTest -- tests with no reset ranges + * + * Test finding reset ranges in an all-set search area of a table. + * Reset the area outside the search to ensure it doesn't get found + * by mistake. 
+ */ + +static void btNoResTest(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Count length) +{ + btResRangeSymmetric(btlo, bthi, btSize, 0, btSize); + btSetRangeSymmetric(btlo, bthi, btSize, base, limit); + btTestResRange(btlo, bthi, btSize, base, limit, length, + FALSE, 0, 0); + btTestLongResRange(btlo, bthi, btSize, base, limit, length, + FALSE, 0, 0); +} + + +/* btResAndFindTest -- Test finding ranges of given size + * + * Resets the range between resBase & resLimit, and then attempts + * to find it by searching in the range between base & limit. + * Expect to find the range if it's long enough, + */ + +static void btResAndFindTest(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Index resBase, Index resLimit, + Count length) +{ + btResRangeSymmetric(btlo, bthi, btSize, resBase, resLimit); + if ((resLimit - resBase) < length) { + btTestResRange(btlo, bthi, btSize, base, limit, length, + FALSE, 0, 0); + btTestLongResRange(btlo, bthi, btSize, base, limit, length, + FALSE, 0, 0); + } else { + btTestResRange(btlo, bthi, btSize, base, limit, length, + TRUE, resBase, resBase + length); + btTestLongResRange(btlo, bthi, btSize, base, limit, length, + TRUE, resBase, resLimit); + } +} + + + +/* btSingleResTest -- tests with a single reset range + * + * Test finding single ranges of various sizes + */ + +static void btSingleResTest(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Count length) +{ + Count resLen; + /* choose varying range lengths from too short to longer than needed */ + for (resLen = length - 1; resLen <= length + 1; resLen++) { + if ((resLen > 0) && (resLen < (limit - base -2))) { + /* place the ranges both near the beginning & near the end */ + /* of the search space */ + Index resBase, resLimit; + for (resBase = base; resBase <= base +2; resBase++) { + btResRangeSymmetric(btlo, bthi, btSize, 0, btSize); + btSetRangeSymmetric(btlo, bthi, btSize, base, limit); + btResAndFindTest(btlo, bthi, btSize, base, limit, + 
resBase, resBase + resLen, length); + } + for (resLimit = limit; resLimit >= limit -2; resLimit--) { + btResRangeSymmetric(btlo, bthi, btSize, 0, btSize); + btSetRangeSymmetric(btlo, bthi, btSize, base, limit); + btResAndFindTest(btlo, bthi, btSize, base, limit, + resLimit - resLen, resLimit, length); + } + } + } +} + + + +/* btDoubleResTest -- Test finding double ranges of various sizes + * + * Set up 2 ranges with various relative positions. The first + * range is always too small. + */ + + +/* Constants describing the type of arrangement of the 2 ranges */ +enum { + ArrangeGAP1 = 0, + ArrangeGAP2 = 1, + ArrangeSPREAD = 2, + ArrangeMAX +}; + +typedef unsigned Arrangement; + +/* Choose a limit for reset range 1 */ +static Index btArrangeRes1(Arrangement arrange, + Index base, Index res2Base, + Count length) +{ + switch (arrange) { + + case ArrangeGAP1: { + /* Gap between ranges is of length 1 */ + return res2Base - 1; + } + + case ArrangeGAP2: { + /* Gap between ranges is of length 2 */ + return res2Base - 2; + } + + case ArrangeSPREAD: { + /* range 1 starts as far before range 2 as possible */ + return base + length; + } + + default: + NOTREACHED; + return 0; /* keep the compiler happy */ + } +} + +/* Constants describing the type of pattern for the first range */ +enum { + PatternLEN1 = 0, + PatternSETMID = 1, + PatternJUSTSMALL = 2, + PatternMAX +}; + +typedef unsigned Pattern; + +/* Choose a limit for reset range 1 */ +static void btResetFirstRange(BT btlo, BT bthi, Count btSize, + Index res1Limit, + Count length, + Pattern pattern) +{ + switch (pattern) { + + case PatternLEN1: { + /* First range is a single reset bit */ + btResSymmetric(btlo, bthi, btSize, res1Limit-1); + return; + } + + case PatternSETMID: { + /* Actually make 2 ranges here by setting a bit in the middle */ + Index mid = res1Limit - length + (length / 2); + btResRangeSymmetric(btlo, bthi, btSize, res1Limit-length, res1Limit); + btSetSymmetric(btlo, bthi, btSize, mid); + return; + } + + case 
PatternJUSTSMALL: { + /* Range of (length - 1) */ + btResRangeSymmetric(btlo, bthi, btSize, + 1 + res1Limit - length, res1Limit); + return; + } + + default: + NOTREACHED; + } +} + + +static void btDoubleResTest(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Count length) +{ + Count res2Len; + + if (length < 2) + return; /* no possibility of making the first range too small */ + + /* choose varying range lengths for second res range */ + for (res2Len = length - 1; res2Len <= length + 1; res2Len++) { + if ((res2Len > 0) && (res2Len < (limit - base -2))) { + Index res2Limit; + /* place the second ranges near the end of the search space */ + for (res2Limit = limit; res2Limit >= limit-8; res2Limit--) { + Index res2Base = res2Limit - res2Len; + Arrangement arrange; + /* Pick one of a number of possible arrangements of the ranges */ + for (arrange = ArrangeGAP1; arrange < ArrangeMAX; arrange++) { + Index res1Limit = btArrangeRes1(arrange, base, res2Base, length); + Pattern pat; + /* Pick one of a number of pattern types for range 1 */ + for (pat = PatternLEN1; pat < PatternMAX; pat++) { + btResRangeSymmetric(btlo, bthi, btSize, 0, btSize); + btSetRangeSymmetric(btlo, bthi, btSize, base, limit); + btResetFirstRange(btlo, bthi, btSize, res1Limit, length, pat); + /* Set up range 2 and expect to find it when searching */ + btResAndFindTest(btlo, bthi, btSize, base, limit, + res2Base, res2Limit, length); + } + } + } + } + } +} + + +/* btFindRangeTests -- Test BTFind*ResRange* + * + * Run a variety of FindResRange tests with different table patterns. 
+ */ + +static void btFindRangeTests(BT btlo, BT bthi, Count btSize, + Index base, Index limit, + Count length) +{ + btAllResTest(btlo, bthi, btSize, base, limit, length); + btNoResTest(btlo, bthi, btSize, base, limit, length); + btSingleResTest(btlo, bthi, btSize, base, limit, length); + btDoubleResTest(btlo, bthi, btSize, base, limit, length); +} + + + +/* btIsRangeTests -- Test BTIsResRange & BTIsSetRange + * + * Test ranges which are all reset or set apart from single + * bits near to the base and limit (both inside and outside + * the range). + * + * Test BTRangesSame by using the same bit patterns and comparing + * with an appropriate all-set or all-reset table. + * + * These tests also test BTCopyInvertRange + */ + +static void btIsRangeTests(BT bt1, BT bt2, Count btSize, + Index base, Index limit) +{ + Index minBase, maxLimit, b, l; + + if (base > 0) { + minBase = base - 1; + } else { + minBase = 0; + } + + if (limit < btSize) { + maxLimit = limit + 1; + } else { + maxLimit = btSize; + } + + for (b = minBase; b <= base+1; b++) { + for (l = maxLimit; l >= limit-1; l--) { + /* test a table which is all reset apart from a set bit */ + /* near each of the base and limit of the range in question */ + Bool outside; /* true if set bits are both outside test range */ + + outside = (b < base) && (l > limit); + BTResRange(bt1, 0, btSize); + BTSet(bt1, b); + BTSet(bt1, l - 1); + + /* invert the table for the inverse test */ + BTCopyInvertRange(bt1, bt2, 0, btSize); + + /* Check it with BTIsResRange, and the inverse with BTIsSetRange */ + cdie(BTIsResRange(bt1, base, limit) == outside, "BTISResRange"); + cdie(BTIsSetRange(bt2, base, limit) == outside, "BTISSetRange"); + + /* Check the same range with BTRangesSame on an empty table */ + BTResRange(bt2, 0, btSize); + cdie(BTRangesSame(bt1, bt2, base, limit) == outside, "BTRangeSame"); + + /* Check the inverse with BTRangesSame on a full table */ + BTCopyInvertRange(bt1, bt2, 0, btSize); + BTSetRange(bt1, 0, btSize); + 
cdie(BTRangesSame(bt1, bt2, base, limit) == outside, "BTRangeSame"); + } + } +} + + +/* btCopyTests -- Test BTCopyRange & BTCopyOffsetRange + * + * Test copying ranges which are all reset or set apart from + * single bits near to the base and limit (both inside and outside + * the range). + * + */ + +static void btCopyTests(BT bt1, BT bt2, Count btSize, + Index base, Index limit) +{ + Index minBase, maxLimit, b, l; + + if (base > 0) { + minBase = base - 1; + } else { + minBase = 0; + } + + if (limit < btSize) { + maxLimit = limit + 1; + } else { + maxLimit = btSize; + } + + for (b = minBase; b <= base+1; b++) { + for (l = maxLimit; l >= limit-1; l--) { + /* initialize a table which is all reset apart from a set bit */ + /* near each of the base and limit of the range in question */ + Bool outside; /* true if set bits are both outside test range */ + + outside = (b < base) && (l > limit); + BTResRange(bt1, 0, btSize); + BTSet(bt1, b); + BTSet(bt1, l - 1); + + /* check copying the region to the bottom of the other table */ + BTCopyOffsetRange(bt1, bt2, base, limit, 0, limit - base); + cdie(BTIsResRange(bt2, 0, limit - base) == outside, "BTIsResRange"); + + /* check copying the region to the top of the other table */ + BTCopyOffsetRange(bt1, bt2, + base, limit, btSize + base - limit, btSize); + cdie(BTIsResRange(bt2, btSize + base - limit, btSize) == outside, + "BTIsResRange"); + + /* check copying the region to the same place in the other table */ + BTCopyOffsetRange(bt1, bt2, base, limit, base, limit); + cdie(BTIsResRange(bt2, base, limit) == outside, "BTIsResRange"); + + /* copy the range and check its the same */ + BTCopyRange(bt1, bt2, base, limit); + cdie(BTRangesSame(bt1, bt2, base, limit), "BTRangeSame"); + + /* invert the table, then copy it and check it again */ + BTCopyInvertRange(bt2, bt1, 0, btSize); + BTCopyRange(bt1, bt2, base, limit); + cdie(BTRangesSame(bt1, bt2, base, limit), "BTRangeSame"); + } + } +} + + + +/* btTests -- Do all the tests + */ + 
+static void btTests(BT btlo, BT bthi, Count btSize) +{ + Index base, limit; + + /* Perform lots of tests over different subranges */ + for (base = 0; base < MPS_WORD_WIDTH; base++) { + for (limit = btSize; limit > (btSize-MPS_WORD_WIDTH); limit--) { + /* Perform Is*Range tests over those subranges */ + btIsRangeTests(btlo, bthi, btSize, base, limit); + + /* Perform Copy*Range tests over those subranges */ + btCopyTests(btlo, bthi, btSize, base, limit); + + /* Perform FindResRange tests with different lengths */ + btFindRangeTests(btlo, bthi, btSize, base, limit, 1); + btFindRangeTests(btlo, bthi, btSize, base, limit, 2); + btFindRangeTests(btlo, bthi, btSize, base, limit, MPS_WORD_WIDTH - 1); + btFindRangeTests(btlo, bthi, btSize, base, limit, MPS_WORD_WIDTH); + btFindRangeTests(btlo, bthi, btSize, base, limit, MPS_WORD_WIDTH + 1); + btFindRangeTests(btlo, bthi, btSize, base, limit, limit - base -1); + btFindRangeTests(btlo, bthi, btSize, base, limit, limit - base); + } + } +} + + +/* Start the world */ + +#define testArenaSIZE (((size_t)64)<<20) + +int main(int argc, char *argv[]) +{ + mps_arena_t mpsArena; + Arena arena; /* the ANSI arena which we use to allocate the BT */ + BT btlo, bthi; + Count btSize; + + /* tests need 4 whole words plus a few extra bits */ + btSize = MPS_WORD_WIDTH * 4 + 10; + + testlib_init(argc, argv); + + die(mps_arena_create(&mpsArena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + arena = (Arena)mpsArena; /* avoid pun */ + + die((mps_res_t)BTCreate(&btlo, arena, btSize), + "failed to create low bit table"); + + die((mps_res_t)BTCreate(&bthi, arena, btSize), + "failed to create high bit table"); + + btTests(btlo, bthi, btSize); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/bttest.c b/mps/code/bttest.c new file mode 100644 index 00000000000..3713eaa70ed --- /dev/null +++ b/mps/code/bttest.c @@ -0,0 +1,405 @@ +/* bttest.c: BIT TABLE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + + +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "testlib.h" +#include "mpslib.h" +#include "mpstd.h" + +#include /* fflush, fgets, printf, putchar, puts */ +#include /* exit, strtol */ + +SRCID(bttest, "$Id$"); + + +static BT bt; /* the BT which we will use */ +static Size btSize; /* the size of the current BT */ +static Arena arena; /* the arena which we use to allocate the BT */ + + +#define MAX_ARGS 3 + + +static Word args[MAX_ARGS]; +static Count argCount; + + +static Bool argInRange(Index arg) +{ + if (bt == NULL) { + printf("no BT\n"); + return FALSE; + } + if (args[arg] >= btSize) { + printf("out of range\n"); + return FALSE; + } + return TRUE; +} + + +static Bool checkDefaultRange(Index arg) +{ + if (bt == NULL) { + printf("no BT\n"); + return FALSE; + } + if (argCount == arg+1) { + printf("range half-specified\n"); + return FALSE; + } + if (argCount == arg) { /* use default range */ + args[arg] = 0; + args[arg+1] = btSize; + return TRUE; + } + if (args[arg] >= args[arg+1]) { + printf("range ill-formed\n"); + return FALSE; + } + if (args[arg+1] > btSize) { + printf("range too high\n"); + return FALSE; + } + return TRUE; /* explicit valid range */ +} + + +static void quit(void) +{ + exit(0); +} + + +static void destroy(void) +{ + if (bt != NULL) { + BTDestroy(bt, arena, btSize); + bt = NULL; + } else { + printf("No BT to destroy\n"); + } +} + +static void create(void) +{ + Res res; + if (args[0] < 1) { + printf("can't create a BT of size 0\n"); + return; + } + if (bt != NULL) + destroy(); + res = BTCreate(&bt, arena, args[0]); + if (res == ResOK) { + btSize = args[0]; + BTResRange(bt, 0, btSize); + } else { + printf("BTCreate returned %d\n",res); + } +} + + +static void set(void) +{ + if (argInRange(0)) + (BTSet)(bt, args[0]); +} + + +static void reset(void) +{ + if (argInRange(0)) + (BTRes)(bt, args[0]); +} + + +static void get(void) +{ + if (argInRange(0)) { + Bool b = (BTGet)(bt, args[0]); + puts(b ? 
"TRUE" : "FALSE"); + } +} + + +static void setRange(void) +{ + if (checkDefaultRange(0)) + BTSetRange(bt, args[0], args[1]); +} + + +static void resetRange(void) +{ + if (checkDefaultRange(0)) + BTResRange(bt, args[0], args[1]); +} + + +static void isSetRange(void) +{ + if (checkDefaultRange(0)) { + Bool b = BTIsSetRange(bt, args[0], args[1]); + puts(b ? "TRUE" : "FALSE"); + } +} + + +static void isResRange(void) +{ + if (checkDefaultRange(0)) { + Bool b = BTIsResRange(bt, args[0], args[1]); + puts(b ? "TRUE" : "FALSE"); + } +} + + +static void findShortResRange(void) +{ + if (checkDefaultRange(1)) { + if (args[0] > (args[2] - args[1])) { + printf("can't fit length in range\n"); + } else { + Index base, limit; + Bool b = BTFindShortResRange(&base, &limit, bt, + args[1], args[2], args[0]); + if (b) + printf("%"PRIuLONGEST" - %"PRIuLONGEST"\n", + (ulongest_t)base, (ulongest_t)limit); + else + printf("FALSE\n"); + } + } +} + + +static void findShortResRangeHigh(void) +{ + if (checkDefaultRange(1)) { + if (args[0] > (args[2] - args[1])) { + printf("can't fit length in range\n"); + } else { + Index base, limit; + Bool b = BTFindShortResRangeHigh(&base, &limit, bt, + args[1], args[2], args[0]); + if (b) + printf("%"PRIuLONGEST" - %"PRIuLONGEST"\n", + (ulongest_t)base, (ulongest_t)limit); + else + printf("FALSE\n"); + } + } +} + +static void findLongResRange(void) +{ + if (checkDefaultRange(1)) { + if (args[0] > (args[2] - args[1])) { + printf("can't fit length in range\n"); + } else { + Index base, limit; + Bool b = BTFindLongResRange(&base, &limit, bt, + args[1], args[2], args[0]); + if (b) + printf("%"PRIuLONGEST" - %"PRIuLONGEST"\n", + (ulongest_t)base, (ulongest_t)limit); + else + printf("FALSE\n"); + } + } +} + + +static void help(void) +{ + printf("c create a BT of size 's'\n" + "d destroy the current BT\n" + "s set the bit index 'i'\n" + "r reset the bit index 'i'\n" + "g get the bit index 'i'\n"); + printf("sr [ ] set the specified range\n" + "rr [ ] reset the 
specified range\n" + "is [ ] is the specified range set?\n" + "ir [ ] is the specified range reset?\n"); + printf("f [ ] find a reset range of length 'l'.\n" + "fh [ ] find a reset range length 'l', working downwards\n" + "fl [ ] find a reset range of length at least 'l'\n" + "q quit\n" + "? print this message\n"); + printf("\n" + "No way of testing BTSize, BTRangesSame, or BTCopyInvertRange.\n"); +} + + +static struct commandShapeStruct { + const char *name; + Count min_args; + Count max_args; + void (*fun)(void); +} commandShapes[] = { + {"c", 1, 1, create}, + {"d", 0, 0, destroy}, + {"s", 1, 1, set}, + {"r", 1, 1, reset}, + {"g", 1, 1, get}, + {"sr", 0, 2, setRange}, + {"rr", 0, 2, resetRange}, + {"is", 0, 2, isSetRange}, + {"ir", 0, 2, isResRange}, + {"f", 1, 3, findShortResRange}, + {"fh", 1, 3, findShortResRangeHigh}, + {"fl", 1, 3, findLongResRange}, + {"?", 0, 0, help}, + {"q", 0, 0, quit}, + { NULL, 0, 0, NULL} +}; + + +typedef struct commandShapeStruct *commandShape; + + +static void obeyCommand(const char *command) +{ + commandShape shape = commandShapes; + while(shape->name != NULL) { + const char *csp = shape->name; + const char *p = command; + while (*csp == *p) { + csp++; + p++; + } + if ((*csp == 0) && ((*p == '\n') || (*p == ' '))) { /* complete match */ + argCount = 0; + while ((*p == ' ') && (argCount < shape->max_args)) { + /* get an argument */ + char *newP; + long l; + l = strtol(p, &newP, 0); + if(l < 0) { /* negative integer */ + printf("negative integer arguments are invalid\n"); + return; + } + args[argCount] = (unsigned long)l; + if (newP == p) { /* strtoul failed */ + printf("couldn't parse an integer argument\n"); + return; + } + p = newP; + ++ argCount; + } + if (argCount < shape->min_args) { + printf("insufficient arguments to command\n"); + } else if (*p != '\n') { + printf("too many arguments to command\n"); + } else { /* do the command */ + shape->fun(); + } + return; + } else { + ++ shape; /* try next command */ + } + } + 
printf("command not understood\n"); + help(); +} + + +static void showBT(void) +{ + Index i; + char c; + if (bt == NULL) + return; + i = 0; + while((i < btSize) && (i < 50)) { + if (i % 10 == 0) + c = (char)(((i / 10) % 10) + '0'); + else + c = ' '; + putchar(c); + ++ i; + } + putchar('\n'); + i = 0; + while((i < btSize) && (i < 50)) { + c = (char)((i % 10) +'0'); + putchar(c); + ++ i; + } + putchar('\n'); + i = 0; + while(i < btSize) { + if (BTGet(bt,i)) + c = 'O'; + else + c = '.'; + putchar(c); + ++ i; + if (i % 50 == 0) + putchar('\n'); + } + putchar('\n'); +} + + +#define testArenaSIZE (((size_t)64)<<20) + +int main(int argc, char *argv[]) +{ + bt = NULL; + btSize = 0; + + testlib_init(argc, argv); + + die(mps_arena_create((mps_arena_t*)&arena, mps_arena_class_vm(), + testArenaSIZE), + "mps_arena_create"); + while(1) { + char input[100]; + printf("bt test> "); + (void)fflush(stdout); + if (fgets(input, 100, stdin)) { + obeyCommand(input); + showBT(); + } else { + return 0; + } + } +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/buffer.c b/mps/code/buffer.c new file mode 100644 index 00000000000..5cf0f7ed419 --- /dev/null +++ b/mps/code/buffer.c @@ -0,0 +1,1354 @@ +/* buffer.c: ALLOCATION BUFFER IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: This is (part of) the implementation of allocation buffers. + * Several macros which also form part of the implementation are in + * . Several macros forming part of should be + * consistent with the macros and functions in this module. + * + * DESIGN + * + * .design: . + * + * .ap.async: The mutator is allowed to change certain AP fields + * asynchronously. Functions that can be called on buffers not + * synchronized with the mutator must take care when reading these + * fields. Such functions are marked with this tag. + * + * TRANSGRESSIONS + * + * .trans.mod: pool->bufferSerial is directly accessed by this module + * because does not provide an interface. + */ + +#include "mpm.h" + +SRCID(buffer, "$Id$"); + + +/* BufferCheck -- check consistency of a buffer + * + * See .ap.async. 
*/ + +Bool BufferCheck(Buffer buffer) +{ + CHECKS(Buffer, buffer); + CHECKC(Buffer, buffer); + CHECKL(buffer->serial < buffer->pool->bufferSerial); /* .trans.mod */ + CHECKU(Arena, buffer->arena); + CHECKU(Pool, buffer->pool); + CHECKL(buffer->arena == buffer->pool->arena); + CHECKD_NOSIG(Ring, &buffer->poolRing); + CHECKL(BoolCheck(buffer->isMutator)); + CHECKL(buffer->fillSize >= 0.0); + CHECKL(buffer->emptySize >= 0.0); + CHECKL(buffer->emptySize <= buffer->fillSize); + CHECKL(buffer->alignment == buffer->pool->alignment); + CHECKL(AlignCheck(buffer->alignment)); + + /* If any of the buffer's fields indicate that it is reset, make */ + /* sure it is really reset. Otherwise, check various properties */ + /* of the non-reset fields. */ + if (buffer->mode & BufferModeTRANSITION) { + /* nothing to check */ + } else if ((buffer->mode & BufferModeATTACHED) == 0 + || buffer->base == (Addr)0 + || buffer->ap_s.init == (Addr)0 + || buffer->ap_s.alloc == (Addr)0 + || buffer->poolLimit == (Addr)0) { + CHECKL((buffer->mode & BufferModeATTACHED) == 0); + CHECKL(buffer->base == (Addr)0); + CHECKL(buffer->initAtFlip == (Addr)0); + CHECKL(buffer->ap_s.init == (Addr)0); + CHECKL(buffer->ap_s.alloc == (Addr)0); + CHECKL(buffer->ap_s.limit == (Addr)0); + /* Nothing reliable to check for lightweight frame state */ + CHECKL(buffer->poolLimit == (Addr)0); + } else { + /* The buffer is attached to a region of memory. */ + /* Check consistency. */ + CHECKL(buffer->mode & BufferModeATTACHED); + + /* These fields should obey the ordering */ + /* base <= init <= alloc <= poolLimit */ + CHECKL((mps_addr_t)buffer->base <= buffer->ap_s.init); + CHECKL(buffer->ap_s.init <= buffer->ap_s.alloc); + CHECKL(buffer->ap_s.alloc <= (mps_addr_t)buffer->poolLimit); + + /* Check that the fields are aligned to the buffer alignment. 
*/ + CHECKL(AddrIsAligned(buffer->base, buffer->alignment)); + CHECKL(AddrIsAligned(buffer->initAtFlip, buffer->alignment)); + CHECKL(AddrIsAligned(buffer->ap_s.init, buffer->alignment)); + CHECKL(AddrIsAligned(buffer->ap_s.alloc, buffer->alignment)); + CHECKL(AddrIsAligned(buffer->ap_s.limit, buffer->alignment)); + CHECKL(AddrIsAligned(buffer->poolLimit, buffer->alignment)); + + /* If the buffer isn't trapped then "limit" should be the limit */ + /* set by the owning pool. Otherwise, "init" is either at the */ + /* same place it was at flip (.commit.before) or has been set */ + /* to "alloc" (.commit.after). Also, when the buffer is */ + /* flipped, initAtFlip should hold the init at flip, which is */ + /* between the base and current init. Otherwise, initAtFlip */ + /* is kept at zero to avoid misuse (see */ + /* request.dylan.170429.sol.zero_). */ + /* .. _request.dylan.170429.sol.zero: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170429 */ + + if (BufferIsTrapped(buffer)) { + /* .check.use-trapped: This checking function uses BufferIsTrapped, */ + /* So BufferIsTrapped can't do checking as that would cause an */ + /* infinite loop. */ + if (buffer->mode & BufferModeFLIPPED) { + CHECKL(buffer->ap_s.init == buffer->initAtFlip + || buffer->ap_s.init == buffer->ap_s.alloc); + CHECKL(buffer->base <= buffer->initAtFlip); + CHECKL(buffer->initAtFlip <= (Addr)buffer->ap_s.init); + } + /* Nothing special to check in the logged mode. */ + } else { + CHECKL(buffer->initAtFlip == (Addr)0); + } + } + + return TRUE; +} + + +/* BufferDescribe -- write out description of buffer + * + * See for structure definitions. 
*/ + +static Res BufferAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Buffer buffer = CouldBeA(Buffer, inst); + Res res; + + if (!TESTC(Buffer, buffer)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, Buffer, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + return WriteF(stream, depth + 2, + "serial $U\n", (WriteFU)buffer->serial, + "Arena $P\n", (WriteFP)buffer->arena, + "Pool $P\n", (WriteFP)buffer->pool, + buffer->isMutator ? "Mutator" : "Internal", " Buffer\n", + "mode $C$C$C$C (TRANSITION, LOGGED, FLIPPED, ATTACHED)\n", + (WriteFC)((buffer->mode & BufferModeTRANSITION) ? 't' : '_'), + (WriteFC)((buffer->mode & BufferModeLOGGED) ? 'l' : '_'), + (WriteFC)((buffer->mode & BufferModeFLIPPED) ? 'f' : '_'), + (WriteFC)((buffer->mode & BufferModeATTACHED) ? 'a' : '_'), + "fillSize $U\n", (WriteFU)buffer->fillSize, + "emptySize $U\n", (WriteFU)buffer->emptySize, + "alignment $W\n", (WriteFW)buffer->alignment, + "base $A\n", (WriteFA)buffer->base, + "initAtFlip $A\n", (WriteFA)buffer->initAtFlip, + "init $A\n", (WriteFA)buffer->ap_s.init, + "alloc $A\n", (WriteFA)buffer->ap_s.alloc, + "limit $A\n", (WriteFA)buffer->ap_s.limit, + "poolLimit $A\n", (WriteFA)buffer->poolLimit, + "alignment $W\n", (WriteFW)buffer->alignment, + "rampCount $U\n", (WriteFU)buffer->rampCount, + NULL); +} + +Res BufferDescribe(Buffer buffer, mps_lib_FILE *stream, Count depth) +{ + return Method(Inst, buffer, describe)(MustBeA(Inst, buffer), stream, depth); +} + + +/* BufferInit -- initialize an allocation buffer */ + +static Res BufferAbsInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args) +{ + Arena arena; + + AVER(buffer != NULL); + AVERT(Pool, pool); + AVER(BoolCheck(isMutator)); + AVERT(ArgList, args); + + /* Superclass init */ + InstInit(CouldBeA(Inst, buffer)); + + arena = PoolArena(pool); + + /* Initialize the buffer. See for a definition of + the structure. 
sig and serial comes later .init.sig-serial */ + buffer->arena = arena; + buffer->pool = pool; + RingInit(&buffer->poolRing); + buffer->isMutator = isMutator; + if (ArenaGlobals(arena)->bufferLogging) { + buffer->mode = BufferModeLOGGED; + } else { + buffer->mode = 0; + } + buffer->fillSize = 0.0; + buffer->emptySize = 0.0; + buffer->alignment = PoolAlignment(pool); + buffer->base = (Addr)0; + buffer->initAtFlip = (Addr)0; + /* In the next three assignments we really mean zero, not NULL, because + the bit pattern is compared. It's pretty unlikely we'll encounter + a platform where this makes a difference. */ + buffer->ap_s.init = (mps_addr_t)0; + buffer->ap_s.alloc = (mps_addr_t)0; + buffer->ap_s.limit = (mps_addr_t)0; + buffer->poolLimit = (Addr)0; + buffer->rampCount = 0; + + /* .init.sig-serial: Now the vanilla stuff is initialized, sign the + buffer and give it a serial number. It can then be safely checked + in subclass methods. */ + buffer->serial = pool->bufferSerial; /* .trans.mod */ + ++pool->bufferSerial; + SetClassOfPoly(buffer, CLASS(Buffer)); + buffer->sig = BufferSig; + AVERT(Buffer, buffer); + + /* Attach the initialized buffer to the pool. */ + RingAppend(&pool->bufferRing, &buffer->poolRing); + + EVENT3(BufferInit, buffer, pool, BOOLOF(buffer->isMutator)); + + return ResOK; +} + +static Res BufferInit(Buffer buffer, BufferClass klass, + Pool pool, Bool isMutator, ArgList args) +{ + AVERT(BufferClass, klass); + return klass->init(buffer, pool, isMutator, args); +} + + +/* BufferCreate -- create an allocation buffer + * + * . + */ + +Res BufferCreate(Buffer *bufferReturn, BufferClass klass, + Pool pool, Bool isMutator, ArgList args) +{ + Res res; + Buffer buffer; + Arena arena; + void *p; + + AVER(bufferReturn != NULL); + AVERT(BufferClass, klass); + AVERT(Pool, pool); + + arena = PoolArena(pool); + + /* Allocate memory for the buffer descriptor structure. 
*/ + res = ControlAlloc(&p, arena, klass->size); + if (res != ResOK) + goto failAlloc; + buffer = p; + + /* Initialize the buffer descriptor structure. */ + res = BufferInit(buffer, klass, pool, isMutator, args); + if (res != ResOK) + goto failInit; + + *bufferReturn = buffer; + return ResOK; + +failInit: + ControlFree(arena, buffer, klass->size); +failAlloc: + return res; +} + + +/* BufferDetach -- detach a buffer from a region */ + +void BufferDetach(Buffer buffer, Pool pool) +{ + AVERT(Buffer, buffer); + AVER(BufferIsReady(buffer)); + + if (!BufferIsReset(buffer)) { + Addr init, limit; + Size spare; + + buffer->mode |= BufferModeTRANSITION; + + /* Ask the owning pool to do whatever it needs to before the */ + /* buffer is detached (e.g. copy buffer state into pool state). */ + Method(Pool, pool, bufferEmpty)(pool, buffer); + + /* run any class-specific detachment method */ + Method(Buffer, buffer, detach)(buffer); + + init = BufferGetInit(buffer); + limit = BufferLimit(buffer); + spare = AddrOffset(init, limit); + buffer->emptySize += (double)spare; + if (buffer->isMutator) { + ArenaGlobals(buffer->arena)->emptyMutatorSize += (double)spare; + ArenaGlobals(buffer->arena)->allocMutatorSize + += (double)AddrOffset(buffer->base, init); + } else { + ArenaGlobals(buffer->arena)->emptyInternalSize += (double)spare; + } + + /* Reset the buffer. */ + buffer->base = (Addr)0; + buffer->initAtFlip = (Addr)0; + buffer->ap_s.init = (mps_addr_t)0; + buffer->ap_s.alloc = (mps_addr_t)0; + buffer->ap_s.limit = (mps_addr_t)0; + buffer->poolLimit = (Addr)0; + buffer->mode &= + ~(BufferModeATTACHED|BufferModeFLIPPED|BufferModeTRANSITION); + + EVENT2(BufferEmpty, buffer, spare); + } +} + + +/* BufferDestroy -- destroy an allocation buffer + * + * . 
 * Returns TRUE iff the buffer was created for the mutator.
*/ + +Bool BufferIsMutator(Buffer buffer) +{ + AVERT(Buffer, buffer); + + return buffer->isMutator; +} + + +/* BufferSetUnflipped + * + * Unflip a buffer if it was flipped. */ + +static void BufferSetUnflipped(Buffer buffer) +{ + AVERT(Buffer, buffer); + AVER(buffer->mode & BufferModeFLIPPED); + buffer->mode &= ~BufferModeFLIPPED; + /* restore ap_s.limit if appropriate */ + if (!BufferIsTrapped(buffer)) { + buffer->ap_s.limit = buffer->poolLimit; + } + buffer->initAtFlip = (Addr)0; +} + + +/* BufferSetAllocAddr + * + * Sets the init & alloc pointers of a buffer. */ + +void BufferSetAllocAddr(Buffer buffer, Addr addr) +{ + AVERT(Buffer, buffer); + /* Can't check Addr */ + AVER(BufferIsReady(buffer)); + AVER(buffer->base <= addr); + AVER(buffer->poolLimit >= addr); + + buffer->ap_s.init = addr; + buffer->ap_s.alloc = addr; +} + + +/* BufferFramePush + * + * . + */ + +Res BufferFramePush(AllocFrame *frameReturn, Buffer buffer) +{ + Pool pool; + AVERT(Buffer, buffer); + AVER(frameReturn != NULL); + + + /* Process any flip */ + if (!BufferIsReset(buffer) && buffer->ap_s.limit == (Addr)0) { + /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */ + if (buffer->mode & BufferModeFLIPPED) { + BufferSetUnflipped(buffer); + } + } + pool = BufferPool(buffer); + return Method(Pool, pool, framePush)(frameReturn, pool, buffer); +} + + +/* BufferFramePop + * + * . + */ + +Res BufferFramePop(Buffer buffer, AllocFrame frame) +{ + Pool pool; + AVERT(Buffer, buffer); + /* frame is of an abstract type & can't be checked */ + pool = BufferPool(buffer); + return Method(Pool, pool, framePop)(pool, buffer, frame); + +} + + + +/* BufferReserve -- reserve memory from an allocation buffer + * + * .reserve: Keep in sync with . 
+ */ + +Res BufferReserve(Addr *pReturn, Buffer buffer, Size size) +{ + Addr next; + + AVER(pReturn != NULL); + AVERT(Buffer, buffer); + AVER(size > 0); + AVER(SizeIsAligned(size, BufferPool(buffer)->alignment)); + AVER(BufferIsReady(buffer)); /* */ + + /* Is there enough room in the unallocated portion of the buffer to */ + /* satisfy the request? If so, just increase the alloc marker and */ + /* return a pointer to the area below it. */ + next = AddrAdd(buffer->ap_s.alloc, size); + if (next > (Addr)buffer->ap_s.alloc && + next <= (Addr)buffer->ap_s.limit) { + buffer->ap_s.alloc = next; + *pReturn = buffer->ap_s.init; + return ResOK; + } + + /* If the buffer can't accommodate the request, call "fill". */ + return BufferFill(pReturn, buffer, size); +} + + +/* BufferAttach -- attach a region to a buffer + * + * BufferAttach is entered because of a BufferFill, or because of a Pop + * operation on a lightweight frame. */ + +void BufferAttach(Buffer buffer, Addr base, Addr limit, + Addr init, Size size) +{ + Size filled; + + AVERT(Buffer, buffer); + AVER(BufferIsReset(buffer)); + AVER(AddrAdd(base, size) <= limit); + AVER(base <= init); + AVER(init <= limit); + + /* Set up the buffer to point at the supplied region */ + buffer->mode |= BufferModeATTACHED; + buffer->base = base; + buffer->ap_s.init = init; + buffer->ap_s.alloc = AddrAdd(init, size); + /* only set limit if not logged */ + if ((buffer->mode & BufferModeLOGGED) == 0) { + buffer->ap_s.limit = limit; + } else { + AVER(buffer->ap_s.limit == (Addr)0); + } + AVER(buffer->initAtFlip == (Addr)0); + buffer->poolLimit = limit; + + filled = AddrOffset(init, limit); + buffer->fillSize += (double)filled; + if (buffer->isMutator) { + if (base != init) { /* see */ + Size prealloc = AddrOffset(base, init); + ArenaGlobals(buffer->arena)->allocMutatorSize -= (double)prealloc; + } + ArenaGlobals(buffer->arena)->fillMutatorSize += (double)filled; + } else { + ArenaGlobals(buffer->arena)->fillInternalSize += (double)filled; + 
} + + /* run any class-specific attachment method */ + Method(Buffer, buffer, attach)(buffer, base, limit, init, size); + + AVERT(Buffer, buffer); + EVENT4(BufferFill, buffer, size, base, filled); +} + + +/* BufferFill -- refill an empty buffer + * + * BufferFill is entered by the "reserve" operation on a buffer if there + * isn't enough room between "alloc" and "limit" to satisfy an + * allocation request. This might be because the buffer has been + * trapped and "limit" has been set to zero. */ + +Res BufferFill(Addr *pReturn, Buffer buffer, Size size) +{ + Res res; + Pool pool; + Addr base, limit, next; + + AVER(pReturn != NULL); + AVERT(Buffer, buffer); + AVER(size > 0); + AVER(SizeIsAligned(size, BufferPool(buffer)->alignment)); + AVER(BufferIsReady(buffer)); + + pool = BufferPool(buffer); + + /* If we're here because the buffer was trapped, then we attempt */ + /* the allocation here. */ + if (!BufferIsReset(buffer) && buffer->ap_s.limit == (Addr)0) { + /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */ + if (buffer->mode & BufferModeFLIPPED) { + BufferSetUnflipped(buffer); + } + + /* .fill.logged: If the buffer is logged then we leave it logged. */ + next = AddrAdd(buffer->ap_s.alloc, size); + if (next > (Addr)buffer->ap_s.alloc && + next <= (Addr)buffer->poolLimit) { + buffer->ap_s.alloc = next; + if (buffer->mode & BufferModeLOGGED) { + EVENT3(BufferReserve, buffer, buffer->ap_s.init, size); + } + *pReturn = buffer->ap_s.init; + return ResOK; + } + } + + /* There really isn't enough room for the allocation now. */ + AVER(AddrAdd(buffer->ap_s.alloc, size) > buffer->poolLimit || + AddrAdd(buffer->ap_s.alloc, size) < (Addr)buffer->ap_s.alloc); + + BufferDetach(buffer, pool); + + /* Ask the pool for some memory. 
*/ + res = Method(Pool, pool, bufferFill)(&base, &limit, pool, buffer, size); + if (res != ResOK) + return res; + + /* Set up the buffer to point at the memory given by the pool */ + /* and do the allocation that was requested by the client. */ + BufferAttach(buffer, base, limit, base, size); + + if (buffer->mode & BufferModeLOGGED) { + EVENT3(BufferReserve, buffer, buffer->ap_s.init, size); + } + + *pReturn = base; + return res; +} + + + +/* BufferCommit -- commit memory previously reserved + * + * .commit: Keep in sync with . */ + +Bool BufferCommit(Buffer buffer, Addr p, Size size) +{ + AVERT(Buffer, buffer); + AVER(size > 0); + AVER(SizeIsAligned(size, BufferPool(buffer)->alignment)); + AVER(!BufferIsReady(buffer)); + + /* . */ + /* .commit.before: If a flip occurs before this point, when the */ + /* pool reads "initAtFlip" it will point below the object, so it */ + /* will be trashed and the commit must fail when trip is called. */ + AVER(p == buffer->ap_s.init); + AVER(AddrAdd(buffer->ap_s.init, size) == buffer->ap_s.alloc); + + /* .commit.update: Atomically update the init pointer to declare */ + /* that the object is initialized (though it may be invalid if a */ + /* flip occurred). */ + buffer->ap_s.init = buffer->ap_s.alloc; + + /* .improve.memory-barrier: Memory barrier here on the DEC Alpha */ + /* (and other relaxed memory order architectures). */ + /* .commit.after: If a flip occurs at this point, the pool will */ + /* see "initAtFlip" above the object, which is valid, so it will */ + /* be collected. The commit must succeed when trip is called. */ + /* The pointer "p" will have been fixed up. */ + /* TODO: Check the above assertion and explain why it is so. */ + /* .commit.trip: Trip the buffer if a flip has occurred. */ + if (buffer->ap_s.limit == 0) + return BufferTrip(buffer, p, size); + + /* No flip occurred, so succeed. 
*/ + + return TRUE; +} + + +/* BufferTrip -- act on a trapped buffer + * + * Called from BufferCommit (and its equivalents) when invoked on a + * trapped buffer (indicated by limit == 0). This function can decide + * whether to succeed or fail the commit. */ + +Bool BufferTrip(Buffer buffer, Addr p, Size size) +{ + Pool pool; + + AVERT(Buffer, buffer); + AVER(p != 0); + AVER(size > 0); + AVER(SizeIsAligned(size, buffer->alignment)); + + /* The limit field should be zero, because that's how trip gets */ + /* called. See .commit.trip. */ + AVER(buffer->ap_s.limit == 0); + /* Of course we should be trapped. */ + AVER(BufferIsTrapped(buffer)); + + /* The init and alloc fields should be equal at this point, because */ + /* the step .commit.update has happened. */ + AVER(buffer->ap_s.init == buffer->ap_s.alloc); + + /* The p parameter points at the base address of the allocated */ + /* block, the end of which should now coincide with the init and */ + /* alloc fields. */ + /* Note that we don't _really_ care about p too much. We don't */ + /* do anything else with it apart from these checks. (in particular */ + /* it seems like the algorithms could be modified to cope with the */ + /* case of the object having been copied between Commit updating i */ + /* and testing limit) */ + AVER(AddrAdd(p, size) == buffer->ap_s.init); + + pool = BufferPool(buffer); + + AVER(PoolHasAddr(pool, p)); + + /* .trip.unflip: If the flip occurred before commit set "init" */ + /* to "alloc" (see .commit.before) then the object is invalid */ + /* (won't've been scanned) so undo the allocation and fail commit. */ + /* Otherwise (see .commit.after) the object is valid (will've been */ + /* scanned) so commit can simply succeed. */ + if ((buffer->mode & BufferModeFLIPPED) + && buffer->ap_s.init != buffer->initAtFlip) { + /* Reset just enough state for Reserve/Fill to work. 
*/ + /* The buffer is left trapped and we leave the untrapping */ + /* for the next reserve (which goes out of line to Fill */ + /* (.fill.unflip) because the buffer is still trapped) */ + buffer->ap_s.init = p; + buffer->ap_s.alloc = p; + return FALSE; + } + + /* Emit event including class if logged */ + if (buffer->mode & BufferModeLOGGED) { + Bool b; + Format format; + Addr clientClass; + + b = PoolFormat(&format, buffer->pool); + if (b) { + clientClass = format->klass(p); + } else { + clientClass = (Addr)0; + } + EVENT4(BufferCommit, buffer, p, size, clientClass); + } + return TRUE; +} + + +/* BufferFlip -- trap buffer at GC flip time + * + * .flip: Tells the buffer that a flip has occurred. If the buffer is + * between reserve and commit, and has a rank (i.e. references), and has + * the two-phase protocol, then the object being initialized is + * invalidated by failing the next commit. The buffer code handles this + * automatically (ie the pool implementation is not involved). If the + * buffer is reset there is no effect, since there is no object to + * invalidate. If the buffer is already flipped there is no effect, + * since the object is already invalid by a previous trace. The buffer + * becomes unflipped at the next reserve or commit operation (actually + * reserve because commit is lazy). This is handled by BufferFill + * (.fill.unflip) or BufferTrip (.trip.unflip). */ + +void BufferFlip(Buffer buffer) +{ + AVERT(Buffer, buffer); + + if (BufferRankSet(buffer) != RankSetEMPTY + && (buffer->mode & BufferModeFLIPPED) == 0 + && !BufferIsReset(buffer)) { + AVER(buffer->initAtFlip == (Addr)0); + buffer->initAtFlip = buffer->ap_s.init; + /* TODO: Is a memory barrier required here? */ + buffer->ap_s.limit = (Addr)0; + buffer->mode |= BufferModeFLIPPED; + } +} + + +/* BufferScanLimit -- return limit of data to which to scan + * + * Returns the highest address to which it is safe to scan objects in + * the buffer. 
When the buffer is not flipped, this is the "init" of + * the AP. When the buffer is flipped, it is the value that "init" had + * at flip time. [Could make BufferScanLimit return the AP "alloc" when + * using ambiguous scanning.] See .ap.async. */ + +Addr BufferScanLimit(Buffer buffer) +{ + if (buffer->mode & BufferModeFLIPPED) { + return buffer->initAtFlip; + } else { + return buffer->ap_s.init; + } +} + + +Seg BufferSeg(Buffer buffer) +{ + AVERT(Buffer, buffer); + return Method(Buffer, buffer, seg)(buffer); +} + + +RankSet BufferRankSet(Buffer buffer) +{ + AVERT(Buffer, buffer); + return Method(Buffer, buffer, rankSet)(buffer); +} + +void BufferSetRankSet(Buffer buffer, RankSet rankset) +{ + AVERT(Buffer, buffer); + AVERT(RankSet, rankset); + Method(Buffer, buffer, setRankSet)(buffer, rankset); +} + + +/* BufferReassignSeg -- adjust the seg of an attached buffer + * + * Used for segment splitting and merging. */ + +void BufferReassignSeg(Buffer buffer, Seg seg) +{ + AVERT(Buffer, buffer); + AVERT(Seg, seg); + AVER(!BufferIsReset(buffer)); + AVER(BufferBase(buffer) >= SegBase(seg)); + AVER(BufferLimit(buffer) <= SegLimit(seg)); + AVER(BufferPool(buffer) == SegPool(seg)); + Method(Buffer, buffer, reassignSeg)(buffer, seg); +} + + +/* BufferIsTrapped + * + * Indicates whether the buffer is trapped - either by MPS or the + * mutator. See .ap.async. */ + +Bool BufferIsTrapped(Buffer buffer) +{ + /* Can't check buffer, see .check.use-trapped */ + return (buffer->mode & (BufferModeFLIPPED|BufferModeLOGGED)) != 0; +} + + +/* Alloc pattern functions + * + * Just represent the two patterns by two different pointers to dummies. 
*/ + +static AllocPatternStruct AllocPatternRampStruct = {'\0'}; + +AllocPattern AllocPatternRamp(void) +{ + return &AllocPatternRampStruct; +} + +static AllocPatternStruct AllocPatternRampCollectAllStruct = {'\0'}; + +AllocPattern AllocPatternRampCollectAll(void) +{ + return &AllocPatternRampCollectAllStruct; +} + +ATTRIBUTE_UNUSED +static Bool AllocPatternCheck(AllocPattern pattern) +{ + CHECKL(pattern == &AllocPatternRampCollectAllStruct + || pattern == &AllocPatternRampStruct); + UNUSED(pattern); /* */ + return TRUE; +} + + +/* BufferRampBegin -- note an entry into a ramp pattern + * + * .ramp.hack: We count the number of times the ap has begun ramp mode + * (and not ended), so we can do reset by ending all the current ramps. */ + +void BufferRampBegin(Buffer buffer, AllocPattern pattern) +{ + Pool pool; + + AVERT(Buffer, buffer); + AVERT(AllocPattern, pattern); + + ++buffer->rampCount; + AVER(buffer->rampCount > 0); + + pool = BufferPool(buffer); + AVERT(Pool, pool); + Method(Pool, pool, rampBegin)(pool, buffer, + pattern == &AllocPatternRampCollectAllStruct); +} + + +/* BufferRampEnd -- note an exit from a ramp pattern */ + +Res BufferRampEnd(Buffer buffer) +{ + Pool pool; + + AVERT(Buffer, buffer); + + if (buffer->rampCount == 0) + return ResFAIL; + --buffer->rampCount; + + pool = BufferPool(buffer); + AVERT(Pool, pool); + Method(Pool, pool, rampEnd)(pool, buffer); + return ResOK; +} + + +/* BufferRampReset -- exit from ramp mode */ + +void BufferRampReset(Buffer buffer) +{ + Pool pool; + + AVERT(Buffer, buffer); + + if (buffer->rampCount == 0) + return; + + pool = BufferPool(buffer); + AVERT(Pool, pool); + do + Method(Pool, pool, rampEnd)(pool, buffer); + while(--buffer->rampCount > 0); +} + + + +/* BufferClass -- support for the basic Buffer class */ + + +/* bufferTrivAttach -- basic buffer attach method */ + +static void bufferTrivAttach(Buffer buffer, Addr base, Addr limit, + Addr init, Size size) +{ + /* No special attach method for simple buffers */ + 
AVERT(Buffer, buffer); + /* Other parameters are consistency checked in BufferAttach */ + UNUSED(base); + UNUSED(limit); + UNUSED(init); + UNUSED(size); + NOOP; +} + + +/* bufferTrivDetach -- basic buffer detach method */ + +static void bufferTrivDetach(Buffer buffer) +{ + /* No special detach method for simple buffers */ + AVERT(Buffer, buffer); + NOOP; +} + + +/* bufferNoSeg -- basic buffer BufferSeg accessor method + * + * .noseg: basic buffers don't support segments, so this method should + * not be called. */ + +static Seg bufferNoSeg(Buffer buffer) +{ + AVERT(Buffer, buffer); + NOTREACHED; /* .noseg */ + return NULL; +} + + + +/* bufferTrivRankSet -- basic BufferRankSet accessor method */ + +static RankSet bufferTrivRankSet(Buffer buffer) +{ + AVERT(Buffer, buffer); + /* vanilla buffers can only have empty rank set */ + return RankSetEMPTY; +} + + +/* bufferNoSetRankSet -- basic BufferSetRankSet setter method + * + * .norank: basic buffers don't support ranksets, so this method should + * not be called. */ + +static void bufferNoSetRankSet(Buffer buffer, RankSet rankset) +{ + AVERT(Buffer, buffer); + AVERT(RankSet, rankset); + NOTREACHED; /* .norank */ +} + + +/* bufferNoReassignSeg -- basic BufferReassignSeg method + * + * .noseg: basic buffers don't support attachment to segments, so this + * method should not be called. 
*/ + +static void bufferNoReassignSeg(Buffer buffer, Seg seg) +{ + AVERT(Buffer, buffer); + AVERT(Seg, seg); + NOTREACHED; /* .noseg */ +} + + +/* BufferClassCheck -- check the consistency of a BufferClass */ + +Bool BufferClassCheck(BufferClass klass) +{ + CHECKD(InstClass, &klass->instClassStruct); + CHECKL(klass->size >= sizeof(BufferStruct)); + CHECKL(FUNCHECK(klass->varargs)); + CHECKL(FUNCHECK(klass->init)); + CHECKL(FUNCHECK(klass->attach)); + CHECKL(FUNCHECK(klass->detach)); + CHECKL(FUNCHECK(klass->seg)); + CHECKL(FUNCHECK(klass->rankSet)); + CHECKL(FUNCHECK(klass->setRankSet)); + CHECKL(FUNCHECK(klass->reassignSeg)); + + /* Check that buffer classes override sets of related methods. */ + CHECKL((klass->init == BufferAbsInit) + == (klass->instClassStruct.finish == BufferAbsFinish)); + CHECKL((klass->attach == bufferTrivAttach) + == (klass->detach == bufferTrivDetach)); + + CHECKS(BufferClass, klass); + return TRUE; +} + + +/* BufferClass -- the vanilla buffer class definition + * + * . 
*/ + +DEFINE_CLASS(Inst, BufferClass, klass) +{ + INHERIT_CLASS(klass, BufferClass, InstClass); + AVERT(InstClass, klass); +} + +DEFINE_CLASS(Buffer, Buffer, klass) +{ + INHERIT_CLASS(&klass->instClassStruct, Buffer, Inst); + klass->instClassStruct.finish = BufferAbsFinish; + klass->instClassStruct.describe = BufferAbsDescribe; + klass->size = sizeof(BufferStruct); + klass->varargs = ArgTrivVarargs; + klass->init = BufferAbsInit; + klass->attach = bufferTrivAttach; + klass->detach = bufferTrivDetach; + klass->seg = bufferNoSeg; + klass->rankSet = bufferTrivRankSet; + klass->setRankSet = bufferNoSetRankSet; + klass->reassignSeg = bufferNoReassignSeg; + klass->sig = BufferClassSig; + AVERT(BufferClass, klass); +} + + + +/* SegBufClass -- support for the SegBuf subclass */ + + +/* SegBufCheck -- check consistency of a SegBuf */ + +Bool SegBufCheck(SegBuf segbuf) +{ + Buffer buffer; + CHECKS(SegBuf, segbuf); + buffer = MustBeA(Buffer, segbuf); + CHECKD(Buffer, buffer); + CHECKL(RankSetCheck(segbuf->rankSet)); + + if (buffer->mode & BufferModeTRANSITION) { + /* nothing to check */ + } else if ((buffer->mode & BufferModeATTACHED) == 0) { + CHECKL(segbuf->seg == NULL); + } else { + /* The buffer is attached to a segment. */ + CHECKL(segbuf->seg != NULL); + CHECKD(Seg, segbuf->seg); + /* To avoid recursive checking, leave it to SegCheck to make */ + /* sure the buffer and segment fields tally. */ + + if (buffer->mode & BufferModeFLIPPED) { + /* Only buffers that allocate pointers get flipped. 
*/ + CHECKL(segbuf->rankSet != RankSetEMPTY); + } + } + + return TRUE; +} + + +/* segBufInit -- SegBuf init method */ + +static Res segBufInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args) +{ + SegBuf segbuf; + Res res; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Buffer, SegBuf, init)(buffer, pool, isMutator, args); + if (res != ResOK) + return res; + segbuf = CouldBeA(SegBuf, buffer); + + segbuf->seg = NULL; + segbuf->rankSet = RankSetEMPTY; + + SetClassOfPoly(buffer, CLASS(SegBuf)); + segbuf->sig = SegBufSig; + AVERC(SegBuf, segbuf); + + EVENT3(BufferInitSeg, buffer, pool, BOOLOF(buffer->isMutator)); + return ResOK; +} + + +/* segBufFinish -- SegBuf finish method */ + +static void segBufFinish(Inst inst) +{ + Buffer buffer = MustBeA(Buffer, inst); + SegBuf segbuf = MustBeA(SegBuf, buffer); + AVER(BufferIsReset(buffer)); + segbuf->sig = SigInvalid; + NextMethod(Inst, SegBuf, finish)(inst); +} + + +/* segBufAttach -- SegBuf attach method */ + +static void segBufAttach(Buffer buffer, Addr base, Addr limit, + Addr init, Size size) +{ + SegBuf segbuf = MustBeA(SegBuf, buffer); + Seg seg = NULL; /* suppress "may be used uninitialized" */ + Arena arena; + Bool found; + + /* Other parameters are consistency checked in BufferAttach */ + UNUSED(init); + UNUSED(size); + + arena = BufferArena(buffer); + found = SegOfAddr(&seg, arena, base); + AVER(found); + AVER(segbuf->seg == NULL); + AVER(!SegHasBuffer(seg)); + AVER(SegBase(seg) <= base); + AVER(limit <= SegLimit(seg)); + + /* attach the buffer to the segment */ + SegSetBuffer(seg, buffer); + segbuf->seg = seg; + + AVERT(SegBuf, segbuf); +} + + +/* segBufDetach -- SegBuf detach method */ + +static void segBufDetach(Buffer buffer) +{ + SegBuf segbuf = MustBeA(SegBuf, buffer); + Seg seg = segbuf->seg; + SegUnsetBuffer(seg); + segbuf->seg = NULL; +} + + +/* segBufSeg -- BufferSeg accessor method for SegBuf instances */ + +static Seg segBufSeg(Buffer buffer) +{ + SegBuf 
segbuf = MustBeA(SegBuf, buffer); + return segbuf->seg; +} + + +/* segBufRankSet -- BufferRankSet accessor for SegBuf instances */ + +static RankSet segBufRankSet(Buffer buffer) +{ + SegBuf segbuf = MustBeA(SegBuf, buffer); + return segbuf->rankSet; +} + + +/* segBufSetRankSet -- BufferSetRankSet setter method for SegBuf */ + +static void segBufSetRankSet(Buffer buffer, RankSet rankset) +{ + SegBuf segbuf = MustBeA(SegBuf, buffer); + AVERT(RankSet, rankset); + segbuf->rankSet = rankset; +} + + +/* segBufReassignSeg -- BufferReassignSeg method for SegBuf + * + * Used to support segment merging and splitting. + * + * .invseg: On entry the buffer is attached to an invalid segment, which + * can't be checked. The method is called to make the attachment valid. */ + +static void segBufReassignSeg(Buffer buffer, Seg seg) +{ + SegBuf segbuf = CouldBeA(SegBuf, buffer); + AVERT(Seg, seg); + /* Can't check segbuf on entry. See .invseg */ + AVER(NULL != segbuf->seg); + AVER(seg != segbuf->seg); + segbuf->seg = seg; + AVERT(SegBuf, segbuf); +} + + +/* segBufDescribe -- describe method for SegBuf */ + +static Res segBufDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Buffer buffer = CouldBeA(Buffer, inst); + SegBuf segbuf = CouldBeA(SegBuf, buffer); + Res res; + + if (!TESTC(SegBuf, segbuf)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, SegBuf, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + return WriteF(stream, depth + 2, + "Seg $P\n", (WriteFP)segbuf->seg, + "rankSet $U\n", (WriteFU)segbuf->rankSet, + NULL); +} + + +/* SegBufClass -- SegBuf class definition + * + * Supports an association with a single segment when attached. See + * . 
*/ + +DEFINE_CLASS(Buffer, SegBuf, klass) +{ + INHERIT_CLASS(klass, SegBuf, Buffer); + klass->instClassStruct.finish = segBufFinish; + klass->instClassStruct.describe = segBufDescribe; + klass->size = sizeof(SegBufStruct); + klass->init = segBufInit; + klass->attach = segBufAttach; + klass->detach = segBufDetach; + klass->seg = segBufSeg; + klass->rankSet = segBufRankSet; + klass->setRankSet = segBufSetRankSet; + klass->reassignSeg = segBufReassignSeg; + AVERT(BufferClass, klass); +} + + +/* RankBufClass -- support for the RankBufClass subclass */ + + +/* rankBufVarargs -- parse obsolete varargs into keywords */ + +static void rankBufVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_RANK; + args[0].val.rank = va_arg(varargs, Rank); + args[1].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + +/* rankBufInit -- RankBufClass init method */ + +static Res rankBufInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args) +{ + Rank rank = BUFFER_RANK_DEFAULT; + Res res; + ArgStruct arg; + + AVERT(ArgList, args); + if (ArgPick(&arg, args, MPS_KEY_RANK)) + rank = arg.val.rank; + AVERT(Rank, rank); + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Buffer, RankBuf, init)(buffer, pool, isMutator, args); + if (res != ResOK) + return res; + + BufferSetRankSet(buffer, RankSetSingle(rank)); + + SetClassOfPoly(buffer, CLASS(RankBuf)); + AVERC(RankBuf, buffer); + + EVENT4(BufferInitRank, buffer, pool, BOOLOF(buffer->isMutator), rank); + + return ResOK; +} + + +/* RankBufClass -- RankBufClass class definition + * + * A subclass of SegBufClass, sharing structure for instances. + * + * Supports initialization to a rank supplied at creation time. */ + +DEFINE_CLASS(Buffer, RankBuf, klass) +{ + INHERIT_CLASS(klass, RankBuf, SegBuf); + klass->varargs = rankBufVarargs; + klass->init = rankBufInit; + AVERT(BufferClass, klass); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/cbs.c b/mps/code/cbs.c new file mode 100644 index 00000000000..ac936ad1347 --- /dev/null +++ b/mps/code/cbs.c @@ -0,0 +1,1248 @@ +/* cbs.c: COALESCING BLOCK STRUCTURE IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .intro: This is a portable implementation of coalescing block + * structures. + * + * .purpose: CBSs are used to manage potentially unbounded collections + * of memory blocks. + * + * .sources: . 
+ * + * .critical: In manual-allocation-bound programs using MVFF, many of + * these functions are on the critical paths via mps_alloc (and then + * PoolAlloc, MVFFAlloc, failoverFind*, cbsFind*) and mps_free (and + * then MVFFFree, failoverInsert, cbsInsert). + */ + +#include "cbs.h" +#include "rangetree.h" +#include "range.h" +#include "splay.h" +#include "meter.h" +#include "poolmfs.h" +#include "mpm.h" + +SRCID(cbs, "$Id$"); + + +#define cbsSplay(cbs) (&((cbs)->splayTreeStruct)) +#define cbsOfSplay(splay) PARENT(CBSStruct, splayTreeStruct, splay) +#define cbsFastBlockOfTree(tree) \ + PARENT(CBSFastBlockStruct, rangeTreeStruct, RangeTreeOfTree(tree)) +#define cbsFastBlockNode(block) (&(block)->rangeTreeStruct) +#define cbsZonedBlockOfTree(tree) \ + PARENT(CBSZonedBlockStruct, cbsFastBlockStruct, cbsFastBlockOfTree(tree)) +#define cbsZonedBlockNode(block) cbsFastBlockNode(&(block)->cbsFastBlockStruct) +#define cbsBlockPool(cbs) RVALUE((cbs)->blockPool) + + +/* CBSCheck -- Check CBS */ + +Bool CBSCheck(CBS cbs) +{ + /* See .enter-leave.simple. 
*/ + Land land; + CHECKS(CBS, cbs); + land = CBSLand(cbs); + CHECKD(Land, land); + CHECKD(SplayTree, cbsSplay(cbs)); + CHECKD(Pool, cbs->blockPool); + CHECKL(cbs->blockStructSize > 0); + CHECKL(BoolCheck(cbs->ownPool)); + CHECKL(SizeIsAligned(cbs->size, LandAlignment(land))); + STATISTIC(CHECKL((cbs->size == 0) == (cbs->treeSize == 0))); + + return TRUE; +} + + +/* cbsTestNode, cbsTestTree -- test for nodes larger than the S parameter */ + +static Bool cbsTestNode(SplayTree splay, Tree tree, void *closure) +{ + RangeTree block; + Size *sizeP = closure; + + AVERT_CRITICAL(SplayTree, splay); + AVERT_CRITICAL(Tree, tree); + AVER_CRITICAL(sizeP != NULL); + AVER_CRITICAL(*sizeP > 0); + AVER_CRITICAL(IsA(CBSFast, cbsOfSplay(splay))); + + block = RangeTreeOfTree(tree); + + return RangeTreeSize(block) >= *sizeP; +} + +static Bool cbsTestTree(SplayTree splay, Tree tree, + void *closure) +{ + CBSFastBlock block; + Size *sizeP = closure; + + AVERT_CRITICAL(SplayTree, splay); + AVERT_CRITICAL(Tree, tree); + AVER_CRITICAL(sizeP != NULL); + AVER_CRITICAL(*sizeP > 0); + AVER_CRITICAL(IsA(CBSFast, cbsOfSplay(splay))); + + block = cbsFastBlockOfTree(tree); + + return block->maxSize >= *sizeP; +} + + +/* cbsUpdateFastNode -- update size info after restructuring */ + +static void cbsUpdateFastNode(SplayTree splay, Tree tree) +{ + Size maxSize; + + AVERT_CRITICAL(SplayTree, splay); + AVERT_CRITICAL(Tree, tree); + AVER_CRITICAL(IsA(CBSFast, cbsOfSplay(splay))); + + maxSize = RangeTreeSize(RangeTreeOfTree(tree)); + + if (TreeHasLeft(tree)) { + Size size = cbsFastBlockOfTree(TreeLeft(tree))->maxSize; + if (size > maxSize) + maxSize = size; + } + + if (TreeHasRight(tree)) { + Size size = cbsFastBlockOfTree(TreeRight(tree))->maxSize; + if (size > maxSize) + maxSize = size; + } + + cbsFastBlockOfTree(tree)->maxSize = maxSize; +} + + +/* cbsUpdateZonedNode -- update size and zone info after restructuring */ + +static void cbsUpdateZonedNode(SplayTree splay, Tree tree) +{ + ZoneSet zones; + 
CBSZonedBlock zonedBlock; + RangeTree block; + Arena arena; + + AVERT_CRITICAL(SplayTree, splay); + AVERT_CRITICAL(Tree, tree); + AVER_CRITICAL(IsA(CBSZoned, cbsOfSplay(splay))); + + cbsUpdateFastNode(splay, tree); + + zonedBlock = cbsZonedBlockOfTree(tree); + block = cbsZonedBlockNode(zonedBlock); + arena = LandArena(CBSLand(cbsOfSplay(splay))); + zones = ZoneSetOfRange(arena, RangeTreeBase(block), RangeTreeLimit(block)); + + if (TreeHasLeft(tree)) + zones = ZoneSetUnion(zones, cbsZonedBlockOfTree(TreeLeft(tree))->zones); + + if (TreeHasRight(tree)) + zones = ZoneSetUnion(zones, cbsZonedBlockOfTree(TreeRight(tree))->zones); + + zonedBlock->zones = zones; +} + + +/* cbsInit -- Initialise a CBS structure + * + * . + */ + +ARG_DEFINE_KEY(cbs_block_pool, Pool); + +static Res cbsInitComm(Land land, LandClass klass, + Arena arena, Align alignment, + ArgList args, SplayUpdateNodeFunction update, + Size blockStructSize) +{ + CBS cbs; + ArgStruct arg; + Res res; + Pool blockPool = NULL; + + AVER(land != NULL); + res = NextMethod(Land, CBS, init)(land, arena, alignment, args); + if (res != ResOK) + return res; + cbs = CouldBeA(CBS, land); + + if (ArgPick(&arg, args, CBSBlockPool)) + blockPool = arg.val.pool; + + SplayTreeInit(cbsSplay(cbs), RangeTreeCompare, RangeTreeKey, update); + + if (blockPool != NULL) { + cbs->blockPool = blockPool; + cbs->ownPool = FALSE; + } else { + MPS_ARGS_BEGIN(pcArgs) { + MPS_ARGS_ADD(pcArgs, MPS_KEY_MFS_UNIT_SIZE, blockStructSize); + res = PoolCreate(&cbs->blockPool, LandArena(land), PoolClassMFS(), pcArgs); + } MPS_ARGS_END(pcArgs); + if (res != ResOK) + return res; + cbs->ownPool = TRUE; + } + STATISTIC(cbs->treeSize = 0); + cbs->size = 0; + + cbs->blockStructSize = blockStructSize; + + METER_INIT(cbs->treeSearch, "size of tree", (void *)cbs); + + SetClassOfPoly(land, klass); + cbs->sig = CBSSig; + AVERC(CBS, cbs); + + return ResOK; +} + +static Res cbsInit(Land land, Arena arena, Align alignment, ArgList args) +{ + return cbsInitComm(land, 
CLASS(CBS), arena, alignment, + args, SplayTrivUpdate, + sizeof(RangeTreeStruct)); +} + +static Res cbsInitFast(Land land, Arena arena, Align alignment, ArgList args) +{ + return cbsInitComm(land, CLASS(CBSFast), arena, alignment, + args, cbsUpdateFastNode, + sizeof(CBSFastBlockStruct)); +} + +static Res cbsInitZoned(Land land, Arena arena, Align alignment, ArgList args) +{ + return cbsInitComm(land, CLASS(CBSZoned), arena, alignment, + args, cbsUpdateZonedNode, + sizeof(CBSZonedBlockStruct)); +} + + +/* cbsFinish -- Finish a CBS structure + * + * . + */ + +static void cbsFinish(Inst inst) +{ + Land land = MustBeA(Land, inst); + CBS cbs = MustBeA(CBS, land); + + METER_EMIT(&cbs->treeSearch); + + cbs->sig = SigInvalid; + + SplayTreeFinish(cbsSplay(cbs)); + if (cbs->ownPool) + PoolDestroy(cbsBlockPool(cbs)); + + NextMethod(Inst, CBS, finish)(inst); +} + + +/* cbsSize -- total size of ranges in CBS + * + * . + */ + +static Size cbsSize(Land land) +{ + CBS cbs = MustBeA_CRITICAL(CBS, land); + return cbs->size; +} + +/* cbsBlockDestroy -- destroy a block */ + +static void cbsBlockDestroy(CBS cbs, RangeTree block) +{ + Size size; + + AVERT(CBS, cbs); + AVERT(RangeTree, block); + size = RangeTreeSize(block); + + STATISTIC(--cbs->treeSize); + AVER(cbs->size >= size); + cbs->size -= size; + + RangeTreeFinish(block); + PoolFree(cbsBlockPool(cbs), (Addr)block, cbs->blockStructSize); +} + + +/* RangeTree change operators + * + * These four functions are called whenever blocks are created, + * destroyed, grow, or shrink. They maintain the maxSize if fastFind is + * enabled. 
+ */ + +static void cbsBlockDelete(CBS cbs, RangeTree block) +{ + Bool b; + + AVERT(CBS, cbs); + AVERT(RangeTree, block); + + METER_ACC(cbs->treeSearch, cbs->treeSize); + b = SplayTreeDelete(cbsSplay(cbs), RangeTreeTree(block)); + AVER(b); /* expect block to be in the tree */ + cbsBlockDestroy(cbs, block); +} + +static void cbsBlockShrunk(CBS cbs, RangeTree block, Size oldSize) +{ + Size newSize; + + AVERT(CBS, cbs); + AVERT(RangeTree, block); + + newSize = RangeTreeSize(block); + AVER(oldSize > newSize); + AVER(cbs->size >= oldSize - newSize); + + SplayNodeRefresh(cbsSplay(cbs), RangeTreeTree(block)); + cbs->size -= oldSize - newSize; +} + +static void cbsBlockGrew(CBS cbs, RangeTree block, Size oldSize) +{ + Size newSize; + + AVERT(CBS, cbs); + AVERT(RangeTree, block); + + newSize = RangeTreeSize(block); + AVER(oldSize < newSize); + + SplayNodeRefresh(cbsSplay(cbs), RangeTreeTree(block)); + cbs->size += newSize - oldSize; +} + +/* cbsBlockAlloc -- allocate a new block and set its base and limit, + but do not insert it into the tree yet */ + +static Res cbsBlockAlloc(RangeTree *blockReturn, CBS cbs, Range range) +{ + Res res; + RangeTree block; + Addr p; + + AVER(blockReturn != NULL); + AVERT(CBS, cbs); + AVERT(Range, range); + + res = PoolAlloc(&p, cbsBlockPool(cbs), cbs->blockStructSize); + if (res != ResOK) + goto failPoolAlloc; + block = (RangeTree)p; + + RangeTreeInit(block, range); + + SplayNodeInit(cbsSplay(cbs), RangeTreeTree(block)); + + AVERT(RangeTree, block); + *blockReturn = block; + return ResOK; + +failPoolAlloc: + AVER(res != ResOK); + return res; +} + +/* cbsBlockInsert -- insert a block into the tree */ + +static void cbsBlockInsert(CBS cbs, RangeTree block) +{ + Bool b; + + AVERT_CRITICAL(CBS, cbs); + AVERT_CRITICAL(RangeTree, block); + + METER_ACC(cbs->treeSearch, cbs->treeSize); + b = SplayTreeInsert(cbsSplay(cbs), RangeTreeTree(block)); + AVER_CRITICAL(b); + STATISTIC(++cbs->treeSize); + cbs->size += RangeTreeSize(block); +} + + +/* cbsInsert 
-- Insert a range into the CBS + * + * . + * + * .insert.alloc: Will only allocate a block if the range does not + * abut an existing range. + */ + +static Res cbsInsert(Range rangeReturn, Land land, Range range) +{ + CBS cbs = MustBeA_CRITICAL(CBS, land); + Bool b; + Res res; + Addr base, limit, newBase, newLimit; + Tree leftSplay, rightSplay; + RangeTree leftBlock, rightBlock; + Bool leftMerge, rightMerge; + Size oldSize; + + AVER_CRITICAL(rangeReturn != NULL); + AVERT_CRITICAL(Range, range); + AVER_CRITICAL(!RangeIsEmpty(range)); + AVER_CRITICAL(RangeIsAligned(range, LandAlignment(land))); + + base = RangeBase(range); + limit = RangeLimit(range); + + METER_ACC(cbs->treeSearch, cbs->treeSize); + b = SplayTreeNeighbours(&leftSplay, &rightSplay, cbsSplay(cbs), + RangeTreeKeyOfBaseVar(base)); + if (!b) { + res = ResFAIL; + goto fail; + } + + /* .insert.overlap: The two cases below are not quite symmetrical, + because base was passed into the call to SplayTreeNeighbours, but + limit was not. So we know that if there is a left neighbour, then + leftBlock's limit <= base (this is ensured by RangeTreeCompare, + which is the comparison method on the tree). But if there is a + right neighbour, all we know is that base < rightBlock's base. But + for the range to fit, we need limit <= rightBlock's base too. Hence + the extra check and the possibility of failure in the second + case. */ + + if (leftSplay == TreeEMPTY) { + leftBlock = NULL; + leftMerge = FALSE; + } else { + leftBlock = RangeTreeOfTree(leftSplay); + AVER_CRITICAL(RangeTreeLimit(leftBlock) <= base); + leftMerge = RangeTreeLimit(leftBlock) == base; + } + + if (rightSplay == TreeEMPTY) { + rightBlock = NULL; + rightMerge = FALSE; + } else { + rightBlock = RangeTreeOfTree(rightSplay); + if (rightBlock != NULL && limit > RangeTreeBase(rightBlock)) { + /* .insert.overlap */ + res = ResFAIL; + goto fail; + } + rightMerge = RangeTreeBase(rightBlock) == limit; + } + + newBase = leftMerge ? 
RangeTreeBase(leftBlock) : base; + newLimit = rightMerge ? RangeTreeLimit(rightBlock) : limit; + + if (leftMerge && rightMerge) { + Size oldLeftSize = RangeTreeSize(leftBlock); + Addr rightLimit = RangeTreeLimit(rightBlock); + cbsBlockDelete(cbs, rightBlock); + RangeTreeSetLimit(leftBlock, rightLimit); + cbsBlockGrew(cbs, leftBlock, oldLeftSize); + + } else if (leftMerge) { + oldSize = RangeTreeSize(leftBlock); + RangeTreeSetLimit(leftBlock, limit); + cbsBlockGrew(cbs, leftBlock, oldSize); + + } else if (rightMerge) { + oldSize = RangeTreeSize(rightBlock); + RangeTreeSetBase(rightBlock, base); + cbsBlockGrew(cbs, rightBlock, oldSize); + + } else { + RangeTree block; + res = cbsBlockAlloc(&block, cbs, range); + if (res != ResOK) + goto fail; + cbsBlockInsert(cbs, block); + } + + AVER_CRITICAL(newBase <= base); + AVER_CRITICAL(newLimit >= limit); + RangeInit(rangeReturn, newBase, newLimit); + + return ResOK; + +fail: + AVER_CRITICAL(res != ResOK); + return res; +} + + +/* cbsExtendBlockPool -- extend block pool with memory */ + +static void cbsExtendBlockPool(CBS cbs, Addr base, Addr limit) +{ + Tract tract; + Addr addr; + + AVERC(CBS, cbs); + AVER(base < limit); + + /* Steal tracts from their owning pool */ + TRACT_FOR(tract, addr, CBSLand(cbs)->arena, base, limit) { + TractFinish(tract); + TractInit(tract, cbs->blockPool, addr); + } + + /* Extend the block pool with the stolen memory. 
*/ + MFSExtend(cbs->blockPool, base, limit); +} + + +/* cbsInsertSteal -- Insert a range into the CBS, possibly stealing + * memory for the block pool + */ + +static Res cbsInsertSteal(Range rangeReturn, Land land, Range rangeIO) +{ + CBS cbs = MustBeA(CBS, land); + Arena arena = land->arena; + Size grainSize = ArenaGrainSize(arena); + Res res; + + AVER(rangeReturn != NULL); + AVER(rangeReturn != rangeIO); + AVERT(Range, rangeIO); + AVER(!RangeIsEmpty(rangeIO)); + AVER(RangeIsAligned(rangeIO, LandAlignment(land))); + AVER(AlignIsAligned(LandAlignment(land), grainSize)); + + res = cbsInsert(rangeReturn, land, rangeIO); + if (res != ResOK && res != ResFAIL) { + /* Steal an arena grain and use it to extend the block pool. */ + Addr stolenBase = RangeBase(rangeIO); + Addr stolenLimit = AddrAdd(stolenBase, grainSize); + cbsExtendBlockPool(cbs, stolenBase, stolenLimit); + + /* Update the inserted range and try again. */ + RangeSetBase(rangeIO, stolenLimit); + AVERT(Range, rangeIO); + if (RangeIsEmpty(rangeIO)) { + RangeCopy(rangeReturn, rangeIO); + res = ResOK; + } else { + res = cbsInsert(rangeReturn, land, rangeIO); + AVER(res == ResOK); /* since we just extended the block pool */ + } + } + return res; +} + + +/* cbsDelete -- Remove a range from a CBS + * + * . + * + * .delete.alloc: Will only allocate a block if the range splits + * an existing range. 
+ */ + +static Res cbsDelete(Range rangeReturn, Land land, Range range) +{ + CBS cbs = MustBeA(CBS, land); + Res res; + RangeTree block; + Tree tree; + Addr base, limit, oldBase, oldLimit; + Size oldSize; + + AVER(rangeReturn != NULL); + AVERT(Range, range); + AVER(!RangeIsEmpty(range)); + AVER(RangeIsAligned(range, LandAlignment(land))); + + base = RangeBase(range); + limit = RangeLimit(range); + + METER_ACC(cbs->treeSearch, cbs->treeSize); + if (!SplayTreeFind(&tree, cbsSplay(cbs), RangeTreeKeyOfBaseVar(base))) { + res = ResFAIL; + goto failSplayTreeSearch; + } + block = RangeTreeOfTree(tree); + + if (limit > RangeTreeLimit(block)) { + res = ResFAIL; + goto failLimitCheck; + } + + oldBase = RangeTreeBase(block); + oldLimit = RangeTreeLimit(block); + oldSize = RangeTreeSize(block); + RangeInit(rangeReturn, oldBase, oldLimit); + + if (base == oldBase && limit == oldLimit) { + /* entire block */ + cbsBlockDelete(cbs, block); + + } else if (base == oldBase) { + /* remaining fragment at right */ + AVER(limit < oldLimit); + RangeTreeSetBase(block, limit); + cbsBlockShrunk(cbs, block, oldSize); + + } else if (limit == oldLimit) { + /* remaining fragment at left */ + AVER(base > oldBase); + RangeTreeSetLimit(block, base); + cbsBlockShrunk(cbs, block, oldSize); + + } else { + /* two remaining fragments. shrink block to represent fragment at + left, and create new block for fragment at right. 
*/ + RangeStruct newRange; + RangeTree newBlock; + AVER(base > oldBase); + AVER(limit < oldLimit); + RangeInit(&newRange, limit, oldLimit); + res = cbsBlockAlloc(&newBlock, cbs, &newRange); + if (res != ResOK) { + goto failAlloc; + } + RangeTreeSetLimit(block, base); + cbsBlockShrunk(cbs, block, oldSize); + cbsBlockInsert(cbs, newBlock); + } + + return ResOK; + +failAlloc: +failLimitCheck: +failSplayTreeSearch: + AVER(res != ResOK); + return res; +} + + +static Res cbsDeleteSteal(Range rangeReturn, Land land, Range range) +{ + CBS cbs = MustBeA(CBS, land); + Arena arena = land->arena; + Size grainSize = ArenaGrainSize(arena); + RangeStruct containingRange; + Res res; + + AVER(rangeReturn != NULL); + AVERT(Range, range); + AVER(!RangeIsEmpty(range)); + AVER(RangeIsAligned(range, LandAlignment(land))); + AVER(AlignIsAligned(LandAlignment(land), grainSize)); + + res = cbsDelete(&containingRange, land, range); + if (res == ResOK) { + RangeCopy(rangeReturn, &containingRange); + } else if (res != ResFAIL) { + /* Steal an arena grain from the base of the containing range and + use it to extend the block pool. */ + Addr stolenBase = RangeBase(&containingRange); + Addr stolenLimit = AddrAdd(stolenBase, grainSize); + RangeStruct stolenRange; + AVER(stolenLimit <= RangeBase(range)); + RangeInit(&stolenRange, stolenBase, stolenLimit); + res = cbsDelete(&containingRange, land, &stolenRange); + AVER(res == ResOK); /* since this does not split any range */ + cbsExtendBlockPool(cbs, stolenBase, stolenLimit); + + /* Try again with original range. 
*/ + res = cbsDelete(rangeReturn, land, range); + AVER(res == ResOK); /* since we just extended the block pool */ + } + return res; +} + + +static Res cbsBlockDescribe(RangeTree block, mps_lib_FILE *stream) +{ + Res res; + + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, 0, + "[$P,$P)", + (WriteFP)RangeTreeBase(block), + (WriteFP)RangeTreeLimit(block), + NULL); + return res; +} + +static Res cbsSplayNodeDescribe(Tree tree, mps_lib_FILE *stream) +{ + Res res; + + if (tree == TreeEMPTY) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = cbsBlockDescribe(RangeTreeOfTree(tree), stream); + return res; +} + +static Res cbsFastBlockDescribe(CBSFastBlock block, mps_lib_FILE *stream) +{ + Res res; + + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, 0, + "[$P,$P) {$U}", + (WriteFP)RangeTreeBase(cbsFastBlockNode(block)), + (WriteFP)RangeTreeLimit(cbsFastBlockNode(block)), + (WriteFU)block->maxSize, + NULL); + return res; +} + +static Res cbsFastSplayNodeDescribe(Tree tree, mps_lib_FILE *stream) +{ + Res res; + + if (tree == TreeEMPTY) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = cbsFastBlockDescribe(cbsFastBlockOfTree(tree), stream); + return res; +} + +static Res cbsZonedBlockDescribe(CBSZonedBlock block, mps_lib_FILE *stream) +{ + Res res; + + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, 0, + "[$P,$P) {$U, $B}", + (WriteFP)RangeTreeBase(cbsZonedBlockNode(block)), + (WriteFP)RangeTreeLimit(cbsZonedBlockNode(block)), + (WriteFU)block->cbsFastBlockStruct.maxSize, + (WriteFB)block->zones, + NULL); + return res; +} + +static Res cbsZonedSplayNodeDescribe(Tree tree, mps_lib_FILE *stream) +{ + Res res; + + if (tree == TreeEMPTY) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = cbsZonedBlockDescribe(cbsZonedBlockOfTree(tree), stream); + return res; +} + + +/* cbsIterate -- iterate over all blocks in CBS + * + * . 
+ */ + +typedef struct CBSIterateClosure { + Land land; + LandVisitor visitor; + void *visitorClosure; +} CBSIterateClosure; + +static Bool cbsIterateVisit(Tree tree, void *closure) +{ + CBSIterateClosure *my = closure; + Land land = my->land; + RangeTree block = RangeTreeOfTree(tree); + RangeStruct range; + RangeInit(&range, RangeTreeBase(block), RangeTreeLimit(block)); + return my->visitor(land, &range, my->visitorClosure); +} + +static Bool cbsIterate(Land land, LandVisitor visitor, void *visitorClosure) +{ + CBS cbs = MustBeA(CBS, land); + SplayTree splay; + CBSIterateClosure iterateClosure; + + AVER(FUNCHECK(visitor)); + + splay = cbsSplay(cbs); + /* .splay-iterate.slow: We assume that splay tree iteration does */ + /* searches and meter it. */ + METER_ACC(cbs->treeSearch, cbs->treeSize); + + iterateClosure.land = land; + iterateClosure.visitor = visitor; + iterateClosure.visitorClosure = visitorClosure; + return TreeTraverse(SplayTreeRoot(splay), splay->compare, splay->nodeKey, + cbsIterateVisit, &iterateClosure); +} + + +/* cbsIterateAndDelete -- iterate over all blocks in CBS + * + * . 
+ */ + +typedef struct CBSIterateAndDeleteClosure { + Land land; + LandDeleteVisitor visitor; + Bool cont; + void *visitorClosure; +} CBSIterateAndDeleteClosure; + +static Bool cbsIterateAndDeleteVisit(Tree tree, void *closure) +{ + CBSIterateAndDeleteClosure *my = closure; + Land land = my->land; + CBS cbs = MustBeA(CBS, land); + RangeTree block = RangeTreeOfTree(tree); + Bool deleteNode = FALSE; + RangeStruct range; + + RangeInit(&range, RangeTreeBase(block), RangeTreeLimit(block)); + if (my->cont) + my->cont = my->visitor(&deleteNode, land, &range, + my->visitorClosure); + if (deleteNode) + cbsBlockDestroy(cbs, block); + return deleteNode; +} + +static Bool cbsIterateAndDelete(Land land, LandDeleteVisitor visitor, + void *visitorClosure) +{ + CBS cbs = MustBeA(CBS, land); + SplayTree splay; + CBSIterateAndDeleteClosure iterateClosure; + + AVER(FUNCHECK(visitor)); + + splay = cbsSplay(cbs); + /* .splay-iterate.slow: We assume that splay tree iteration does */ + /* searches and meter it. 
*/ + METER_ACC(cbs->treeSearch, cbs->treeSize); + + iterateClosure.land = land; + iterateClosure.visitor = visitor; + iterateClosure.visitorClosure = visitorClosure; + iterateClosure.cont = TRUE; + TreeTraverseAndDelete(&splay->root, cbsIterateAndDeleteVisit, + &iterateClosure); + return iterateClosure.cont; +} + + +/* cbsFindDeleteRange -- delete appropriate range of block found */ + +static void cbsFindDeleteRange(Range rangeReturn, Range oldRangeReturn, + Land land, Range range, Size size, + FindDelete findDelete) +{ + Bool callDelete = TRUE; + Addr base, limit; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + AVER(RangeIsAligned(range, LandAlignment(land))); + AVER(size > 0); + AVER(SizeIsAligned(size, LandAlignment(land))); + AVER(RangeSize(range) >= size); + AVERT(FindDelete, findDelete); + + base = RangeBase(range); + limit = RangeLimit(range); + + switch(findDelete) { + + case FindDeleteNONE: + callDelete = FALSE; + break; + + case FindDeleteLOW: + limit = AddrAdd(base, size); + break; + + case FindDeleteHIGH: + base = AddrSub(limit, size); + break; + + case FindDeleteENTIRE: + /* do nothing */ + break; + + default: + NOTREACHED; + break; + } + + RangeInit(rangeReturn, base, limit); + + if (callDelete) { + Res res; + res = cbsDelete(oldRangeReturn, land, rangeReturn); + /* Can't have run out of memory, because all our callers pass in + blocks that were just found in the tree, and we only + deleted from one end of the block, so cbsDelete did not + need to allocate a new block. 
*/ + AVER(res == ResOK); + } else { + RangeCopy(oldRangeReturn, rangeReturn); + } +} + + +/* CBSFindFirst -- find the first block of at least the given size */ + +static Bool cbsFindFirst(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) +{ + CBS cbs = MustBeA_CRITICAL(CBS, land); + Bool found; + Tree tree; + + AVER_CRITICAL(rangeReturn != NULL); + AVER_CRITICAL(oldRangeReturn != NULL); + AVER_CRITICAL(size > 0); + AVER_CRITICAL(SizeIsAligned(size, LandAlignment(land))); + AVERT_CRITICAL(FindDelete, findDelete); + + METER_ACC(cbs->treeSearch, cbs->treeSize); + found = SplayFindFirst(&tree, cbsSplay(cbs), &cbsTestNode, + &cbsTestTree, &size); + if (found) { + RangeTree block; + RangeStruct range; + block = RangeTreeOfTree(tree); + AVER_CRITICAL(RangeTreeSize(block) >= size); + RangeInit(&range, RangeTreeBase(block), RangeTreeLimit(block)); + AVER_CRITICAL(RangeSize(&range) >= size); + cbsFindDeleteRange(rangeReturn, oldRangeReturn, land, &range, + size, findDelete); + } + + return found; +} + + +/* cbsFindInZones -- find a block within a zone set + * + * Finds a block of at least the given size that lies entirely within a + * zone set. (The first such block, if high is FALSE, or the last, if + * high is TRUE.) + */ + +typedef struct cbsTestNodeInZonesClosureStruct { + Size size; + Arena arena; + ZoneSet zoneSet; + Addr base; + Addr limit; + Bool high; +} cbsTestNodeInZonesClosureStruct, *cbsTestNodeInZonesClosure; + +static Bool cbsTestNodeInZones(SplayTree splay, Tree tree, + void *closure) +{ + RangeTree block = RangeTreeOfTree(tree); + cbsTestNodeInZonesClosure my = closure; + RangeInZoneSet search; + + AVER_CRITICAL(closure != NULL); + UNUSED(splay); + + search = my->high ? 
RangeInZoneSetLast : RangeInZoneSetFirst; + + return search(&my->base, &my->limit, + RangeTreeBase(block), RangeTreeLimit(block), + my->arena, my->zoneSet, my->size); +} + +static Bool cbsTestTreeInZones(SplayTree splay, Tree tree, + void *closure) +{ + CBSFastBlock fastBlock = cbsFastBlockOfTree(tree); + CBSZonedBlock zonedBlock = cbsZonedBlockOfTree(tree); + cbsTestNodeInZonesClosure my = closure; + + AVER_CRITICAL(closure != NULL); + UNUSED(splay); + + return fastBlock->maxSize >= my->size + && ZoneSetInter(zonedBlock->zones, my->zoneSet) != ZoneSetEMPTY; +} + + +/* cbsFindLast -- find the last block of at least the given size */ + +static Bool cbsFindLast(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) +{ + CBS cbs = MustBeA_CRITICAL(CBSFast, land); + Bool found; + Tree tree; + + AVER_CRITICAL(rangeReturn != NULL); + AVER_CRITICAL(oldRangeReturn != NULL); + AVER_CRITICAL(size > 0); + AVER_CRITICAL(SizeIsAligned(size, LandAlignment(land))); + AVERT_CRITICAL(FindDelete, findDelete); + + METER_ACC(cbs->treeSearch, cbs->treeSize); + found = SplayFindLast(&tree, cbsSplay(cbs), &cbsTestNode, + &cbsTestTree, &size); + if (found) { + RangeTree block; + RangeStruct range; + block = RangeTreeOfTree(tree); + AVER_CRITICAL(RangeTreeSize(block) >= size); + RangeInit(&range, RangeTreeBase(block), RangeTreeLimit(block)); + AVER_CRITICAL(RangeSize(&range) >= size); + cbsFindDeleteRange(rangeReturn, oldRangeReturn, land, &range, + size, findDelete); + } + + return found; +} + + +/* cbsFindLargest -- find the largest block in the CBS */ + +static Bool cbsFindLargest(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) +{ + CBS cbs = MustBeA_CRITICAL(CBSFast, land); + Bool found = FALSE; + + AVER_CRITICAL(rangeReturn != NULL); + AVER_CRITICAL(oldRangeReturn != NULL); + AVER_CRITICAL(size > 0); + AVERT_CRITICAL(FindDelete, findDelete); + + if (!SplayTreeIsEmpty(cbsSplay(cbs))) { + RangeStruct range; + Tree 
tree = TreeEMPTY; /* suppress "may be used uninitialized" */ + Size maxSize; + + maxSize = cbsFastBlockOfTree(SplayTreeRoot(cbsSplay(cbs)))->maxSize; + if (maxSize >= size) { + RangeTree block; + METER_ACC(cbs->treeSearch, cbs->treeSize); + found = SplayFindFirst(&tree, cbsSplay(cbs), &cbsTestNode, + &cbsTestTree, &maxSize); + AVER_CRITICAL(found); /* maxSize is exact, so we will find it. */ + block = RangeTreeOfTree(tree); + AVER_CRITICAL(RangeTreeSize(block) >= maxSize); + RangeInit(&range, RangeTreeBase(block), RangeTreeLimit(block)); + AVER_CRITICAL(RangeSize(&range) >= maxSize); + cbsFindDeleteRange(rangeReturn, oldRangeReturn, land, &range, + size, findDelete); + } + } + + return found; +} + + +static Res cbsFindInZones(Bool *foundReturn, Range rangeReturn, + Range oldRangeReturn, Land land, Size size, + ZoneSet zoneSet, Bool high) +{ + CBS cbs = MustBeA_CRITICAL(CBSZoned, land); + RangeTree block; + Tree tree; + cbsTestNodeInZonesClosureStruct closure; + Res res; + LandFindMethod landFind; + SplayFindFunction splayFind; + RangeStruct rangeStruct, oldRangeStruct; + + AVER_CRITICAL(foundReturn != NULL); + AVER_CRITICAL(rangeReturn != NULL); + AVER_CRITICAL(oldRangeReturn != NULL); + /* AVERT_CRITICAL(ZoneSet, zoneSet); */ + AVERT_CRITICAL(Bool, high); + + landFind = high ? cbsFindLast : cbsFindFirst; + splayFind = high ? SplayFindLast : SplayFindFirst; + + if (zoneSet == ZoneSetEMPTY) + goto fail; + if (zoneSet == ZoneSetUNIV) { + FindDelete fd = high ? 
FindDeleteHIGH : FindDeleteLOW; + *foundReturn = (*landFind)(rangeReturn, oldRangeReturn, land, size, fd); + return ResOK; + } + if (ZoneSetIsSingle(zoneSet) && size > ArenaStripeSize(LandArena(land))) + goto fail; + + /* It would be nice if there were a neat way to eliminate all runs of + zones in zoneSet too small for size.*/ + + closure.arena = LandArena(land); + closure.zoneSet = zoneSet; + closure.size = size; + closure.high = high; + if (!(*splayFind)(&tree, cbsSplay(cbs), + cbsTestNodeInZones, cbsTestTreeInZones, + &closure)) + goto fail; + + block = RangeTreeOfTree(tree); + + AVER_CRITICAL(RangeTreeBase(block) <= closure.base); + AVER_CRITICAL(AddrOffset(closure.base, closure.limit) >= size); + AVER_CRITICAL(ZoneSetSub(ZoneSetOfRange(LandArena(land), closure.base, closure.limit), zoneSet)); + AVER_CRITICAL(closure.limit <= RangeTreeLimit(block)); + + if (!high) + RangeInit(&rangeStruct, closure.base, AddrAdd(closure.base, size)); + else + RangeInit(&rangeStruct, AddrSub(closure.limit, size), closure.limit); + res = cbsDelete(&oldRangeStruct, land, &rangeStruct); + if (res != ResOK) + /* not enough memory to split block */ + return res; + RangeCopy(rangeReturn, &rangeStruct); + RangeCopy(oldRangeReturn, &oldRangeStruct); + *foundReturn = TRUE; + return ResOK; + +fail: + *foundReturn = FALSE; + return ResOK; +} + + +/* cbsDescribe -- describe a CBS + * + * . 
+ */ + +static Res cbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Land land = CouldBeA(Land, inst); + CBS cbs = CouldBeA(CBS, land); + Res res; + Res (*describe)(Tree, mps_lib_FILE *); + + if (!TESTC(CBS, cbs)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, CBS, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "blockPool $P\n", (WriteFP)cbsBlockPool(cbs), + "ownPool $U\n", (WriteFU)cbs->ownPool, + STATISTIC_WRITE(" treeSize: $U\n", (WriteFU)cbs->treeSize) + NULL); + if (res != ResOK) + return res; + + METER_WRITE(cbs->treeSearch, stream, depth + 2); + + /* This could be done by specialised methods in subclasses, but it + doesn't really come out any neater. */ + if (IsA(CBSZoned, land)) + describe = cbsZonedSplayNodeDescribe; + else if (IsA(CBSFast, land)) + describe = cbsFastSplayNodeDescribe; + else + describe = cbsSplayNodeDescribe; + + res = SplayTreeDescribe(cbsSplay(cbs), stream, depth + 2, describe); + if (res != ResOK) + return res; + + return res; +} + +DEFINE_CLASS(Land, CBS, klass) +{ + INHERIT_CLASS(klass, CBS, Land); + klass->instClassStruct.describe = cbsDescribe; + klass->instClassStruct.finish = cbsFinish; + klass->size = sizeof(CBSStruct); + klass->init = cbsInit; + klass->sizeMethod = cbsSize; + klass->insert = cbsInsert; + klass->insertSteal = cbsInsertSteal; + klass->delete = cbsDelete; + klass->deleteSteal = cbsDeleteSteal; + klass->iterate = cbsIterate; + klass->iterateAndDelete = cbsIterateAndDelete; + klass->findFirst = cbsFindFirst; + klass->findLast = cbsFindLast; + klass->findLargest = cbsFindLargest; + klass->findInZones = cbsFindInZones; + AVERT(LandClass, klass); +} + +DEFINE_CLASS(Land, CBSFast, klass) +{ + INHERIT_CLASS(klass, CBSFast, CBS); + klass->init = cbsInitFast; + AVERT(LandClass, klass); +} + +DEFINE_CLASS(Land, CBSZoned, klass) +{ + INHERIT_CLASS(klass, CBSZoned, CBSFast); + klass->init = cbsInitZoned; + 
AVERT(LandClass, klass); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/cbs.h b/mps/code/cbs.h new file mode 100644 index 00000000000..47125481b3e --- /dev/null +++ b/mps/code/cbs.h @@ -0,0 +1,85 @@ +/* cbs.h: CBS -- Coalescing Block Structure + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .source: . 
+ */ + +#ifndef cbs_h +#define cbs_h + +#include "arg.h" +#include "mpmtypes.h" +#include "mpm.h" +#include "mpmst.h" +#include "rangetree.h" +#include "splay.h" + +typedef struct CBSFastBlockStruct *CBSFastBlock; +typedef struct CBSFastBlockStruct { + struct RangeTreeStruct rangeTreeStruct; + Size maxSize; /* accurate maximum block size of sub-tree */ +} CBSFastBlockStruct; + +typedef struct CBSZonedBlockStruct *CBSZonedBlock; +typedef struct CBSZonedBlockStruct { + struct CBSFastBlockStruct cbsFastBlockStruct; + ZoneSet zones; /* union zone set of all ranges in sub-tree */ +} CBSZonedBlockStruct; + +typedef struct CBSStruct *CBS, *CBSFast, *CBSZoned; + +extern Bool CBSCheck(CBS cbs); + + +/* CBSLand -- convert CBS to Land + * + * We would like to use MustBeA(Land, cbs) for this, but it produces + * bogus warnings about strict aliasing from GCC 4.7 (and probably + * 4.8). We can abolish this macro when those are no longer in use in + * MPS development. + */ + +#define CBSLand(cbs) (&(cbs)->landStruct) + + +DECLARE_CLASS(Land, CBS, Land); +DECLARE_CLASS(Land, CBSFast, CBS); +DECLARE_CLASS(Land, CBSZoned, CBSFast); + +extern const struct mps_key_s _mps_key_cbs_block_pool; +#define CBSBlockPool (&_mps_key_cbs_block_pool) +#define CBSBlockPool_FIELD pool + +#endif /* cbs_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/check.h b/mps/code/check.h new file mode 100644 index 00000000000..2c43fa48d95 --- /dev/null +++ b/mps/code/check.h @@ -0,0 +1,402 @@ +/* check.h: ASSERTION INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .aver: This header defines a family of AVER and NOTREACHED macros. + * These macros should be used to instrument and annotate code with + * invariants, and so provide both interface and internal consistency + * checks. + * + * .comment: Non-obvious AVER statements should always be accompanied by + * a comment. + * + * .disable: When assertions are disabled, AVER expands to something + * which contains the condition but discards the result. Compilers + * will throw the code away, but check its syntax. + * + * .trans.level-check: CheckLevel itself is not checked anywhere. + * + * .careful: BE CAREFUL when changing this file. It is easy to make mistakes + * and change the checking level in a variety and thereby its performance + * without realising it. This has happened before. Eyeball the preprocessor + * output for each variety. 
For example: + * + * cc -E -DCONFIG_VAR_RASH trace.c + * cc -E -DCONFIG_VAR_HOT trace.c + * cc -E -DCONFIG_VAR_COOL trace.c + * + * Then look at TraceCheck to make sure checking is right, TraceAddWhite + * for general assertions, and TraceFix for the critical path assertions. + */ + +#ifndef check_h +#define check_h + +#include "config.h" +#include "misc.h" +#include "mpslib.h" +#include "protocol.h" + + +/* ASSERT -- basic assertion + * + * The ASSERT macro is equivalent to the ISO C assert() except that it is + * always defined, and uses the assertion handler from the MPS plinth, which + * can be replaced by the client code. + * + * It is not intended for direct use within the MPS. Use AVER and CHECK + * macros, which can be controlled by both build and run-time configuration. + */ + +#define ASSERT(cond, condstring) \ + BEGIN \ + if (LIKELY(cond)) NOOP; else \ + mps_lib_assert_fail(MPS_FILE, __LINE__, (condstring)); \ + END + +#define ASSERTP(cond, condstring, default_) \ + ((void)(LIKELY(cond) \ + || (mps_lib_assert_fail(MPS_FILE, __LINE__, (condstring)), FALSE)), \ + (default_)) + +#define ASSERT_ISTYPE(type, val) (type ## Check(val)) +#define ASSERT_TYPECHECK(type, val) \ + ASSERT(ASSERT_ISTYPE(type, val), "TypeCheck " #type ": " #val) + +#define ASSERT_ISCLASS(klass, val) (klass ## Check(CouldBeA(klass, val))) +#define ASSERT_CLASSCHECK(klass, val) \ + ASSERT(ASSERT_ISCLASS(klass, val), "ClassCheck " #klass ": " #val) + +#define ASSERT_NULLCHECK(type, val) \ + ASSERT((val) != NULL, "NullCheck " #type ": " #val) + + +/* CheckLevel -- control for check method behaviour + * + * When the MPS is build with AVER_AND_CHECK_ALL (in a "cool" variety) the + * static variable CheckLevel controls the frequency and detail of + * consistency checking on structures. + * + * By default, CHECKLEVEL is defined to a static value in config.h, though + * it can be overridden on the compiler command line, e.g. + * cc -DCHECKLEVEL=CheckLevelSHALLOW ... 
+ * + * However, if CHECKLEVEL_DYNAMIC is defined we use a variable to control + * the level of checking. The run-time overhead for this is quite high + * (observed double run-time on amcss when the variable is set to SHALLOW). + * CHECKLEVEL_DYNAMIC should be set to the initial level for the variable, + * which is in mpm.c. + * + * In general, it's better to adjust the check level by defining CHECKLEVEL + * but this is intended to meet the case where a run-time adjustable + * checking level is required -- where recompilation or relinking is + * undesirable or impossible. + * + * TODO: Should also allow the check level variable to come from an + * environment variable. + */ + +enum { + CheckLevelMINIMAL = 0, /* local sig check only */ + CheckLevelSHALLOW = 1, /* local invariants, */ + /* and adjacent (up, down) sig checks */ + CheckLevelDEEP = 2 /* local invariants, */ + /* and adjacent up sig checks */ + /* and recursive down full type checks */ +}; + +#ifdef CHECKLEVEL_DYNAMIC +extern unsigned CheckLevel; +#undef CHECKLEVEL +#define CHECKLEVEL CheckLevel +#endif + + +/* AVER, AVERT, AVERC, AVERP -- MPM assertions + * + * AVER and friends are used to assert conditions in the code. + * + * AVER checks an expression. + * + * AVERT checks that a value is of the correct type and may perform + * consistency checks on the value by calling a check function. + * + * AVERC checks that a value is of the correct class (including + * subclasses) and may perform consistency checks on the value by + * calling a check function. + * + * AVERP checks an expression but is itself a void * expression, and + * so can be used in expression macros. + * + * AVER etc. are on by default, and check conditions even in "hot" + * varieties intended to work in production. To avoid the cost of a + * check in critical parts of the code, use AVER_CRITICAL etc., but + * only when you've *proved* that this makes a difference to + * performance that affects requirements. 
+ */ + +#if defined(AVER_AND_CHECK_NONE) + +#define AVER(cond) DISCARD(cond) +#define AVERT(type, val) DISCARD(ASSERT_ISTYPE(type, val)) +#define AVERC(klass, val) DISCARD(ASSERT_ISCLASS(klass, val)) +#define AVERP(cond, dflt) (DISCARD_EXP(cond), dflt) +#define AVERPC(cond, condstring, dflt) (DISCARD_EXP(cond), dflt) + +#else + +#define AVER(cond) ASSERT(cond, #cond) +#define AVERT ASSERT_TYPECHECK +#define AVERC ASSERT_CLASSCHECK +#define AVERP(cond, dflt) ASSERTP(cond, #cond, dflt) +#define AVERPC ASSERTP + +#endif + +#if defined(AVER_AND_CHECK_ALL) + +#define AVER_CRITICAL(cond) ASSERT(cond, #cond) +#define AVERT_CRITICAL ASSERT_TYPECHECK +#define AVERC_CRITICAL ASSERT_CLASSCHECK +#define AVERP_CRITICAL(cond, dflt) ASSERTP(cond, #cond, dflt) +#define AVERPC_CRITICAL ASSERTP + +#else + +#define AVER_CRITICAL DISCARD +#define AVERT_CRITICAL(type, val) DISCARD(ASSERT_ISTYPE(type, val)) +#define AVERC_CRITICAL(klass, val) DISCARD(ASSERT_ISCLASS(klass, val)) +#define AVERP_CRITICAL(cond, dflt) (DISCARD_EXP(cond), dflt) +#define AVERPC_CRITICAL(cond, condstring, dflt) (DISCARD_EXP(cond), dflt) + +#endif + + +/* NOTREACHED -- control should never reach this statement + * + * This is a sort of AVER; it is equivalent to AVER(FALSE), but will produce + * a more informative message. + */ + +#if defined(AVER_AND_CHECK_NONE) + +#define NOTREACHED NOOP + +#else + +#define NOTREACHED \ + BEGIN \ + mps_lib_assert_fail(__FILE__, __LINE__, "unreachable code"); \ + END + +#endif + + +/* TESTT -- check type simply + * + * Must be thread safe. + * and . + */ + +#define TESTT(type, val) ((val) != NULL && (val)->sig == type ## Sig) + + +/* TESTC -- check class simply + * + * TODO: Does this need to be thread safe like TESTT? 
+ */ + +#define TESTC(klass, val) ((val) != NULL && IsA(klass, val)) + + +/* CHECKS, CHECKC -- Check Signature, Check Class + * + * (if CHECKLEVEL == CheckLevelMINIMAL, this is all we check) + */ + +#if defined(AVER_AND_CHECK_NONE) +#define CHECKS(type, val) DISCARD(TESTT(type, val)) +#define CHECKC(klass, val) DISCARD(MustBeA(klass, val)) +#else +#define CHECKS(type, val) \ + ASSERT(TESTT(type, val), "SigCheck " #type ": " #val) +#define CHECKC(klass, val) \ + ASSERT(TESTC(klass, val), "ClassCheck " #klass ": " #val) +#endif + + +/* CHECKL, CHECKD, CHECKU -- local, "down", and "up" checks + * + * Each type should have a function defined called Check that checks + * the consistency of the type. This function should return TRUE iff the + * value passes consistency checks. In general, it should assert otherwise, + * but we allow for the possibility of returning FALSE in this case for + * configuration adaptability. + * + * For structure types, the check function should: + * + * - check its own signature with CHECKS + * + * - check fields that it "owns" with CHECKL, like asserts + * + * - check "down" values which are its "children" with CHECKD + * + * - check "up" values which are its "parents" with CHECKU. + * + * These various checks will be compiled out or compiled to be controlled + * by CHECKLEVEL. + * + * For example: + * + * Bool MessageCheck(Message message) + * { + * CHECKS(Message, message); + * CHECKU(Arena, message->arena); + * CHECKD(MessageClass, message->class); + * CHECKL(RingCheck(&message->queueRing)); + * CHECKL(MessageIsClocked(message) || (message->postedClock == 0)); + * return TRUE; + * } + * + * The parent/child distinction depends on the structure, but in the MPS + * the Arena has no parents, and has children which are Pools, which have + * children which are Segments, etc. + * + * The important thing is to have a partial order on types so that recursive + * checking will terminate. 
When CHECKLEVEL is set to DEEP, checking will + * recurse into check methods for children, but will only do a shallow + * signature check on parents, avoiding infinite regression. + */ + +#if defined(AVER_AND_CHECK_ALL) + +#define CHECK_BY_LEVEL(minimal, shallow, deep) \ + BEGIN \ + switch (CHECKLEVEL) { \ + case CheckLevelDEEP: deep; break; \ + case CheckLevelSHALLOW: shallow; break; \ + default: NOTREACHED; /* fall through */ \ + case CheckLevelMINIMAL: minimal; break; \ + } \ + END + +#define CHECKL(cond) \ + CHECK_BY_LEVEL(NOOP, \ + ASSERT(cond, #cond), \ + ASSERT(cond, #cond)) + +#define CHECKD(type, val) \ + CHECK_BY_LEVEL(NOOP, \ + CHECKS(type, val), \ + ASSERT_TYPECHECK(type, val)) + +#define CHECKD_NOSIG(type, val) \ + CHECK_BY_LEVEL(NOOP, \ + ASSERT_NULLCHECK(type, val), \ + ASSERT_TYPECHECK(type, val)) + +#define CHECKD_CLASS(klass, val) \ + CHECK_BY_LEVEL(NOOP, \ + CHECKC(klass, val) \ + ASSERT_CLASSCHECK(klass, val)) + +#define CHECKU(type, val) \ + CHECK_BY_LEVEL(NOOP, \ + CHECKS(type, val), \ + CHECKS(type, val)) + +#define CHECKU_NOSIG(type, val) \ + CHECK_BY_LEVEL(NOOP, \ + ASSERT_NULLCHECK(type, val), \ + ASSERT_NULLCHECK(type, val)) + +#else /* AVER_AND_CHECK_ALL, not */ + +/* TODO: This gives comparable performance to RASH when compiling + using mps.c and -O2 (to get check methods inlined), but is it a bit + too minimal? How much do we rely on check methods? */ + +#define CHECKL(cond) DISCARD(cond) +#define CHECKD(type, val) DISCARD(TESTT(type, val)) +#define CHECKD_NOSIG(type, val) DISCARD((val) != NULL) +#define CHECKD_CLASS(klass, val) DISCARD((val) != NULL) +#define CHECKU(type, val) DISCARD(TESTT(type, val)) +#define CHECKU_NOSIG(type, val) DISCARD((val) != NULL) + +#endif /* AVER_AND_CHECK_ALL */ + + +/* COMPAT* -- type compatibility checking + * + * .check.macros: The COMPAT* macros use some C trickery to attempt to + * verify that certain types and fields are equivalent. They do not do + * a complete job. 
This trickery is justified by the security gained + * in knowing that matches the MPM. See + * . [This paragraph is intended to + * satisfy rule.impl.trick.] + */ + +/* compile-time check */ +#define COMPATLVALUE(lv1, lv2) \ + ((void)sizeof((lv1) = (lv2)), (void)sizeof((lv2) = (lv1)), TRUE) + +/* aims to test whether t1 and t2 are assignment-compatible */ +#define COMPATTYPE(t1, t2) \ + (sizeof(t1) == sizeof(t2) && \ + COMPATLVALUE(*((t1 *)0), *((t2 *)0))) + +#define COMPATFIELDAPPROX(s1, f1, s2, f2) \ + (sizeof(((s1 *)0)->f1) == sizeof(((s2 *)0)->f2) && \ + offsetof(s1, f1) == offsetof(s2, f2)) + +#define COMPATFIELD(s1, f1, s2, f2) \ + (COMPATFIELDAPPROX(s1, f1, s2, f2) && \ + COMPATLVALUE(((s1 *)0)->f1, ((s2 *)0)->f2)) + + +/* NONNEGATIVE -- test that value is greater than or equal to zero + * + * We'd like to write "x >= 0" but when x belongs to an unsigned + * integral type then this results in a "comparison of unsigned + * expression >= 0 is always true" warning from GCC if -Wextra is + * specified. We also don't want to remove these assertions because + * they protect us against errors if the type of x should ever be + * changed to a signed type on some platform. + * + * Note that this macro evaluates its argument twice. + */ +#define NONNEGATIVE(x) ((x) == 0 || (x) > 0) + +#endif /* check_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/clock.h b/mps/code/clock.h new file mode 100644 index 00000000000..4ff3329e57d --- /dev/null +++ b/mps/code/clock.h @@ -0,0 +1,208 @@ +/* clock.h -- Fast clocks and timers + * + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * $Id$ + * + * .design: . + */ + +#ifndef clock_h +#define clock_h + +#include "mpmtypes.h" /* for Word */ + + +/* EVENT_CLOCK -- fast event timestamp clock + * + * On platforms that support it, we want to stamp events with a very cheap + * and fast high-resolution timer. + */ + +/* Microsoft C provides an intrinsic for the Intel rdtsc instruction. 
+ * + */ +#if (defined(MPS_ARCH_I3) || defined(MPS_ARCH_I6)) && defined(MPS_BUILD_MV) + +typedef unsigned __int64 EventClock; + +typedef union EventClockUnion { + struct { + unsigned low, high; + } half; + unsigned __int64 whole; +} EventClockUnion; + +#define EVENT_CLOCK_MAKE(lvalue, low, high) \ + BEGIN \ + ((EventClockUnion*)&(lvalue))->half.low = (low); \ + ((EventClockUnion*)&(lvalue))->half.high = (high); \ + END + +#if _MSC_VER >= 1400 + +#pragma intrinsic(__rdtsc) + +#define EVENT_CLOCK(lvalue) \ + BEGIN \ + (lvalue) = __rdtsc(); \ + END + +#else /* _MSC_VER < 1400 */ + +/* This is mostly a patch for Open Dylan's bootstrap on Windows, which is + using Microsoft Visual Studio 6 because of support for CodeView debugging + information. */ + +#include "mpswin.h" /* KILL IT WITH FIRE! */ + +#define EVENT_CLOCK(lvalue) \ + BEGIN \ + LARGE_INTEGER _count; \ + QueryPerformanceCounter(&_count); \ + (lvalue) = _count.QuadPart; \ + END + +#endif /* _MSC_VER < 1400 */ + +#if defined(MPS_ARCH_I3) + +/* We can't use a shift to get the top half of the 64-bit event clock, + because that introduces a dependency on `__aullshr` in the C run-time. 
*/ + +#define EVENT_CLOCK_PRINT(stream, clock) \ + fprintf(stream, "%08lX%08lX", \ + (*(EventClockUnion *)&(clock)).half.high, \ + (*(EventClockUnion *)&(clock)).half.low) + +#define EVENT_CLOCK_WRITE(stream, depth, clock) \ + WriteF(stream, depth, "$W$W", \ + (*(EventClockUnion *)&(clock)).half.high, \ + (*(EventClockUnion *)&(clock)).half.low, \ + NULL) + +#elif defined(MPS_ARCH_I6) + +#if defined(MPS_BUILD_MV) + +#define EVENT_CLOCK_PRINT(stream, clock) \ + fprintf(stream, "%016llX", (clock)); + +#else + +#define EVENT_CLOCK_PRINT(stream, clock) \ + fprintf(stream, "%016lX", (clock)); + +#endif + +#define EVENT_CLOCK_WRITE(stream, depth, clock) \ + WriteF(stream, depth, "$W", (WriteFW)(clock), NULL) + +#endif + +#endif /* Microsoft C on Intel */ + +/* If we have GCC or Clang, assemble the rdtsc instruction */ +#if !defined(EVENT_CLOCK) && \ + (defined(MPS_ARCH_I3) || defined(MPS_ARCH_I6)) && \ + (defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL)) + +/* Use __extension__ to enable use of a 64-bit type on 32-bit pedantic + GCC or Clang. */ +__extension__ typedef unsigned long long EventClock; + +#define EVENT_CLOCK_MAKE(lvalue, low, high) \ + ((lvalue) = ((EventClock)(high) << 32) + ((EventClock)(low) & (0xfffffffful))) + +/* Clang provides a cross-platform builtin for a fast timer, but it + * was not available on Mac OS X 10.8 until the release of XCode 4.6. + * + */ +#if defined(MPS_BUILD_LL) + +#if __has_builtin(__builtin_readcyclecounter) + +#define EVENT_CLOCK(lvalue) \ + BEGIN \ + (lvalue) = __builtin_readcyclecounter(); \ + END + +#endif /* __has_builtin(__builtin_readcyclecounter) */ + +#endif /* Clang */ + +#ifndef EVENT_CLOCK + +#define EVENT_CLOCK(lvalue) \ + BEGIN \ + unsigned _l, _h; \ + __asm__ __volatile__("rdtsc" : "=a"(_l), "=d"(_h)); \ + (lvalue) = ((EventClock)_h << 32) | _l; \ + END + +#endif + +/* The __extension__ keyword doesn't work on printf formats, so we + concatenate two 32-bit hex numbers to print the 64-bit value. 
*/ +#define EVENT_CLOCK_PRINT(stream, clock) \ + fprintf(stream, "%08lX%08lX", \ + (unsigned long)((clock) >> 32), \ + (unsigned long)((clock) & 0xffffffff)) + +#define EVENT_CLOCK_WRITE(stream, depth, clock) \ + WriteF(stream, depth, "$W$W", (WriteFW)((clock) >> 32), (WriteFW)clock, NULL) + +#endif /* Intel, GCC or Clang */ + +/* no fast clock, use plinth, probably from the C library */ +#ifndef EVENT_CLOCK + +typedef mps_clock_t EventClock; + +#define EVENT_CLOCK_MAKE(lvalue, low, high) \ + ((lvalue) = ((EventClock)(high) << 32) + ((EventClock)(low) & (0xfffffffful))) + +#define EVENT_CLOCK(lvalue) \ + BEGIN \ + (lvalue) = mps_clock(); \ + END + +#define EVENT_CLOCK_PRINT(stream, clock) \ + fprintf(stream, "%lu", (unsigned long)clock) + +#define EVENT_CLOCK_WRITE(stream, depth, clock) \ + WriteF(stream, depth, "$W", (WriteFW)clock, NULL) + +#endif + + +#endif /* clock_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/comm.gmk b/mps/code/comm.gmk new file mode 100644 index 00000000000..05c73fb411a --- /dev/null +++ b/mps/code/comm.gmk @@ -0,0 +1,749 @@ +# -*- makefile -*- +# +# comm.gmk: COMMON GNUMAKEFILE FRAGMENT +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# DESCRIPTION +# +# This makefile fragment is included in more specific makefiles for +# platforms which use GNU make. +# +# PARAMETERS +# +# Assumes the following variables and definitions: +# EXTRA_TARGETS a list of extra targets to build +# CFLAGSCOMPILER a list of flags for all compilations +# CFLAGSCOMPILERSTRICT a list of flags for almost all compilations +# CFLAGSCOMPILERLAX a list of flags for compilations which can't be as +# strict (e.g. because they have to include a third- +# party header file that isn't -ansi -pedantic). 
+# CFLAGSDEBUG a list of flags for compilations with maximum debug +# information, and any optimization possible +# CFLAGSOPT a list of flags for compilations with maximum +# optimization, and any debug info possible +# CC the command for the C compiler +# LINKFLAGS a list of flags passed to the linker +# ARFLAGSPFM platform-specific flags for ar +# RANLIB the command to index a library (default: none needed) +# gendep optionally defined to be a command sequence for +# generating the dependency file (.d) from a C file (.c); +# it is used in a rule of the form: +# $(PFM)/$(VARIETY)/%.d: %.c +# PFM platform code, e.g. "sus8gc" +# LIBS extra libraries to include in tests (usually "-lm") +# NOISY if defined and non-empty, causes commands to be emitted +# MPMPF platform-dependent C sources for the "mpm" part +# MPMS assembler sources for the "mpm" part (.s files) +# +# %%PART: When adding a new part, add a new parameter above for the +# files included in the part. +# +# EDITING +# +# To add new targets, varieties, and parts: +# Search for the string "%%TARGET", "%%VARIETY", or "%%PART" in this +# makefile and follow the instructions. If you're adding a part, you'll +# have to change the makefiles for all the platforms which use this +# makefile to define the source list for that part, and the GNUmakefile +# to include a recursive call to the name of that part. +# +# CHECK PARAMETERS +# +# Old versions of GNU make don't have the $(error) function, but lines +# starting "error" cause it to exit with an error. [These only test to +# see whether the symbol is defined. We could be more thorough and +# test the syntax and content. 
-- richard 1995-09-07] + +ifndef CC +error "comm.gmk: CC not defined" +endif +ifndef CFLAGSCOMPILER +error "comm.gmk: CFLAGSCOMPILER not defined" +endif +ifndef CFLAGSDEBUG +error "comm.gmk: CFLAGSDEBUG not defined" +endif +ifndef CFLAGSOPT +error "comm.gmk: CFLAGSOPT not defined" +endif + + +# TELEMETRY TARGETS + +EVENT_TARGETS = mpseventcnv mpseventpy mpseventsql mpseventtxt + + +# EXTRA TARGETS +# +# Don't build mpseventsql by default (might not have sqlite3 installed), +# but do build the other event target. + +EXTRA_TARGETS ?= $(filter-out mpseventsql,$(EVENT_TARGETS)) + + +# +# %%PART: When adding a new part, add checks for the parameter with the +# sources for the new part. + +ifndef PFM +error "comm.gmk: PFM not defined" +endif +ifndef MPMPF +error "comm.gmk: MPMPF not defined" +endif + + +# DECLARATIONS + +ifdef NOISY +ECHO = : +else +.SILENT: +ECHO = echo +endif + +.PHONY: phony + + +# C FLAGS + +# These flags are included in all compilations. +# Avoid using PFMDEFS in platform makefiles, as they prevent the MPS being +# built with a simple command like "cc -c mps.c". +CFLAGSCOMMONSTRICT = $(PFMDEFS) $(CFLAGSCOMPILER) $(CFLAGSCOMPILERSTRICT) +CFLAGSCOMMONLAX = $(PFMDEFS) $(CFLAGSCOMPILER) $(CFLAGSCOMPILERLAX) + +# %%VARIETY: When adding a new variety, define a macro containing the set +# of flags for the new variety. + +# These flags are added to compilations for the indicated variety. +CFRASH = -DCONFIG_VAR_RASH $(CFLAGSOPT) +CFHOT = -DCONFIG_VAR_HOT $(CFLAGSOPT) +CFCOOL = -DCONFIG_VAR_COOL $(CFLAGSDEBUG) + +# Bind CFLAGSVARIETY to the appropriate set of flags for the variety. +# %%VARIETY: When adding a new variety, add a test for the variety and +# set CFLAGSVARIETY here. 
+ifeq ($(VARIETY),rash) +CFLAGSVARIETY=$(CFRASH) +else +ifeq ($(VARIETY),hot) +CFLAGSVARIETY=$(CFHOT) +else +ifeq ($(VARIETY),cool) +CFLAGSVARIETY=$(CFCOOL) +else +ifneq ($(VARIETY),) +$(error Variety "$(VARIETY)" not recognized: must be rash/hot/cool) +endif +endif +endif +endif + +CFLAGSSTRICT=$(CFLAGSCOMMONSTRICT) $(CFLAGSVARIETY) $(CFLAGS) +CFLAGSLAX=$(CFLAGSCOMMONLAX) $(CFLAGSVARIETY) $(CFLAGS) +ARFLAGS=rc$(ARFLAGSPFM) + + +# == Common definitions == +# %%PART: When adding a new part, add it here, unless it's platform-specific +# These values are defined here because they have no variation between +# platforms. + +AMC = poolamc.c +AMS = poolams.c +AWL = poolawl.c +LO = poollo.c +SNC = poolsnc.c +POOLN = pooln.c +MV2 = poolmv2.c +MVFF = poolmvff.c +TESTLIB = testlib.c +TESTTHR = testthrix.c +FMTDY = fmtdy.c fmtno.c +FMTDYTST = fmtdy.c fmtno.c fmtdytst.c +FMTHETST = fmthe.c fmtdy.c fmtno.c fmtdytst.c +FMTSCM = fmtscheme.c +PLINTH = mpsliban.c mpsioan.c +MPMCOMMON = \ + abq.c \ + arena.c \ + arenacl.c \ + arenavm.c \ + arg.c \ + boot.c \ + bt.c \ + buffer.c \ + cbs.c \ + dbgpool.c \ + dbgpooli.c \ + event.c \ + failover.c \ + format.c \ + freelist.c \ + global.c \ + land.c \ + ld.c \ + locus.c \ + message.c \ + meter.c \ + mpm.c \ + mpsi.c \ + nailboard.c \ + policy.c \ + pool.c \ + poolabs.c \ + poolmfs.c \ + poolmrg.c \ + protocol.c \ + range.c \ + rangetree.c \ + ref.c \ + ring.c \ + root.c \ + sa.c \ + sac.c \ + scan.c \ + seg.c \ + shield.c \ + splay.c \ + ss.c \ + table.c \ + trace.c \ + traceanc.c \ + tract.c \ + trans.c \ + tree.c \ + version.c \ + vm.c \ + walk.c +POOLS = $(AMC) $(AMS) $(AWL) $(LO) $(MV2) $(MVFF) $(SNC) +MPM = $(MPMCOMMON) $(MPMPF) $(POOLS) $(PLINTH) + + +# These map the source file lists onto object files and dependency files +# in the platform/variety directory. +# +# %%PART: When adding a new part, add a new macro which expands to the files +# included in the part. 
+ +ifdef VARIETY +MPMOBJ = $(MPM:%.c=$(PFM)/$(VARIETY)/%.o) \ + $(MPMS:%.s=$(PFM)/$(VARIETY)/%.o) +FMTDYOBJ = $(FMTDY:%.c=$(PFM)/$(VARIETY)/%.o) +FMTDYTSTOBJ = $(FMTDYTST:%.c=$(PFM)/$(VARIETY)/%.o) +FMTHETSTOBJ = $(FMTHETST:%.c=$(PFM)/$(VARIETY)/%.o) +FMTSCMOBJ = $(FMTSCM:%.c=$(PFM)/$(VARIETY)/%.o) +PLINTHOBJ = $(PLINTH:%.c=$(PFM)/$(VARIETY)/%.o) +POOLNOBJ = $(POOLN:%.c=$(PFM)/$(VARIETY)/%.o) +TESTLIBOBJ = $(TESTLIB:%.c=$(PFM)/$(VARIETY)/%.o) +TESTTHROBJ = $(TESTTHR:%.c=$(PFM)/$(VARIETY)/%.o) +endif + + +# == Test cases == +# +# %%TARGET: When adding a new target, add it to one of the variables +# in this section. Library components go in LIB_TARGETS. + +LIB_TARGETS=mps.a mpsplan.a + +# Test executables go in TEST_TARGETS. + +TEST_TARGETS=\ + abqtest \ + addrobj \ + airtest \ + amcss \ + amcsshe \ + amcssth \ + amsss \ + amssshe \ + apss \ + arenacv \ + awlut \ + awluthe \ + awlutth \ + btcv \ + bttest \ + djbench \ + extcon \ + finalcv \ + finaltest \ + forktest \ + fotest \ + gcbench \ + landtest \ + locbwcss \ + lockcov \ + lockut \ + locusss \ + locv \ + messtest \ + mpmss \ + mpsicv \ + mv2test \ + nailboardtest \ + poolncv \ + qs \ + sacss \ + segsmss \ + sncss \ + steptest \ + tagtest \ + teletest \ + walkt0 \ + zcoll \ + zmess \ + ztfm + +# This target records programs that we were once able to build but +# can't at the moment: + +UNBUILDABLE_TARGETS=\ + replay # depends on the EPVM pool + +ALL_TARGETS=$(LIB_TARGETS) $(TEST_TARGETS) $(EXTRA_TARGETS) + + +# == Pseudo-targets == + +all: $(ALL_TARGETS) + + +# == Automated test suites == +# +# See design.mps.tests.target. + +TEST_SUITES=testrun testci testall testansi testpollnone + +$(addprefix $(PFM)/$(VARIETY)/,$(TEST_SUITES)): $(TEST_TARGETS) + ../tool/testrun.sh -s "$(notdir $@)" "$(PFM)/$(VARIETY)" + + +# == Automated performance testing == +# +# See design.mps.tests.target.testratio. 
+ +TESTRATIO_SEED = 1564912146 + +define ratio +TIME_HOT=$$(/usr/bin/time -p $(PFM)/hot/$(1) -x $(TESTRATIO_SEED) $(2) 2>&1 | tail -2 | awk '{T += $$2} END {print T}'); \ +TIME_RASH=$$(/usr/bin/time -p $(PFM)/rash/$(1) -x $(TESTRATIO_SEED) $(2) 2>&1 | tail -2 | awk '{T += $$2} END {print T}'); \ +RATIO=$$(awk "BEGIN{print int(100 * $$TIME_HOT / $$TIME_RASH)}"); \ +printf "Performance ratio (hot/rash) for $(2): %d%%\n" $$RATIO +endef + +testratio: phony + $(MAKE) -f $(PFM).gmk VARIETY=hot djbench gcbench + $(MAKE) -f $(PFM).gmk VARIETY=rash djbench gcbench + $(call ratio,gcbench,amc) + $(call ratio,djbench,mvff) + + +# == MMQA test suite == +# +# See test/README for documentation on running the MMQA test suite. + +MMQA=perl test/qa -p $(PFM) -v $(VARIETY) + +$(PFM)/$(VARIETY)/testmmqa: + if [ "$(VARIETY)" = "cool" ]; then (cd ../test && $(MMQA) runset testsets/coolonly); fi + (cd ../test && $(MMQA) runset testsets/argerr testsets/conerr testsets/passing) + + +# == Toy Scheme interpreter == + +testscheme: phony + $(MAKE) -C ../example/scheme test + + +# These convenience targets allow one to type "make foo" to build target +# foo in selected varieties (or none, for the latter rule). + +$(LIB_TARGETS) $(TEST_TARGETS) $(EVENT_TARGETS) $(TEST_SUITES) testmmqa: phony +ifdef VARIETY + $(MAKE) -f $(PFM).gmk TARGET=$@ variety +else + $(MAKE) -f $(PFM).gmk TARGET=$@ target +endif + + +# "clean" removes the directory containing the build results for the +# platform. + +clean: phony + $(ECHO) "$(PFM): $@" + rm -rf "$(PFM)" + +# "target" builds some varieties of the target named in the TARGET +# macro. +# +# %%VARIETY: When adding a new target, optionally add a recursive make call +# for the new variety, if it should be built by default. It probably +# shouldn't without a product design decision and an update of the readme +# and build manual! 
+# +# Note that we build VARIETY=cool before VARIETY=hot because +# the former doesn't need to optimize and so detects errors more +# quickly; and because the former uses file-at-a-time compilation and +# so can pick up where it left off instead of having to start from the +# beginning of mps.c + +ifdef TARGET +ifndef VARIETY +target: phony + $(MAKE) -f $(PFM).gmk VARIETY=cool variety + $(MAKE) -f $(PFM).gmk VARIETY=hot variety +endif +endif + + +# "variety" builds the target named in the TARGET macro using the +# variety named in the VARIETY macro. + +ifdef VARIETY +ifdef TARGET +variety: $(PFM)/$(VARIETY)/$(TARGET) +endif +endif + + +# THE MPS LIBRARY +# +# The MPS library is built in two ways: +# +# 1. In the usual way, from a pile of object files compiled from their +# corresponding sources. +# +# 2. From mps.c, which effectively concatenates all the sources, allowing +# important global optimisation and inlining to occur. +# +# We mostly use the method (2), because it is fast to compile and execute. +# But we use method (1) for some varieties to ensure correctness of +# code (linkage errors are masked by (2)) and to maintain a correct list +# of source files in case method (2) won't work on some future constrained +# platform. +# +# %%VARIETY: When adding a new variety, add a rule for how to build the +# MPS library for the variety. + +$(PFM)/rash/mps.a: $(PFM)/rash/mps.o +$(PFM)/hot/mps.a: $(PFM)/hot/mps.o +$(PFM)/cool/mps.a: $(MPMOBJ) + + +# OTHER GENUINE TARGETS +# +# Each line defines an executable or library target to be built and the +# object files it is built from. These lines add dependencies to the +# generic rules below, and should not include commands to execute. +# +# %%TARGET: When adding a new target, add the dependencies for the new target +# here. 
+ +ifdef VARIETY + +$(PFM)/$(VARIETY)/abqtest: $(PFM)/$(VARIETY)/abqtest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/addrobj: $(PFM)/$(VARIETY)/addrobj.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/airtest: $(PFM)/$(VARIETY)/airtest.o \ + $(FMTSCMOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/amcss: $(PFM)/$(VARIETY)/amcss.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/amcsshe: $(PFM)/$(VARIETY)/amcsshe.o \ + $(FMTHETSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/amcssth: $(PFM)/$(VARIETY)/amcssth.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/amsss: $(PFM)/$(VARIETY)/amsss.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/amssshe: $(PFM)/$(VARIETY)/amssshe.o \ + $(FMTHETSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/apss: $(PFM)/$(VARIETY)/apss.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/arenacv: $(PFM)/$(VARIETY)/arenacv.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/awlut: $(PFM)/$(VARIETY)/awlut.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/awluthe: $(PFM)/$(VARIETY)/awluthe.o \ + $(FMTHETSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/awlutth: $(PFM)/$(VARIETY)/awlutth.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/btcv: $(PFM)/$(VARIETY)/btcv.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/bttest: $(PFM)/$(VARIETY)/bttest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/djbench: $(PFM)/$(VARIETY)/djbench.o \ + $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)/$(VARIETY)/extcon: $(PFM)/$(VARIETY)/extcon.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/finalcv: $(PFM)/$(VARIETY)/finalcv.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) 
$(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/finaltest: $(PFM)/$(VARIETY)/finaltest.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/forktest: $(PFM)/$(VARIETY)/forktest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/fotest: $(PFM)/$(VARIETY)/fotest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/gcbench: $(PFM)/$(VARIETY)/gcbench.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)/$(VARIETY)/landtest: $(PFM)/$(VARIETY)/landtest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/locbwcss: $(PFM)/$(VARIETY)/locbwcss.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/lockcov: $(PFM)/$(VARIETY)/lockcov.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/lockut: $(PFM)/$(VARIETY)/lockut.o \ + $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/locusss: $(PFM)/$(VARIETY)/locusss.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/locv: $(PFM)/$(VARIETY)/locv.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/messtest: $(PFM)/$(VARIETY)/messtest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mpmss: $(PFM)/$(VARIETY)/mpmss.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mpsicv: $(PFM)/$(VARIETY)/mpsicv.o \ + $(FMTDYTSTOBJ) $(FMTHETSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mv2test: $(PFM)/$(VARIETY)/mv2test.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/nailboardtest: $(PFM)/$(VARIETY)/nailboardtest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/poolncv: $(PFM)/$(VARIETY)/poolncv.o \ + $(POOLNOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/qs: $(PFM)/$(VARIETY)/qs.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/sacss: $(PFM)/$(VARIETY)/sacss.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/segsmss: $(PFM)/$(VARIETY)/segsmss.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) 
$(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/sncss: $(PFM)/$(VARIETY)/sncss.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/steptest: $(PFM)/$(VARIETY)/steptest.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/tagtest: $(PFM)/$(VARIETY)/tagtest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/teletest: $(PFM)/$(VARIETY)/teletest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/walkt0: $(PFM)/$(VARIETY)/walkt0.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/zcoll: $(PFM)/$(VARIETY)/zcoll.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/zmess: $(PFM)/$(VARIETY)/zmess.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/ztfm: $(PFM)/$(VARIETY)/ztfm.o \ + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mpseventcnv: $(PFM)/$(VARIETY)/eventcnv.o \ + $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mpseventpy: $(PFM)/$(VARIETY)/eventpy.o \ + $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mpseventtxt: $(PFM)/$(VARIETY)/eventtxt.o \ + $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mpseventsql: $(PFM)/$(VARIETY)/eventsql.o \ + $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/replay: $(PFM)/$(VARIETY)/replay.o \ + $(PFM)/$(VARIETY)/eventrep.o \ + $(PFM)/$(VARIETY)/table.o \ + $(PFM)/$(VARIETY)/mps.a + +$(PFM)/$(VARIETY)/mpsplan.a: $(PLINTHOBJ) + +endif + + +# GENERIC RULES +# +# These generate build output in the / directory. + +# Object files + +define run-cc-strict +$(ECHO) "$(PFM): $@" +mkdir -p $(PFM) +mkdir -p $(PFM)/$(VARIETY) +$(CC) $(CFLAGSSTRICT) -c -o $@ $< +endef + +define run-cc-lax +$(ECHO) "$(PFM): $@ - compiling with lax flags." 
+mkdir -p $(PFM) +mkdir -p $(PFM)/$(VARIETY) +$(CC) $(CFLAGSLAX) -c -o $@ $< +endef + +# .rule.c-to-o: +$(PFM)/$(VARIETY)/%.o: %.c + $(run-cc-strict) + +$(PFM)/$(VARIETY)/eventsql.o: eventsql.c + $(run-cc-lax) + +$(PFM)/$(VARIETY)/%.o: %.s + $(run-cc-strict) + +$(PFM)/$(VARIETY)/%.o: %.S + $(run-cc-strict) + +# Dependencies +# +# These are included into _this_ makefile (see below). GNU make does the +# right thing as long as it knows how to make the dependency files before +# including them. + +ifdef gendep + +$(PFM)/$(VARIETY)/%.d: %.c + $(ECHO) "$(PFM): $@" + mkdir -p $(PFM) + mkdir -p $(PFM)/$(VARIETY) + $(gendep) + +ifdef VARIETY +ifdef TARGET + +# %%VARIETY: When adding a new variety, add the dependencies files for it +# here. +ifeq ($(VARIETY),rash) +include $(PFM)/$(VARIETY)/mps.d +else +ifeq ($(VARIETY),hot) +include $(PFM)/$(VARIETY)/mps.d +else +include $(MPM:%.c=$(PFM)/$(VARIETY)/%.d) +endif # VARIETY != hot +endif # VARIETY != rash + +# %%PART: When adding a new part, add the dependencies file for the +# new part here. 
+include \ + $(FMTDY:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(FMTDYTST:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(FMTHETST:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(FMTSCM:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(PLINTH:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(POOLN:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(TESTLIB:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(TESTTHR:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(EXTRA_TARGETS:mps%=$(PFM)/$(VARIETY)/%.d) \ + $(TEST_TARGETS:%=$(PFM)/$(VARIETY)/%.d) + +endif # !defined TARGET +endif # !defined VARIETY + +endif # !defined gendep + +# Library + +ifndef RANLIB +RANLIB = : +endif + +$(PFM)/$(VARIETY)/%.a: + $(ECHO) "$(PFM): $@" + rm -f $@ + $(AR) $(ARFLAGS) $@ $^ + $(RANLIB) $@ + +# Executable + +$(PFM)/$(VARIETY)/%: + $(ECHO) "$(PFM): $@" + $(CC) $(CFLAGSSTRICT) $(LINKFLAGS) -o $@ $^ $(LIBS) + +$(PFM)/$(VARIETY)/mpseventsql: + $(ECHO) "$(PFM): $@" + $(CC) $(CFLAGSLAX) $(LINKFLAGS) -o $@ $^ $(LIBS) -lsqlite3 + +# Special targets for development + +# Currently FreeBSD 7 GCC 4.2.1 is the best platform we have for warning +# us about strict aliasing rule violations caused by type puns. This +# target reveals them, and produces an assembler output file that can be +# examined to see if they're actually dangerous. RB 2012-09-07 + +find-puns: phony + { echo '#include "mps.c"'; echo '#include "fmtdy.c"'; } | \ + gcc -S -fverbose-asm -ansi -pedantic -Wall -Wstrict-aliasing=2 -O3 -x c -o pun.s - + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/mps/code/commpost.nmk b/mps/code/commpost.nmk new file mode 100644 index 00000000000..83dd82fa6d2 --- /dev/null +++ b/mps/code/commpost.nmk @@ -0,0 +1,416 @@ +# commpost.nmk: SECOND COMMON FRAGMENT FOR PLATFORMS USING NMAKE -*- makefile -*- +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# DESCRIPTION +# +# Second common makefile fragment for w3*mv.nmk. See commpre.nmk + + +# == Pseudo-targets == + +all: $(ALL_TARGETS) + + +# Convenience targets + +$(ALL_TARGETS) $(OPTIONAL_TARGETS): +!IFDEF VARIETY + $(MAKE) /nologo /f $(PFM).nmk TARGET=$@ variety +!ELSE + $(MAKE) /nologo /f $(PFM).nmk TARGET=$@ target +!ENDIF + +# "clean" removes the directory containing the build results. +# Depends on there being no file called "clean". +# Note that we suspend error processing on the if line because rmdir +# sometimes exits with an error and the message "The directory is not +# empty" even if the /s option is given. See job003854. 
+ +clean: + $(ECHO) $(PFM): $@ + -if exist $(PFM) rmdir /q /s $(PFM) + +# target target +# %%VARIETY: When adding a new variety, optionally, add a recursive make +# call for the new variety, if it should be built by default. It probably +# shouldn't without a product design decision and an update of the readme +# and build manual! +# Depends on there being no file called "target". + +!IFDEF TARGET +!IFNDEF VARIETY +target: + $(MAKE) /nologo /f $(PFM).nmk VARIETY=cool variety + $(MAKE) /nologo /f $(PFM).nmk VARIETY=hot variety +!ENDIF +!ENDIF + +# variety +# Depends on there being no file called "variety". + +!IFDEF VARIETY +!IFDEF TARGET +variety: $(PFM)\$(VARIETY)\$(TARGET) +!ENDIF +!ENDIF + +# testrun testci testall testansi testpollnone +# Runs automated test cases. See design.mps.tests.target. + +testrun testci testall testansi testpollnone: $(TEST_TARGETS) +!IFDEF VARIETY + ..\tool\testrun.bat $(PFM) $(VARIETY) $@ +!ELSE + $(MAKE) /nologo /f $(PFM).nmk VARIETY=cool $@ + $(MAKE) /nologo /f $(PFM).nmk VARIETY=hot $@ +!ENDIF + + +# FLAGS AMALGAMATION +# +# %%VARIETY: When adding a new variety, add the following macros that +# expand to sets of flags that the variety should use: +# +# CFLAGS -- when compiling C; +# CFLAGSSQL -- when compiling mpseventsql; +# LINKFLAGS -- when building executables; +# LIBFLAGS -- when building libraries. 
+ +!IF "$(VARIETY)" == "hot" +CFLAGS=$(CFLAGSCOMMONPRE) $(CFHOT) $(CFLAGSCOMMONPOST) +CFLAGSSQL=$(CFLAGSSQLPRE) $(CFHOT) $(CFLAGSSQLPOST) +LINKFLAGS=$(LINKFLAGSCOMMON) $(LFHOT) +LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSHOT) + +!ELSEIF "$(VARIETY)" == "cool" +CFLAGS=$(CFLAGSCOMMONPRE) $(CFCOOL) $(CFLAGSCOMMONPOST) +CFLAGSSQL=$(CFLAGSSQLPRE) $(CFCOOL) $(CFLAGSSQLPOST) +LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCOOL) +LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCOOL) + +!ELSEIF "$(VARIETY)" == "rash" +CFLAGS=$(CFLAGSCOMMONPRE) $(CFRASH) $(CFLAGSCOMMONPOST) +CFLAGSSQL=$(CFLAGSSQLPRE) $(CFRASH) $(CFLAGSSQLPOST) +LINKFLAGS=$(LINKFLAGSCOMMON) $(LFRASH) +LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSRASH) + +!ENDIF + + +# SOURCE TO OBJECT FILE MAPPINGS +# +# %%PART: When adding a new part, add new macros which expand to the object +# files included in the part +# +# Note: nmake doesn't expand variables within a string replacement +# operation. We work around this by writing out a temporary makefile +# and including it. + +TEMPMAKE=$(TEMP)\mps.nmk +!IF [echo MPMOBJ0 = $$(MPM:[=$(PFM)\$(VARIETY)\) > $(TEMPMAKE)] == 0 \ + && [echo FMTDYOBJ0 = $$(FMTDY:[=$(PFM)\$(VARIETY)\) >> $(TEMPMAKE)] == 0 \ + && [echo FMTTESTOBJ0 = $$(FMTTEST:[=$(PFM)\$(VARIETY)\) >> $(TEMPMAKE)] == 0 \ + && [echo FMTSCHEMEOBJ0 = $$(FMTSCHEME:[=$(PFM)\$(VARIETY)\) >> $(TEMPMAKE)] == 0 \ + && [echo POOLNOBJ0 = $$(POOLN:[=$(PFM)\$(VARIETY)\) >> $(TEMPMAKE)] == 0 \ + && [echo TESTLIBOBJ0 = $$(TESTLIB:[=$(PFM)\$(VARIETY)\) >> $(TEMPMAKE)] == 0 \ + && [echo TESTTHROBJ0 = $$(TESTTHR:[=$(PFM)\$(VARIETY)\) >> $(TEMPMAKE)] == 0 +!INCLUDE $(TEMPMAKE) +!IF [del $(TEMPMAKE)] != 0 +!ERROR Failed to delete $(TEMPMAKE) +!ENDIF +!ENDIF + +MPMOBJ = $(MPMOBJ0:]=.obj) +FMTDYOBJ = $(FMTDYOBJ0:]=.obj) +FMTTESTOBJ = $(FMTTESTOBJ0:]=.obj) +FMTSCHEMEOBJ = $(FMTSCHEMEOBJ0:]=.obj) +POOLNOBJ = $(POOLNOBJ0:]=.obj) +TESTLIBOBJ = $(TESTLIBOBJ0:]=.obj) +TESTTHROBJ = $(TESTTHROBJ0:]=.obj) + + +# THE MPS LIBRARY +# +# The MPS library is built in two ways: +# +# 1. 
In the usual way, from a pile of object files compiled from their
+# corresponding sources.
+#
+# 2. From mps.c, which effectively concatenates all the sources, allowing
+# important global optimisation and inlining to occur.
+#
+# We mostly use the method (2), because it is fast to compile and execute.
+# But we use method (1) for some varieties to ensure correctness of
+# code (linkage errors are masked by (2)) and to maintain a correct list
+# of source files in case method (2) won't work on some future constrained
+# platform.
+#
+# %%VARIETY: When adding a new variety, add a rule for how to build the
+# MPS library for the variety
+
+$(PFM)\rash\mps.lib: $(PFM)\rash\mps.obj
+ $(ECHO) $@
+ $(LIBMAN) $(LIBFLAGS) /OUT:$@ $**
+
+$(PFM)\hot\mps.lib: $(PFM)\hot\mps.obj
+ $(ECHO) $@
+ $(LIBMAN) $(LIBFLAGS) /OUT:$@ $**
+
+$(PFM)\cool\mps.lib: $(MPMOBJ)
+ $(ECHO) $@
+ $(LIBMAN) $(LIBFLAGS) /OUT:$@ $**
+
+
+# OTHER GENUINE TARGETS
+#
+# Each line defines an executable or library target to be built and the object
+# files it is built from. For an executable these lines add dependencies to
+# the generic rules below, and should not include commands to execute.
+# For a library this is not possible and the target should include commands
+# to build it.
+# %%TARGET: When adding a new target, add your new target here + +!IFDEF VARIETY + +$(PFM)\$(VARIETY)\abqtest.exe: $(PFM)\$(VARIETY)\abqtest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\addrobj.exe: $(PFM)\$(VARIETY)\addrobj.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\airtest.exe: $(PFM)\$(VARIETY)\airtest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTSCHEMEOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\amcss.exe: $(PFM)\$(VARIETY)\amcss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\amcsshe.exe: $(PFM)\$(VARIETY)\amcsshe.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\amcssth.exe: $(PFM)\$(VARIETY)\amcssth.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)\$(VARIETY)\amsss.exe: $(PFM)\$(VARIETY)\amsss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\amssshe.exe: $(PFM)\$(VARIETY)\amssshe.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\apss.exe: $(PFM)\$(VARIETY)\apss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\arenacv.exe: $(PFM)\$(VARIETY)\arenacv.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\awlut.exe: $(PFM)\$(VARIETY)\awlut.obj \ + $(FMTTESTOBJ) \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)\$(VARIETY)\awluthe.exe: $(PFM)\$(VARIETY)\awluthe.obj \ + $(FMTTESTOBJ) \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)\$(VARIETY)\awlutth.exe: $(PFM)\$(VARIETY)\awlutth.obj \ + $(FMTTESTOBJ) \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)\$(VARIETY)\btcv.exe: $(PFM)\$(VARIETY)\btcv.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\bttest.exe: $(PFM)\$(VARIETY)\bttest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\djbench.exe: $(PFM)\$(VARIETY)\djbench.obj \ + $(TESTLIBOBJ) $(TESTTHROBJ) + 
+$(PFM)\$(VARIETY)\extcon.exe: $(PFM)\$(VARIETY)\extcon.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\finalcv.exe: $(PFM)\$(VARIETY)\finalcv.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\finaltest.exe: $(PFM)\$(VARIETY)\finaltest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\fotest.exe: $(PFM)\$(VARIETY)\fotest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\gcbench.exe: $(PFM)\$(VARIETY)\gcbench.obj \ + $(FMTTESTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)\$(VARIETY)\landtest.exe: $(PFM)\$(VARIETY)\landtest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\locbwcss.exe: $(PFM)\$(VARIETY)\locbwcss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\lockcov.exe: $(PFM)\$(VARIETY)\lockcov.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\lockut.exe: $(PFM)\$(VARIETY)\lockut.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)\$(VARIETY)\locusss.exe: $(PFM)\$(VARIETY)\locusss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\locv.exe: $(PFM)\$(VARIETY)\locv.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\messtest.exe: $(PFM)\$(VARIETY)\messtest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\mpmss.exe: $(PFM)\$(VARIETY)\mpmss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\mpsicv.exe: $(PFM)\$(VARIETY)\mpsicv.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\mv2test.exe: $(PFM)\$(VARIETY)\mv2test.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\nailboardtest.exe: $(PFM)\$(VARIETY)\nailboardtest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\poolncv.exe: $(PFM)\$(VARIETY)\poolncv.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(POOLNOBJ) + +$(PFM)\$(VARIETY)\qs.exe: $(PFM)\$(VARIETY)\qs.obj \ + 
$(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\sacss.exe: $(PFM)\$(VARIETY)\sacss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\segsmss.exe: $(PFM)\$(VARIETY)\segsmss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\sncss.exe: $(PFM)\$(VARIETY)\sncss.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\steptest.exe: $(PFM)\$(VARIETY)\steptest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\tagtest.exe: $(PFM)\$(VARIETY)\tagtest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\teletest.exe: $(PFM)\$(VARIETY)\teletest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\walkt0.exe: $(PFM)\$(VARIETY)\walkt0.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\zcoll.exe: $(PFM)\$(VARIETY)\zcoll.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\zmess.exe: $(PFM)\$(VARIETY)\zmess.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\ztfm.exe: $(PFM)\$(VARIETY)\ztfm.obj \ + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + +$(PFM)\$(VARIETY)\mpseventcnv.exe: $(PFM)\$(VARIETY)\eventcnv.obj \ + $(PFM)\$(VARIETY)\mps.lib + +$(PFM)\$(VARIETY)\mpseventpy.exe: $(PFM)\$(VARIETY)\eventpy.obj \ + $(PFM)\$(VARIETY)\mps.lib + +$(PFM)\$(VARIETY)\mpseventtxt.exe: $(PFM)\$(VARIETY)\eventtxt.obj \ + $(PFM)\$(VARIETY)\mps.lib + +$(PFM)\$(VARIETY)\mpseventsql.exe: $(PFM)\$(VARIETY)\eventsql.obj \ + $(PFM)\$(VARIETY)\sqlite3.obj $(PFM)\$(VARIETY)\mps.lib + +$(PFM)\$(VARIETY)\replay.exe: $(PFM)\$(VARIETY)\replay.obj \ + $(PFM)\$(VARIETY)\eventrep.obj \ + $(PFM)\$(VARIETY)\table.obj \ + $(PFM)\$(VARIETY)\mps.lib + +# Have to rename the object file, because the names must match, or +# the template rule for .exe.obj won't be used. 
+$(PFM)\$(VARIETY)\replaysw.obj: $(PFM)\$(VARIETY)\replay.obj + $(ECHO) $@ + copy $** $@ >nul: + +$(PFM)\$(VARIETY)\mpseventcnv.obj: $(PFM)\$(VARIETY)\eventcnv.obj + copy $** $@ >nul: + +$(PFM)\$(VARIETY)\mpseventpy.obj: $(PFM)\$(VARIETY)\eventpy.obj + copy $** $@ >nul: + +$(PFM)\$(VARIETY)\mpseventtxt.obj: $(PFM)\$(VARIETY)\eventtxt.obj + copy $** $@ >nul: + +$(PFM)\$(VARIETY)\mpseventsql.obj: $(PFM)\$(VARIETY)\eventsql.obj + copy $** $@ >nul: + +!ENDIF + + +# GENERIC RULES + +# Object files + +{}.c{$(PFM)\$(VARIETY)}.obj: + $(ECHO) $@ + @if not exist $(PFM) mkdir $(PFM) + @if not exist $(PFM)\$(VARIETY) mkdir $(PFM)\$(VARIETY) + $(CC) /c $(CFLAGS) /Fo$@ $< + +$(PFM)\$(VARIETY)\sqlite3.obj: + $(ECHO) $@ + @if not exist $(PFM) mkdir $(PFM) + @if not exist $(PFM)\$(VARIETY) mkdir $(PFM)\$(VARIETY) + $(CC) /c $(CFLAGSSQL) /Fo$@ sqlite3.c + +{}.asm{$(PFM)\$(VARIETY)}.obj: + $(ECHO) $@ + @if not exist $(PFM) mkdir $(PFM) + @if not exist $(PFM)\$(VARIETY) mkdir $(PFM)\$(VARIETY) + $(MASM) /nologo /c /Fo$@ $< + +# Coverage files +#{$(PFM)\$(VARIETY)}.exe{$(PFM)\$(VARIETY)}.cov: +# $(ECHO) $@ +# cd $(PFM)\$(VARIETY) +# prep /nologo /lv $( $(@F) + + +# Executables + +{$(PFM)\$(VARIETY)}.obj{$(PFM)\$(VARIETY)}.exe: + $(ECHO) $@ + $(LINKER) $(LINKFLAGS) /OUT:$@ $(**) + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/mps/code/commpre.nmk b/mps/code/commpre.nmk
new file mode 100644
index 00000000000..77102f03355
--- /dev/null
+++ b/mps/code/commpre.nmk
@@ -0,0 +1,363 @@
+# commpre.nmk: FIRST COMMON FRAGMENT FOR PLATFORMS USING NMAKE -*- makefile -*-
+#
+# $Id$
+# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license.
+#
+# DESCRIPTION
+#
+# .description: This makefile fragment is included in more specific
+# makefiles for platforms which use the "mv" builder. This is
+# the first of two common makefile fragments (the other is commpost.nmk).
+# Alas, due to shortcomings in nmake, it is not possible to use only one
+# common fragment.
+#
+# %%PART: When adding a new part, add a new parameter for the files included
+# in the part
+# Parameters:
+# PFM platform code, e.g. "w3i3mv"
+# PFMDEFS /D options to define platform's preprocessor symbols
+# to the compiler. Avoid using this if possible, as it
+# prevents the MPS being built with a simple command like
+# "cl mps.c".
+# MPMCOMMON list of sources which make up the "mpm" part for all
+# platforms. Each source is stripped of its .c extension
+# and surrounded with [brackets].
+# MPMPF as above for the current platform.
+# PLINTH as above for the "plinth" part +# AMC as above for the "amc" part +# AMS as above for the "ams" part +# LO as above for the "lo" part +# POOLN as above for the "pooln" part +# SNC as above for the "snc" part +# POOLS as above for all pools included in the target +# MPM as above for the MPMCOMMON + MPMPF + PLINTH + POOLS +# DW as above for the "dw" part +# FMTTEST as above for the "fmttest" part +# FMTSCHEME as above for the "fmtscheme" part +# TESTLIB as above for the "testlib" part +# TESTTHR as above for the "testthr" part +# NOISY if defined, causes command to be emitted +# +# +# EDITING +# +# To add new targets, varieties, and parts: +# Search for the string "%%TARGET", "%%VARIETY", or "%%PART" in this makefile +# and follow the instructions. +# + + +# TARGETS +# +# +# %%TARGET: When adding a new target, add it to one of the variables +# in this section. Library components go in LIB_TARGETS. + +LIB_TARGETS=mps.lib + +# Test cases go in TEST_TARGETS. + +TEST_TARGETS=\ + abqtest.exe \ + addrobj.exe \ + airtest.exe \ + amcss.exe \ + amcsshe.exe \ + amcssth.exe \ + amsss.exe \ + amssshe.exe \ + apss.exe \ + arenacv.exe \ + awlut.exe \ + awluthe.exe \ + awlutth.exe \ + btcv.exe \ + bttest.exe \ + djbench.exe \ + extcon.exe \ + finalcv.exe \ + finaltest.exe \ + fotest.exe \ + gcbench.exe \ + landtest.exe \ + locbwcss.exe \ + lockcov.exe \ + lockut.exe \ + locusss.exe \ + locv.exe \ + messtest.exe \ + mpmss.exe \ + mpsicv.exe \ + mv2test.exe \ + nailboardtest.exe \ + poolncv.exe \ + qs.exe \ + sacss.exe \ + segsmss.exe \ + sncss.exe \ + steptest.exe \ + tagtest.exe \ + teletest.exe \ + walkt0.exe \ + zcoll.exe \ + zmess.exe \ + ztfm.exe + +# Stand-alone programs go in EXTRA_TARGETS if they should always be +# built, or in OPTIONAL_TARGETS if they should only be built if + +EXTRA_TARGETS=mpseventcnv.exe mpseventpy.exe mpseventtxt.exe +OPTIONAL_TARGETS=mpseventsql.exe + +# This target records programs that we were once able to build but +# can't at the moment: 
+# +# replay -- depends on the EPVM pool. + +UNBUILDABLE_TARGETS=replay.exe + +ALL_TARGETS=$(LIB_TARGETS) $(TEST_TARGETS) $(EXTRA_TARGETS) + + +# PARAMETERS +# +# +# %%PART: When adding a new part, add the sources for the new part here. + +MPMCOMMON=\ + [abq] \ + [arena] \ + [arenacl] \ + [arenavm] \ + [arg] \ + [boot] \ + [bt] \ + [buffer] \ + [cbs] \ + [dbgpool] \ + [dbgpooli] \ + [event] \ + [failover] \ + [format] \ + [freelist] \ + [global] \ + [land] \ + [ld] \ + [locus] \ + [message] \ + [meter] \ + [mpm] \ + [mpsi] \ + [nailboard] \ + [policy] \ + [pool] \ + [poolabs] \ + [poolmfs] \ + [poolmrg] \ + [poolmv2] \ + [protocol] \ + [range] \ + [rangetree] \ + [ref] \ + [ring] \ + [root] \ + [sa] \ + [sac] \ + [scan] \ + [seg] \ + [shield] \ + [splay] \ + [ss] \ + [table] \ + [trace] \ + [traceanc] \ + [tract] \ + [trans] \ + [tree] \ + [version] \ + [vm] \ + [walk] +PLINTH = [mpsliban] [mpsioan] +AMC = [poolamc] +AMS = [poolams] +AWL = [poolawl] +LO = [poollo] +MVFF = [poolmvff] +POOLN = [pooln] +SNC = [poolsnc] +FMTDY = [fmtdy] [fmtno] +FMTTEST = [fmthe] [fmtdy] [fmtno] [fmtdytst] +FMTSCHEME = [fmtscheme] +TESTLIB = [testlib] [getoptl] +TESTTHR = [testthrw3] +POOLS = $(AMC) $(AMS) $(AWL) $(LO) $(MV2) $(MVFF) $(SNC) +MPM = $(MPMCOMMON) $(MPMPF) $(POOLS) $(PLINTH) + + +# CHECK PARAMETERS +# +# +# %%PART: When adding a new part, add checks for the parameter with the +# sources for the new part. 
+ +!IFNDEF PFM +!ERROR commpre.nmk: PFM not defined +!ENDIF +!IFNDEF MPM +!ERROR commpre.nmk: MPM not defined +!ENDIF +!IFNDEF MPMCOMMON +!ERROR commpre.nmk: MPMCOMMON not defined +!ENDIF +!IFNDEF MPMPF +!ERROR commpre.nmk: MPMPF not defined +!ENDIF +!IFNDEF PLINTH +!ERROR commpre.nmk: PLINTH not defined +!ENDIF +!IFNDEF LO +!ERROR commpre.nmk: LO not defined +!ENDIF +!IFNDEF AMC +!ERROR commpre.nmk: AMC not defined +!ENDIF +!IFNDEF AMS +!ERROR commpre.nmk: AMS not defined +!ENDIF +!IFNDEF POOLN +!ERROR commpre.nmk: POOLN not defined +!ENDIF +!IFNDEF SNC +!ERROR commpre.nmk: SNC not defined +!ENDIF +!IFNDEF FMTDY +!ERROR commpre.nmk: FMTDY not defined +!ENDIF +!IFNDEF FMTTEST +!ERROR commpre.nmk: FMTTEST not defined +!ENDIF +!IFNDEF FMTSCHEME +!ERROR commpre.nmk: FMTSCHEME not defined +!ENDIF +!IFNDEF TESTLIB +!ERROR commpre.nmk: TESTLIB not defined +!ENDIF +!IFNDEF TESTTHR +!ERROR commpre.nmk: TESTTHR not defined +!ENDIF + + +# DECLARATIONS + + +!IFDEF NOISY +ECHO = rem +!ELSE +.SILENT: +ECHO = echo +!ENDIF + + +# C FLAGS + +CFLAGSTARGETPRE = +CFLAGSTARGETPOST = +CRTFLAGSHOT = +CRTFLAGSCOOL = +LINKFLAGSHOT = +LINKFLAGSCOOL = + +CFLAGSSQLPRE = /nologo $(PFMDEFS) +CFLAGSCOMMONPRE = /nologo $(PFMDEFS) $(CFLAGSTARGETPRE) +CFLAGSSQLPOST = +CFLAGSCOMMONPOST = $(CFLAGSTARGETPOST) + +# Flags for use in the variety combinations +CFLAGSHOT = /O2 +# (above /O2 (maximise speed) used to be set to /Ox +# (maximise optimisations) in for tool versions before VS 9) +# We used to have /GZ here (stack probe). +# Note that GZ is specific to version 12 of the cl tool. drj 2003-11-04 +# It is ignored on earlier versions of the cl tool. +# /GZ here generates a dependency on the C library and when we are +# building a DLL, mpsdy.dll, the linker step will fail (error LNK2001: +# unresolved external symbol __chkesp). 
See +# http://support.microsoft.com/kb/q191669/ +CFLAGSCOOL = +CFLAGSINTERNAL = /Zi +CFLAGSEXTERNAL = + +# The combinations of variety +# %%VARIETY: When adding a new variety, define a macro containing the set +# of flags for the new variety. +CFRASH = /DCONFIG_VAR_RASH $(CRTFLAGSHOT) $(CFLAGSHOT) $(CFLAGSEXTERNAL) +CFHOT = /DCONFIG_VAR_HOT $(CRTFLAGSHOT) $(CFLAGSHOT) $(CFLAGSINTERNAL) +CFCOOL = /DCONFIG_VAR_COOL $(CRTFLAGSCOOL) $(CFLAGSCOOL) $(CFLAGSINTERNAL) + +# Microsoft documentation is not very clear on the point of using both +# optimization and debug information + +# LINKER FLAGS +# %%VARIETY: When adding a new variety, define a macro containing the flags +# for the new variety +LINKER = link +LINKFLAGSCOMMON = /nologo /LARGEADDRESSAWARE +LINKFLAGSINTERNAL = /DEBUG +# ( Internal flags used to be set to /DEBUG:full ) +LINKFLAGSEXTERNAL = /RELEASE + +LFRASH = $(LINKFLAGSHOT) $(LINKFLAGSEXTERNAL) +LFHOT = $(LINKFLAGSHOT) $(LINKFLAGSINTERNAL) +LFCOOL = $(LINKFLAGSCOOL) $(LINKFLAGSINTERNAL) + +#LFCV = /PROFILE /DEBUG:full /DEBUGTYPE:cv + +# Library manager +# %%VARIETY: When adding a new variety, define a macro containing the flags +# for the new variety +LIBMAN = lib # can't call this LIB - it screws the environment +LIBFLAGSCOMMON = + +LIBFLAGSRASH = +LIBFLAGSHOT = +LIBFLAGSCOOL = + +# Browser database manager [not used at present] +#BSC = bscmake +#BSCFLAGS = /nologo /n + + +# == Common definitions == +# %%PART: When adding a new part, add it here, unless it's platform-specific +# [It is not possible use a macro, like $(PFM), in a substitution, +# hence all parts end up being platform-specific.] + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/config.h b/mps/code/config.h new file mode 100644 index 00000000000..02d335efec6 --- /dev/null +++ b/mps/code/config.h @@ -0,0 +1,768 @@ +/* config.h: MPS CONFIGURATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * PURPOSE + * + * This module translates from high-level symbols defined by the + * external build system (gnumake, nmake, etc.) into specific sets + * of features used by MPS modules. + * + * TODO: It also defines defaults for various parts of the MPS. + * Perhaps this should be split into a separate header with better + * documentation. See + * . + * + * DESIGN + * + * . + * + * TODO: Many of the default constants defined in this file do not + * have documented justification. See GitHub issue #176 + * . 
+ */ + +#ifndef config_h +#define config_h + + +/* CONFIG_VAR_* -- variety Configuration + * + * These are translated into the directives CONFIG_ASSERT, CONFIG_STATS, + * CONFIG_LOG, etc. which control actual compilation features. + */ + +/* CONFIG_VAR_RASH -- the rash and reckless variety + * + * This variety switches off as many features as possible for maximum + * performance, but is therefore unsafe and undebuggable. It is not intended + * for use, but for comparison with the hot variety, to check that assertion, + * logging, etc. have negligible overhead. + */ + +#if defined(CONFIG_VAR_RASH) +/* no asserts */ +/* no statistic meters */ +/* no telemetry log events */ + + +/* CONFIG_VAR_COOL -- cool variety + * + * The cool variety is intended for use when developing an integration with + * the MPS or debugging memory problems or collecting detailed telemetry + * data for performance analysis. It has more thorough consistency checking + * and data collection and output, and full debugging information. + */ + +#elif defined(CONFIG_VAR_COOL) +#define CONFIG_ASSERT +#define CONFIG_ASSERT_ALL +#define CONFIG_ASSERT_ABORT +#define CONFIG_STATS +#ifndef CHECKLEVEL +#define CHECKLEVEL CheckLevelSHALLOW +#endif +#define CONFIG_LOG +#define CONFIG_LOG_ALL + + +#else /* CONFIG_VAR_* */ + +/* CONFIG_VAR_HOT -- the hot variety + * + * This variety is the default variety for distribution in products that use + * the MPS. It has maximum performance while retaining a good level of + * consistency checking and allowing some debugging and telemetry features. 
+ */ + +/* #elif defined(CONFIG_VAR_HOT) */ +#define CONFIG_ASSERT +/* Note, not CONFIG_ASSERT_ABORT */ +#ifndef CHECKLEVEL +#define CHECKLEVEL CheckLevelMINIMAL +#endif +/* no statistic meters */ +#define CONFIG_LOG + +#endif /* CONFIG_VAR_* */ + + +/* Build Features */ + + +#if defined(CONFIG_ASSERT) +/* asserts: AVER, AVERT, NOTREACHED, CHECKx */ +/* note: a direct call to ASSERT() will *still* fire */ +#define AVER_AND_CHECK +#if defined(CONFIG_ASSERT_ALL) +#define AVER_AND_CHECK_ALL +#define MPS_ASSERT_STRING "assertastic" +#else /* CONFIG_ASSERT_ALL, not */ +#define MPS_ASSERT_STRING "asserted" +#endif /* CONFIG_ASSERT_ALL */ +#else /* CONFIG_ASSERT, not */ +#define AVER_AND_CHECK_NONE +#define MPS_ASSERT_STRING "nonasserted" +#endif +#if defined(CONFIG_ASSERT_ABORT) +#define ASSERT_ABORT() abort() +#else +#define ASSERT_ABORT() NOOP +#endif + + +#if defined(CONFIG_STATS) +/* CONFIG_STATS = STATISTICS = METERs */ +#define STATISTICS +#define MPS_STATS_STRING "stats" +#else +#define STATISTICS_NONE +#define MPS_STATS_STRING "nonstats" +#endif + + +#if defined(CONFIG_LOG) +/* TELEMETRY = LOG = EVENTs */ +#define EVENT +#if defined(CONFIG_LOG_ALL) +#define EVENT_ALL 1 /* log events on critical path */ +#define MPS_LOG_STRING "logtastic" +#else /* CONFIG_LOG_ALL, not */ +#define EVENT_ALL 0 /* don't log events on critical path */ +#define MPS_LOG_STRING "logging" +#endif /* CONFIG_LOG_ALL */ +#else /* CONFIG_LOG, not */ +#define EVENT_NONE +#define MPS_LOG_STRING "nonlogging" +#endif /* CONFIG_LOG */ + + +/* CONFIG_PLINTH_NONE -- exclude the ANSI plinth + * + * Some MPS deployment environments want to avoid dependencies on the + * standard C library. In this case, the plinth, defined in mpslib.h must + * be supplied when linking. + * + * For example, Open Dylan on Windows does not link the C library, but + * supplies its own plinth directly using Windows and Dylan interfaces. 
+ * + * CONFIG_PLINTH_NONE tells mps.c to exclude the ANSI plinth and removes + * all standard C library dependencies. e.g. + * + * cc -O2 -c -DCONFIG_PLINTH_NONE mps.c + */ + +#if !defined(CONFIG_PLINTH_NONE) +#define PLINTH +#else +#define PLINTH_NONE +#endif + + +/* CONFIG_PF_ANSI -- use the ANSI platform + * + * This symbol tells mps.c to exclude the sources for the + * auto-detected platform, and use the generic ("ANSI") platform + * instead. + */ + +#if defined(CONFIG_PF_ANSI) +#define PLATFORM_ANSI +#endif + + +/* CONFIG_THREAD_SINGLE -- support single-threaded execution only + * + * This symbol causes the MPS to be built for single-threaded + * execution only, where locks are not needed and so the generic + * ("ANSI") lock module lockan.c can be used instead of the + * platform-specific lock module. + */ + +#if !defined(CONFIG_THREAD_SINGLE) +#define LOCK +#else +#define LOCK_NONE +#endif + + +/* CONFIG_POLL_NONE -- no support for polling + * + * This symbol causes the MPS to built without support for polling. + * This means that garbage collections will only happen if requested + * explicitly via mps_arena_collect() or mps_arena_step(), but it also + * means that protection is not needed, and so shield operations can + * be replaced with no-ops in mpm.h. + */ + +#if !defined(CONFIG_POLL_NONE) +#define REMEMBERED_SET +#define SHIELD +#else +#if !defined(CONFIG_THREAD_SINGLE) +#error "CONFIG_POLL_NONE without CONFIG_THREAD_SINGLE" +#endif +#define REMEMBERED_SET_NONE +#define SHIELD_NONE +#endif + + +#define MPS_VARIETY_STRING \ + MPS_ASSERT_STRING "." MPS_LOG_STRING "." MPS_STATS_STRING + + +/* Platform Configuration */ + +#include "mpstd.h" + +/* Suppress Visual C warnings at /W4 (warning level 4) */ +/* This is also done in testlib.h. 
*/ + +#ifdef MPS_BUILD_MV + +/* "constant conditional" (provoked by MPS_END) */ +#pragma warning(disable: 4127) + +#endif /* MPS_BUILD_MV */ + + +/* Suppress Pelles C warnings at /W2 (warning level 2) */ +/* Some of the same settings are done in testlib.h. */ + +#ifdef MPS_BUILD_PC + +/* "Unreachable code" (provoked by AVER, if condition is constantly true). */ +#pragma warn(disable: 2154) + +/* "Consider changing type to 'size_t' for loop variable" */ +#pragma warn(disable: 2804) + +#endif /* MPS_BUILD_PC */ + + +/* MPS_FILE -- expands to __FILE__ in nested macros */ + +#ifdef MPS_BUILD_PC + +/* Pelles C loses definition of __FILE__ in deeply nested macro + * expansions. See + */ +#define MPS_FILE "<__FILE__ unavailable in " MPS_PF_STRING ">" + +#else + +#define MPS_FILE __FILE__ + +#endif + + +/* Function attributes */ +/* Some of these are also defined in testlib.h */ + +/* Attribute for functions that take a printf-like format argument, so + * that the compiler can check the format specifiers against the types + * of the arguments. + * GCC: + * Clang: + */ +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) +#define ATTRIBUTE_FORMAT(ARGLIST) __attribute__((__format__ ARGLIST)) +#else +#define ATTRIBUTE_FORMAT(ARGLIST) +#endif + +/* Attribute for functions that should not be instrumented by Clang's + * address sanitizer. + * + */ +#if defined(MPS_BUILD_LL) +#if __has_feature(address_sanitizer) +#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((__no_sanitize_address__)) +#else +#define ATTRIBUTE_NO_SANITIZE_ADDRESS +#endif +#else +#define ATTRIBUTE_NO_SANITIZE_ADDRESS +#endif + +/* Attribute for functions that must not be inlined. + * GCC: + * MSVC: + */ +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) +#define ATTRIBUTE_NOINLINE __attribute__((__noinline__)) +#elif defined(MPS_BUILD_MV) +#define ATTRIBUTE_NOINLINE __declspec(noinline) +#else +#define ATTRIBUTE_NOINLINE +#endif + +/* Attribute for functions that do not return. 
+ * GCC: + * Clang: + */ +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) +#define ATTRIBUTE_NORETURN __attribute__((__noreturn__)) +#else +#define ATTRIBUTE_NORETURN +#endif + +/* Attribute for functions that may be unused in some build configurations. + * GCC: + * + * This attribute must be applied to all Check functions, otherwise + * the RASH variety fails to compile with -Wunused-function. (It + * should not be applied to functions that are unused in all build + * configurations: these functions should not be compiled.) + */ +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) +#define ATTRIBUTE_UNUSED __attribute__((__unused__)) +#else +#define ATTRIBUTE_UNUSED +#endif + + +/* Compiler extensions */ + +/* LIKELY -- likely conditions + * + * Use to annotate conditions that are likely to be true, such as + * assertions, to help move unlikely code out-of-line. See + * . + */ + +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) +#define LIKELY(exp) __builtin_expect((exp) != 0, 1) +#else +#define LIKELY(exp) ((exp) != 0) +#endif + + +/* Buffer Configuration -- see */ + +#define BUFFER_RANK_DEFAULT (mps_rank_exact()) + + +/* Format defaults: see */ + +#define FMT_ALIGN_DEFAULT ((Align)MPS_PF_ALIGN) +#define FMT_HEADER_SIZE_DEFAULT ((Size)0) +#define FMT_SCAN_DEFAULT (&FormatNoScan) +#define FMT_SKIP_DEFAULT (&FormatNoSkip) +#define FMT_FWD_DEFAULT (&FormatNoMove) +#define FMT_ISFWD_DEFAULT (&FormatNoIsMoved) +#define FMT_PAD_DEFAULT (&FormatNoPad) +#define FMT_CLASS_DEFAULT (&FormatDefaultClass) + + +/* Pool AMC Configuration -- see */ + +#define AMC_INTERIOR_DEFAULT TRUE +/* AMC treats objects larger than or equal to this as "Large" */ +#define AMC_LARGE_SIZE_DEFAULT ((Size)32768) +#define AMC_EXTEND_BY_DEFAULT ((Size)8192) + + +/* Pool AMS Configuration -- see */ + +#define AMS_SUPPORT_AMBIGUOUS_DEFAULT TRUE +#define AMS_GEN_DEFAULT 0 + + +/* Pool AWL Configuration -- see */ + +#define AWL_GEN_DEFAULT 0 +#define AWL_HAVE_SEG_SA_LIMIT TRUE +#define 
AWL_SEG_SA_LIMIT 200 /* TODO: Improve guesswork with measurements */ +#define AWL_HAVE_TOTAL_SA_LIMIT FALSE +#define AWL_TOTAL_SA_LIMIT 0 + + +/* Pool LO Configuration -- see */ + +#define LO_GEN_DEFAULT 0 + + +/* Pool MFS Configuration -- see */ + +#define MFS_EXTEND_BY_DEFAULT ((Size)65536) + + +/* Pool MVFF Configuration -- see */ + +#define MVFF_EXTEND_BY_DEFAULT ((Size)65536) +#define MVFF_AVG_SIZE_DEFAULT ((Size)32) +#define MVFF_ALIGN_DEFAULT MPS_PF_ALIGN +#define MVFF_SLOT_HIGH_DEFAULT FALSE +#define MVFF_ARENA_HIGH_DEFAULT FALSE +#define MVFF_FIRST_FIT_DEFAULT TRUE +#define MVFF_SPARE_DEFAULT 0.75 + + +/* Pool MVT Configuration -- see */ + +/* TODO: These numbers were lifted from mv2test and need thought. See + GitHub issue #176 + . */ + +#define MVT_ALIGN_DEFAULT MPS_PF_ALIGN +#define MVT_MIN_SIZE_DEFAULT MPS_PF_ALIGN +#define MVT_MEAN_SIZE_DEFAULT 32 +#define MVT_MAX_SIZE_DEFAULT 8192 +#define MVT_RESERVE_DEPTH_DEFAULT 1024 +#define MVT_FRAG_LIMIT_DEFAULT 30 + + +/* Arena Configuration -- see */ + +#define ArenaPollALLOCTIME (65536.0) + +/* .client.seg-size: ARENA_CLIENT_GRAIN_SIZE is the minimum size, in + * bytes, of a grain in the client arena. It's set at 8192 with no + * particular justification. */ + +#define ARENA_CLIENT_GRAIN_SIZE ((Size)8192) + +#define ARENA_DEFAULT_COMMIT_LIMIT ((Size)-1) + +#define ARENA_SPARE_DEFAULT 0.75 + +/* ARENA_DEFAULT_PAUSE_TIME is the maximum time (in seconds) that + * operations within the arena may pause the mutator for. The default + * is set for typical human interaction. See mps_arena_pause_time_set + * in the manual. */ + +#define ARENA_DEFAULT_PAUSE_TIME (0.1) + +#define ARENA_DEFAULT_ZONED TRUE + +/* ARENA_MINIMUM_COLLECTABLE_SIZE is the minimum size (in bytes) of + * collectable memory that might be considered worthwhile to run a + * full garbage collection. 
*/ + +#define ARENA_MINIMUM_COLLECTABLE_SIZE ((Size)1000000) + +/* ARENA_DEFAULT_COLLECTION_RATE is an estimate of the MPS's + * collection rate (in work per second; see ), for + * use in the case where there isn't enough data to use a measured + * value. */ + +#define ARENA_DEFAULT_COLLECTION_RATE (25000000.0) + +/* ARENA_DEFAULT_COLLECTION_OVERHEAD is an estimate of the MPS's + * collection overhead (in seconds), for use in the case where there + * isn't enough data to use a measured value. */ + +#define ARENA_DEFAULT_COLLECTION_OVERHEAD (0.1) + +/* ARENA_MAX_COLLECT_FRACTION is the maximum fraction of runtime that + * ArenaStep is prepared to spend in collections. */ + +#define ARENA_MAX_COLLECT_FRACTION (0.1) + +/* ArenaDefaultZONESET is the zone set used by LocusPrefDEFAULT. + * + * TODO: This is left over from before branches 2014-01-29/mps-chain-zones + * and 2014-01-17/cbs-tract-alloc reformed allocation, and may now be + * doing more harm than good. Experiment with setting to ZoneSetUNIV. */ + +#define ArenaDefaultZONESET (ZoneSetUNIV << (MPS_WORD_WIDTH / 2)) + +/* LocusPrefDEFAULT is the allocation preference used by manual pool + * classes (these don't care where they allocate). */ + +#define LocusPrefDEFAULT { \ + LocusPrefSig, /* sig */ \ + FALSE, /* high */ \ + ArenaDefaultZONESET, /* zoneSet */ \ + ZoneSetEMPTY, /* avoid */ \ +} + +#define LDHistoryLENGTH ((Size)4) + +/* Value of MPS_KEY_EXTEND_BY for the arena control pool. */ +#define CONTROL_EXTEND_BY ((Size)32768) + +#define VM_ARENA_SIZE_DEFAULT ((Size)1 << 28) + + +/* Locus configuration -- see */ + +/* Weighting for the current observation, in the exponential moving + * average computation of the mortality of a generation. */ +#define LocusMortalityALPHA (0.4) + + +/* Stack probe configuration -- see */ + +/* Currently StackProbe has a useful implementation only on Windows. */ +#if defined(MPS_OS_W3) && !defined(CONFIG_PF_ANSI) +/* See for a justification of this value. 
*/ +#define StackProbeDEPTH ((Size)500) +#else +#define StackProbeDEPTH ((Size)0) +#endif + + +/* Shield Configuration -- see */ + +#define ShieldQueueLENGTH 512 /* initial length of shield queue */ +#define ShieldDepthWIDTH 4 /* log2(max nested exposes + 1) */ + + +/* VM Configuration -- see */ + +#define VMAN_PAGE_SIZE ((Align)4096) +#define VMJunkBYTE ((unsigned char)0xA9) +#define VMParamSize (sizeof(Word)) + + +/* .feature.li: Linux feature specification + * + * The MPS needs the following symbols which are not defined if the + * -ansi option is given to GCC: + * + * Source Symbols Header Feature + * =========== ========================= ============= ==================== + * eventtxt.c setenv _GNU_SOURCE + * lockix.c pthread_mutexattr_settype _XOPEN_SOURCE >= 500 + * prmcix.h stack_t, siginfo_t _XOPEN_SOURCE + * prmclii3.c REG_EAX etc. _GNU_SOURCE + * prmclii6.c REG_RAX etc. _GNU_SOURCE + * pthrdext.c sigaction etc. _XOPEN_SOURCE + * vmix.c MAP_ANON _GNU_SOURCE + * + * It is not possible to localize these feature specifications around + * the individual headers: all headers share a common set of features + * (via ) and so all sources in the same compilation unit + * must turn on the same set of features. + * + * See "Feature Test Macros" in the Glibc Manual: + * + */ + +#if defined(MPS_OS_LI) + +#if defined(_XOPEN_SOURCE) && _XOPEN_SOURCE < 500 +#undef _XOPEN_SOURCE +#endif +#if !defined(_XOPEN_SOURCE) +#define _XOPEN_SOURCE 500 +#endif + +#if !defined(_GNU_SOURCE) +#define _GNU_SOURCE +#endif + +#endif + + +/* .feature.xc: macOS feature specification + * + * The MPS needs the following symbols which are not defined by default + * + * Source Symbols Header Feature + * =========== ========================= ============= ==================== + * prmclii3.c __eax etc. _XOPEN_SOURCE + * prmclii6.c __rax etc. 
_XOPEN_SOURCE + * + * It is not possible to localize these feature specifications around + * the individual headers: all headers share a common set of features + * (via ) and so all sources in the same compilation unit + * must turn on the same set of features. + */ + +#if defined(MPS_OS_XC) + +#if !defined(_XOPEN_SOURCE) +#define _XOPEN_SOURCE +#endif + +#endif + + +/* Almost all of protxc.c etc. are architecture-independent, but unfortunately + the Mach headers don't provide architecture neutral symbols for simple + things like thread states. These definitions fix that. */ + +#if defined(MPS_OS_XC) +#if defined(MPS_ARCH_A6) + +#define THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT +#define THREAD_STATE_FLAVOR ARM_THREAD_STATE64 +#define THREAD_STATE_S arm_thread_state64_t + +#elif defined(MPS_ARCH_I6) + +#define THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT +#define THREAD_STATE_FLAVOR x86_THREAD_STATE64 +#define THREAD_STATE_S x86_thread_state64_t + +#elif defined(MPS_ARCH_I3) + +#define THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT +#define THREAD_STATE_FLAVOR x86_THREAD_STATE32 +#define THREAD_STATE_S x86_thread_state32_t + +#else + +#error "Unknown macOS architecture" + +#endif +#endif + + +/* POSIX thread extensions configuration -- see */ + +#if defined(MPS_OS_LI) || defined(MPS_OS_FR) + +/* PTHREADEXT_SIGSUSPEND -- signal used to suspend a thread + * + */ +#if defined(CONFIG_PTHREADEXT_SIGSUSPEND) +#define PTHREADEXT_SIGSUSPEND CONFIG_PTHREADEXT_SIGSUSPEND +#else +#define PTHREADEXT_SIGSUSPEND SIGXFSZ +#endif + +/* PTHREADEXT_SIGRESUME -- signal used to resume a thread + * + */ +#if defined(CONFIG_PTHREADEXT_SIGRESUME) +#define PTHREADEXT_SIGRESUME CONFIG_PTHREADEXT_SIGRESUME +#else +#define PTHREADEXT_SIGRESUME SIGXCPU +#endif + +#endif + + +/* Tracer Configuration -- see */ + +#define TraceLIMIT ((size_t)1) +/* I count 4 function calls to scan, 10 to copy. 
*/ +#define TraceCopyScanRATIO (1.5) + + +/* Events + * + * EventBufferSIZE is the number of words in the global event buffer. + */ + +#define EventBufferSIZE ((size_t)4096) +#define EventStringLengthMAX ((size_t)255) /* Not including NUL */ + + +/* Assert Buffer */ + +#define ASSERT_BUFFER_SIZE ((Size)512) + + +/* memory operator configuration + * + * We need efficient operators similar to memcpy, memset, and memcmp. + * In general, we cannot use the C library mem functions directly as + * that would not be freestanding. However, on some platforms we can do + * this, because they are inlined by the compiler and so do not actually + * create a dependence on an external library. + */ + +#if defined(MPS_PF_W3I3MV) +/* MSVC on Intel inlines mem* when optimizing */ +#define mps_lib_memset(p, c, l) memset(p, c, l) +#define mps_lib_memcpy(p, q, s) memcpy(p, q, s) +#define mps_lib_memcmp(p, q, s) memcmp(p, q, s) +/* get prototypes for ANSI mem* */ +#include +#endif + + +/* Product Configuration + * + * Deprecated, see design/config/#req.prod>. This now only contains the + * configuration used by the former "MPS" product, which is now the only + * product. + */ + +#define MPS_PROD_STRING "mps" +#define MPS_PROD_MPS + + +/* Default chain for GC pools + * + * TODO: The default should be to measure liveness and make sensible + * decisions. See job003794. + */ + +#define ChainDEFAULT \ + { \ + { 8 * 1024, 0.85 }, /* nursery */ \ + { 36 * 1024, 0.45 } /* second gen, after which dynamic */ \ + } + + +/* Write barrier deferral + * + * . + * + * TODO: These settings were determined by trial and error, but should + * be based on measurement of the protection overhead on each + * platform. We know it's extremely different between macOS and + * Windows, for example. . + * + * TODO: Consider basing the count on the amount of time that has + * passed in the mutator rather than the number of scans. 
+ */ + +#define WB_DEFER_BITS 2 /* bitfield width for deferral count */ +#define WB_DEFER_INIT 3 /* boring scans after new segment */ +#define WB_DEFER_DELAY 3 /* boring scans after interesting scan */ +#define WB_DEFER_HIT 1 /* boring scans after barrier hit */ + + +/* Apple Hardened Runtime + * + * The MAYBE_HARDENED_RUNTIME macro is true if Apple's "Hardened + * Runtime" feature may be enabled, and so calls to mmap() and + * mprotect() with PROT_WRITE | PROT_EXEC may fail with EACCES. + * See for details. + */ +#if defined(MPS_OS_XC) && defined(MPS_ARCH_A6) +#define MAYBE_HARDENED_RUNTIME 1 +#else +#define MAYBE_HARDENED_RUNTIME 0 +#endif + +#endif /* config_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/dbgpool.c b/mps/code/dbgpool.c new file mode 100644 index 00000000000..ac5f010299e --- /dev/null +++ b/mps/code/dbgpool.c @@ -0,0 +1,813 @@ +/* dbgpool.c: POOL DEBUG MIXIN + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .source: + */ + +#include "dbgpool.h" +#include "poolmfs.h" +#include "splay.h" +#include "mpm.h" +#include + +SRCID(dbgpool, "$Id$"); + + +/* tagStruct -- tags for storing info about allocated objects */ + +typedef struct tagStruct { + /* We don't want to pay the expense of a sig in every tag */ + Addr addr; + Size size; + TreeStruct treeStruct; + char userdata[1 /* actually variable length */]; +} tagStruct; + +#define TagTree(tag) (&(tag)->treeStruct) +#define TagOfTree(tree) TREE_ELT(tag, treeStruct, tree) + +typedef tagStruct *Tag; + + +/* tag init methods: copying the user-supplied data into the tag */ + +static void TagTrivInit(void* tag, va_list args) +{ + UNUSED(tag); UNUSED(args); +} + + +/* TagComp -- splay comparison function for address ordering of tags */ + +static Compare TagCompare(Tree node, TreeKey key) +{ + Addr addr1, addr2; + + addr1 = *(Addr *)key; + addr2 = TagOfTree(node)->addr; + if (addr1 < addr2) + return CompareLESS; + else if (addr1 > addr2) { + /* Check key is not inside the object of this tag */ + AVER_CRITICAL(AddrAdd(addr2, TagOfTree(node)->size) <= 
addr1); + return CompareGREATER; + } else + return CompareEQUAL; +} + +static TreeKey TagKey(Tree node) +{ + return &TagOfTree(node)->addr; +} + + +/* PoolDebugMixinCheck -- check a PoolDebugMixin */ + +Bool PoolDebugMixinCheck(PoolDebugMixin debug) +{ + /* Nothing to check about fenceTemplate */ + /* Nothing to check about fenceSize */ + /* Nothing to check about freeTemplate */ + /* Nothing to check about freeSize */ + if (debug->tagInit != NULL) { + CHECKL(FUNCHECK(debug->tagInit)); + /* Nothing to check about tagSize */ + CHECKD(Pool, debug->tagPool); + CHECKL(COMPATTYPE(Addr, void*)); /* tagPool relies on this */ + /* Nothing to check about missingTags */ + CHECKD(SplayTree, &debug->index); + } + UNUSED(debug); /* see */ + return TRUE; +} + + +/* DebugPoolDebugMixin -- gets the debug mixin, if any */ + +#define DebugPoolDebugMixin(pool) (Method(Pool, pool, debugMixin)(pool)) + + +/* PoolNoDebugMixin -- debug mixin methods for pools with no mixin */ + +PoolDebugMixin PoolNoDebugMixin(Pool pool) +{ + AVERT(Pool, pool); + return NULL; +} + + +/* PoolDebugOptionsCheck -- check a PoolDebugOptions */ + +Bool PoolDebugOptionsCheck(PoolDebugOptions opt) +{ + CHECKL(opt != NULL); + if (opt->fenceSize != 0) { + CHECKL(opt->fenceTemplate != NULL); + /* Nothing to check about fenceSize */ + } + if (opt->freeSize != 0) { + CHECKL(opt->freeTemplate != NULL); + /* Nothing to check about freeSize */ + } + return TRUE; +} + + +/* DebugPoolInit -- init method for a debug pool + * + * Someday, this could be split into fence and tag init methods. 
+ */ + +ARG_DEFINE_KEY(POOL_DEBUG_OPTIONS, PoolDebugOptions); + +static PoolDebugOptionsStruct debugPoolOptionsDefault = { + "POST", 4, "DEAD", 4, +}; + +static Res DebugPoolInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + Res res; + PoolDebugOptions options = &debugPoolOptionsDefault; + PoolDebugMixin debug; + TagInitFunction tagInit; + Size tagSize; + ArgStruct arg; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(PoolClass, klass); + AVERT(ArgList, args); + + if (ArgPick(&arg, args, MPS_KEY_POOL_DEBUG_OPTIONS)) + options = (PoolDebugOptions)arg.val.pool_debug_options; + + AVERT(PoolDebugOptions, options); + + /* @@@@ Tag parameters should be taken from options, but tags have */ + /* not been published yet. */ + tagInit = NULL; tagSize = 0; + + res = SuperclassPoly(Pool, klass)->init(pool, arena, klass, args); + if (res != ResOK) + return res; + + SetClassOfPoly(pool, klass); + debug = DebugPoolDebugMixin(pool); + AVER(debug != NULL); + + /* fencepost init */ + /* @@@@ This parses a user argument, options, so it should really */ + /* go through the MPS interface. The template needs to be copied */ + /* into Addr memory, to avoid breaking . */ + debug->fenceSize = options->fenceSize; + if (debug->fenceSize != 0) { + /* Fenceposting turns on tagging */ + if (tagInit == NULL) { + tagSize = 0; + tagInit = TagTrivInit; + } + debug->fenceTemplate = options->fenceTemplate; + } + + /* free-checking init */ + /* @@@@ This parses a user argument, options, so it should really */ + /* go through the MPS interface. The template needs to be copied */ + /* into Addr memory, to avoid breaking . */ + debug->freeSize = options->freeSize; + if (debug->freeSize != 0) { + debug->freeTemplate = options->freeTemplate; + } + + /* tag init */ + debug->tagInit = tagInit; + if (debug->tagInit != NULL) { + debug->tagSize = tagSize + sizeof(tagStruct) - 1; + /* This pool has to be like the arena control pool: the blocks */ + /* allocated must be accessible using void*. 
*/ + MPS_ARGS_BEGIN(pcArgs) { + /* By setting EXTEND_BY to debug->tagSize we get the smallest + possible extensions compatible with the tags, and so the + least amount of wasted space. */ + MPS_ARGS_ADD(pcArgs, MPS_KEY_EXTEND_BY, debug->tagSize); + MPS_ARGS_ADD(pcArgs, MPS_KEY_MFS_UNIT_SIZE, debug->tagSize); + res = PoolCreate(&debug->tagPool, PoolArena(pool), PoolClassMFS(), pcArgs); + } MPS_ARGS_END(pcArgs); + if (res != ResOK) + goto tagFail; + debug->missingTags = 0; + SplayTreeInit(&debug->index, TagCompare, TagKey, SplayTrivUpdate); + } + + debug->sig = PoolDebugMixinSig; + AVERT(PoolDebugMixin, debug); + return ResOK; + +tagFail: + SuperclassPoly(Inst, klass)->finish(MustBeA(Inst, pool)); + AVER(res != ResOK); + return res; +} + + +/* DebugPoolFinish -- finish method for a debug pool */ + +static void DebugPoolFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + PoolDebugMixin debug; + PoolClass klass; + + AVERT(Pool, pool); + + debug = DebugPoolDebugMixin(pool); + AVER(debug != NULL); + AVERT(PoolDebugMixin, debug); + if (debug->tagInit != NULL) { + SplayTreeFinish(&debug->index); + PoolDestroy(debug->tagPool); + } + klass = ClassOfPoly(Pool, pool); + SuperclassPoly(Inst, klass)->finish(inst); +} + + +/* patternIterate -- call visitor for occurrences of pattern between + * base and limit + * + * pattern is an arbitrary pattern that's size bytes long. + * + * Imagine that the entirety of memory were covered by contiguous + * copies of pattern starting at address 0. Then call visitor for each + * copy (or part) of pattern that lies between base and limit. In each + * call, target is the address of the copy or part (where base <= + * target < limit); source is the corresponding byte of the pattern + * (where pattern <= source < pattern + size); and size is the length + * of the copy or part. 
+ */ + +typedef Bool (*patternVisitor)(Addr target, ReadonlyAddr source, Size size); + +static Bool patternIterate(ReadonlyAddr pattern, Size size, + Addr base, Addr limit, patternVisitor visitor) +{ + Addr p; + + AVER(pattern != NULL); + AVER(0 < size); + AVER(base != NULL); + AVER(base <= limit); + + p = base; + while (p < limit) { + Addr end = AddrAdd(p, size); + Addr rounded = AddrRoundUp(p, size); + Size offset = (Word)p % size; + if (end < p || rounded < p) { + /* Address range overflow */ + break; + } else if (p == rounded && end <= limit) { + /* Room for a whole copy */ + if (!(*visitor)(p, pattern, size)) + return FALSE; + p = end; + } else if (p < rounded && rounded <= end && rounded <= limit) { + /* Copy up to rounded */ + if (!(*visitor)(p, ReadonlyAddrAdd(pattern, offset), + AddrOffset(p, rounded))) + return FALSE; + p = rounded; + } else { + /* Copy up to limit */ + AVER(limit <= end); + AVER(p == rounded || limit <= rounded); + if (!(*visitor)(p, ReadonlyAddrAdd(pattern, offset), + AddrOffset(p, limit))) + return FALSE; + p = limit; + } + } + + return TRUE; +} + + +/* patternCopy -- copy pattern to fill a range + * + * Fill the range of addresses from base (inclusive) to limit + * (exclusive) with copies of pattern (which is size bytes long). + */ + +static Bool patternCopyVisitor(Addr target, ReadonlyAddr source, Size size) +{ + (void)AddrCopy(target, source, size); + return TRUE; +} + +static void patternCopy(ReadonlyAddr pattern, Size size, Addr base, Addr limit) +{ + (void)patternIterate(pattern, size, base, limit, patternCopyVisitor); +} + + +/* patternCheck -- check pattern against a range + * + * Compare the range of addresses from base (inclusive) to limit + * (exclusive) with copies of pattern (which is size bytes long). The + * copies of pattern must be arranged so that fresh copies start at + * aligned addresses wherever possible. 
+ */ + +static Bool patternCheckVisitor(Addr target, ReadonlyAddr source, Size size) +{ + return AddrComp(target, source, size) == 0; +} + +static Bool patternCheck(ReadonlyAddr pattern, Size size, Addr base, Addr limit) +{ + return patternIterate(pattern, size, base, limit, patternCheckVisitor); +} + + +/* debugPoolSegIterate -- iterate over a range of segments in an arena + * + * Expects to be called on a range corresponding to objects within a + * single pool. + * + * NOTE: This relies on pools consistently using segments + * contiguously. + */ + +static void debugPoolSegIterate(Arena arena, Addr base, Addr limit, + void (*visitor)(Arena, Seg)) +{ + Seg seg; + + if (SegOfAddr(&seg, arena, base)) { + do { + base = SegLimit(seg); + (*visitor)(arena, seg); + } while (base < limit && SegOfAddr(&seg, arena, base)); + AVER(base >= limit); /* shouldn't run out of segments */ + } +} + +static void debugPoolShieldExpose(Arena arena, Seg seg) +{ + ShieldExpose(arena, seg); +} + +static void debugPoolShieldCover(Arena arena, Seg seg) +{ + ShieldCover(arena, seg); +} + + +/* freeSplat -- splat free block with splat pattern */ + +static void freeSplat(PoolDebugMixin debug, Pool pool, Addr base, Addr limit) +{ + Arena arena; + + AVER(base < limit); + + /* If the block is in one or more segments, make sure the segments + are exposed so that we can overwrite the block with the pattern. */ + arena = PoolArena(pool); + debugPoolSegIterate(arena, base, limit, debugPoolShieldExpose); + patternCopy(debug->freeTemplate, debug->freeSize, base, limit); + debugPoolSegIterate(arena, base, limit, debugPoolShieldCover); +} + + +/* freeCheck -- check free block for splat pattern */ + +static Bool freeCheck(PoolDebugMixin debug, Pool pool, Addr base, Addr limit) +{ + Bool res; + Arena arena; + + AVER(base < limit); + + /* If the block is in one or more segments, make sure the segments + are exposed so we can read the pattern. 
*/ + arena = PoolArena(pool); + debugPoolSegIterate(arena, base, limit, debugPoolShieldExpose); + res = patternCheck(debug->freeTemplate, debug->freeSize, base, limit); + debugPoolSegIterate(arena, base, limit, debugPoolShieldCover); + return res; +} + + +/* freeCheckAlloc -- allocation wrapper for free-checking */ + +static Res freeCheckAlloc(Addr *aReturn, PoolDebugMixin debug, Pool pool, + Size size) +{ + Res res; + Addr new; + PoolClass klass; + + AVER(aReturn != NULL); + + klass = ClassOfPoly(Pool, pool); + res = SuperclassPoly(Pool, klass)->alloc(&new, pool, size); + if (res != ResOK) + return res; + if (debug->freeSize != 0) + ASSERT(freeCheck(debug, pool, new, AddrAdd(new, size)), + "free space corrupted on alloc"); + + *aReturn = new; + return res; +} + + +/* freeCheckFree -- freeing wrapper for free-checking */ + +static void freeCheckFree(PoolDebugMixin debug, + Pool pool, Addr old, Size size) +{ + PoolClass klass; + if (debug->freeSize != 0) + freeSplat(debug, pool, old, AddrAdd(old, size)); + klass = ClassOfPoly(Pool, pool); + SuperclassPoly(Pool, klass)->free(pool, old, size); +} + + +/* fenceAlloc -- allocation wrapper for fenceposts + * + * Allocates an object, adding fenceposts on both sides. Layout: + * + * |----------|-------------------------------------|------|----------| + * start fp client object slop end fp + * + * slop is the extra allocation from rounding up the client request to + * the pool's alignment. The fenceposting code adds this slop so that + * there's a better chance of the end fencepost being flush with the + * next object (though it can't be guaranteed, since the underlying + * pool could have allocated an even larger block). The alignment slop + * is filled from the fencepost template as well. + * + * Keep in sync with fenceCheck. 
+ */ + +static Res fenceAlloc(Addr *aReturn, PoolDebugMixin debug, Pool pool, + Size size) +{ + Res res; + Addr obj, startFence, clientNew, clientLimit, limit; + Size alignedFenceSize, alignedSize; + + AVER(aReturn != NULL); + AVERT(PoolDebugMixin, debug); + AVERT(Pool, pool); + + alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool)); + alignedSize = SizeAlignUp(size, PoolAlignment(pool)); + res = freeCheckAlloc(&obj, debug, pool, + alignedSize + 2 * alignedFenceSize); + if (res != ResOK) + return res; + + startFence = obj; + clientNew = AddrAdd(startFence, alignedFenceSize); + clientLimit = AddrAdd(clientNew, size); + limit = AddrAdd(clientNew, alignedSize + alignedFenceSize); + + /* @@@@ shields? */ + patternCopy(debug->fenceTemplate, debug->fenceSize, startFence, clientNew); + patternCopy(debug->fenceTemplate, debug->fenceSize, clientLimit, limit); + + *aReturn = clientNew; + return ResOK; +} + + +/* fenceCheck -- check fences of an object + * + * Keep in sync with fenceAlloc. + */ + +static Bool fenceCheck(PoolDebugMixin debug, Pool pool, Addr obj, Size size) +{ + Addr startFence, clientNew, clientLimit, limit; + Size alignedFenceSize, alignedSize; + + AVERT_CRITICAL(PoolDebugMixin, debug); + AVERT_CRITICAL(Pool, pool); + /* Can't check obj */ + + alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool)); + alignedSize = SizeAlignUp(size, PoolAlignment(pool)); + + startFence = AddrSub(obj, alignedFenceSize); + clientNew = obj; + clientLimit = AddrAdd(clientNew, size); + limit = AddrAdd(clientNew, alignedSize + alignedFenceSize); + + /* @@@@ shields? 
*/ + return patternCheck(debug->fenceTemplate, debug->fenceSize, + startFence, clientNew) + && patternCheck(debug->fenceTemplate, debug->fenceSize, + clientLimit, limit); +} + + +/* fenceFree -- freeing wrapper for fenceposts */ + +static void fenceFree(PoolDebugMixin debug, + Pool pool, Addr old, Size size) +{ + Size alignedFenceSize, alignedSize; + + ASSERT(fenceCheck(debug, pool, old, size), "fencepost check on free"); /* */ + + alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool)); + alignedSize = SizeAlignUp(size, PoolAlignment(pool)); + freeCheckFree(debug, pool, AddrSub(old, alignedFenceSize), + alignedSize + 2 * alignedFenceSize); +} + + +/* tagAlloc -- allocation wrapper for tagged pools */ + +static Res tagAlloc(PoolDebugMixin debug, + Pool pool, Addr new, Size size) +{ + Tag tag; + Res res; + Bool b; + Addr addr; + + UNUSED(pool); + res = PoolAlloc(&addr, debug->tagPool, debug->tagSize); + if (res != ResOK) + return res; + tag = (Tag)addr; + tag->addr = new; tag->size = size; + TreeInit(TagTree(tag)); + /* In the future, we might call debug->tagInit here. */ + b = SplayTreeInsert(&debug->index, TagTree(tag)); + AVER(b); + return ResOK; +} + + +/* tagFree -- deallocation wrapper for tagged pools */ + +static void tagFree(PoolDebugMixin debug, Pool pool, Addr old, Size size) +{ + Tree node; + Tag tag; + Bool b; + + AVERT(PoolDebugMixin, debug); + AVERT(Pool, pool); + AVER(size > 0); + + if (!SplayTreeFind(&node, &debug->index, &old)) { + AVER(debug->missingTags > 0); + debug->missingTags--; + return; + } + tag = TagOfTree(node); + AVER(tag->size == size); + AVER(tag->addr == old); + b = SplayTreeDelete(&debug->index, node); + AVER(b); /* expect tag to be in the tree */ + TreeFinish(node); + PoolFree(debug->tagPool, (Addr)tag, debug->tagSize); +} + + +/* DebugPoolAlloc -- alloc method for a debug pool + * + * Eventually, tag init args will need to be handled somewhere here. 
+ */ + +static Res DebugPoolAlloc(Addr *aReturn, Pool pool, Size size) +{ + Res res; + Addr new = NULL; /* suppress "may be used uninitialized" warning */ + PoolDebugMixin debug; + + AVER(aReturn != NULL); + AVERT(Pool, pool); + AVER(size > 0); + + debug = DebugPoolDebugMixin(pool); + AVER(debug != NULL); + AVERT(PoolDebugMixin, debug); + if (debug->fenceSize != 0) + res = fenceAlloc(&new, debug, pool, size); + else + res = freeCheckAlloc(&new, debug, pool, size); + if (res != ResOK) + return res; + /* Allocate object first, so it fits even when the tag doesn't. */ + if (debug->tagInit != NULL) { + res = tagAlloc(debug, pool, new, size); + if (res != ResOK) + goto tagFail; + } + + *aReturn = new; + return res; + +tagFail: + fenceFree(debug, pool, new, size); + return res; +} + + +/* DebugPoolFree -- free method for a debug pool */ + +static void DebugPoolFree(Pool pool, Addr old, Size size) +{ + PoolDebugMixin debug; + + AVERT(Pool, pool); + /* Can't check old */ + AVER(size > 0); + + debug = DebugPoolDebugMixin(pool); + AVER(debug != NULL); + AVERT(PoolDebugMixin, debug); + + if (debug->fenceSize != 0) + fenceFree(debug, pool, old, size); + else + freeCheckFree(debug, pool, old, size); + /* Free the object first, to get fences checked before tag. 
*/ + if (debug->tagInit != NULL) + tagFree(debug, pool, old, size); +} + + +/* TagWalk -- walk all objects in the pool using tags */ + +typedef void (*ObjectsVisitor)(Addr addr, Size size, Format fmt, + Pool pool, void *tagData, void *p); + +static void TagWalk(Pool pool, ObjectsVisitor visitor, void *p) +{ + Tree node; + PoolDebugMixin debug; + + AVERT(Pool, pool); + AVER(FUNCHECK(visitor)); + /* Can't check p */ + + debug = DebugPoolDebugMixin(pool); + AVER(debug != NULL); + AVERT(PoolDebugMixin, debug); + + node = SplayTreeFirst(&debug->index); + while (node != TreeEMPTY) { + Tag tag = TagOfTree(node); + + (*visitor)(tag->addr, tag->size, NULL, pool, &tag->userdata, p); + node = SplayTreeNext(&debug->index, &tag->addr); + } +} + + +/* fenceCheckingStep -- step function for DebugPoolCheckFences */ + +static void fenceCheckingStep(Addr addr, Size size, Format fmt, + Pool pool, void *tagData, void *p) +{ + /* no need to check arguments checked in the caller */ + UNUSED(fmt); UNUSED(tagData); + ASSERT(fenceCheck((PoolDebugMixin)p, pool, addr, size), + "fencepost check requested by client"); +} + + +/* DebugPoolCheckFences -- check all the fenceposts in the pool */ + +void DebugPoolCheckFences(Pool pool) +{ + PoolDebugMixin debug; + + AVERT(Pool, pool); + debug = DebugPoolDebugMixin(pool); + if (debug == NULL) + return; + AVERT(PoolDebugMixin, debug); + + if (debug->fenceSize != 0) + TagWalk(pool, fenceCheckingStep, (void *)debug); +} + + +/* DebugPoolFreeSplat -- if in a free-checking debug pool, splat free block */ + +void DebugPoolFreeSplat(Pool pool, Addr base, Addr limit) +{ + PoolDebugMixin debug; + + AVERT(Pool, pool); + AVER(PoolHasAddr(pool, base)); + AVER(PoolHasAddr(pool, AddrSub(limit, 1))); + + debug = DebugPoolDebugMixin(pool); + if (debug != NULL) { + AVERT(PoolDebugMixin, debug); + if (debug->freeSize != 0) + freeSplat(debug, pool, base, limit); + } +} + + +/* DebugPoolFreeCheck -- if in a free-checking debug pool, check free block */ + +void 
DebugPoolFreeCheck(Pool pool, Addr base, Addr limit) +{ + PoolDebugMixin debug; + + AVERT(Pool, pool); + AVER(PoolHasAddr(pool, base)); + AVER(PoolHasAddr(pool, AddrSub(limit, 1))); + + debug = DebugPoolDebugMixin(pool); + if (debug != NULL) { + AVERT(PoolDebugMixin, debug); + if (debug->freeSize != 0) + ASSERT(freeCheck(debug, pool, base, limit), + "free space corrupted on release"); /* */ + } +} + + +/* freeCheckingStep -- step function for DebugPoolCheckFreeSpace */ + +static void freeCheckingStep(Addr base, Addr limit, Pool pool, void *p) +{ + /* no need to check arguments checked in the caller */ + ASSERT(freeCheck((PoolDebugMixin)p, pool, base, limit), + "free space corrupted on client check"); +} + + +/* DebugPoolCheckFreeSpace -- check free space in the pool for overwrites */ + +void DebugPoolCheckFreeSpace(Pool pool) +{ + PoolDebugMixin debug; + + AVERT(Pool, pool); + debug = DebugPoolDebugMixin(pool); + if (debug == NULL) + return; + AVERT(PoolDebugMixin, debug); + + if (debug->freeSize != 0) + PoolFreeWalk(pool, freeCheckingStep, (void *)debug); +} + + +/* PoolClassMixInDebug -- mix in the debug support for class init */ + +void PoolClassMixInDebug(PoolClass klass) +{ + /* Can't check klass because it's not initialized yet */ + klass->instClassStruct.finish = DebugPoolFinish; + klass->init = DebugPoolInit; + klass->alloc = DebugPoolAlloc; + klass->free = DebugPoolFree; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/dbgpool.h b/mps/code/dbgpool.h new file mode 100644 index 00000000000..59a901d4932 --- /dev/null +++ b/mps/code/dbgpool.h @@ -0,0 +1,101 @@ +/* dbgpool.h: POOL DEBUG MIXIN + * + * . + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + */ + +#ifndef dbgpool_h +#define dbgpool_h + +#include "splay.h" +#include "mpmtypes.h" +#include + + +/* tag init function: copies the user-supplied data into the tag */ + +typedef void (*TagInitFunction)(void *tag, va_list args); + + +/* PoolDebugOptions -- option structure for debug pool init + * + * This must be kept in sync with . 
+ */ + +typedef struct PoolDebugOptionsStruct { + const void *fenceTemplate; + Size fenceSize; + const void *freeTemplate; + Size freeSize; + /* TagInitFunction tagInit; */ + /* Size tagSize; */ +} PoolDebugOptionsStruct; + +typedef PoolDebugOptionsStruct *PoolDebugOptions; + + +/* PoolDebugMixinStruct -- internal structure for debug mixins */ + +#define PoolDebugMixinSig ((Sig)0x519B0DB9) /* SIGnature POol DeBuG */ + +typedef struct PoolDebugMixinStruct { + Sig sig; /* design.mps.sig.field */ + const struct AddrStruct *fenceTemplate; + Size fenceSize; + const struct AddrStruct *freeTemplate; + Size freeSize; + TagInitFunction tagInit; + Size tagSize; + Pool tagPool; + Count missingTags; + SplayTreeStruct index; +} PoolDebugMixinStruct; + + +extern Bool PoolDebugOptionsCheck(PoolDebugOptions opt); + +extern Bool PoolDebugMixinCheck(PoolDebugMixin dbg); + +extern void PoolClassMixInDebug(PoolClass klass); + +extern void DebugPoolCheckFences(Pool pool); +extern void DebugPoolCheckFreeSpace(Pool pool); + +extern void DebugPoolFreeSplat(Pool pool, Addr base, Addr limit); +extern void DebugPoolFreeCheck(Pool pool, Addr base, Addr limit); + + +#endif /* dbgpool_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/dbgpooli.c b/mps/code/dbgpooli.c new file mode 100644 index 00000000000..fa8d4ea0ffc --- /dev/null +++ b/mps/code/dbgpooli.c @@ -0,0 +1,84 @@ +/* dbgpooli.c: POOL DEBUG MIXIN C INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .source: + */ + +#include "dbgpool.h" +#include "mps.h" +#include "mpm.h" + +SRCID(dbgpooli, "$Id$"); + + +/* mps_pool_check_fenceposts -- check all the fenceposts in the pool */ + +void mps_pool_check_fenceposts(mps_pool_t mps_pool) +{ + Pool pool = (Pool)mps_pool; + Arena arena; + + /* TESTT not AVERT, see . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/djbench.c b/mps/code/djbench.c new file mode 100644 index 00000000000..778dd466cf9 --- /dev/null +++ b/mps/code/djbench.c @@ -0,0 +1,423 @@ +/* djbench.c -- "DJ" Benchmark on ANSI C library + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * This is an allocation stress benchmark test for manual variable pools + * and also for stdlib malloc/free (for comparison). + * + * It repeatedly runs over an array of blocks and allocates or frees them + * with some probability, then frees all the remaining blocks at the end. + * This test can be iterated. 
+ */ + +#include "mps.c" + +#include "testlib.h" +#include "testthr.h" + +#ifdef MPS_OS_W3 +#include "getopt.h" +#else +#include +#endif + +#include /* fprintf, stderr */ +#include /* alloca, exit, EXIT_SUCCESS, EXIT_FAILURE */ +#include /* CLOCKS_PER_SEC, clock */ + +#define DJMUST(expr) \ + do { \ + mps_res_t res = (expr); \ + if (res != MPS_RES_OK) { \ + fprintf(stderr, #expr " returned %d\n", res); \ + exit(EXIT_FAILURE); \ + } \ + } while(0) + +static mps_arena_t arena; +static mps_pool_t pool; + + +/* The benchmark behaviour is defined as a macro in order to give realistic + opportunities for compiler optimisation and the intended inlining of the + MPS functions. */ + +static rnd_state_t seed = 0; /* random number seed */ +static unsigned nthreads = 1; /* threads */ +static unsigned niter = 50; /* iterations */ +static unsigned npass = 100; /* passes over blocks */ +static unsigned nblocks = 64; /* number of blocks */ +static unsigned sshift = 18; /* log2 max block size in words */ +static double pact = 0.2; /* probability per pass of acting */ +static unsigned rinter = 75; /* pass interval for recursion */ +static unsigned rmax = 10; /* maximum recursion depth */ +static mps_bool_t zoned = TRUE; /* arena allocates using zones */ +static size_t arena_size = 256ul * 1024 * 1024; /* arena size */ +static size_t arena_grain_size = 1; /* arena grain size */ +static double spare = ARENA_SPARE_DEFAULT; /* spare commit fraction */ + +#define DJRUN(fname, alloc, free) \ + static unsigned fname##_inner(mps_ap_t ap, unsigned depth, unsigned r) { \ + struct {void *p; size_t s;} *blocks = alloca(sizeof(blocks[0]) * nblocks); \ + unsigned j, k; \ + \ + for (k = 0; k < nblocks; ++k) { \ + blocks[k].p = NULL; \ + blocks[k].s = 0; \ + } \ + \ + for (j = 0; j < npass; ++j) { \ + for (k = 0; k < nblocks; ++k) { \ + if (rnd() % 16384 < pact * 16384) { \ + if (blocks[k].p == NULL) { \ + size_t s = rnd() % ((sizeof(void *) << (rnd() % sshift)) - 1); \ + void *p = NULL; \ + if (s 
> 0) \ + alloc(p, s); \ + blocks[k].p = p; \ + blocks[k].s = s; \ + } else { \ + free(blocks[k].p, blocks[k].s); \ + blocks[k].p = NULL; \ + } \ + } \ + } \ + if (rinter > 0 && depth > 0 && ++r % rinter == 0) { \ + /* putchar('>'); fflush(stdout); */ \ + r = fname##_inner(ap, depth - 1, r); \ + /* putchar('<'); fflush(stdout); */ \ + } \ + } \ + \ + for (k = 0; k < nblocks; ++k) { \ + if (blocks[k].p) { \ + free(blocks[k].p, blocks[k].s); \ + blocks[k].p = NULL; \ + } \ + } \ + return r; \ + } \ + \ + static void *fname(void *p) { \ + unsigned i; \ + mps_ap_t ap = NULL; \ + if (pool != NULL) \ + DJMUST(mps_ap_create_k(&ap, pool, mps_args_none)); \ + for (i = 0; i < niter; ++i) \ + (void)fname##_inner(ap, rmax, 0); \ + if (ap != NULL) \ + mps_ap_destroy(ap); \ + return p; \ + } + + +/* malloc/free benchmark */ + +#define MALLOC_ALLOC(p, s) do { p = malloc(s); } while(0) +#define MALLOC_FREE(p, s) do { free(p); } while(0) + +DJRUN(dj_malloc, MALLOC_ALLOC, MALLOC_FREE) + + +/* mps_alloc/mps_free benchmark */ + +#define MPS_ALLOC(p, s) do { mps_alloc(&p, pool, s); } while(0) +#define MPS_FREE(p, s) do { mps_free(pool, p, s); } while(0) + +DJRUN(dj_alloc, MPS_ALLOC, MPS_FREE) + + +/* reserve/free benchmark */ + +#define ALIGN_UP(s, a) (((s) + ((a) - 1)) & ~((a) - 1)) +#define RESERVE_ALLOC(p, s) \ + do { \ + size_t _s = ALIGN_UP(s, (size_t)MPS_PF_ALIGN); \ + (void)mps_reserve(&p, ap, _s); \ + (void)mps_commit(ap, p, _s); \ + } while(0) +#define RESERVE_FREE(p, s) do { mps_free(pool, p, s); } while(0) + +DJRUN(dj_reserve, RESERVE_ALLOC, RESERVE_FREE) + +typedef void *(*dj_t)(void *); + +static void weave(dj_t dj) +{ + testthr_t *threads = alloca(sizeof(threads[0]) * nthreads); + unsigned t; + + for (t = 0; t < nthreads; ++t) + testthr_create(&threads[t], dj, NULL); + + for (t = 0; t < nthreads; ++t) + testthr_join(&threads[t], NULL); +} + + +static void watch(dj_t dj, const char *name) +{ + clock_t start, finish; + + start = clock(); + if (nthreads == 1) + dj(NULL); + 
else + weave(dj); + finish = clock(); + + printf("%s: %g\n", name, (double)(finish - start) / CLOCKS_PER_SEC); +} + + +/* Wrap a call to dj benchmark that doesn't require MPS setup */ + +static void wrap(dj_t dj, mps_pool_class_t dummy, const char *name) +{ + (void)dummy; + pool = NULL; + watch(dj, name); +} + + +/* Wrap a call to a dj benchmark that requires MPS setup */ + +static void arena_wrap(dj_t dj, mps_pool_class_t pool_class, const char *name) +{ + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arena_size); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, zoned); + MPS_ARGS_ADD(args, MPS_KEY_SPARE, spare); + DJMUST(mps_arena_create_k(&arena, mps_arena_class_vm(), args)); + } MPS_ARGS_END(args); + DJMUST(mps_pool_create_k(&pool, arena, pool_class, mps_args_none)); + watch(dj, name); + mps_pool_destroy(pool); + mps_arena_destroy(arena); +} + + +/* Command-line options definitions. See getopt_long(3). */ + +static struct option longopts[] = { + {"help", no_argument, NULL, 'h'}, + {"nthreads", required_argument, NULL, 't'}, + {"niter", required_argument, NULL, 'i'}, + {"npass", required_argument, NULL, 'p'}, + {"nblocks", required_argument, NULL, 'b'}, + {"sshift", required_argument, NULL, 's'}, + {"pact", required_argument, NULL, 'c'}, + {"rinter", required_argument, NULL, 'r'}, + {"rmax", required_argument, NULL, 'd'}, + {"seed", required_argument, NULL, 'x'}, + {"arena-size", required_argument, NULL, 'm'}, + {"arena-grain-size", required_argument, NULL, 'a'}, + {"arena-unzoned", no_argument, NULL, 'z'}, + {"spare", required_argument, NULL, 'S'}, + {NULL, 0, NULL, 0 } +}; + + +/* Test definitions. 
*/ + +static mps_pool_class_t dummy_class(void) +{ + return NULL; +} + +static struct { + const char *name; + void (*wrap)(dj_t, mps_pool_class_t, const char *name); + dj_t dj; + mps_pool_class_t (*pool_class)(void); +} pools[] = { + {"mvt", arena_wrap, dj_reserve, mps_class_mvt}, + {"mvff", arena_wrap, dj_reserve, mps_class_mvff}, + {"mvffa", arena_wrap, dj_alloc, mps_class_mvff}, /* mvff with alloc */ + {"an", wrap, dj_malloc, dummy_class}, +}; + + +/* Command-line driver */ + +int main(int argc, char *argv[]) +{ + int ch; + unsigned i; + mps_bool_t seed_specified = FALSE; + + seed = rnd_seed(); + + while ((ch = getopt_long(argc, argv, "ht:i:p:b:s:c:r:d:m:a:x:zS:", + longopts, NULL)) != -1) + switch (ch) { + case 't': + nthreads = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'i': + niter = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'p': + npass = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'b': + nblocks = (unsigned)strtoul(optarg, NULL, 10); + break; + case 's': + sshift = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'c': + pact = strtod(optarg, NULL); + break; + case 'r': + rinter = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'd': + rmax = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'x': + seed = strtoul(optarg, NULL, 10); + seed_specified = TRUE; + break; + case 'z': + zoned = FALSE; + break; + case 'm': { + char *p; + arena_size = (unsigned)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': arena_size <<= 30; break; + case 'M': arena_size <<= 20; break; + case 'K': arena_size <<= 10; break; + case '\0': break; + default: + fprintf(stderr, "Bad arena size %s\n", optarg); + return EXIT_FAILURE; + } + } + break; + case 'a': { + char *p; + arena_grain_size = (unsigned)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': arena_grain_size <<= 30; break; + case 'M': arena_grain_size <<= 20; break; + case 'K': arena_grain_size <<= 10; break; + case '\0': break; + default: + fprintf(stderr, "Bad 
arena grain size %s\n", optarg); + return EXIT_FAILURE; + } + } + break; + case 'S': + spare = strtod(optarg, NULL); + break; + default: + /* This is printed in parts to keep within the 509 character + limit for string literals in portable standard C. */ + fprintf(stderr, + "Usage: %s [option...] [test...]\n" + "Options:\n" + " -m n, --arena-size=n[KMG]?\n" + " Initial size of arena (default %lu)\n" + " -g n, --arena-grain-size=n[KMG]?\n" + " Arena grain size (default %lu)\n" + " -t n, --nthreads=n\n" + " Launch n threads each running the test\n" + " -i n, --niter=n\n" + " Iterate each test n times (default %u)\n" + " -p n, --npass=n\n" + " Pass over the block array n times (default %u)\n" + " -b n, --nblocks=n\n" + " Length of the block array (default %u)\n" + " -s n, --sshift=n\n" + " Log2 max block size in words (default %u)\n", + argv[0], + (unsigned long)arena_size, + (unsigned long)arena_grain_size, + niter, + npass, + nblocks, + sshift); + fprintf(stderr, + " -c p, --pact=p\n" + " Probability of acting on a block (default %g)\n" + " -r n, --rinter=n\n" + " Recurse every n passes if n > 0 (default %u)\n" + " -d n, --rmax=n\n" + " Maximum recursion depth (default %u)\n" + " -x n, --seed=n\n" + " Random number seed (default from entropy)\n" + " -z, --arena-unzoned\n" + " Disabled zoned allocation in the arena\n" + " -S f, --spare\n" + " Maximum spare committed fraction (default %f)\n", + pact, + rinter, + rmax, + spare); + fprintf(stderr, + "Tests:\n" + " mvt pool class MVT\n" + " mvff pool class MVFF (buffer interface)\n" + " mvffa pool class MVFF (alloc interface)\n" + " an malloc\n"); + return EXIT_FAILURE; + } + argc -= optind; + argv += optind; + + if (!seed_specified) { + printf("seed: %lu\n", seed); + (void)fflush(stdout); + } + + while (argc > 0) { + for (i = 0; i < NELEMS(pools); ++i) + if (strcmp(argv[0], pools[i].name) == 0) + goto found; + fprintf(stderr, "unknown pool test \"%s\"\n", argv[0]); + return EXIT_FAILURE; + found: + 
(void)mps_lib_assert_fail_install(assert_die); + rnd_state_set(seed); + pools[i].wrap(pools[i].dj, pools[i].pool_class(), pools[i].name); + --argc; + ++argv; + } + + return EXIT_SUCCESS; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/event.c b/mps/code/event.c new file mode 100644 index 00000000000..3827b70ebd2 --- /dev/null +++ b/mps/code/event.c @@ -0,0 +1,568 @@ +/* event.c: EVENT LOGGING + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ * + * .sources: mps.design.event + * + * TRANSGRESSIONS (rule.impl.trans) + * + * .trans.ifdef: This file should logically be split into two, event.c + * (which contains NOOP definitions, for general use) and eventdl.c, which + * is specific to the logging variety and actually does logging (maybe). + * Unfortunately, the build system doesn't really cope, and so this file + * consists of two versions which are conditional on the EVENT symbol. + */ + +#include "mpm.h" +#include "event.h" +#include "mpsio.h" +#include "lock.h" + +SRCID(event, "$Id$"); + + +#ifdef EVENT /* .trans.ifdef */ + + +static Bool eventInited = FALSE; +static Bool eventIOInited = FALSE; +static mps_io_t eventIO; +static Serial EventInternSerial; + +/* Buffers in which events are recorded, from the top down. */ +char EventBuffer[EventKindLIMIT][EventBufferSIZE]; + +/* Pointers to last event logged into each buffer. */ +char *EventLast[EventKindLIMIT]; + +/* Pointers to the last event written out of each buffer. */ +static char *EventWritten[EventKindLIMIT]; + +EventControlSet EventKindControl; /* Bit set used to control output. */ + + +/* A single event structure output once per buffer flush. */ +static EventEventClockSyncStruct eventClockSyncStruct; + + +/* eventClockSync -- Populate and write the clock sync event. 
*/ + +static Res eventClockSync(void) +{ + Res res; + size_t size; + + size= size_tAlignUp(sizeof(eventClockSyncStruct), EVENT_ALIGN); + eventClockSyncStruct.code = EventEventClockSyncCode; + eventClockSyncStruct.size = (EventSize)size; + EVENT_CLOCK(eventClockSyncStruct.clock); + eventClockSyncStruct.f0 = (Word)mps_clock(); + res = (Res)mps_io_write(eventIO, (void *)&eventClockSyncStruct, size); + if (res != ResOK) + goto failWrite; + + res = ResOK; +failWrite: + return res; +} + + +/* EventFlush -- flush event buffer (perhaps to the event stream) */ + +void EventFlush(EventKind kind) +{ + AVER(eventInited); + AVER(NONNEGATIVE(kind)); + AVER(kind < EventKindLIMIT); + + AVER(EventBuffer[kind] <= EventLast[kind]); + AVER(EventLast[kind] <= EventWritten[kind]); + AVER(EventWritten[kind] <= EventBuffer[kind] + EventBufferSIZE); + + /* Send all pending events to the event stream. */ + EventSync(); + + /* Flush the in-memory buffer whether or not we send this buffer, so + that we can continue to record recent events. */ + EventLast[kind] = EventWritten[kind] = EventBuffer[kind] + EventBufferSIZE; +} + + +/* EventSync -- synchronize the event stream with the buffers */ + +void EventSync(void) +{ + EventKind kind; + Bool wrote = FALSE; + + for (kind = 0; kind < EventKindLIMIT; ++kind) { + + /* Is event logging enabled for this kind of event, or are or are we just + writing to the buffer for backtraces, cores, and other debugging? */ + if (BS_IS_MEMBER(EventKindControl, kind)) { + size_t size; + Res res; + + AVER(EventBuffer[kind] <= EventLast[kind]); + AVER(EventLast[kind] <= EventWritten[kind]); + AVER(EventWritten[kind] <= EventBuffer[kind] + EventBufferSIZE); + + size = (size_t)(EventWritten[kind] - EventLast[kind]); + if (size > 0) { + + /* Ensure the IO stream is open. We do this late so that no stream is + created if no events are enabled by telemetry control. 
*/ + if (!eventIOInited) { + res = (Res)mps_io_create(&eventIO); + if(res != ResOK) { + /* TODO: Consider taking some other action if open fails. */ + return; + } + eventIOInited = TRUE; + } + + /* Writing might be faster if the size is aligned to a multiple of the + C library or kernel's buffer size. We could pad out the buffer with + a marker for this purpose. */ + + res = (Res)mps_io_write(eventIO, (void *)EventLast[kind], size); + if (res == ResOK) { + /* TODO: Consider taking some other action if a write fails. */ + EventWritten[kind] = EventLast[kind]; + wrote = TRUE; + } + } + } + } + + /* If we wrote out events, send an EventClockSync event and flush + the telemetry stream. */ + if (wrote) { + (void)eventClockSync(); + (void)mps_io_flush(eventIO); + } +} + + +/* EventInit -- start using the event system, initialize if necessary */ + +void EventInit(void) +{ + /* Make local enums for all event params in order to check that the indexes + in the parameter definition macros are in order, and that parameter + idents are unique. */ + +#define EVENT_CHECK_ENUM_PARAM(name, index, sort, ident, doc) \ + Event##name##Param##ident, + +#define EVENT_CHECK_ENUM(X, name, code, used, kind) \ + enum Event##name##ParamEnum { \ + EVENT_##name##_PARAMS(EVENT_CHECK_ENUM_PARAM, name) \ + Event##name##ParamLIMIT \ + }; + + EVENT_LIST(EVENT_CHECK_ENUM, X) + + /* Check consistency of the event definitions. These are all compile-time + checks and should get optimised away. 
*/ + +#define EVENT_PARAM_CHECK_P(name, index) +#define EVENT_PARAM_CHECK_A(name, index) +#define EVENT_PARAM_CHECK_W(name, index) +#define EVENT_PARAM_CHECK_U(name, index) +#define EVENT_PARAM_CHECK_D(name, index) +#define EVENT_PARAM_CHECK_B(name, index) +#define EVENT_PARAM_CHECK_S(name, index) \ + AVER(index + 1 == Event##name##ParamLIMIT); /* strings must come last */ + +#define EVENT_PARAM_CHECK(name, index, sort, ident, doc) \ + AVER(index == Event##name##Param##ident); \ + AVER(sizeof(EventF##sort) >= 0); /* check existence of type */ \ + EVENT_PARAM_CHECK_##sort(name, index) + +#define EVENT_CHECK(X, name, code, used, kind) \ + AVER(size_tAlignUp(sizeof(Event##name##Struct), EVENT_ALIGN) \ + <= EventSizeMAX); \ + AVER(Event##name##Code == code); \ + AVER(0 <= code); \ + AVER(code <= EventCodeMAX); \ + AVER(sizeof(#name) - 1 <= EventNameMAX); \ + AVER((Bool)Event##name##Used == used); \ + AVERT(Bool, used); \ + AVER(0 <= Event##name##Kind); \ + AVER((EventKind)Event##name##Kind < EventKindLIMIT); \ + EVENT_##name##_PARAMS(EVENT_PARAM_CHECK, name) + + EVENT_LIST(EVENT_CHECK, X); + + /* Ensure that no event can be larger than the maximum event size. */ + AVER(EventBufferSIZE <= EventSizeMAX); + + /* Only if this is the first call. */ + if (!eventInited) { /* See .trans.log */ + LockClaimGlobalRecursive(); + if (!eventInited) { + EventKind kind; + for (kind = 0; kind < EventKindLIMIT; ++kind) { + AVER(EventLast[kind] == NULL); + AVER(EventWritten[kind] == NULL); + EventLast[kind] = EventWritten[kind] = EventBuffer[kind] + EventBufferSIZE; + } + eventInited = TRUE; + EventKindControl = (Word)mps_lib_telemetry_control(); + EventInternSerial = (Serial)1; /* 0 is reserved */ + (void)EventInternString(MPSVersion()); /* emit version */ + EVENT7(EventInit, EVENT_VERSION_MAJOR, EVENT_VERSION_MEDIAN, + EVENT_VERSION_MINOR, EventCodeMAX, EventNameMAX, MPS_WORD_WIDTH, + mps_clocks_per_sec()); + /* flush these initial events to get the first ClockSync out. 
*/ + EventSync(); + } + LockReleaseGlobalRecursive(); + } +} + + +/* EventFinish -- stop using the event system */ + +void EventFinish(void) +{ + AVER(eventInited); + + EventSync(); +} + + +/* EventControl -- Change or read control word + * + * Resets the bits specified in resetMask, and flips those in + * flipMask. Returns old value. + * + * Operations can be implemented as follows: + * Set(M) EventControl(M,M) + * Reset(M) EventControl(M,0) + * Flip(M) EventControl(0,M) + * Read() EventControl(0,0) + * + * TODO: Candy-machine interface is a transgression. + */ + +EventControlSet EventControl(EventControlSet resetMask, + EventControlSet flipMask) +{ + EventControlSet oldValue = EventKindControl; + + /* EventKindControl = (EventKindControl & ~resetMask) ^ flipMask */ + EventKindControl = + BS_SYM_DIFF(BS_DIFF(EventKindControl, resetMask), flipMask); + + return oldValue; +} + + +/* EventInternString -- emit an Intern event on the (null-term) string given */ + +EventStringId EventInternString(const char *label) +{ + AVER(label != NULL); + return EventInternGenString(StringLength(label), label); +} + + +/* EventInternGenString -- emit an Intern event on the string given */ + +EventStringId EventInternGenString(size_t len, const char *label) +{ + EventStringId id; + + AVER(label != NULL); + + id = EventInternSerial; + ++EventInternSerial; + + EVENT2S(Intern, id, len, label); + + return id; +} + + +/* EventLabelAddr -- emit event to label address with the given id */ + +void EventLabelAddr(Addr addr, EventStringId id) +{ + AVER((Serial)id < EventInternSerial); + + EVENT2(Label, addr, id); +} + + +/* EventLabelPointer -- emit event to label pointer with the given id */ + +void EventLabelPointer(Pointer pointer, EventStringId id) +{ + AVER((Serial)id < EventInternSerial); + + EVENT2(LabelPointer, pointer, id); +} + + +/* Convert event parameter sort to WriteF arguments */ + +#define EVENT_WRITE_PARAM_MOST(name, index, sort) \ + " $"#sort, 
(WriteF##sort)event->name.f##index, +#define EVENT_WRITE_PARAM_A EVENT_WRITE_PARAM_MOST +#define EVENT_WRITE_PARAM_P EVENT_WRITE_PARAM_MOST +#define EVENT_WRITE_PARAM_U EVENT_WRITE_PARAM_MOST +#define EVENT_WRITE_PARAM_W EVENT_WRITE_PARAM_MOST +#define EVENT_WRITE_PARAM_D EVENT_WRITE_PARAM_MOST +#define EVENT_WRITE_PARAM_S EVENT_WRITE_PARAM_MOST +#define EVENT_WRITE_PARAM_B(name, index, sort) \ + " $U", (WriteFU)event->name.f##index, + + +Res EventDescribe(Event event, mps_lib_FILE *stream, Count depth) +{ + Res res; + + /* TODO: Some sort of EventCheck would be good */ + if (event == NULL) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "Event $P {\n", (WriteFP)event, + " code $U\n", (WriteFU)event->any.code, + " clock ", NULL); + if (res != ResOK) + return res; + res = EVENT_CLOCK_WRITE(stream, depth, event->any.clock); + if (res != ResOK) + return res; + res = WriteF(stream, depth, "\n size $U\n", (WriteFU)event->any.size, NULL); + if (res != ResOK) + return res; + + switch (event->any.code) { + +#define EVENT_DESC_PARAM(name, index, sort, ident, doc) \ + "\n $S", (WriteFS)#ident, \ + EVENT_WRITE_PARAM_##sort(name, index, sort) + +#define EVENT_DESC(X, name, code, used, kind) \ + case code: \ + res = WriteF(stream, depth, \ + " event \"$S\"", (WriteFS)#name, \ + EVENT_##name##_PARAMS(EVENT_DESC_PARAM, name) \ + NULL); \ + if (res != ResOK) \ + return res; \ + break; + + EVENT_LIST(EVENT_DESC, X) + + default: + res = WriteF(stream, depth, " event type unknown", NULL); + if (res != ResOK) + return res; + /* TODO: Hexdump unknown event contents. 
 */
    break;
  }

  res = WriteF(stream, depth,
               "\n} Event $P\n", (WriteFP)event,
               NULL);
  return res;
}


Res EventWrite(Event event, mps_lib_FILE *stream)
{
  Res res;

  if (event == NULL)
    return ResFAIL;
  if (stream == NULL)
    return ResFAIL;

  res = EVENT_CLOCK_WRITE(stream, 0, event->any.clock);
  if (res != ResOK)
    return res;

  switch (event->any.code) {

#define EVENT_WRITE_PARAM(name, index, sort, ident, doc) \
  EVENT_WRITE_PARAM_##sort(name, index, sort)

#define EVENT_WRITE(X, name, code, used, kind) \
  case code: \
    res = WriteF(stream, 0, " $S", (WriteFS)#name, \
                 EVENT_##name##_PARAMS(EVENT_WRITE_PARAM, name) \
                 NULL); \
    if (res != ResOK) \
      return res; \
    break;
  EVENT_LIST(EVENT_WRITE, X)

  default:
    res = WriteF(stream, 0, " <unknown code $U>",
                 (WriteFU)event->any.code, NULL);
    if (res != ResOK)
      return res;
    /* TODO: Hexdump unknown event contents. */
    break;
  }

  return ResOK;
}


void EventDump(mps_lib_FILE *stream)
{
  EventKind kind;

  AVER(stream != NULL);

  /* This can happen if there's a backtrace very early in the life of
     the MPS, and will cause an access violation if we continue. */
  if (!eventInited) {
    (void)WriteF(stream, 0, "No events\n", NULL);
    return;
  }

  for (kind = 0; kind < EventKindLIMIT; ++kind) {
    char *cursor = EventLast[kind];
    const char *end = EventBuffer[kind] + EventBufferSIZE;
    while (cursor < end) {
      Event event = (void *)cursor;
      /* Try to keep going even if there's an error, because this is
         used for debugging and we'll take what we can get.
*/ + (void)EventWrite(event, stream); + (void)WriteF(stream, 0, "\n", NULL); + cursor += event->any.size; + } + } +} + + +#else /* EVENT, not */ + + +void EventSync(void) +{ + NOOP; +} + + +void EventInit(void) +{ + NOOP; +} + + +void EventFinish(void) +{ + NOOP; +} + + +EventControlSet EventControl(EventControlSet resetMask, + EventControlSet flipMask) +{ + UNUSED(resetMask); + UNUSED(flipMask); + return BS_EMPTY(EventControlSet); +} + + +EventStringId EventInternString(const char *label) +{ + UNUSED(label); + /* EventInternString is reached in varieties without events, but the result + is not used for anything. */ + return (EventStringId)0x4026EAC8; +} + + +Word EventInternGenString(size_t len, const char *label) +{ + UNUSED(len); UNUSED(label); + /* EventInternGenString is reached in varieties without events, but + the result is not used for anything. */ + return (EventStringId)0x4026EAC8; +} + + +void EventLabelAddr(Addr addr, Word id) +{ + UNUSED(addr); + UNUSED(id); + /* EventLabelAddr is reached in varieties without events, but doesn't have + to do anything. */ +} + + +void EventLabelPointer(Pointer pointer, Word id) +{ + UNUSED(pointer); + UNUSED(id); + /* EventLabelPointer is reached in varieties without events, but + doesn't have to do anything. */ +} + + +Res EventDescribe(Event event, mps_lib_FILE *stream, Count depth) +{ + UNUSED(event); + UNUSED(stream); + UNUSED(depth); + return ResUNIMPL; +} + + +Res EventWrite(Event event, mps_lib_FILE *stream) +{ + UNUSED(event); + UNUSED(stream); + return ResUNIMPL; +} + + +void EventDump(mps_lib_FILE *stream) +{ + UNUSED(stream); +} + + +#endif /* EVENT */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/event.h b/mps/code/event.h new file mode 100644 index 00000000000..954b315b776 --- /dev/null +++ b/mps/code/event.h @@ -0,0 +1,234 @@ +/* -- Event Logging Interface + * + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * $Id$ + * + * READERSHIP + * + * .readership: MPS developers. + * + * DESIGN + * + * .design: . 
+ */ + +#ifndef event_h +#define event_h + +#include "eventcom.h" +#include "mpm.h" +#include "eventdef.h" +#include "mpslib.h" + + +typedef Word EventStringId; +typedef Word EventControlSet; + +extern void EventSync(void); +extern void EventInit(void); +extern void EventFinish(void); +extern EventControlSet EventControl(EventControlSet resetMask, + EventControlSet flipMask); +extern EventStringId EventInternString(const char *label); +extern EventStringId EventInternGenString(size_t, const char *label); +extern void EventLabelAddr(Addr addr, Word id); +extern void EventLabelPointer(Pointer pointer, Word id); +extern void EventFlush(EventKind kind); +extern Res EventDescribe(Event event, mps_lib_FILE *stream, Count depth); +extern Res EventWrite(Event event, mps_lib_FILE *stream); +extern void EventDump(mps_lib_FILE *stream); + + +#ifdef EVENT + +/* Event writing support */ + +extern char EventBuffer[EventKindLIMIT][EventBufferSIZE]; +extern char *EventLast[EventKindLIMIT]; +extern Word EventKindControl; + + +/* EVENT_BEGIN -- flush buffer if necessary and write event header */ + +#define EVENT_BEGIN(name, structSize) \ + BEGIN \ + Event##name##Struct *_event; \ + EventKind _kind = Event##name##Kind; \ + size_t _size = size_tAlignUp(structSize, EVENT_ALIGN); \ + AVER(Event##name##Used); \ + if (_size > (size_t)(EventLast[Event##name##Kind] \ + - EventBuffer[Event##name##Kind])) \ + EventFlush(Event##name##Kind); \ + AVER(_size <= (size_t)(EventLast[Event##name##Kind] \ + - EventBuffer[Event##name##Kind])); \ + _event = (void *)(EventLast[Event##name##Kind] - _size); \ + _event->code = Event##name##Code; \ + _event->size = (EventSize)_size; \ + EVENT_CLOCK(_event->clock); + +#define EVENT_END \ + EventLast[_kind] -= _size; \ + END + + +/* EVENTn -- event emitting macros + * + * The macros EVENT0, EVENT1, etc. are used throughout the MPS to emit an + * event with parameters. 
They work by appending the event parameters to + * an event buffer, which is flushed to the telemetry output stream when + * full. EVENT2S is a special case that takes a variable length string. + */ + +#define EVENT2S(name, p0, length, string) \ + BEGIN \ + size_t _string_len = (length); \ + size_t size; \ + AVER(_string_len <= EventStringLengthMAX); \ + size = offsetof(Event##name##Struct, f1) + _string_len + sizeof('\0'); \ + EVENT_BEGIN(name, size) \ + _event->f0 = (p0); \ + (void)mps_lib_memcpy(_event->f1, (string), _string_len); \ + _event->f1[_string_len] = '\0'; \ + EVENT_END; \ + END + +#define EVENT0 EVENT_RECORD0 +#define EVENT1 EVENT_RECORD1 +#define EVENT2 EVENT_RECORD2 +#define EVENT3 EVENT_RECORD3 +#define EVENT4 EVENT_RECORD4 +#define EVENT5 EVENT_RECORD5 +#define EVENT6 EVENT_RECORD6 +#define EVENT7 EVENT_RECORD7 +#define EVENT8 EVENT_RECORD8 +#define EVENT9 EVENT_RECORD9 +#define EVENT10 EVENT_RECORD10 +#define EVENT11 EVENT_RECORD11 +#define EVENT12 EVENT_RECORD12 +#define EVENT13 EVENT_RECORD13 +#define EVENT14 EVENT_RECORD14 + +#else /* !EVENT */ + +#define EVENT0 EVENT_IGNORE0 +#define EVENT1 EVENT_IGNORE1 +#define EVENT2 EVENT_IGNORE2 +#define EVENT3 EVENT_IGNORE3 +#define EVENT4 EVENT_IGNORE4 +#define EVENT5 EVENT_IGNORE5 +#define EVENT6 EVENT_IGNORE6 +#define EVENT7 EVENT_IGNORE7 +#define EVENT8 EVENT_IGNORE8 +#define EVENT9 EVENT_IGNORE9 +#define EVENT10 EVENT_IGNORE10 +#define EVENT11 EVENT_IGNORE11 +#define EVENT12 EVENT_IGNORE12 +#define EVENT13 EVENT_IGNORE13 +#define EVENT14 EVENT_IGNORE14 + +#endif /* !EVENT */ + +#if EVENT_ALL + +#define EVENT_CRITICAL0 EVENT0 +#define EVENT_CRITICAL1 EVENT1 +#define EVENT_CRITICAL2 EVENT2 +#define EVENT_CRITICAL3 EVENT3 +#define EVENT_CRITICAL4 EVENT4 +#define EVENT_CRITICAL5 EVENT5 +#define EVENT_CRITICAL6 EVENT6 +#define EVENT_CRITICAL7 EVENT7 +#define EVENT_CRITICAL8 EVENT8 +#define EVENT_CRITICAL9 EVENT9 +#define EVENT_CRITICAL10 EVENT10 +#define EVENT_CRITICAL11 EVENT11 +#define 
EVENT_CRITICAL12 EVENT12 +#define EVENT_CRITICAL13 EVENT13 +#define EVENT_CRITICAL14 EVENT14 + +#else /* !EVENT_ALL */ + +#define EVENT_CRITICAL0 EVENT_IGNORE0 +#define EVENT_CRITICAL1 EVENT_IGNORE1 +#define EVENT_CRITICAL2 EVENT_IGNORE2 +#define EVENT_CRITICAL3 EVENT_IGNORE3 +#define EVENT_CRITICAL4 EVENT_IGNORE4 +#define EVENT_CRITICAL5 EVENT_IGNORE5 +#define EVENT_CRITICAL6 EVENT_IGNORE6 +#define EVENT_CRITICAL7 EVENT_IGNORE7 +#define EVENT_CRITICAL8 EVENT_IGNORE8 +#define EVENT_CRITICAL9 EVENT_IGNORE9 +#define EVENT_CRITICAL10 EVENT_IGNORE10 +#define EVENT_CRITICAL11 EVENT_IGNORE11 +#define EVENT_CRITICAL12 EVENT_IGNORE12 +#define EVENT_CRITICAL13 EVENT_IGNORE13 +#define EVENT_CRITICAL14 EVENT_IGNORE14 + +#endif /* !EVENT_ALL */ + + +/* The following lines were generated with + python -c 'for i in range(15): print("#define EVENT_RECORD{i}(name{args}) EVENT_BEGIN(name, sizeof(Event##name##Struct)) {assign} EVENT_END\n#define EVENT_IGNORE{i}(name{args}) BEGIN {unused} END".format(i=i, args="".join(map(", p{}".format, range(i))), assign=" ".join(map("_event->f{0} = (p{0});".format, range(i))), unused=" ".join(map("UNUSED(p{});".format, range(i)))))' + */ +#define EVENT_RECORD0(name) EVENT_BEGIN(name, sizeof(Event##name##Struct)) EVENT_END +#define EVENT_IGNORE0(name) BEGIN END +#define EVENT_RECORD1(name, p0) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); EVENT_END +#define EVENT_IGNORE1(name, p0) BEGIN UNUSED(p0); END +#define EVENT_RECORD2(name, p0, p1) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); EVENT_END +#define EVENT_IGNORE2(name, p0, p1) BEGIN UNUSED(p0); UNUSED(p1); END +#define EVENT_RECORD3(name, p0, p1, p2) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); EVENT_END +#define EVENT_IGNORE3(name, p0, p1, p2) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); END +#define EVENT_RECORD4(name, p0, p1, p2, p3) EVENT_BEGIN(name, sizeof(Event##name##Struct)) 
_event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); EVENT_END +#define EVENT_IGNORE4(name, p0, p1, p2, p3) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); END +#define EVENT_RECORD5(name, p0, p1, p2, p3, p4) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); EVENT_END +#define EVENT_IGNORE5(name, p0, p1, p2, p3, p4) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); END +#define EVENT_RECORD6(name, p0, p1, p2, p3, p4, p5) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); EVENT_END +#define EVENT_IGNORE6(name, p0, p1, p2, p3, p4, p5) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); END +#define EVENT_RECORD7(name, p0, p1, p2, p3, p4, p5, p6) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); EVENT_END +#define EVENT_IGNORE7(name, p0, p1, p2, p3, p4, p5, p6) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); END +#define EVENT_RECORD8(name, p0, p1, p2, p3, p4, p5, p6, p7) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); _event->f7 = (p7); EVENT_END +#define EVENT_IGNORE8(name, p0, p1, p2, p3, p4, p5, p6, p7) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); UNUSED(p7); END +#define EVENT_RECORD9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); _event->f7 = (p7); _event->f8 = (p8); EVENT_END 
+#define EVENT_IGNORE9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); UNUSED(p7); UNUSED(p8); END +#define EVENT_RECORD10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); _event->f7 = (p7); _event->f8 = (p8); _event->f9 = (p9); EVENT_END +#define EVENT_IGNORE10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); UNUSED(p7); UNUSED(p8); UNUSED(p9); END +#define EVENT_RECORD11(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); _event->f7 = (p7); _event->f8 = (p8); _event->f9 = (p9); _event->f10 = (p10); EVENT_END +#define EVENT_IGNORE11(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); UNUSED(p7); UNUSED(p8); UNUSED(p9); UNUSED(p10); END +#define EVENT_RECORD12(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); _event->f7 = (p7); _event->f8 = (p8); _event->f9 = (p9); _event->f10 = (p10); _event->f11 = (p11); EVENT_END +#define EVENT_IGNORE12(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); UNUSED(p7); UNUSED(p8); UNUSED(p9); UNUSED(p10); UNUSED(p11); END +#define EVENT_RECORD13(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12) EVENT_BEGIN(name, sizeof(Event##name##Struct)) 
_event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); _event->f7 = (p7); _event->f8 = (p8); _event->f9 = (p9); _event->f10 = (p10); _event->f11 = (p11); _event->f12 = (p12); EVENT_END +#define EVENT_IGNORE13(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); UNUSED(p7); UNUSED(p8); UNUSED(p9); UNUSED(p10); UNUSED(p11); UNUSED(p12); END +#define EVENT_RECORD14(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13) EVENT_BEGIN(name, sizeof(Event##name##Struct)) _event->f0 = (p0); _event->f1 = (p1); _event->f2 = (p2); _event->f3 = (p3); _event->f4 = (p4); _event->f5 = (p5); _event->f6 = (p6); _event->f7 = (p7); _event->f8 = (p8); _event->f9 = (p9); _event->f10 = (p10); _event->f11 = (p11); _event->f12 = (p12); _event->f13 = (p13); EVENT_END +#define EVENT_IGNORE14(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13) BEGIN UNUSED(p0); UNUSED(p1); UNUSED(p2); UNUSED(p3); UNUSED(p4); UNUSED(p5); UNUSED(p6); UNUSED(p7); UNUSED(p8); UNUSED(p9); UNUSED(p10); UNUSED(p11); UNUSED(p12); UNUSED(p13); END + +#endif /* event_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/eventcnv.c b/mps/code/eventcnv.c new file mode 100644 index 00000000000..9fb76665d67 --- /dev/null +++ b/mps/code/eventcnv.c @@ -0,0 +1,366 @@ +/* eventcnv.c: Simple event log converter + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This is a command-line tool that converts a binary format telemetry output + * stream from the MPS into a more-portable textual format. + * + * eventcnv can only read binary-format files that come from an MPS + * compiled on the same platform, whereas the text-format files it + * produces can be processed on any platform. + * + * The default MPS library will write a telemetry stream to a file + * when the environment variable MPS_TELEMETRY_CONTROL is set to an + * integer whose bits select event kinds. For example: + * + * MPS_TELEMETRY_CONTROL=7 amcss + * + * will run the amcss test program and emit a telemetry file with + * event kinds 0, 1, 2. 
The file can then be converted into a sorted
+ * text format log with a command like:
+ *
+ *   eventcnv | sort > mps-events.txt
+ *
+ * These text-format files have one line per event, and can be
+ * manipulated by various programs in the usual Unix way.
+ *
+ * The binary telemetry filename can be specified with a -f
+ * command-line argument (use -f - to specify standard input). If no
+ * filename is specified on the command line, the environment variable
+ * MPS_TELEMETRY_FILENAME is consulted (this is the same environment
+ * variable used to specify the telemetry file to the MPS library).
+ * If the environment variable does not exist, the default filename of
+ * "mpsio.log" is used.
+ *
+ * $Id$
+ */
+
+#include "config.h"
+#include "eventdef.h"
+#include "eventcom.h"
+#include "testlib.h" /* for ulongest_t and associated print formats */
+
+#include <stddef.h> /* for size_t */
+#include <stdio.h> /* for printf */
+#include <stdlib.h> /* for EXIT_FAILURE */
+#include <assert.h> /* for assert */
+#include <string.h> /* for strcmp */
+#include "mpstd.h"
+
+#define DEFAULT_TELEMETRY_FILENAME "mpsio.log"
+#define TELEMETRY_FILENAME_ENVAR "MPS_TELEMETRY_FILENAME"
+
+static EventClock eventTime; /* current event time */
+static const char *prog; /* program name */
+
+/* Errors and Warnings */
+
+/* fevwarn -- flush stdout, write message to stderr */
+
+ATTRIBUTE_FORMAT((printf, 2, 0))
+static void fevwarn(const char *prefix, const char *format, va_list args)
+{
+  (void)fflush(stdout); /* sync */
+  (void)fprintf(stderr, "%s: %s @", prog, prefix);
+  (void)EVENT_CLOCK_PRINT(stderr, eventTime);
+  (void)fprintf(stderr, " ");
+  (void)vfprintf(stderr, format, args);
+  (void)fprintf(stderr, "\n");
+}
+
+/* evwarn -- flush stdout, warn to stderr */
+
+ATTRIBUTE_FORMAT((printf, 1, 2))
+static void evwarn(const char *format, ...)
+{ + va_list args; + + va_start(args, format); + fevwarn("Warning", format, args); + va_end(args); +} + +/* everror -- flush stdout, message to stderr, exit */ + +ATTRIBUTE_FORMAT((printf, 1, 2)) +static void everror(const char *format, ...) +{ + va_list args; + + va_start(args, format); + fevwarn("Error", format, args); + va_end(args); + exit(EXIT_FAILURE); +} + + +/* usage -- usage message */ + +static void usage(void) +{ + (void)fprintf(stderr, "Usage: %s [-f logfile] [-h]\n" + "See \"Telemetry\" in the reference manual for instructions.\n", + prog); +} + + +/* usageError -- explain usage and error */ + +static void usageError(void) +{ + usage(); + everror("Bad usage"); +} + + +/* parseArgs -- parse command line arguments, return log file name */ + +static char *parseArgs(int argc, char *argv[]) +{ + char *name = NULL; + int i = 1; + + if (argc >= 1) + prog = argv[0]; + else + prog = "unknown"; + + while (i < argc) { /* consider argument i */ + if (argv[i][0] == '-') { /* it's an option argument */ + switch (argv[i][1]) { + case 'f': /* file name */ + ++ i; + if (i == argc) + usageError(); + else + name = argv[i]; + break; + case '?': case 'h': /* help */ + usage(); + exit(EXIT_SUCCESS); + default: + usageError(); + } + } /* if option */ + ++ i; + } + return name; +} + + +/* Printing routines */ + +static void printHex(ulongest_t val) +{ + printf(" %"PRIXLONGEST, (ulongest_t)val); +} + +#define printParamP(p) printHex((ulongest_t)p) +#define printParamA(a) printHex((ulongest_t)a) +#define printParamU(u) printHex((ulongest_t)u) +#define printParamW(w) printHex((ulongest_t)w) +#define printParamB(b) printHex((ulongest_t)b) + +static void printParamD(double d) +{ + printf(" %.10G", d); +} + +static void printParamS(const char *str) +{ + size_t i; + putchar(' '); + putchar('"'); + for (i = 0; str[i] != '\0'; ++i) { + char c = str[i]; + if (c == '"' || c == '\\') + putchar('\\'); + putchar(c); + } + putchar('"'); +} + + +/* EventRead -- read one event from the file 
*/ + +static Res eventRead(Bool *eofOut, EventUnion *event, FILE *stream) +{ + size_t n; + size_t rest; + + /* Read the prefix common to all event structures, in order to decode the + event size. */ + n = fread(&event->any, sizeof(event->any), 1, stream); + if (n < 1) { + if (feof(stream)) { + *eofOut = TRUE; + return ResOK; + } + return ResIO; + } + + if (event->any.size < sizeof(event->any)) + return ResFAIL; /* invalid size: too small */ + + if (event->any.size > sizeof(*event)) + return ResFAIL; /* invalid size: too large */ + + /* Read the rest of the event. */ + rest = event->any.size - sizeof(event->any); + if (rest > 0) { + n = fread((char *)event + sizeof(event->any), rest, 1, stream); + if (n < 1) { + if (feof(stream)) + return ResFAIL; /* truncated event */ + else + return ResIO; + } + } + + *eofOut = FALSE; + return ResOK; +} + +/* readLog -- read and parse log */ + +static void readLog(FILE *stream) +{ + for(;;) { /* loop for each event */ + EventUnion eventUnion; + Event event = &eventUnion; + EventCode code; + Res res; + Bool eof = FALSE; /* suppress warnings about uninitialized use */ + + /* Read and parse event. 
*/ + res = eventRead(&eof, event, stream); + if (res == ResFAIL) + everror("Truncated log"); + else if (res == ResIO) + everror("I/O error reading log"); + else if (res != ResOK) + everror("Unknown error reading log"); + if (eof) + break; + + eventTime = event->any.clock; + code = event->any.code; + + /* Special handling for some events, prior to text output */ + + switch(code) { + case EventEventInitCode: + if ((event->EventInit.f0 != EVENT_VERSION_MAJOR) || + (event->EventInit.f1 != EVENT_VERSION_MEDIAN) || + (event->EventInit.f2 != EVENT_VERSION_MINOR)) + evwarn("Event log version does not match: %d.%d.%d vs %d.%d.%d", + event->EventInit.f0, + event->EventInit.f1, + event->EventInit.f2, + EVENT_VERSION_MAJOR, + EVENT_VERSION_MEDIAN, + EVENT_VERSION_MINOR); + + if (event->EventInit.f3 > EventCodeMAX) + evwarn("Event log may contain unknown events with codes from %d to %d", + EventCodeMAX+1, event->EventInit.f3); + + if (event->EventInit.f5 != MPS_WORD_WIDTH) + /* This probably can't happen; other things will break + * before we get here */ + evwarn("Event log has incompatible word width: %d instead of %d", + event->EventInit.f5, + MPS_WORD_WIDTH); + break; + default: + /* No special treatment needed. 
*/ + break; + } + + (void)EVENT_CLOCK_PRINT(stdout, eventTime); + printf(" %4X", (unsigned)code); + + switch (code) { +#define EVENT_PARAM_PRINT(name, index, sort, ident, doc) \ + printParam##sort(event->name.f##index); +#define EVENT_PRINT(X, name, code, used, kind) \ + case code: \ + EVENT_##name##_PARAMS(EVENT_PARAM_PRINT, name) \ + break; + EVENT_LIST(EVENT_PRINT, X) + default: + evwarn("Unknown event code %d", code); + } + + putchar('\n'); + (void)fflush(stdout); + } /* while(!feof(input)) */ +} + + +/* CHECKCONV -- check t2 can be cast to t1 without loss */ + +#define CHECKCONV(t1, t2) \ + (sizeof(t1) >= sizeof(t2)) + + +/* main */ + +int main(int argc, char *argv[]) +{ + const char *filename; + FILE *input; + + assert(CHECKCONV(ulongest_t, Word)); + assert(CHECKCONV(ulongest_t, Addr)); + assert(CHECKCONV(ulongest_t, void *)); + assert(CHECKCONV(ulongest_t, EventCode)); + + filename = parseArgs(argc, argv); + if (!filename) { + filename = getenv(TELEMETRY_FILENAME_ENVAR); + if(!filename) + filename = DEFAULT_TELEMETRY_FILENAME; + } + + if (strcmp(filename, "-") == 0) + input = stdin; + else { + input = fopen(filename, "rb"); + if (input == NULL) + everror("unable to open \"%s\"\n", filename); + } + + readLog(input); + + return EXIT_SUCCESS; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/eventcom.h b/mps/code/eventcom.h new file mode 100644 index 00000000000..75886be3d9c --- /dev/null +++ b/mps/code/eventcom.h @@ -0,0 +1,168 @@ +/* -- Event Logging Common Definitions + * + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * $Id$ + * + * .sources: mps.design.telemetry + */ + +#ifndef eventcom_h +#define eventcom_h + +#include +#include "mpmtypes.h" /* for Word */ +#include "eventdef.h" +#include "clock.h" + + +/* Event Kinds --- see + * + * All events are classified as being of one event type. + * They are small enough to be able to be used as members of a bit set. 
+ */ + +#define EventKindENUM(ENUM, X) \ + ENUM(X, Arena, "Per space or arena") \ + ENUM(X, Pool, "Per pool") \ + ENUM(X, Trace, "Per trace or scan") \ + ENUM(X, Seg, "Per seg") \ + ENUM(X, Ref, "Per ref or fix") \ + ENUM(X, Object, "Per alloc or object") \ + ENUM(X, User, "User-invoked") + +#define ENUM_DECLARE(name) \ + enum name##Enum { \ + name##ENUM(ENUM_DECLARE_ROW, name) \ + name##LIMIT \ + }; + +#define ENUM_DECLARE_ROW(enumName, rowName, rowDoc) \ + enumName##rowName, + +ENUM_DECLARE(EventKind) + + +/* Event type definitions + * + * Various constants for each event type to describe them, so that they + * can easily be looked up from macros by name. + */ + +/* Note that enum values can be up to fifteen bits long portably. */ +#define EVENT_ENUM(X, name, code, used, kind) \ + Event##name##Code = code, \ + Event##name##Used = used, \ + Event##name##Kind = EventKind##kind, + +enum EventDefinitionsEnum { + EVENT_LIST(EVENT_ENUM, X) + /* suppress comma-at-end-of-enum warning */ + EventEnumWarningSuppressor = USHRT_MAX +}; + + +/* Event*Struct -- Event Structures + * + * Declare the structures that are used to encode events in the internal event + * buffers and on the binary telemetry output stream. + */ + +/* Types for common event fields */ +typedef unsigned short EventCode; +typedef unsigned EventKind; +typedef unsigned short EventSize; +#define EventSizeMAX USHRT_MAX + +/* Common prefix for all event structures. The size field allows an event + reader to skip over events whose codes it does not recognise. 
*/ +#define EVENT_ANY_FIELDS(X) \ + X(EventCode, code, "encoding of the event type") \ + X(EventSize, size, "allows reader to skip events of unknown code") \ + X(EventClock, clock, "when the event occurred") + +#define EVENT_ANY_STRUCT_FIELD(TYPE, NAME, DOC) TYPE NAME; + +typedef struct EventAnyStruct { + EVENT_ANY_FIELDS(EVENT_ANY_STRUCT_FIELD) +} EventAnyStruct; + +/* Event field types, for indexing by macro on the event parameter sort */ +typedef void *EventFP; /* pointer to C object */ +typedef Addr EventFA; /* address on the heap */ +typedef Word EventFW; /* word */ +typedef unsigned EventFU; /* unsigned integer */ +typedef char EventFS[EventStringLengthMAX + sizeof('\0')]; /* string */ +typedef double EventFD; /* double */ +typedef unsigned char EventFB; /* Boolean */ + +/* Event packing bitfield specifiers */ +#define EventFP_BITFIELD +#define EventFA_BITFIELD +#define EventFW_BITFIELD +#define EventFU_BITFIELD +#define EventFS_BITFIELD +#define EventFD_BITFIELD +#define EventFB_BITFIELD + +#define EVENT_STRUCT_FIELD(X, index, sort, ident, doc) \ + EventF##sort f##index EventF##sort##_BITFIELD; + +#define EVENT_STRUCT(X, name, _code, used, kind) \ + typedef struct Event##name##Struct { \ + EVENT_ANY_FIELDS(EVENT_ANY_STRUCT_FIELD) \ + EVENT_##name##_PARAMS(EVENT_STRUCT_FIELD, X) \ + } Event##name##Struct; + +EVENT_LIST(EVENT_STRUCT, X) + +/* Maximum alignment requirement of any event type. */ +#define EVENT_ALIGN (sizeof(EventFP)) + + +/* Event -- event union type + * + * Event is the type of a pointer to EventUnion, which is a union of all + * event structures. This can be used as the type of any event, decoded + * by examining event->any.code. + */ + +#define EVENT_UNION_MEMBER(X, name, code, used, kind) \ + Event##name##Struct name; + +typedef union EventUnion { + EventAnyStruct any; + EVENT_LIST(EVENT_UNION_MEMBER, X) +} EventUnion, *Event; + + +#endif /* eventcom_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/eventdef.h b/mps/code/eventdef.h new file mode 100644 index 00000000000..60617ca4b87 --- /dev/null +++ b/mps/code/eventdef.h @@ -0,0 +1,708 @@ +/* -- Event Logging Definitions + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .source: + * + * .desc: This file declares macros that define the types of events and their + * properties. + * + * TRANSGRESSIONS + * + * .kind.abuse: A few events have a kind which is not obvious from the + * type of the objects that the event relates to. They are given the + * kind that they have on the grounds of expected use. 
The kinds are + * used in controlling the overall volume of telemetry and these events are + * given kinds so that they are grouped under the same control as events + * you are likely to want to see them with. (So for example, lots of + * scanner events have the same kind, Seg, because if you are interested + * in one then you're probably interested in them all and it's a similar + * amount of data). + */ + +#ifndef eventdef_h +#define eventdef_h + + +/* EVENT_VERSION_* -- three part version number + * + * Increment the minor version when adding new events, + * the median version when changing an existing event, + * and the major version when changing the format of the event file. + * + * These are passed as parameters to the EventInit event at the start + * of a telemetry stream, allowing that stream to be identified. + */ + +#define EVENT_VERSION_MAJOR ((unsigned)2) +#define EVENT_VERSION_MEDIAN ((unsigned)0) +#define EVENT_VERSION_MINOR ((unsigned)0) + + +/* EVENT_LIST -- list of event types and general properties + * + * The columns are: + * + * 1. Type: The name of the event type, without the leading "Event"; + * 2. Code: The unique 16-bit code associated with this event type; + * 3. Used: Whether this event type is used at all; + * 4. Kind: Category into which this event falls, without the "EventKind"; + * + * When you retire an event type, don't delete it from the list, but + * set the "Used" column to FALSE. This serves as documentation for + * what the event code means in older logs, and prevents the codes + * being re-used. . + * + * When you add an event type, you must also add an EVENT_*_PARAMS + * macro specifying its parameters. + * + * TODO: Add a doc string to each event type. 
+ */ + +#define EventNameMAX ((size_t)19) +#define EventCodeMAX ((EventCode)0x005c) + +#define EVENT_LIST(EVENT, X) \ + /* 0123456789012345678 <- don't exceed without changing EventNameMAX */ \ + EVENT(X, AMCScanNailed , 0x0001, TRUE, Seg) \ + EVENT(X, AWLDeclineSeg , 0x0002, TRUE, Seg) \ + EVENT(X, AWLDeclineTotal , 0x0003, TRUE, Seg) \ + EVENT(X, ArenaAccessBegin , 0x0004, TRUE, Arena) \ + EVENT(X, ArenaAccessEnd , 0x0005, TRUE, Arena) \ + EVENT(X, ArenaAlloc , 0x0006, TRUE, Arena) \ + EVENT(X, ArenaAllocFail , 0x0007, TRUE, Arena) \ + EVENT(X, ArenaCreateCL , 0x0008, TRUE, Arena) \ + EVENT(X, ArenaCreateVM , 0x0009, TRUE, Arena) \ + EVENT(X, ArenaDestroy , 0x000a, TRUE, Arena) \ + EVENT(X, ArenaExtend , 0x000b, TRUE, Arena) \ + EVENT(X, ArenaFree , 0x000c, TRUE, Arena) \ + EVENT(X, ArenaPollBegin , 0x000d, TRUE, Arena) \ + EVENT(X, ArenaPollEnd , 0x000e, TRUE, Arena) \ + EVENT(X, ArenaSetEmergency , 0x000f, TRUE, Arena) \ + EVENT(X, ArenaSetSpare , 0x0010, TRUE, Arena) \ + EVENT(X, ArenaUseFreeZone , 0x0011, TRUE, Arena) \ + EVENT(X, BufferCommit , 0x0012, TRUE, Object) \ + EVENT(X, BufferEmpty , 0x0013, TRUE, Seg) \ + EVENT(X, BufferFill , 0x0014, TRUE, Seg) \ + EVENT(X, BufferFinish , 0x0015, TRUE, Pool) /* see .kind.abuse */ \ + EVENT(X, BufferInit , 0x0016, TRUE, Pool) /* see .kind.abuse */ \ + EVENT(X, BufferInitRank , 0x0017, TRUE, Pool) \ + EVENT(X, BufferInitSeg , 0x0018, TRUE, Pool) \ + EVENT(X, BufferReserve , 0x0019, TRUE, Object) \ + EVENT(X, ChainCondemnAuto , 0x001a, TRUE, Trace) \ + EVENT(X, CommitLimitSet , 0x001b, TRUE, Arena) \ + EVENT(X, EventClockSync , 0x001c, TRUE, Arena) \ + EVENT(X, EventInit , 0x001d, TRUE, Arena) \ + EVENT(X, GenFinish , 0x001e, TRUE, Arena) \ + EVENT(X, GenInit , 0x001f, TRUE, Arena) \ + EVENT(X, GenZoneSet , 0x0020, TRUE, Arena) \ + EVENT(X, Intern , 0x0021, TRUE, User) \ + EVENT(X, Label , 0x0022, TRUE, User) \ + EVENT(X, LabelPointer , 0x0023, TRUE, User) \ + EVENT(X, LandInit , 0x0024, TRUE, Pool) \ + EVENT(X, 
MessagesDropped , 0x0025, TRUE, Arena) \ + EVENT(X, MessagesExist , 0x0026, TRUE, Arena) \ + EVENT(X, MeterInit , 0x0027, TRUE, Pool) \ + EVENT(X, MeterValues , 0x0028, TRUE, Pool) \ + EVENT(X, PauseTimeSet , 0x0029, TRUE, Arena) \ + EVENT(X, PoolAlloc , 0x002a, TRUE, Object) \ + EVENT(X, PoolFinish , 0x002b, TRUE, Pool) \ + EVENT(X, PoolFree , 0x002c, TRUE, Object) \ + EVENT(X, PoolInit , 0x002d, TRUE, Pool) \ + EVENT(X, PoolInitAMC , 0x002e, TRUE, Pool) \ + EVENT(X, PoolInitAMCZ , 0x002f, TRUE, Pool) \ + EVENT(X, PoolInitAMS , 0x0030, TRUE, Pool) \ + EVENT(X, PoolInitAWL , 0x0031, TRUE, Pool) \ + EVENT(X, PoolInitLO , 0x0032, TRUE, Pool) \ + EVENT(X, PoolInitMFS , 0x0033, TRUE, Pool) \ + EVENT(X, PoolInitMVFF , 0x0034, TRUE, Pool) \ + EVENT(X, PoolInitMVT , 0x0035, TRUE, Pool) \ + EVENT(X, PoolInitSNC , 0x0036, TRUE, Pool) \ + EVENT(X, RootScan , 0x0037, TRUE, Seg) /* see .kind.abuse */ \ + EVENT(X, SegAlloc , 0x0038, TRUE, Seg) \ + EVENT(X, SegAllocFail , 0x0039, TRUE, Seg) \ + EVENT(X, SegFree , 0x003a, TRUE, Seg) \ + EVENT(X, SegMerge , 0x003b, TRUE, Seg) \ + EVENT(X, SegReclaim , 0x003c, TRUE, Seg) \ + EVENT(X, SegScan , 0x003d, TRUE, Seg) \ + EVENT(X, SegSetGrey , 0x003e, TRUE, Seg) \ + EVENT(X, SegSetSummary , 0x003f, TRUE, Seg) \ + EVENT(X, SegSplit , 0x0040, TRUE, Seg) \ + EVENT(X, TraceAccess , 0x0041, TRUE, Seg) \ + EVENT(X, TraceBandAdvance , 0x0042, TRUE, Trace) \ + EVENT(X, TraceCondemnAll , 0x0043, TRUE, Trace) \ + EVENT(X, TraceCreate , 0x0044, TRUE, Trace) \ + EVENT(X, TraceCreatePoolGen , 0x0045, TRUE, Trace) \ + EVENT(X, TraceDestroy , 0x0046, TRUE, Trace) \ + EVENT(X, TraceEndGen , 0x0047, TRUE, Trace) \ + EVENT(X, TraceFindGrey , 0x0048, TRUE, Seg) \ + EVENT(X, TraceFix , 0x0049, TRUE, Ref) \ + EVENT(X, TraceFixSeg , 0x004a, TRUE, Ref) \ + EVENT(X, TraceFlipBegin , 0x004b, TRUE, Trace) \ + EVENT(X, TraceFlipEnd , 0x004c, TRUE, Trace) \ + EVENT(X, TraceReclaim , 0x004d, TRUE, Trace) \ + EVENT(X, TraceScanArea , 0x004e, TRUE, Seg) /* see 
.kind.abuse */ \ + EVENT(X, TraceScanAreaTagged, 0x004f, TRUE, Seg) /* see .kind.abuse */ \ + EVENT(X, TraceScanSingleRef , 0x0050, TRUE, Seg) /* see .kind.abuse */ \ + EVENT(X, TraceStart , 0x0051, TRUE, Trace) \ + EVENT(X, TraceStatFix , 0x0052, TRUE, Trace) \ + EVENT(X, TraceStatReclaim , 0x0053, TRUE, Trace) \ + EVENT(X, TraceStatScan , 0x0054, TRUE, Trace) \ + EVENT(X, VMArenaExtendDone , 0x0055, TRUE, Arena) \ + EVENT(X, VMArenaExtendFail , 0x0056, TRUE, Arena) \ + EVENT(X, VMArenaExtendStart , 0x0057, TRUE, Arena) \ + EVENT(X, VMCompact , 0x0058, TRUE, Arena) \ + EVENT(X, VMFinish , 0x0059, TRUE, Arena) \ + EVENT(X, VMInit , 0x005a, TRUE, Arena) \ + EVENT(X, VMMap , 0x005b, TRUE, Seg) \ + EVENT(X, VMUnmap , 0x005c, TRUE, Seg) + + +/* Remember to update EventNameMAX and EventCodeMAX above! + (These are checked in EventInit.) */ + + +/* EVENT_*_PARAMS -- definition of event parameters + * + * For each event type in EVENT_LIST, these macros list the parameters of + * the event. The columns are: + * + * 1. index (used to define numeric field names); + * 2. sort (Pointer, Addr, Word, Unsigned, String, Double, Bool); + * 3. identifier (for display or use in code). + * 4. documentation. 
+ */ + +#define EVENT_AMCScanNailed_PARAMS(PARAM, X) \ + PARAM(X, 0, W, loops, "number of times around the loop") \ + PARAM(X, 1, W, summary, "summary of segment being scanned") \ + PARAM(X, 2, W, white, "scan state white set") \ + PARAM(X, 3, W, unfixed, "scan state unfixed summary") \ + PARAM(X, 4, W, fixed, "scan state fixed summary") \ + PARAM(X, 5, W, refset, "scan state refset") + +#define EVENT_AWLDeclineSeg_PARAMS(PARAM, X) \ + PARAM(X, 0, P, seg, "segment declined single access") \ + PARAM(X, 1, W, singleAccesses, "single accesses this cycle") + +#define EVENT_AWLDeclineTotal_PARAMS(PARAM, X) \ + PARAM(X, 0, P, seg, "segment declined single access") \ + PARAM(X, 1, W, succAccesses, "total successive accesses") + +#define EVENT_ArenaAccessBegin_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, P, addr, "address that was accessed") \ + PARAM(X, 2, U, mode, "set of access modes") + +#define EVENT_ArenaAccessEnd_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") + +#define EVENT_ArenaAlloc_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, P, baseTract, "first allocated tract") \ + PARAM(X, 2, A, base, "base of the allocated block") \ + PARAM(X, 3, W, size, "size of the allocated block in bytes") \ + PARAM(X, 4, P, pool, "pool that requested the allocation") + +#define EVENT_ArenaAllocFail_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, W, size, "requested allocation size") \ + PARAM(X, 2, P, pool, "pool that requested allocation") \ + PARAM(X, 3, U, res, "result code") + +#define EVENT_ArenaCreateCL_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, W, size, "size of memory given to arena in bytes") \ + PARAM(X, 2, A, base, "base address of memory given to arena") \ + PARAM(X, 3, W, grainSize, "arena's grain size in bytes") \ + PARAM(X, 4, P, arenaClass, "arena's class") \ + PARAM(X, 5, W, systemPools, "number of system pools") \ + PARAM(X, 6, U, serial, "arena's 
serial number") + +#define EVENT_ArenaCreateVM_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, W, userSize, "requested address space in bytes") \ + PARAM(X, 2, W, chunkSize, "arena's chunk size in bytes") \ + PARAM(X, 3, W, grainSize, "arena's grain size in bytes") \ + PARAM(X, 4, P, arenaClass, "arena's class") \ + PARAM(X, 5, W, systemPools, "number of system pools") \ + PARAM(X, 6, U, serial, "arena's serial number") + +#define EVENT_ArenaDestroy_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") + +#define EVENT_ArenaExtend_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, A, base, "base of new chunk") \ + PARAM(X, 2, W, size, "size of new chunk") + +#define EVENT_ArenaFree_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, A, base, "base of the freed block") \ + PARAM(X, 2, W, size, "size of the freed block in bytes") \ + PARAM(X, 3, P, pool, "pool that freed the block") + +#define EVENT_ArenaPollBegin_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "arena about to be polled") + +#define EVENT_ArenaPollEnd_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "arena that was polled") \ + PARAM(X, 1, B, workWasDone, "any collection work done in poll?") + +#define EVENT_ArenaSetEmergency_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, B, emergency, "emergency mode?") + +#define EVENT_ArenaSetSpare_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, D, spare, "spare committed fraction") + +#define EVENT_ArenaUseFreeZone_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, W, zoneSet, "zones that aren't free any longer") + +#define EVENT_BufferCommit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the buffer") \ + PARAM(X, 1, A, p, "committed object") \ + PARAM(X, 2, W, size, "size of committed object") \ + PARAM(X, 3, A, clientClass, "format's class of 0 if no format") + +#define EVENT_BufferEmpty_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the 
buffer") \ + PARAM(X, 1, W, spare, "remaining free memory") + +#define EVENT_BufferFill_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the buffer") \ + PARAM(X, 1, W, size, "size of client request") \ + PARAM(X, 2, A, base, "base of pool allocation") \ + PARAM(X, 3, W, filled, "size of pool allocation") + +#define EVENT_BufferFinish_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the buffer") + +#define EVENT_BufferInit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the buffer") \ + PARAM(X, 1, P, pool, "buffer's pool") \ + PARAM(X, 2, B, isMutator, "belongs to client program?") + +#define EVENT_BufferInitRank_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the buffer") \ + PARAM(X, 1, P, pool, "buffer's pool") \ + PARAM(X, 2, B, isMutator, "belongs to client program?") \ + PARAM(X, 3, U, rank, "rank of references in buffer") + +#define EVENT_BufferInitSeg_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the buffer") \ + PARAM(X, 1, P, pool, "buffer's pool") \ + PARAM(X, 2, B, isMutator, "belongs to client program?") + +#define EVENT_BufferReserve_PARAMS(PARAM, X) \ + PARAM(X, 0, P, buffer, "the buffer") \ + PARAM(X, 1, A, init, "buffer's init pointer") \ + PARAM(X, 2, W, size, "size of client request") + +#define EVENT_ChainCondemnAuto_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "chain's arena") \ + PARAM(X, 1, P, chain, "chain with gens being condemned") \ + PARAM(X, 2, P, trace, "trace for which gens condemned") \ + PARAM(X, 3, W, topCondemnedGenIndex, "condemned gens [0..this]") \ + PARAM(X, 4, W, genCount, "total gens in chain") + +#define EVENT_CommitLimitSet_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, W, limit, "new commit limit") \ + PARAM(X, 2, U, res, "result code") + +#define EVENT_EventClockSync_PARAMS(PARAM, X) \ + PARAM(X, 0, W, clock, "mps_clock() value") + +#define EVENT_EventInit_PARAMS(PARAM, X) \ + PARAM(X, 0, U, major, "EVENT_VERSION_MAJOR") \ + PARAM(X, 1, U, median, "EVENT_VERSION_MEDIAN") \ + PARAM(X, 2, U, minor, 
"EVENT_VERSION_MINOR") \ + PARAM(X, 3, U, maxCode, "EventCodeMAX") \ + PARAM(X, 4, U, maxNameLen, "EventNameMAX") \ + PARAM(X, 5, U, wordWidth, "MPS_WORD_WIDTH") \ + PARAM(X, 6, W, clocksPerSec, "mps_clocks_per_sec()") + +#define EVENT_GenFinish_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "generation's arena") \ + PARAM(X, 1, P, gen, "the generation") \ + PARAM(X, 2, U, serial, "serial number within arena") + +#define EVENT_GenInit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "generation's arena") \ + PARAM(X, 1, P, gen, "the generation") \ + PARAM(X, 2, U, serial, "serial number within arena") \ + PARAM(X, 3, W, capacity, "capacity in bytes") \ + PARAM(X, 4, D, mortality, "initial mortality estimate") + +#define EVENT_GenZoneSet_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "generation's arena") \ + PARAM(X, 1, P, gen, "the generation") \ + PARAM(X, 2, W, zoneSet, "generation's new summary") + +#define EVENT_Intern_PARAMS(PARAM, X) \ + PARAM(X, 0, W, stringId, "identifier of interned string") \ + PARAM(X, 1, S, string, "the interned string") + +#define EVENT_Label_PARAMS(PARAM, X) \ + PARAM(X, 0, A, address, "address") \ + PARAM(X, 1, W, stringId, "string identifier of its label") + +#define EVENT_LabelPointer_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pointer, "pointer") \ + PARAM(X, 1, W, stringId, "string identifier of its label") + +#define EVENT_LandInit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, land, "the land") \ + PARAM(X, 1, P, owner, "owner pointer") + +#define EVENT_MessagesDropped_PARAMS(PARAM, X) \ + PARAM(X, 0, W, count, "count of messages dropped") + +#define EVENT_MessagesExist_PARAMS(PARAM, X) + +#define EVENT_MeterInit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, meter, "the meter") \ + PARAM(X, 1, P, owner, "owner pointer") + +#define EVENT_MeterValues_PARAMS(PARAM, X) \ + PARAM(X, 0, P, meter, "the meter") \ + PARAM(X, 1, D, total, "sum of metered amounts") \ + PARAM(X, 2, D, meanSquared, "mean square of metered amounts") \ + PARAM(X, 3, W, count, "number of metered 
amounts") \ + PARAM(X, 4, W, max, "maximum metered amount") \ + PARAM(X, 5, W, min, "minimum metered amount") + +#define EVENT_PauseTimeSet_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, D, pauseTime, "the new maximum pause time, in seconds") + +#define EVENT_PoolAlloc_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, A, pReturn, "base of allocated memory") \ + PARAM(X, 2, W, size, "size of client request") + +#define EVENT_PoolFinish_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, arena, "pool's arena") + +#define EVENT_PoolFree_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, A, old, "base of freed memory") \ + PARAM(X, 2, W, size, "size of client request") + +#define EVENT_PoolInit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, arena, "pool's arena") \ + PARAM(X, 2, P, poolClass, "pool's class") \ + PARAM(X, 3, U, serial, "pool's serial number within the arena") + +#define EVENT_PoolInitAMCZ_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, format, "pool's format") + +#define EVENT_PoolInitAMC_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, format, "pool's format") + +#define EVENT_PoolInitAMS_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, format, "pool's format") + +#define EVENT_PoolInitAWL_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, format, "pool's format") + +#define EVENT_PoolInitLO_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, format, "pool's format") + +#define EVENT_PoolInitMFS_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, W, extendBy, "size of extents") \ + PARAM(X, 2, B, extendSelf, "automatically extend?") \ + PARAM(X, 3, W, unitSize, "size of allocations") + +#define EVENT_PoolInitMVFF_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, W, extendBy, "size of 
extents") \ + PARAM(X, 2, W, avgSize, "predicted mean size of blocks") \ + PARAM(X, 3, W, align, "alignment of blocks") \ + PARAM(X, 4, B, slotHigh, "allocate at high addresses within extents?") \ + PARAM(X, 5, B, arenaHigh, "allocate extents at high addresses?") \ + PARAM(X, 6, B, firstFit, "allocate from first free block?") + +#define EVENT_PoolInitMVT_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, W, minSize, "predicted minimum size of blocks") \ + PARAM(X, 2, W, meanSize, "predicted mean size of blocks") \ + PARAM(X, 3, W, maxSize, "predicted maximum size of blocks") \ + PARAM(X, 4, W, reserveDepth, "reserve space for this many allocations") \ + PARAM(X, 5, W, fragLimig, "fragmentation limit") + +#define EVENT_PoolInitSNC_PARAMS(PARAM, X) \ + PARAM(X, 0, P, pool, "the pool") \ + PARAM(X, 1, P, format, "pool's format") + +#define EVENT_RootScan_PARAMS(PARAM, X) \ + PARAM(X, 0, P, root, "the root") \ + PARAM(X, 1, W, ts, "scanning for this set of traces") \ + PARAM(X, 2, W, summary, "summary after scan") + +#define EVENT_SegAlloc_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, P, seg, "new segment") \ + PARAM(X, 2, A, base, "base address") \ + PARAM(X, 3, W, size, "segment size") \ + PARAM(X, 4, P, pool, "pool making request") + +#define EVENT_SegAllocFail_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, W, size, "requested segment size") \ + PARAM(X, 2, P, pool, "pool making request") \ + PARAM(X, 3, U, res, "result code") + +#define EVENT_SegFree_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "segment's arena") \ + PARAM(X, 1, P, seg, "the segment") + +#define EVENT_SegMerge_PARAMS(PARAM, X) \ + PARAM(X, 0, P, segLo, "low segment") \ + PARAM(X, 1, P, segHi, "high segment") + +#define EVENT_SegReclaim_PARAMS(PARAM, X) \ + PARAM(X, 0, P, seg, "the segment") \ + PARAM(X, 1, P, pool, "segment's pool") \ + PARAM(X, 2, P, arena, "pool's arena") \ + PARAM(X, 3, P, trace, "reclaiming for this trace") + 
+#define EVENT_SegScan_PARAMS(PARAM, X) \ + PARAM(X, 0, P, seg, "the segment") \ + PARAM(X, 1, P, pool, "segment's pool") \ + PARAM(X, 2, P, arena, "pool's arena") \ + PARAM(X, 3, U, ts, "scanning for this set of traces") \ + PARAM(X, 4, U, rank, "scanning at this rank") + +#define EVENT_SegSetGrey_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "segment's arena") \ + PARAM(X, 1, P, seg, "the segment") \ + PARAM(X, 2, U, grey, "greyen for this set of traces") + +#define EVENT_SegSetSummary_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "segment's arena") \ + PARAM(X, 1, P, seg, "the segment") \ + PARAM(X, 2, W, size, "its size in bytes") \ + PARAM(X, 3, W, oldSummary, "old summary") \ + PARAM(X, 4, W, newSummary, "new summary") + +#define EVENT_SegSplit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, seg, "old segment") \ + PARAM(X, 1, P, segLo, "new low segment") \ + PARAM(X, 2, P, segHi, "new high segment") \ + PARAM(X, 3, A, at, "split address") + +#define EVENT_TraceAccess_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, P, seg, "segment accessed") \ + PARAM(X, 2, U, mode, "set of access modes") + +#define EVENT_TraceBandAdvance_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, P, trace, "the trace") \ + PARAM(X, 2, W, rank, "new rank") + +#define EVENT_TraceCondemnAll_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "trace's arena") \ + PARAM(X, 1, P, trace, "trace") + +#define EVENT_TraceCreate_PARAMS(PARAM, X) \ + PARAM(X, 0, P, trace, "trace that was created") \ + PARAM(X, 1, P, arena, "arena in which created") \ + PARAM(X, 2, U, why, "reason for creation") + +#define EVENT_TraceCreatePoolGen_PARAMS(PARAM, X) \ + PARAM(X, 0, P, gen, "generation") \ + PARAM(X, 1, W, capacity, "capacity of generation") \ + PARAM(X, 2, D, mortality, "mortality of generation") \ + PARAM(X, 3, W, zone, "zone set of generation") \ + PARAM(X, 4, P, pool, "pool") \ + PARAM(X, 5, W, totalSize, "total size of pool gen") \ + PARAM(X, 6, W, freeSize, "free size of 
pool gen") \ + PARAM(X, 7, W, newSize, "new size of pool gen") \ + PARAM(X, 8, W, oldSize, "old size of pool gen") \ + PARAM(X, 9, W, newDeferredSize, "new size (deferred) of pool gen") \ + PARAM(X, 10, W, oldDeferredSize, "old size (deferred) of pool gen") + +#define EVENT_TraceDestroy_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "trace's arena") \ + PARAM(X, 1, P, trace, "the trace") + +#define EVENT_TraceEndGen_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "the arena") \ + PARAM(X, 1, P, trace, "the trace") \ + PARAM(X, 2, P, gen, "the generation") \ + PARAM(X, 3, W, condemned, "bytes condemned in generation") \ + PARAM(X, 4, W, forwarded, "bytes forwarded from generation") \ + PARAM(X, 5, W, preservedInPlace, "bytes preserved in generation") \ + PARAM(X, 6, D, mortalityTrace, "mortality (in last trace only)") \ + PARAM(X, 7, D, mortalityAverage, "mortality (moving average)") + +#define EVENT_TraceFindGrey_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "trace's arena") \ + PARAM(X, 1, P, trace, "the trace") \ + PARAM(X, 2, P, seg, "grey segment found") \ + PARAM(X, 3, W, rank, "current rank") + +#define EVENT_TraceFix_PARAMS(PARAM, X) \ + PARAM(X, 0, P, ss, "the scan state") \ + PARAM(X, 1, P, refIO, "pointer to reference") \ + PARAM(X, 2, A, ref, "reference fixed") \ + PARAM(X, 3, U, rank, "current rank") + +#define EVENT_TraceFixSeg_PARAMS(PARAM, X) \ + PARAM(X, 0, P, seg, "the segment") + +#define EVENT_TraceFlipBegin_PARAMS(PARAM, X) \ + PARAM(X, 0, P, trace, "the trace") \ + PARAM(X, 1, P, arena, "trace's arena") + +#define EVENT_TraceFlipEnd_PARAMS(PARAM, X) \ + PARAM(X, 0, P, trace, "the trace") \ + PARAM(X, 1, P, arena, "trace's arena") + +#define EVENT_TraceReclaim_PARAMS(PARAM, X) \ + PARAM(X, 0, P, trace, "the trace") \ + PARAM(X, 1, P, arena, "trace's arena") + +#define EVENT_TraceScanArea_PARAMS(PARAM, X) \ + PARAM(X, 0, P, ss, "the scan state") \ + PARAM(X, 1, P, base, "base of scanned area") \ + PARAM(X, 2, P, limit, "limit of scanned area") + +#define 
EVENT_TraceScanAreaTagged_PARAMS(PARAM, X) \ + PARAM(X, 0, P, ss, "the scan state") \ + PARAM(X, 1, P, base, "base of scanned area") \ + PARAM(X, 2, P, limit, "limit of scanned area") + +#define EVENT_TraceScanSingleRef_PARAMS(PARAM, X) \ + PARAM(X, 0, U, ts, "set of traces") \ + PARAM(X, 1, U, rank, "current rank") \ + PARAM(X, 2, P, arena, "traces' arena") \ + PARAM(X, 3, P, refIO, "pointer to reference") + +#define EVENT_TraceStart_PARAMS(PARAM, X) \ + PARAM(X, 0, P, arena, "trace's arena") \ + PARAM(X, 1, P, trace, "trace being started") \ + PARAM(X, 2, D, mortality, "as passed to TraceStart") \ + PARAM(X, 3, D, finishingTime, "as passed to TraceStart") \ + PARAM(X, 4, W, condemned, "condemned bytes") \ + PARAM(X, 5, W, notCondemned, "collectible but not condemned bytes") \ + PARAM(X, 6, W, foundation, "foundation size") \ + PARAM(X, 7, W, white, "white reference set") \ + PARAM(X, 8, W, quantumWork, "tracing work to be done in each poll") + +#define EVENT_TraceStatFix_PARAMS(PARAM, X) \ + PARAM(X, 0, P, trace, "the trace") \ + PARAM(X, 1, P, arena, "trace's arena") \ + PARAM(X, 2, W, fixRefCount, "references which pass zone check") \ + PARAM(X, 3, W, segRefCount, "references which refer to segments") \ + PARAM(X, 4, W, whiteSegRefCount, "references which refer to white segments") \ + PARAM(X, 5, W, nailCount, "segments nailed by ambiguous references") \ + PARAM(X, 6, W, snapCount, "references snapped to forwarded objects") \ + PARAM(X, 7, W, forwardedCount, "objects preserved by moving") \ + PARAM(X, 8, W, forwardedSize, "bytes preserved by moving") \ + PARAM(X, 9, W, preservedInPlaceCount, "objects preserved in place") \ + PARAM(X, 10, W, preservedInPlaceSize, "bytes preserved in place") + +#define EVENT_TraceStatReclaim_PARAMS(PARAM, X) \ + PARAM(X, 0, P, trace, "the trace") \ + PARAM(X, 1, P, arena, "trace's arena") \ + PARAM(X, 2, W, reclaimCount, "segments reclaimed") \ + PARAM(X, 3, W, reclaimSize, "bytes reclaimed") + +#define 
EVENT_TraceStatScan_PARAMS(PARAM, X) \ + PARAM(X, 0, P, trace, "the trace") \ + PARAM(X, 1, P, arena, "trace's arena") \ + PARAM(X, 2, W, rootScanCount, "number of roots scanned") \ + PARAM(X, 3, W, rootScanSize, "total size of scanned roots") \ + PARAM(X, 4, W, rootCopiedSize, "bytes copied by scanning roots ") \ + PARAM(X, 5, W, segScanCount, "number of segments scanned") \ + PARAM(X, 6, W, segScanSize, "total size of scanned segments") \ + PARAM(X, 7, W, segCopiedSize, "bytes copied by scanning segments") \ + PARAM(X, 8, W, singleScanCount, "number of single references scanned") \ + PARAM(X, 9, W, singleScanSize, "total size of single references scanned") \ + PARAM(X, 10, W, singleCopiedSize, "bytes copied by scanning single references") \ + PARAM(X, 11, W, readBarrierHitCount, "read barrier faults") \ + PARAM(X, 12, W, greySegMax, "maximum number of grey segments") \ + PARAM(X, 13, W, pointlessScanCount, "pointless segment scans") + +#define EVENT_VMArenaExtendDone_PARAMS(PARAM, X) \ + PARAM(X, 0, W, chunkSize, "request succeeded for chunkSize bytes") \ + PARAM(X, 1, W, reserved, "new VMArenaReserved") + +#define EVENT_VMArenaExtendFail_PARAMS(PARAM, X) \ + PARAM(X, 0, W, chunkMin, "no remaining address space chunk >= chunkMin") \ + PARAM(X, 1, W, reserved, "current VMArenaReserved") + +#define EVENT_VMArenaExtendStart_PARAMS(PARAM, X) \ + PARAM(X, 0, W, size, "size to accommodate") \ + PARAM(X, 1, W, chunkSize, "chunkSize to try") \ + PARAM(X, 2, W, reserved, "current VMArenaReserved") + +#define EVENT_VMCompact_PARAMS(PARAM, X) \ + PARAM(X, 0, W, vmem0, "pre-collection reserved size") \ + PARAM(X, 1, W, vmem1, "pre-compact reserved size") \ + PARAM(X, 2, W, vmem2, "post-compact reserved size") + +#define EVENT_VMFinish_PARAMS(PARAM, X) \ + PARAM(X, 0, P, vm, "the VM") + +#define EVENT_VMInit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, vm, "the VM") \ + PARAM(X, 1, A, base, "base of VM") \ + PARAM(X, 2, A, limit, "limit of VM") + +#define EVENT_VMMap_PARAMS(PARAM, X) 
\ + PARAM(X, 0, P, vm, "the VM") \ + PARAM(X, 1, A, base, "base of mapped addresses") \ + PARAM(X, 2, A, limit, "limit of mapped addresses") + +#define EVENT_VMUnmap_PARAMS(PARAM, X) \ + PARAM(X, 0, P, vm, "the VM") \ + PARAM(X, 1, A, base, "base of unmapped addresses") \ + PARAM(X, 2, A, limit, "limit of unmapped addresses") + +#endif /* eventdef_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/eventpy.c b/mps/code/eventpy.c new file mode 100644 index 00000000000..c8990ddc5bf --- /dev/null +++ b/mps/code/eventpy.c @@ -0,0 +1,226 @@ +/* eventpy.c: GENERATE PYTHON INTERFACE TO EVENTS + * + * $Id$ + * Copyright (c) 2016-2020 Ravenbrook Limited. See end of file for license. + * + * This command-line program emits Python data structures that can be + * used to parse a telemetry stream in the binary format for the + * platform it was compiled for. + */ + +#include /* assert */ +#include /* offsetof */ +#include /* printf, puts */ + +#include "event.h" + +/* See */ +#if defined(MPS_ARCH_A6) || defined(MPS_ARCH_I3) || defined(MPS_ARCH_I6) +#define BYTE_ORDER "<" +#else +#error "Can't determine byte order for platform architecture." +#endif + + +/* format -- output struct format code corresponding to event field + * + * size is the size of the field in bytes + * sort is a one-character string corresponding to the EventF* typedef + * for the field, thus "P" for a field of type EventFP. + * + * See + */ + +static void format(size_t size, const char *sort) +{ + switch (sort[0]) { + case 'B': + printf("?"); + break; + case 'D': + printf("d"); + break; + case 'S': + /* Strings can't be handled through the struct format mechanism + because we don't know their length until the header has been + read. 
*/ + break; + default: + switch (size) { + case 1: + printf("B"); + break; + case 2: + printf("H"); + break; + case 4: + printf("L"); + break; + case 8: + printf("Q"); + break; + default: + assert(FALSE); + break; + } + } +} + +int main(int argc, char *argv[]) +{ + size_t size, prev_offset; + char prev_sort; + UNUSED(argc); + UNUSED(argv); + + puts("from collections import namedtuple"); + + printf("\n__version__ = %d, %d, %d\n", EVENT_VERSION_MAJOR, + EVENT_VERSION_MEDIAN, EVENT_VERSION_MINOR); + + puts("\n# Description of an event kind."); + puts("KindDesc = namedtuple('KindDesc', 'name code doc')"); + + puts("\n# Namespace containing a KindDesc for every kind."); + puts("class Kind:"); +#define ENUM(X, NAME, DOC) \ + printf(" " #NAME " = KindDesc('" #NAME "', %d, '%s')\n", \ + EventKind ## NAME, DOC); + EventKindENUM(ENUM, X); +#undef ENUM + + puts("\n# Mapping from kind number to KindDesc."); + puts("KIND = {"); +#define ENUM(X, NAME, DOC) \ + printf(" %d: Kind." #NAME ",\n", EventKind ## NAME); + EventKindENUM(ENUM, X); +#undef ENUM + puts("}"); + + puts("\n# Description of a parameter of an event."); + puts("EventParam = namedtuple('EventParam', 'sort name doc')"); + + puts("\n# Description of the parameters of an event."); + puts("EventDesc = namedtuple('EventDesc', " + "'name code used kind params maxsize format')"); + + puts("\n# Namespace containing an EventDesc for every event."); + puts("class Event:"); +#define PAD_TO(OFFSET) \ + BEGIN { \ + size_t offset = (OFFSET); \ + if (prev_sort != 'S' && prev_offset < offset) \ + printf("%ux", (unsigned)(offset - prev_offset)); \ + prev_offset = offset; \ + } END +#define EVENT_PARAM(X, INDEX, SORT, IDENT, DOC) \ + puts(" EventParam('" #SORT "', '" #IDENT "', \"" DOC "\"),"); \ + prev_sort = #SORT[0]; +#define EVENT_FORMAT(NAME, INDEX, SORT, IDENT, DOC) \ + PAD_TO(offsetof(Event##NAME##Struct, f##INDEX)); \ + format(sizeof(EventF##SORT), #SORT); \ + prev_offset += sizeof(EventF##SORT); +#define EVENT_DEFINE(X, 
NAME, CODE, USED, KIND) \ + printf(" " #NAME " = EventDesc('" #NAME "', %d, %s, Kind." #KIND ", [\n", \ + CODE, USED ? "True" : "False"); \ + EVENT_ ## NAME ## _PARAMS(EVENT_PARAM, X); \ + size = sizeof(Event##NAME##Struct) - sizeof(EventAnyStruct); \ + printf(" ], %u, '%s", (unsigned)size, BYTE_ORDER); \ + prev_offset = sizeof(EventAnyStruct); \ + EVENT_ ## NAME ## _PARAMS(EVENT_FORMAT, NAME); \ + PAD_TO(sizeof(Event##NAME##Struct)); \ + puts("')"); + EVENT_LIST(EVENT_DEFINE, 0); +#undef EVENT_DEFINE +#undef EVENT_PARAM +#undef EVENT_FORMAT + + puts("\n# Mapping from event number to EventDesc."); + puts("EVENT = {"); +#define EVENT_ITEM(X, NAME, CODE, USED, KIND) \ + printf(" %d: Event." #NAME ",\n", CODE); + EVENT_LIST(EVENT_ITEM, 0); +#undef EVENT_ITEM + puts("}"); + + puts("\n# Description of an event header."); + printf("HeaderDesc = namedtuple('HeaderDesc', '"); +#define EVENT_FIELD(TYPE, NAME, DOC) printf("%s ", #NAME); + EVENT_ANY_FIELDS(EVENT_FIELD) +#undef EVENT_FIELD + puts("')\nHeaderDesc.__doc__ = '''"); +#define EVENT_FIELD(TYPE, NAME, DOC) printf("%s -- %s\n", #NAME, DOC); + EVENT_ANY_FIELDS(EVENT_FIELD) +#undef EVENT_FIELD + puts("'''"); + + puts("\n# Size of event header in bytes."); + printf("HEADER_SIZE = %u\n", (unsigned)sizeof(EventAnyStruct)); + + puts("\n# Struct format for event header."); + printf("HEADER_FORMAT = '%s", BYTE_ORDER); + prev_offset = 0; +#define EVENT_FIELD(TYPE, NAME, DOC) \ + PAD_TO(offsetof(EventAnyStruct, NAME)); \ + format(sizeof(TYPE), "?"); \ + prev_offset += sizeof(TYPE); + EVENT_ANY_FIELDS(EVENT_FIELD) +#undef EVENT_FIELD + PAD_TO(sizeof(EventAnyStruct)); + puts("'"); + + puts("\n# Mapping from access mode to its name."); + puts("ACCESS_MODE = {"); + printf(" %u: \"READ\",\n", (unsigned)AccessREAD); + printf(" %u: \"WRITE\",\n", (unsigned)AccessWRITE); + printf(" %u: \"READ/WRITE\",\n", + (unsigned)BS_UNION(AccessREAD, AccessWRITE)); + puts("}"); + + puts("\n# Mapping from rank to its name."); + puts("RANK = {"); 
+#define X(RANK) printf(" %u: \"%s\",\n", (unsigned)Rank ## RANK, #RANK); + RANK_LIST(X) +#undef X + puts("}"); + + puts("\n# Mapping from trace start reason to its short description."); + puts("TRACE_START_WHY = {"); +#define X(WHY, SHORT, LONG) \ + printf(" %u: \"%s\",\n", (unsigned)TraceStartWhy ## WHY, SHORT); + TRACE_START_WHY_LIST(X) +#undef X + puts("}"); + + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/eventsql.c b/mps/code/eventsql.c new file mode 100644 index 00000000000..b96ed9c159b --- /dev/null +++ b/mps/code/eventsql.c @@ -0,0 +1,997 @@ +/* eventsql.c: event log to SQLite importer. + * + * $Id$ + * + * Copyright (c) 2012-2020 Ravenbrook Limited. See end of file for license. + * + * This is a command-line tool that imports events from a text-format + * MPS telemetry file into a SQLite database file. + * + * The default MPS library will write a binary-format telemetry file + * which can be converted into a text-format file using the eventcnv + * program (q.v.). + * + * Each event type gets its own table in the database. These tables + * are created from the definitions in eventdef.h if they don't + * already exist. Each event becomes a single row in the appropriate + * table, which has a column for each event parameter, a time column + * for the event time field, and a log_serial column to identify the + * source log file. Because the database schema depends on the event + * definitions in eventdef.h, eventsql has to be compiled using the + * same event header files as those used to compile the MPS and + * eventcnv which generated and processed the telemetry output. + * + * The program also creates several other tables: three 'glue' tables + * containing event metadata - event_kind (one row per kind), + * event_type (one row per type), and event_param (one row per + * parameter), all derived from eventdef.h - and the event_log table + * which has one row per log file imported (the log_serial column in + * the event tables is a primary key to this event_log table). + * + * No tables are created if they already exist, unless the -r + * (rebuild) switch is given. + * + * Options: + * + * -v (verbose): Increase verbosity. eventsql logs to stderr. By + * default, it doesn't log much; it can be made more and more + * loquacious by adding more -v switches. 
+ * + * -p (progress): Show progress with a series of dots written to + * standard output (one dot per 100,000 events processed). Defaults + * on if -v specified, off otherwise. + * + * -t (test): Run unit tests on parts of eventsql. There aren't many + * of these. TODO: write more unit tests. + * + * -d (delete): Delete the SQL file before importing. + * + * -f (force): Import the events to SQL even if the SQL database + * already includes a record of importing a matching log file. + * + * -r (rebuild): Drop the glue tables from SQL, which will force them + * to be recreated. Important if you change event types or kinds in + * eventdef.h. + * + * -i : Import events from the named logfile. Defaults to + * standard input. If the specified file (matched by size and + * modtime) has previously been imported to the same database, it will + * not be imported again unless -f is specified. + * + * -o : Import events to the named database file. If not + * specified, eventsql will use the MPS_TELEMETRY_DATABASE environment + * variable, and default to "mpsevent.db". + * + * $Id$ + */ + +#include "misc.h" +#include "config.h" +#include "eventdef.h" +#include "eventcom.h" + +#include +#include +#include +#include + +/* on Windows, we build SQLite locally from the amalgamated sources */ +#ifdef MPS_BUILD_MV +#include "sqlite3.h" +#else +#include +#endif + +#define DATABASE_NAME_ENVAR "MPS_TELEMETRY_DATABASE" +#define DEFAULT_DATABASE_NAME "mpsevent.db" + +#ifdef MPS_BUILD_MV +#define strtoll _strtoi64 +#endif + + +typedef sqlite3_int64 int64; + +/* At non-zero verbosity levels we output rows of dots. One dot per + * SMALL_TICK events, BIG_TICK dots per row. */ + +#define SMALL_TICK 100000 +#define BIG_TICK 50 + +/* Utility code for logging to stderr with multiple log levels, + * and for reporting errors. 
+ */ + +static unsigned int verbosity = 0; + +#define LOG_ALWAYS 0 +#define LOG_OFTEN 1 +#define LOG_SOMETIMES 2 +#define LOG_SELDOM 3 +#define LOG_RARELY 4 + +ATTRIBUTE_FORMAT((printf, 2, 0)) +static void vlog(unsigned int level, const char *format, va_list args) +{ + if (level <= verbosity) { + fflush(stderr); /* sync */ + fprintf(stderr, "log %d: ", level); + vfprintf(stderr, format, args); + fprintf(stderr, "\n"); + } +} + +ATTRIBUTE_FORMAT((printf, 2, 3)) +static void evlog(unsigned int level, const char *format, ...) +{ + va_list args; + va_start(args, format); + vlog(level, format, args); + va_end(args); +} + +ATTRIBUTE_FORMAT((printf, 1, 2)) +static void error(const char *format, ...) +{ + va_list args; + fprintf(stderr, "Fatal error: "); + va_start(args, format); + vlog(LOG_ALWAYS, format, args); + va_end(args); + exit(1); +} + +static void sqlite_error(int res, sqlite3 *db, const char *format, ...) +{ + va_list args; + evlog(LOG_ALWAYS, "Fatal SQL error %d", res); + va_start(args, format); + vlog(LOG_ALWAYS, format, args); + va_end(args); + evlog(LOG_ALWAYS, "SQLite message: %s\n", sqlite3_errmsg(db)); + exit(1); +} + +/* global control variables set by command-line parameters. 
 */

static const char *prog; /* program name (argv[0]) */
static int rebuild = FALSE;        /* -r: drop and re-create glue tables */
static int deleteDatabase = FALSE; /* -d: remove database file first */
static int runTests = FALSE;       /* -t: run self-tests */
static int force = FALSE;          /* -f: re-import already-seen logfile */
static int progress = FALSE;       /* -p: dots to stdout */
static const char *databaseName = NULL; /* -o, envar, or default */
static const char *logFileName = NULL;  /* -i, or NULL meaning stdin */

/* usage -- print command-line synopsis to stderr */

static void usage(void)
{
  fprintf(stderr,
          "Usage: %s [-rfdvt] [-i ] [-o ]\n"
          " -h (help) : this message.\n"
          " -r (rebuild) : re-create glue tables.\n"
          " -f (force) : ignore previous import of same logfile.\n"
          " -d (delete) : delete and recreate database file.\n"
          " -v (verbose) : increase logging to stderr.\n"
          " -p (progress): show progress with dots to stdout.\n"
          " -t (test) : run self-tests.\n"
          " -i : read logfile (defaults to stdin)\n"
          " -o : write database (defaults to\n"
          " "
          DATABASE_NAME_ENVAR " or " DEFAULT_DATABASE_NAME ").\n",
          prog);
}

/* usageError -- print the synopsis and die */

static void usageError(void)
{
  usage();
  error("Bad usage");
}

/* parseArgs -- parse command line arguments */

static void parseArgs(int argc, char *argv[])
{
  int i = 1;

  if (argc >= 1)
    prog = argv[0];
  else
    prog = "unknown";

  while(i < argc) { /* consider argument i */
    if (argv[i][0] == '-') { /* it's an option argument */
      char *p = argv[i] + 1;
      while(*p) {
        switch (*p) {
        case 'v': /* verbosity */
          ++ verbosity;
          break;
        case 'p': /* progress */
          progress = TRUE;
          break;
        case 'r': /* rebuild */
          rebuild = TRUE;
          break;
        case 'd': /* delete */
          deleteDatabase = TRUE;
          break;
        case 'f': /* force */
          force = TRUE;
          break;
        case 't': /* run tests */
          runTests = TRUE;
          break;
        case 'i': /* input (log file) name */
          /* NOTE(review): if -i is the final argument this reads
           * argv[i+1] == NULL (logFileName silently becomes stdin);
           * consider checking i+1 < argc -- TODO confirm intended. */
          if (p[1] == '\0') { /* last character in this arg; name is next arg */
            logFileName = argv[i+1];
            ++ i;
          } else { /* not last character in arg; name is rest of arg */
            logFileName = p+1;
          }
          goto next_i;
        case 'o': /* output (database file) name */
          if (p[1] == '\0') { /* last character in this arg; name is next arg */
            databaseName = argv[i+1];
            ++ i;
          } else { /* not last character in arg; name is rest of arg */
            databaseName = p+1;
          }
          goto next_i;
        case 'h':
          usage();
          exit(EXIT_SUCCESS);
        default:
          usageError();
        }
        ++ p;
      }
    } else { /* not an option argument */
      usageError();
    }
  next_i:
    ++ i;
  }
  if (verbosity > LOG_ALWAYS)
    progress = TRUE;
}

/* openDatabase() opens the database file (from -o, the environment, or
 * the default name), creating it if necessary, and returns a SQLite 3
 * database connection object.  If -d was given, any existing database
 * file is removed first. */

static sqlite3 *openDatabase(void)
{
  sqlite3 *db;
  int res;

  if (!databaseName) {
    databaseName = getenv(DATABASE_NAME_ENVAR);
    if(!databaseName)
      databaseName = DEFAULT_DATABASE_NAME;
  }

  if (deleteDatabase) {
    res = remove(databaseName);
    if (res)
      evlog(LOG_ALWAYS, "Could not remove database file %s", databaseName);
    else
      evlog(LOG_OFTEN, "Removed database file %s", databaseName);
  }

  res = sqlite3_open_v2(databaseName,
                        &db,
                        SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                        NULL); /* use default sqlite_vfs object */

  if (res != SQLITE_OK)
    sqlite_error(res, db, "Opening %s failed", databaseName);

  evlog(LOG_OFTEN, "Writing to %s.",databaseName);

  return db;
}

/* closeDatabase(db) closes the database opened by openDatabase(). */

static void closeDatabase(sqlite3 *db)
{
  int res = sqlite3_close(db);
  if (res != SQLITE_OK)
    sqlite_error(res, db, "Closing database failed");
  evlog(LOG_SOMETIMES, "Closed %s.", databaseName);
}

/* Utility functions for SQLite statements.
 */

/* prepareStatement -- compile an SQL statement, dying via sqlite_error
 * on failure.  The returned statement must be released with
 * finalizeStatement(). */

static sqlite3_stmt *prepareStatement(sqlite3 *db,
                                      const char *sql)
{
  int res;
  sqlite3_stmt *statement;
  evlog(LOG_SELDOM, "Preparing statement %s", sql);
  res = sqlite3_prepare_v2(db, sql,
                           -1, /* prepare whole string as statement */
                           &statement,
                           NULL);
  if (res != SQLITE_OK)
    sqlite_error(res, db, "statement preparation failed: %s", sql);
  return statement;
}

/* finalizeStatement -- release a statement made by prepareStatement,
 * dying via sqlite_error on failure. */

static void finalizeStatement(sqlite3 *db,
                              sqlite3_stmt *statement)
{
  int res;
  res = sqlite3_finalize(statement);
  if (res != SQLITE_OK)
    sqlite_error(res, db, "statement finalize failed");
}

/* runStatement -- prepare, run, and clean up a complete SQL statement
 * that yields no rows (e.g. CREATE TABLE, BEGIN, COMMIT), dying via
 * sqlite_error on failure.  description is used only in log and error
 * messages. */

static void runStatement(sqlite3 *db,
                         const char *sql,
                         const char *description)
{
  int res;
  evlog(LOG_SELDOM, "%s: %s", description, sql);
  res = sqlite3_exec(db,
                     sql,
                     NULL, /* No callback */
                     NULL, /* No callback closure */
                     NULL); /* error messages handled by sqlite_error */
  if (res != SQLITE_OK)
    sqlite_error(res, db, "%s failed - statement %s", description, sql);
}

/* Test for the existence of a table using sqlite_master table.
 */

/* tableExists -- return non-zero iff a table of the given name exists
 * in the database, by querying the sqlite_master catalogue. */

static int tableExists(sqlite3* db, const char *tableName)
{
  int res;
  int exists = 0;
  sqlite3_stmt *statement = NULL;

  statement = prepareStatement(db,
    "SELECT 1 FROM sqlite_master WHERE type='table' AND name=?");
  res = sqlite3_bind_text(statement, 1, tableName, -1, SQLITE_STATIC);
  if (res != SQLITE_OK)
    sqlite_error(res, db, "table existence bind of name failed.");
  res = sqlite3_step(statement);
  switch(res) {
  case SQLITE_DONE: /* no matching row */
    exists = 0;
    break;
  case SQLITE_ROW: /* at least one matching row */
    exists = 1;
    break;
  default:
    sqlite_error(res, db, "select from sqlite_master failed.");
  }
  finalizeStatement(db, statement);
  return exists;
}

/* Unit test for tableExists() */

/* Table of (name, expected existence) pairs checked by
 * testTableExists.  Assumes makeTables has already run, so event_kind
 * and the per-event tables exist and "spong" does not. */
static struct {
  const char* name;
  int exists;
} tableTests[] = {
  {"event_kind", TRUE},
  {"spong", FALSE},
  {"EVENT_SegSplit", TRUE}
};

/* testTableExists -- run tableExists over tableTests, logging each
 * result and a final tests/defects summary.  Invoked by -t. */

static void testTableExists(sqlite3 *db)
{
  size_t i;
  int defects = 0;
  int tests = 0;
  for (i=0; i < NELEMS(tableTests); ++i) {
    const char *name = tableTests[i].name;
    int exists = tableExists(db, name);
    if (exists)
      evlog(LOG_OFTEN, "Table exists: %s", name);
    else
      evlog(LOG_OFTEN, "Table does not exist: %s", name);
    if (exists != tableTests[i].exists) {
      evlog(LOG_ALWAYS, "tableExists test failed on table %s", name);
      ++ defects;
    }
    ++ tests;
  }
  evlog(LOG_ALWAYS, "%d tests, %d defects found.", tests, defects);
}

/* Every time we put events from a log file into a database file, we
 * add the log file to the event_log table, and get a serial number
 * from SQL which is then attached to all event rows from that log.
 * We use this to record overall SQL activity, to deter mistaken
 * attempts to add the same log file twice, and to allow events from
 * several different log files to share the same SQL file.
+ * + * When reading events from stdin, we can't so easily avoid the + * duplication (unless we, e.g., take a hash of the event set); we + * have to assume that the user is smart enough not to do that. + */ + +static int64 logSerial = 0; + +static void registerLogFile(sqlite3 *db, + const char *filename) +{ + sqlite3_stmt *statement; + int res; + const unsigned char *name; + int64 completed; + int64 file_size; + int64 file_modtime; + + if (filename) { + struct stat st; + res = stat(filename, &st); + if (res != 0) + error("Couldn't stat() %s", filename); + file_size = st.st_size; + file_modtime = st.st_mtime; + + statement = prepareStatement(db, + "SELECT name, serial, completed FROM event_log" + " WHERE size = ? AND modtime = ?"); + res = sqlite3_bind_int64(statement, 1, file_size); + if (res != SQLITE_OK) + sqlite_error(res, db, "event_log bind of size failed."); + res = sqlite3_bind_int64(statement, 2, file_modtime); + if (res != SQLITE_OK) + sqlite_error(res, db, "event_log bind of modtime failed."); + res = sqlite3_step(statement); + switch(res) { + case SQLITE_DONE: + evlog(LOG_SOMETIMES, "No log file matching '%s' found in database.", filename); + break; + case SQLITE_ROW: + name = sqlite3_column_text(statement, 0); + logSerial = sqlite3_column_int64(statement, 1); + completed = sqlite3_column_int64(statement, 2); + evlog(force ? LOG_OFTEN : LOG_ALWAYS, "Log file matching '%s' already in event_log, named \"%s\" (serial %llu, completed %llu).", + filename, name, logSerial, completed); + if (force) { + evlog(LOG_OFTEN, "Continuing anyway because -f specified."); + } else { + evlog(LOG_ALWAYS, "Exiting. 
Specify -f to force events into SQL anyway."); + exit(0); + } + break; + default: + sqlite_error(res, db, "select from event_log failed."); + } + finalizeStatement(db, statement); + } else { /* stdin */ + filename = ""; + file_size = 0; + file_modtime = 0; + } + statement = prepareStatement(db, + "INSERT into event_log (name, size, modtime, completed)" + " VALUES (?, ?, ?, 0)"); + res = sqlite3_bind_text(statement, 1, filename, -1, SQLITE_STATIC); + if (res != SQLITE_OK) + sqlite_error(res, db, "event_log insert bind of name failed."); + res = sqlite3_bind_int64(statement, 2, file_size); + if (res != SQLITE_OK) + sqlite_error(res, db, "event_log insert bind of size failed."); + res = sqlite3_bind_int64(statement, 3, file_modtime); + if (res != SQLITE_OK) + sqlite_error(res, db, "event_log insert bind of modtime failed."); + res = sqlite3_step(statement); + if (res != SQLITE_DONE) + sqlite_error(res, db, "insert into event_log failed."); + logSerial = sqlite3_last_insert_rowid(db); + evlog(LOG_SOMETIMES, "Log file %s added to event_log with serial %llu", + filename, logSerial); + finalizeStatement(db, statement); +} + +static void logFileCompleted(sqlite3 *db, + int64 completed) +{ + sqlite3_stmt *statement; + int res; + + statement = prepareStatement(db, + "UPDATE event_log SET completed=? WHERE serial=?"); + res = sqlite3_bind_int64(statement, 2, logSerial); + if (res != SQLITE_OK) + sqlite_error(res, db, "event_log update bind of serial failed."); + res = sqlite3_bind_int64(statement, 1, completed); + if (res != SQLITE_OK) + sqlite_error(res, db, "event_log update bind of completed failed."); + res = sqlite3_step(statement); + if (res != SQLITE_DONE) + sqlite_error(res, db, "insert into event_log failed."); + evlog(LOG_SOMETIMES, "Marked in event_log: %llu events", completed); + finalizeStatement(db, statement); +} + +/* Macro magic to make a CREATE TABLE statement for each event type. 
 */

/* Map each event-parameter sort letter (see eventdef.h) to the SQL
 * column type used to store it. */
#define EVENT_PARAM_SQL_TYPE_A "INTEGER"
#define EVENT_PARAM_SQL_TYPE_P "INTEGER"
#define EVENT_PARAM_SQL_TYPE_U "INTEGER"
#define EVENT_PARAM_SQL_TYPE_W "INTEGER"
#define EVENT_PARAM_SQL_TYPE_D "REAL "
#define EVENT_PARAM_SQL_TYPE_S "TEXT "
#define EVENT_PARAM_SQL_TYPE_B "INTEGER"

/* Expand one event parameter into a quoted column definition. */
#define EVENT_PARAM_SQL_COLUMN(X, index, sort, ident, doc) \
  "\"" #ident "\" " EVENT_PARAM_SQL_TYPE_##sort ", "

/* Expand one event type into a complete CREATE TABLE statement string,
 * with one column per parameter plus time and log_serial columns. */
#define EVENT_TABLE_CREATE(X, name, code, used, kind) \
  "CREATE TABLE IF NOT EXISTS EVENT_" #name " ( " \
  EVENT_##name##_PARAMS(EVENT_PARAM_SQL_COLUMN, X) \
  "time INTEGER, " \
  "log_serial INTEGER)",

/* An array of table-creation statement strings. */

static const char *createStatements[] = {
  "CREATE TABLE IF NOT EXISTS event_kind (name TEXT,"
  " description TEXT,"
  " enum INTEGER PRIMARY KEY)",

  "CREATE TABLE IF NOT EXISTS event_type (name TEXT,"
  " code INTEGER PRIMARY KEY,"
  " used INTEGER,"
  " kind INTEGER,"
  " FOREIGN KEY (kind) REFERENCES event_kind(enum));",

  "CREATE TABLE IF NOT EXISTS event_param (type INTEGER,"
  " param_index INTEGER,"
  " sort TEXT,"
  " ident TEXT,"
  " doc TEXT,"
  " FOREIGN KEY (type) REFERENCES event_type(code));",

  "CREATE TABLE IF NOT EXISTS event_log (name TEXT,"
  " size INTEGER,"
  " modtime INTEGER,"
  " completed INTEGER,"
  " serial INTEGER PRIMARY KEY AUTOINCREMENT)",

  EVENT_LIST(EVENT_TABLE_CREATE, X)
};

/* makeTables makes all the tables.
*/ + +static void makeTables(sqlite3 *db) +{ + size_t i; + evlog(LOG_SOMETIMES, "Creating tables."); + + for (i=0; i < NELEMS(createStatements); ++i) { + runStatement(db, createStatements[i], "Table creation"); + } +} + +static const char *glueTables[] = { + "event_kind", + "event_type", + "event_param", +}; + +static void dropGlueTables(sqlite3 *db) +{ + size_t i; + int res; + char sql[1024]; + + evlog(LOG_ALWAYS, "Dropping glue tables so they are rebuilt."); + + for (i=0; i < NELEMS(glueTables); ++i) { + evlog(LOG_SOMETIMES, "Dropping table %s", glueTables[i]); + sprintf(sql, "DROP TABLE %s", glueTables[i]); + res = sqlite3_exec(db, + sql, + NULL, /* No callback */ + NULL, /* No callback closure */ + NULL); /* error messages handled by sqlite_error */ + /* Don't check for errors. */ + (void)res; + } +} + +/* Populate the metadata "glue" tables event_kind, event_type, and + * event_param. */ + +#define EVENT_KIND_DO_INSERT(X, name, description) \ + res = sqlite3_bind_text(statement, 1, #name, -1, SQLITE_STATIC); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_kind bind of name \"" #name "\" failed."); \ + res = sqlite3_bind_text(statement, 2, description, -1, SQLITE_STATIC); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_kind bind of description \"" description "\" failed."); \ + res = sqlite3_bind_int(statement, 3, i); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_kind bind of enum %d failed.", i); \ + ++i; \ + res = sqlite3_step(statement); \ + if (res != SQLITE_DONE) \ + sqlite_error(res, db, "event_kind insert of name \"" #name "\" failed."); \ + if (sqlite3_changes(db) != 0) \ + evlog(LOG_SOMETIMES, "Insert of event_kind row for \"" #name "\" affected %d rows.", sqlite3_changes(db)); \ + res = sqlite3_reset(statement); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "Couldn't reset event_kind insert statement."); + +#define EVENT_TYPE_DO_INSERT(X, name, code, used, kind) \ + res = sqlite3_bind_text(statement, 1, #name, 
-1, SQLITE_STATIC); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_type bind of name \"" #name "\" failed."); \ + res = sqlite3_bind_int(statement, 2, code); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_type bind of code %d failed.", code); \ + res = sqlite3_bind_int(statement, 3, used); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_type bind of used for name \"" #name "\" failed."); \ + res = sqlite3_bind_int(statement, 4, EventKind##kind); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_type bind of kind for name \"" #name "\" failed."); \ + res = sqlite3_step(statement); \ + if (res != SQLITE_DONE) \ + sqlite_error(res, db, "event_type insert of name \"" #name "\" failed."); \ + if (sqlite3_changes(db) != 0) \ + evlog(LOG_SOMETIMES, "Insert of event_type row for \"" #name "\" affected %d rows.", sqlite3_changes(db)); \ + res = sqlite3_reset(statement); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "Couldn't reset event_type insert statement."); + +#define EVENT_PARAM_DO_INSERT(code, index, sort, ident, doc) \ + res = sqlite3_bind_int(statement, 1, code); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_param bind of code %d failed.", code); \ + res = sqlite3_bind_int(statement, 2, index); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_param bind of index %d failed.", index); \ + res = sqlite3_bind_text(statement, 3, #sort, -1, SQLITE_STATIC); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_type bind of sort \"" #sort "\" failed."); \ + res = sqlite3_bind_text(statement, 4, #ident, -1, SQLITE_STATIC); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_type bind of ident \"" #ident "\" failed."); \ + res = sqlite3_bind_text(statement, 5, doc, -1, SQLITE_STATIC); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "event_type bind of doc \"" doc "\" failed."); \ + res = sqlite3_step(statement); \ + if (res != SQLITE_DONE) \ + sqlite_error(res, db, "event_param insert of 
\"" #ident "\" for code %d failed.", code); \ + if (sqlite3_changes(db) != 0) \ + evlog(LOG_SOMETIMES, "Insert of event_param row for code %d, ident \"" #ident "\" affected %d rows.", code, sqlite3_changes(db)); \ + res = sqlite3_reset(statement); \ + if (res != SQLITE_OK) \ + sqlite_error(res, db, "Couldn't reset event_param insert statement."); + +#define EVENT_TYPE_INSERT_PARAMS(X, name, code, used, kind) \ + EVENT_##name##_PARAMS(EVENT_PARAM_DO_INSERT, code) + +static void fillGlueTables(sqlite3 *db) +{ + int i; + sqlite3_stmt *statement; + int res; + + statement = prepareStatement(db, + "INSERT OR IGNORE INTO event_kind (name, description, enum)" + "VALUES (?, ?, ?)"); + + i = 0; + EventKindENUM(EVENT_KIND_DO_INSERT, X); + + finalizeStatement(db, statement); + + statement = prepareStatement(db, + "INSERT OR IGNORE INTO event_type (name, code, used, kind)" + "VALUES (?, ?, ?, ?)"); + EVENT_LIST(EVENT_TYPE_DO_INSERT, X); + + finalizeStatement(db, statement); + + statement = prepareStatement(db, + "INSERT OR IGNORE INTO event_param (type, param_index, sort, ident, doc)" + "VALUES (?, ?, ?, ?, ?)"); + EVENT_LIST(EVENT_TYPE_INSERT_PARAMS, X); + + finalizeStatement(db, statement); +} + +/* Populate the actual event tables. 
*/ + +#define EVENT_TYPE_DECLARE_STATEMENT(X, name, code, used, kind) \ + sqlite3_stmt *stmt_##name; + +#define EVENT_PARAM_PREPARE_IDENT(X, index, sort, ident, doc) "\"" #ident "\", " + +#define EVENT_PARAM_PREPARE_PLACE(X, index, sort, ident, doc) "?, " + +#define EVENT_TYPE_PREPARE_STATEMENT(X, name, code, used, kind) \ + stmt_##name = \ + prepareStatement(db, \ + "INSERT INTO EVENT_" #name " (" \ + EVENT_##name##_PARAMS(EVENT_PARAM_PREPARE_IDENT, X) \ + "log_serial, time) VALUES (" \ + EVENT_##name##_PARAMS(EVENT_PARAM_PREPARE_PLACE,X) \ + "?, ?)"); + +#define EVENT_TYPE_FINALIZE_STATEMENT(X, name, code, used, kind) \ + finalizeStatement(db, stmt_##name); + +#define EVENT_PARAM_BIND_A bind_int +#define EVENT_PARAM_BIND_P bind_int +#define EVENT_PARAM_BIND_U bind_int +#define EVENT_PARAM_BIND_W bind_int +#define EVENT_PARAM_BIND_D bind_real +#define EVENT_PARAM_BIND_S bind_text +#define EVENT_PARAM_BIND_B bind_int + +#define EVENT_PARAM_BIND(X, index, sort, ident, doc) \ + p = EVENT_PARAM_BIND_##sort (db, statement, eventCount, index+1, p); \ + last_index = index+1; + +#define EVENT_TYPE_WRITE_SQL(X, name, code, used, kind) \ + case code: \ + statement = stmt_##name; \ + /* bind all the parameters of this particular event with macro magic. 
*/ \ + EVENT_##name##_PARAMS(EVENT_PARAM_BIND, X) \ + break; + +static char *bind_int(sqlite3 *db, sqlite3_stmt *stmt, int64 count, int field, char *p) +{ + char *q; + long long val; + int res; + + while(*p == ' ') + ++p; + + val = strtoll(p, &q, 16); + if (q == p) + error("event %llu field %d not an integer: %s", + count, field, p); + + res = sqlite3_bind_int64(stmt, field, val); + if (res != SQLITE_OK) + sqlite_error(res, db, "event %llu field %d bind failed", count, field); + return q; +} + +static char *bind_real(sqlite3 *db, sqlite3_stmt *stmt, int64 count, int field, char *p) +{ + char *q; + double val; + int res; + + while(*p == ' ') + ++p; + + val = strtod(p, &q); + if (q == p) + error("event %llu field %d not a floating-point value: %s", + count, field, p); + + res = sqlite3_bind_double(stmt, field, val); + if (res != SQLITE_OK) + sqlite_error(res, db, "event %llu field %d bind failed", count, field); + return q; +} + +static char *bind_text(sqlite3 *db, sqlite3_stmt *stmt, int64 count, int field, char *p) +{ + char *q; + int res; + + while(*p == ' ') + ++p; + + q = p; + while((*q != '\n') && (*q != '\0')) { + ++ q; + } + if ((q == p) || (q[-1] != '"')) + error("event %llu string field %d has no closing quote mark.", + count, field); + + res = sqlite3_bind_text(stmt, field, p, (int)(q-p-1), SQLITE_STATIC); + if (res != SQLITE_OK) + sqlite_error(res, db, "event %llu field %d bind failed", count, field); + return q; +} + +/* this is overkill, at present. */ + +#define MAX_LOG_LINE_LENGTH 1024 + +/* readLog -- read and parse log. Returns the number of events written. 
+ */ + +static int64 readLog(FILE *input, + sqlite3 *db) +{ + int64 eventCount = 0; + + /* declare statements for every event type */ + EVENT_LIST(EVENT_TYPE_DECLARE_STATEMENT, X); + + /* prepare statements for every event type */ + EVENT_LIST(EVENT_TYPE_PREPARE_STATEMENT, X); + + runStatement(db, "BEGIN", "Transaction start"); + + while (TRUE) { /* loop for each event */ + char line[MAX_LOG_LINE_LENGTH]; + char *p; + char *q; + int last_index=0; + sqlite3_stmt *statement = NULL; + int res; + int64 clock_field; + long code; + + p = fgets(line, MAX_LOG_LINE_LENGTH, input); + if (!p) { + if (feof(input)) + break; + else + error("Couldn't read line after event %llu", eventCount); + } + + eventCount++; + + clock_field = strtoll(p, &q, 16); + + if (q == p) + error("event %llu clock field not a hex integer: %s", + eventCount, p); + + if (*q != ' ') + error("event %llu code field not preceded by ' ': %s", + eventCount, q); + while(*q == ' ') + ++q; + + p = q; + code = strtol(p, &q, 16); + if (q == p) + error("event %llu code field not an integer: %s", + eventCount, p); + p = q; + + /* Write event to SQLite. 
*/ + switch (code) { + /* this macro sets statement and last_index */ + EVENT_LIST(EVENT_TYPE_WRITE_SQL, X); + default: + error("Event %llu has Unknown event code %ld", eventCount, code); + } + /* bind the fields we store for every event */ \ + res = sqlite3_bind_int64(statement, last_index+1, logSerial); + if (res != SQLITE_OK) + sqlite_error(res, db, "Event %llu bind of log_serial failed.", eventCount); + res = sqlite3_bind_int64(statement, last_index+2, clock_field); + if (res != SQLITE_OK) + sqlite_error(res, db, "Event %llu bind of clock failed.", eventCount); + res = sqlite3_step(statement); + if (res != SQLITE_DONE) + sqlite_error(res, db, "insert of event %llu failed.", eventCount); + res = sqlite3_reset(statement); + if (res != SQLITE_OK) + sqlite_error(res, db, "Couldn't reset insert statement of event %llu", eventCount); + + if (progress) { + if ((eventCount % SMALL_TICK) == 0) { + printf("."); + fflush(stdout); + if (((eventCount / SMALL_TICK) % BIG_TICK) == 0) { + printf("\n"); + fflush(stdout); + evlog(LOG_SOMETIMES, "%lu events.", (unsigned long)eventCount); + } + } + } + } + if (progress) { + printf("\n"); + fflush(stdout); + } + runStatement(db, "COMMIT", "Transaction finish"); + logFileCompleted(db, eventCount); + + /* finalize all the statements */ + EVENT_LIST(EVENT_TYPE_FINALIZE_STATEMENT, X); + + return eventCount; +} + +/* openLog -- open the log file doors, HAL */ + +static FILE *openLog(sqlite3 *db) +{ + FILE *input; + + registerLogFile(db, logFileName); + if (!logFileName) { + input = stdin; + logFileName = ""; + } else { + input = fopen(logFileName, "r"); + if (input == NULL) + error("unable to open %s", logFileName); + } + + evlog(LOG_OFTEN, "Reading %s.", logFileName ? 
        logFileName : "standard input");

  return input;
}

/* writeEventsToSQL -- open the log, import all its events into the
 * database, and close it.  Returns the number of events imported. */

static int64 writeEventsToSQL(sqlite3 *db)
{
  FILE *input;
  int64 count;
  input = openLog(db);
  count = readLog(input, db);
  (void)fclose(input);
  return count;
}


/* main -- parse arguments, open the database, (re)build its tables,
 * import the log, optionally run self-tests, and close. */

int main(int argc, char *argv[])
{
  sqlite3 *db;
  int64 count;

  parseArgs(argc, argv);

  db = openDatabase();
  if (rebuild) {
    dropGlueTables(db);
  }
  makeTables(db);
  fillGlueTables(db);
  count = writeEventsToSQL(db);
  evlog(LOG_ALWAYS, "Imported %llu events from %s to %s, serial %llu.",
        count, logFileName, databaseName, logSerial);

  if (runTests) {
    /* TODO: more unit tests in here */
    testTableExists(db);
  }

  closeDatabase(db);
  return 0;
}

/* COPYRIGHT AND LICENSE
 *
 * Copyright (C) 2012-2020 Ravenbrook Limited <https://www.ravenbrook.com/>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/eventtxt.c b/mps/code/eventtxt.c new file mode 100644 index 00000000000..3709e577afe --- /dev/null +++ b/mps/code/eventtxt.c @@ -0,0 +1,661 @@ +/* eventtxt.c: event text log to human-friendly format. + * + * $Id$ + * + * Copyright (c) 2012-2020 Ravenbrook Limited. See end of file for license. + * + * This is a command-line tool that converts events from a text-format + * MPS telemetry file into a more human-readable format. + * + * The default MPS library will write a binary-format telemetry file + * which can be converted into a text-format file using the eventcnv + * program (q.v.). + * + * For efficiency, eventcnv writes all event times, codes, and + * parameters (apart from EventFS - strings - and EventFD - + * floating-point) as hexadecimal strings, separated by single spaces. + * For human-readable purposes, we'd prefer a format in which + * parameters are named; event codes are converted to event type + * names; integers are in decimal; booleans are 'True ' or 'False'; + * pointers, addresses, and words are in hex; and labelled addresses + * are shown with their label strings. This program performs that + * conversion. + * + * Options: + * + * -l : Import events from the named logfile. Defaults to + * stdin. 
+ * + * $Id$ + */ + +#include "check.h" +#include "config.h" +#include "eventcom.h" +#include "eventdef.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpscmvff.h" +#include "table.h" +#include "testlib.h" /* for ulongest_t and associated print formats */ + +#include +#include +#include +#include /* exit, EXIT_FAILURE, EXIT_SUCCESS */ +#include /* strcpy, strerror, strlen */ + +static const char *prog; /* program name */ +static const char *logFileName = NULL; + +/* everror -- error signalling */ + +ATTRIBUTE_FORMAT((printf, 1, 2)) +static void everror(const char *format, ...) +{ + va_list args; + + (void)fflush(stdout); /* sync */ + (void)fprintf(stderr, "%s: ", prog); + va_start(args, format); + (void)vfprintf(stderr, format, args); + (void)fprintf(stderr, "\n"); + va_end(args); + exit(EXIT_FAILURE); +} + +static void usage(void) +{ + (void)fprintf(stderr, "Usage: %s [-l ]\n", prog); +} + +static void usageError(void) +{ + usage(); + everror("Bad usage"); +} + +/* parseArgs -- parse command line arguments */ + +static void parseArgs(int argc, char *argv[]) +{ + int i = 1; + + if (argc >= 1) + prog = argv[0]; + else + prog = "unknown"; + + while (i < argc) { /* consider argument i */ + if (argv[i][0] == '-') { /* it's an option argument */ + switch (argv[i][1]) { + case 'l': /* log file name */ + ++ i; + if (i == argc) + usageError(); + else + logFileName = argv[i]; + break; + case '?': case 'h': /* help */ + usage(); + exit(EXIT_SUCCESS); + default: + usageError(); + } + } /* if option */ + ++ i; + } +} + +/* table methods for a table of interned strings, and another of + * labelled addresses. 
 */

/* tableAlloc, tableFree -- allocator callbacks for the Table code,
 * backed by the MPS pool passed as the closure. */

static void *tableAlloc(void *closure, size_t size)
{
  mps_pool_t pool = closure;
  mps_addr_t p;
  mps_res_t res;
  res = mps_alloc(&p, pool, size);
  if (res != MPS_RES_OK)
    everror("allocation failed: %d", res);
  return p;
}

static void tableFree(void *closure, void *p, size_t size)
{
  mps_pool_t pool = closure;
  mps_free(pool, p, size);
}

/* Printing routines */

/* printStr -- print an EventString */

/* Prints str surrounded by double quotes, escaping embedded '"' and
 * '\' with a backslash. */

static void printStr(const char *str)
{
  size_t i;

  putchar('"');
  for (i = 0; str[i] != '\0'; ++i) {
    char c = str[i];
    if (c == '"' || c == '\\')
      putchar('\\');
    putchar(c);
  }
  putchar('"');
}


/* Reading clocks, hex numbers, and doubles, and quoted-and-escaped
 * strings. */

/* parseClock -- read an event clock written as two fixed-width
 * 8-digit hex halves (high then low); advances *pInOut past the
 * parsed text.  Terminates the program on parse failure. */

static EventClock parseClock(char **pInOut)
{
  EventClock val;
  int i, l;
  unsigned long low, high;
  char *p = *pInOut;

  i = sscanf(p, "%08lX%08lX%n", &high, &low, &l);
  if (i != 2)
    everror("Couldn't read a clock from '%s'", p);
  EVENT_CLOCK_MAKE(val, low, high);

  *pInOut = p + l;
  return val;
}

/* parseHex -- read a hexadecimal ulongest_t; advances *pInOut. */

static ulongest_t parseHex(char **pInOut)
{
  ulongest_t val;
  int i, l;
  char *p = *pInOut;

  i = sscanf(p, "%" SCNXLONGEST "%n", &val, &l);
  if (i != 1)
    everror("Couldn't read a hex number from '%s'", p);
  *pInOut = p + l;
  return val;
}

/* parseDouble -- read a decimal floating-point value; advances
 * *pInOut. */

static double parseDouble(char **pInOut)
{
  double val;
  int i, l;
  char *p = *pInOut;

  i = sscanf(p, "%lg%n", &val, &l);
  if (i != 1)
    everror("Couldn't read a float from '%s'", p);
  *pInOut = p + l;
  return val;
}

/* parseString checks string syntax (opening and closing quotation
 * marks) and takes a copy (stripping escaping backslashes) into a
 * static buffer (callers must "use it or lose it"; the next
 * invocation will over-write it). Probably not bullet-proof.
 */

#define MAX_STRING_LENGTH 1024

static char strBuf[MAX_STRING_LENGTH];

static char *parseString(char **pInOut)
{
  char *p = *pInOut;
  char *q = strBuf;
  while(*p == ' ')
    ++p;

  if (*p != '"')
    everror("String has no opening quotation mark: '%s'", p);
  ++p;

  while(1) {
    if (q - strBuf >= MAX_STRING_LENGTH) {
      everror("String length exceeds %d", MAX_STRING_LENGTH);
    }
    if (*p == '\\') { /* escaped character */
      ++p;
      if (*p == '\0')
        everror("Closing NUL byte escaped by backslash.");
      *q++ = *p++;
    } else if (*p == '"') { /* end of string */
      *q = '\0';
      ++p;
      *pInOut = p;
      return strBuf;
    } else if (*p == '\0')
      everror("Unexpected closing NUL byte.");
    else
      *q++ = *p++;
  }
}

/* Event logs have interned strings (i.e. they construct a partial
 * function from non-negative integer IDs to strings), and can label
 * addresses and pointers with intern string IDs (i.e. they construct
 * a partial function from address or pointer to string ID). We need
 * three tables to keep track of these. */

static Table internTable;        /* dictionary of intern ids to strings */

static Table labelAddrTable;     /* dictionary of addrs to intern ids */

static Table labelPointerTable;  /* dictionary of pointers to intern ids */

/* createTables -- create the three tables above, using 'pool' for
 * their storage via tableAlloc/tableFree. */

static void createTables(mps_pool_t pool)
{
  Res res;
  /* MPS intern IDs are serials from zero up, so we can use -1
   * and -2 as specials. */
  res = TableCreate(&internTable,
                    (size_t)1<<4,
                    tableAlloc, tableFree, pool,
                    (TableKey)-1, (TableKey)-2);
  if (res != ResOK)
    everror("Couldn't make intern table.");

  /* We assume that 0 and 1 are invalid as Addrs. */
  res = TableCreate(&labelAddrTable, (size_t)1<<7,
                    tableAlloc, tableFree, pool,
                    0, 1);
  if (res != ResOK)
    everror("Couldn't make address label table.");

  /* We assume that 0 and 1 are invalid as Pointers.
*/ + res = TableCreate(&labelPointerTable, (size_t)1<<7, + tableAlloc, tableFree, pool, + 0, 1); + if (res != ResOK) + everror("Couldn't make pointer label table."); +} + +/* recordIntern -- record an interned string in the table. a copy of +* the string from the parsed buffer into a newly-allocated block. */ + +static void recordIntern(mps_pool_t pool, char *p) +{ + ulongest_t stringId; + char *string; + mps_addr_t copy; + size_t len; + Res res; + + stringId = parseHex(&p); + string = parseString(&p); + len = strlen(string); + res = mps_alloc(©, pool, len + 1); + if (res != MPS_RES_OK) + everror("Couldn't allocate space for a string."); + (void)strcpy(copy, string); + res = TableDefine(internTable, (TableKey)stringId, (void *)copy); + if (res != ResOK) + everror("Couldn't create an intern mapping."); +} + +/* Over time there may be multiple labels associated with an address, + * so we keep a list, recording for each label the clock when the + * association was made. This means that printAddr can select the + * label that was in force at the time of the event. + */ + +typedef struct LabelStruct *Label; +typedef struct LabelStruct { + EventClock clock; /* clock of this label */ + ulongest_t id; /* string id of this label */ +} LabelStruct; + +typedef struct LabelListStruct *LabelList; +typedef struct LabelListStruct { + size_t n; /* number of labels in array */ + Label labels; /* labels, sorted in order by clock */ +} LabelListStruct; + +/* labelFind returns the index of the first entry in list with a clock + * value that's greater than 'clock', or list->n if there is no such + * label. The list is assumed to be sorted. 
+ */ + +static size_t labelFind(LabelList list, EventClock clock) +{ + size_t low = 0, high = list->n; + while (low < high) { + size_t mid = (low + high) / 2; + assert(NONNEGATIVE(mid) && mid < list->n); + if (list->labels[mid].clock > clock) { + high = mid; + } else { + low = mid + 1; + } + } + assert(NONNEGATIVE(low) && low <= list->n); + assert(low == list->n || list->labels[low].clock > clock); + return low; +} + +/* recordLabel records a label: an association (made at the time given + * by 'clock') between a client address or an internal pointer and a + * string ID. These are encoded as two hexadecimal numbers in the + * string pointed to by 'p'. + * + * Note that the event log may have been generated on a platform with + * addresses larger than Word on the current platform. If that happens + * then we are scuppered because our Table code uses Word as the key + * type: there's nothing we can do except detect this bad case (see + * also the EventInit handling and warning code). + * + * We can and do handle the case where string IDs (which are Words on + * the MPS platform) are larger than void* on the current platform. + * This is probably in fact the same case, because Word should be the + * same size as void*. In practice, trying to analyse a log from a + * wide platform on a narrow one (e.g. - the only case which is likely + * to occur this decade - from a 64-bit platform on a 32-bit one) is + * probably a bad idea and maybe doomed to failure. 
+ */ + +static void recordLabel(mps_pool_t pool, Table table, EventClock clock, char *p) +{ + ulongest_t address; + LabelList list; + Label newlabels; + mps_addr_t tmp; + size_t pos; + Res res; + + address = parseHex(&p); + if (address > (Word)-1) { + (void)printf("label address too large!"); + return; + } + + if (TableLookup(&tmp, table, (TableKey)address)) { + list = tmp; + } else { + /* First label for this address */ + res = mps_alloc(&tmp, pool, sizeof(LabelListStruct)); + if (res != MPS_RES_OK) + everror("Can't allocate space for a label list"); + list = tmp; + list->n = 0; + res = TableDefine(table, (TableKey)address, list); + if (res != ResOK) + everror("Couldn't create a label mapping."); + } + + res = mps_alloc(&tmp, pool, sizeof(LabelStruct) * (list->n + 1)); + if (res != ResOK) + everror("Couldn't allocate space for list of labels."); + newlabels = tmp; + + pos = labelFind(list, clock); + memcpy(newlabels, list->labels, sizeof(LabelStruct) * pos); + newlabels[pos].clock = clock; + newlabels[pos].id = parseHex(&p); + memcpy(newlabels + pos + 1, list->labels + pos, + sizeof(LabelStruct) * (list->n - pos)); + if (list->n > 0) + mps_free(pool, list->labels, sizeof(LabelStruct) * list->n); + list->labels = newlabels; + ++ list->n; +} + +/* output code */ + +/* hexWordWidth is the number of characters used to output a Word + * value in hexadecimal. Note that what we really care about is the + * width of a Word on the source platform, not here. So when we see + * an EventInit event, we update this variable to the necessary + * width. 
*/ + +static int hexWordWidth = (MPS_WORD_WIDTH+3)/4; + +/* printLabelled -- output a ulongest_t in hex, with the interned + * string if the value is in the table */ + +static void printLabelled(EventClock clock, ulongest_t value, + const char *ident, Table table) +{ + void *tmp; + + printf("%s:%0*" PRIXLONGEST, ident, hexWordWidth, value); + if (table != NULL && TableLookup(&tmp, table, (TableKey)value)) { + LabelList list = tmp; + size_t pos = labelFind(list, clock); + if (pos > 0) { + ulongest_t id = list->labels[pos - 1].id; + putchar('['); + if (TableLookup(&tmp, internTable, (TableKey)id)) + printStr((char *)tmp); + else + printf("unknown label %" PRIXLONGEST, id); + putchar(']'); + } + } + putchar(' '); +} + +/* parameter processing. For each parameter we parse it and then + * print it, preceded by its name and a colon and followed by a + * space. */ + +#define processParamA(ident) \ + val_hex = parseHex(&p); \ + printLabelled(clock, val_hex, #ident, labelAddrTable); + +#define processParamP(ident) \ + val_hex = parseHex(&p); \ + printLabelled(clock, val_hex, #ident, labelPointerTable); + +#define processParamW(ident) \ + val_hex = parseHex(&p); \ + printLabelled(clock, val_hex, #ident, NULL); + +#define processParamU(ident) \ + val_hex = parseHex(&p); \ + printf(#ident ":%" PRIuLONGEST " ", val_hex); + +#define processParamD(ident) \ + val_float = parseDouble(&p); \ + printf(#ident ":%#8.3g ", val_float); + +#define processParamS(ident) \ + val_string = parseString(&p); \ + printf(#ident ":"); \ + printStr(val_string); \ + putchar(' '); + +#define processParamB(ident) \ + val_hex = parseHex(&p); \ + printf(#ident ":%s ", val_hex ? 
"True" : "False"); + +#define EVENT_PROCESS_PARAM(X, index, sort, ident, doc) \ + processParam##sort(ident); + +#define EVENT_PROCESS(X, name, code, used, kind) \ + case code: \ + EVENT_##name##_PARAMS(EVENT_PROCESS_PARAM, X) \ + break; + +/* a table of the event names */ + +static const char *eventName[EventCodeMAX+EventCodeMAX]; + +#define EVENT_SET_NAME(X, name, code, used, kind) \ + eventName[code] = #name; + +/* this is overkill, at present. */ + +#define MAX_LOG_LINE_LENGTH 1024 + +/* readLog -- read and parse log. Returns the number of events written. */ + +static void readLog(mps_pool_t pool, FILE *input) +{ + int i; + + for (i=0; i <= EventCodeMAX; ++i) + eventName[i] = NULL; + + EVENT_LIST(EVENT_SET_NAME, X); + + while (TRUE) { /* loop for each event */ + char line[MAX_LOG_LINE_LENGTH]; + char *p, *q; + EventClock clock; + int code; + ulongest_t val_hex; + double val_float; + const char *val_string; + + p = fgets(line, MAX_LOG_LINE_LENGTH, input); + if (!p) { + if (feof(input)) + break; + else + everror("Couldn't read line from input."); + } + + clock = parseClock(&p); + EVENT_CLOCK_PRINT(stdout, clock); + + code = (int)parseHex(&p); + printf(" %04X ", code); + if (eventName[code]) + printf("%-19s ", eventName[code]); + else + printf("%-19s ", "[Unknown]"); + + q = p; + + /* for a few particular codes, we do local processing. 
*/ + if (code == EventInternCode) { + recordIntern(pool, q); + } else if (code == EventLabelCode) { + recordLabel(pool, labelAddrTable, clock, q); + } else if (code == EventLabelPointerCode) { + recordLabel(pool, labelPointerTable, clock, q); + } else if (code == EventEventInitCode) { + ulongest_t major, median, minor, maxCode, maxNameLen, wordWidth, clocksPerSec; + major = parseHex(&q); /* EVENT_VERSION_MAJOR */ + median = parseHex(&q); /* EVENT_VERSION_MEDIAN */ + minor = parseHex(&q); /* EVENT_VERSION_MINOR */ + maxCode = parseHex(&q); /* EventCodeMAX */ + maxNameLen = parseHex(&q); /* EventNameMAX */ + wordWidth = parseHex(&q); /* MPS_WORD_WIDTH */ + clocksPerSec = parseHex(&q); /* mps_clocks_per_sec() */ + UNUSED(clocksPerSec); + UNUSED(maxNameLen); + + if ((major != EVENT_VERSION_MAJOR) || + (median != EVENT_VERSION_MEDIAN) || + (minor != EVENT_VERSION_MINOR)) { + (void)fprintf(stderr, "Event log version does not match: " + "%d.%d.%d vs %d.%d.%d\n", + (int)major, (int)median, (int)minor, + EVENT_VERSION_MAJOR, + EVENT_VERSION_MEDIAN, + EVENT_VERSION_MINOR); + } + + if (maxCode > EventCodeMAX) { + (void)fprintf(stderr, "Event log may contain unknown events " + "with codes from %d to %d\n", + EventCodeMAX+1, (int)maxCode); + } + + if (wordWidth > MPS_WORD_WIDTH) { + int newHexWordWidth = (int)((wordWidth + 3) / 4); + if (newHexWordWidth > hexWordWidth) { + (void)fprintf(stderr, + "Event log word width is greater than on current " + "platform; previous values may be printed too " + "narrowly.\n"); + } + hexWordWidth = newHexWordWidth; + } + + if (wordWidth > sizeof(ulongest_t) * CHAR_BIT) { + everror("Event log word width %d is too wide for the current platform.", + (int)wordWidth); + } + } + + switch(code) { + EVENT_LIST(EVENT_PROCESS, X); + default: + printf("Unknown event."); + } + putchar('\n'); + + } +} + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_pool_t pool; + mps_res_t res; + FILE *input; + + parseArgs(argc, argv); + if 
(!logFileName) {
    input = stdin;
    logFileName = "";
  } else {
    input = fopen(logFileName, "r");
    if (input == NULL)
      everror("unable to open %s", logFileName);
  }

  /* Ensure no telemetry output. */
  res = setenv("MPS_TELEMETRY_CONTROL", "0", 1);
  if (res != 0)
    everror("failed to set MPS_TELEMETRY_CONTROL: %s", strerror(errno));

  res = mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none);
  if (res != MPS_RES_OK)
    everror("failed to create arena: %d", res);

  /* MVFF provides the manual allocation used by the label tables. */
  res = mps_pool_create_k(&pool, arena, mps_class_mvff(), mps_args_none);
  if (res != MPS_RES_OK)
    everror("failed to create pool: %d", res);

  createTables(pool);
  readLog(pool, input);

  mps_pool_destroy(pool);
  mps_arena_destroy(arena);

  (void)fclose(input);
  return 0;
}

/* C. COPYRIGHT AND LICENSE
 *
 * Copyright (C) 2012-2020 Ravenbrook Limited <https://www.ravenbrook.com/>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
diff --git a/mps/code/extcon.c b/mps/code/extcon.c
new file mode 100644
index 00000000000..4bd04a14b22
--- /dev/null
+++ b/mps/code/extcon.c
@@ -0,0 +1,306 @@
/* extcon.c: ARENA EXTENDED AND CONTRACTED CALLBACK TEST
 *
 * $Id$
 * Copyright (c) 2022-2023 Ravenbrook Limited. See end of file for license.
 *
 * .overview: This test case allocates a bunch of large objects, of a size
 * similar to the size of the arena, to force the arena to extend. It then
 * discards the base pointers to those objects, and forces a collection.
 *
 * .limitations: This test checks that the EXTENDED and CONTRACTED
 * callbacks were called at least once, and that they are called the
 * same number of times. It does not check that the extensions and
 * contractions themselves were performed correctly, nor does it check
 * that an appropriate number of extensions and contractions took
 * place, nor does it check that they took place at sensible times.
 *
 * .dylan: This test uses Dylan format objects in common with most
 * other tests for convenience and brevity.
+ */ + +#include "mps.h" +#include "testlib.h" +#include "fmtdy.h" +#include "fmtdytst.h" +#include "mpsavm.h" +#include "mpscamc.h" +#include +#include + +/* Number of test objects to allocate */ +#define N_TESTOBJ 100 +/* The number of slots determines the size of each object */ +#define N_SLOT_TESTOBJ 10000 +/* The initial arena size is requested to be bigger the test object by + this many bytes */ +#define SIZEDIFF 10 + +/* Set alignment to mps_word_ts */ +#define ALIGNMENT sizeof(mps_word_t) + +/* Align size upwards to the next multiple of the word size. */ +#define ALIGN_WORD(size) \ + (((size) + ALIGNMENT - 1) & ~(ALIGNMENT - 1)) + +/* Global objects*/ +static mps_arena_t arena; /* the arena */ +static mps_pool_t obj_pool; /* pool for test objects */ +static mps_ap_t obj_ap; /* allocation point used to allocate objects */ + +/* Count of number of arena contractions and extensions */ +static int n_contract = 0; +static int n_extend = 0; + +/* Callback functions for arena extension and contraction */ +static void arena_extended_cb(mps_arena_t arena_in, mps_addr_t addr, size_t size) +{ + testlib_unused(arena_in); + testlib_unused(addr); + testlib_unused(size); + printf("Arena extended by %"PRIuLONGEST" bytes\n", (ulongest_t)size); + n_extend++; +} + +static void arena_contracted_cb(mps_arena_t arena_in, mps_addr_t addr, size_t size) +{ + testlib_unused(arena_in); + testlib_unused(addr); + testlib_unused(size); + printf("Arena contracted by %"PRIuLONGEST" bytes\n", (ulongest_t)size); + n_contract++; +} + +/* Messages for testbench debugging */ +static void print_messages(void) +{ + mps_message_type_t type; + + while (mps_message_queue_type(&type, arena)) { + mps_message_t message; + + cdie(mps_message_get(&message, arena, type), + "get"); + + switch(type) { + case mps_message_type_gc_start(): + printf("GC start at %"PRIuLONGEST": %s\n", + (ulongest_t)mps_message_clock(arena, message), + mps_message_gc_start_why(arena, message)); + break; + + case 
mps_message_type_gc(): + printf("GC end at %"PRIuLONGEST" " + "condemned %"PRIuLONGEST" " + "not condemned %"PRIuLONGEST" " + "live %"PRIuLONGEST"\n", + (ulongest_t)mps_message_clock(arena, message), + (ulongest_t)mps_message_gc_condemned_size(arena, message), + (ulongest_t)mps_message_gc_not_condemned_size(arena, message), + (ulongest_t)mps_message_gc_live_size(arena, message)); + break; + + default: + cdie(0, "message type"); + break; + } + + mps_message_discard(arena, message); + } +} + +/* Disabling inlining is necessary (but perhaps not sufficient) if using stack roots. + See comment below with link to GitHub issue*/ +ATTRIBUTE_NOINLINE +static void test_main(void *cold_stack_end) +{ + mps_fmt_t obj_fmt; + mps_thr_t thread; + mps_root_t stack_root, testobj_root; + size_t arena_size, obj_size; + int i; + /* In the original version of extcon this was a stack root, but we + observed unreliable failures to do with registering the cold end + of the stack. See GitHub issue #210 + . For now, we + declare this as a separate root. */ + static mps_word_t testobj[N_TESTOBJ]; + + /* The testobj array must be below (on all current Posix platforms) + the cold end of the stack in order for the MPS to scan it. We + have observed a Heisenbug where GCC will inline test_main into + main and lose this condition if the expression below is removed. + This is a problem we are analysing in GitHub issue #210 + . For now, we + disable this Insist to allow the test to run with a static + testobj array. 
 */
#if 0
  Insist((void *)&testobj[N_TESTOBJ] <= cold_stack_end);
  if ((void *)&testobj[N_TESTOBJ] > cold_stack_end)
    printf("Cold stack marker invalid!\n");
  else
    printf("Cold stack marker probably valid.\n");
#endif

  /* Make initial arena size slightly bigger than the test object size
     to force an extension as early as possible */
  /* See definition of make_dylan_vector() in fmtdytst.c for
     calculation of vector size */
  obj_size = ALIGN_WORD((N_SLOT_TESTOBJ + 2) * sizeof(mps_word_t));
  arena_size = ALIGN_WORD(obj_size + SIZEDIFF);

  /* Create arena and register callbacks */
  MPS_ARGS_BEGIN(args) {
    MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arena_size);
    MPS_ARGS_ADD(args, MPS_KEY_ARENA_EXTENDED, (mps_fun_t)&arena_extended_cb);
    MPS_ARGS_ADD(args, MPS_KEY_ARENA_CONTRACTED, (mps_fun_t)&arena_contracted_cb);
    die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), "mps_arena_create_k");
  } MPS_ARGS_END(args);

  printf("Initial reservation %"PRIuLONGEST".\n", (ulongest_t)mps_arena_reserved(arena));

  die(dylan_fmt(&obj_fmt, arena), "dylan_fmt()");

  /* Create new pool (amcz: automatic, leaf-only objects) */
  MPS_ARGS_BEGIN(args) {
    MPS_ARGS_ADD(args, MPS_KEY_FORMAT, obj_fmt);
    die(mps_pool_create_k(&obj_pool, arena, mps_class_amcz(), args),
        "mps_pool_create_k");
  } MPS_ARGS_END(args);

  /* Register thread */
  die(mps_thread_reg(&thread, arena), "Thread reg");

  /* Register stack roots */
  /* Since this testbench is currently not using a stack root, #IF 0 this out */
  testlib_unused(cold_stack_end);
  testlib_unused(stack_root);
#if 0
  die(mps_root_create_thread(&stack_root, arena, thread, cold_stack_end), "Create Stack root");
#endif

  /* Register ambiguous array of object roots.
   */
  die(mps_root_create_area(&testobj_root, arena,
                           mps_rank_ambig(), (mps_rm_t)0,
                           &testobj[0], &testobj[N_TESTOBJ],
                           mps_scan_area, NULL),
      "root_create_area(testobj)");

  /* Create allocation point */
  die(mps_ap_create_k(&obj_ap, obj_pool, mps_args_none), "Create Allocation point");

  mps_message_type_enable(arena, mps_message_type_gc_start());
  mps_message_type_enable(arena, mps_message_type_gc());

  /* Allocate objects and force arena extension */
  for (i = 0; i < N_TESTOBJ; i++) {

    die(make_dylan_vector(&testobj[i], obj_ap, N_SLOT_TESTOBJ), "make_dylan_vector");

    printf("Object %d committed. "
           "Arena reserved: %"PRIuLONGEST".\n",
           i,
           (ulongest_t)mps_arena_reserved(arena));

    print_messages();
  }

  /* overwrite all the references to the objects*/
  for (i = 0; i < N_TESTOBJ; i++) {

    /* bonus test of mps_addr_object */
#if 0 /* Comment this out until mps_addr_object becomes available. */
    mps_addr_t out;
    /* NOTE(review): this Insist is duplicated below. */
    Insist(N_TESTOBJ <= N_INT_TESTOBJ);

    /* use "i" to as a convenient way to generate different interior pointers
       To guarantee the i index will give us an interior pointer the number of test
       objects must be <= the number of integers in each object */
    Insist(N_TESTOBJ <= N_INT_TESTOBJ);
    die(mps_addr_object(&out, arena, &(testobj[i])->int_array[i]), "Address object");

    Insist(out == testobj[i]);

    /* end piggy back testbench */
#endif

    /* now overwrite the ref */
    testobj[i] = (mps_word_t)NULL;

    print_messages();
  }

  /* Collect */
  mps_arena_collect(arena);

  print_messages();

  /* Clean up */
  mps_root_destroy(testobj_root);
  /* mps_root_destroy(stack_root);*/ /*commented out while not using stack root */
  mps_thread_dereg(thread);
  mps_ap_destroy(obj_ap);
  mps_pool_destroy(obj_pool);
  mps_fmt_destroy(obj_fmt);
  mps_arena_destroy(arena);

  /* Destroying the arena should cause contraction callbacks on all
     remaining chunks, even if they had contents.
   */
  Insist(n_extend == n_contract);

  printf("Arena extended %d times\n", n_extend);
  printf("Arena contracted %d times\n", n_contract);

  /* comment out some diagnostics for investigating issue #210 mentioned above */
#if 0
  printf("&testobj[N_TESTOBJ] = %p\n", (void *)&testobj[N_TESTOBJ]);
  printf("cold_stack_end = %p\n", cold_stack_end);
#endif
  if (n_extend == 0)
    printf("No callbacks received upon arena extended!\n");
  if (n_contract == 0)
    printf("No callbacks received upon arena contracted!\n");

  if (n_contract == 0 || n_extend == 0)
    exit(EXIT_FAILURE);
}

/* main -- pass a stack marker to test_main; report success if
 * test_main did not exit with failure. */

int main(int argc, char* argv[])
{
  void *stack_marker = &stack_marker;

  testlib_init(argc, argv);

  test_main(stack_marker);

  printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);

  return 0;
}


/* C. COPYRIGHT AND LICENSE
 *
 * Copyright (C) 2022-2023 Ravenbrook Limited <https://www.ravenbrook.com/>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/failover.c b/mps/code/failover.c new file mode 100644 index 00000000000..f6f213d213f --- /dev/null +++ b/mps/code/failover.c @@ -0,0 +1,369 @@ +/* failover.c: FAILOVER IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * .design: + * + * .critical: In manual-allocation-bound programs using MVFF, many of + * these functions are on the critical paths via mps_alloc (and then + * PoolAlloc, MVFFAlloc, failoverFind*) and mps_free (and then + * MVFFFree, failoverInsert). 
/* failoverInit -- initialize a failover land
 *
 * Requires the FailoverPrimary and FailoverSecondary keyword arguments
 * and stores the two delegate lands.  The signature is set only after
 * the superclass init has succeeded, so a partially initialized
 * structure never checks as a valid Failover.
 */
static Res failoverInit(Land land, Arena arena, Align alignment, ArgList args)
{
  Failover fo;
  ArgStruct arg;
  Res res;

  AVER(land != NULL);
  res = NextMethod(Land, Failover, init)(land, arena, alignment, args);
  if (res != ResOK)
    return res;
  fo = CouldBeA(Failover, land);

  /* Both keyword arguments are required (ArgRequire, not ArgPick). */
  ArgRequire(&arg, args, FailoverPrimary);
  fo->primary = arg.val.p;
  ArgRequire(&arg, args, FailoverSecondary);
  fo->secondary = arg.val.p;

  SetClassOfPoly(land, CLASS(Failover));
  fo->sig = FailoverSig;
  AVERC(Failover, fo);

  return ResOK;
}
/* failoverDelete -- delete a range, trying primary then secondary
 *
 * Deletes from the primary if the range is found there; if deletion
 * from the primary fails for lack of memory, deletes the whole
 * containing range and re-inserts the left and right fragments, which
 * may fall back to the secondary.  Returns ResFAIL via the secondary
 * path when the range is in neither land.
 */
static Res failoverDelete(Range rangeReturn, Land land, Range range)
{
  Failover fo = MustBeA(Failover, land);
  Res res;
  RangeStruct oldRange, dummyRange, left, right;

  AVER(rangeReturn != NULL);
  AVERT(Range, range);

  /* Prefer efficient search in the primary. See
   * .
   */
  (void)LandFlush(fo->primary, fo->secondary);

  res = LandDelete(&oldRange, fo->primary, range);

  if (res == ResFAIL) {
    /* Range not found in primary: try secondary. */
    return LandDelete(rangeReturn, fo->secondary, range);
  } else if (res != ResOK) {
    /* Range was found in primary, but couldn't be deleted. The only
     * case we expect to encounter here is the case where the primary
     * is out of memory. (In particular, we don't handle the case of a
     * CBS returning ResLIMIT because its block pool has been
     * configured not to automatically extend itself.)
     */
    AVER(ResIsAllocFailure(res));

    /* Delete the whole of oldRange, and re-insert the fragments
     * (which might end up in the secondary). See
     * .
     */
    res = LandDelete(&dummyRange, fo->primary, &oldRange);
    if (res != ResOK)
      return res;

    AVER(RangesEqual(&oldRange, &dummyRange));
    RangeInit(&left, RangeBase(&oldRange), RangeBase(range));
    if (!RangeIsEmpty(&left)) {
      /* Don't call LandInsert(..., land, ...) here: that would be
       * re-entrant and fail the landEnter check. */
      res = LandInsert(&dummyRange, fo->primary, &left);
      if (res != ResOK) {
        /* The range was successfully deleted from the primary above. */
        AVER(res != ResFAIL);
        res = LandInsert(&dummyRange, fo->secondary, &left);
        AVER(res == ResOK);
      }
    }
    RangeInit(&right, RangeLimit(range), RangeLimit(&oldRange));
    if (!RangeIsEmpty(&right)) {
      res = LandInsert(&dummyRange, fo->primary, &right);
      if (res != ResOK) {
        /* The range was successfully deleted from the primary above. */
        AVER(res != ResFAIL);
        res = LandInsert(&dummyRange, fo->secondary, &right);
        AVER(res == ResOK);
      }
    }
  }
  if (res == ResOK) {
    AVER_CRITICAL(RangesNest(&oldRange, range));
    RangeCopy(rangeReturn, &oldRange);
  }
  return res;
}
*/ + (void)LandFlush(fo->primary, fo->secondary); + + return LandFindFirst(rangeReturn, oldRangeReturn, fo->primary, size, findDelete) + || LandFindFirst(rangeReturn, oldRangeReturn, fo->secondary, size, findDelete); +} + + +static Bool failoverFindLast(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Failover fo = MustBeA_CRITICAL(Failover, land); + + AVER_CRITICAL(rangeReturn != NULL); + AVER_CRITICAL(oldRangeReturn != NULL); + AVERT_CRITICAL(FindDelete, findDelete); + + /* . */ + (void)LandFlush(fo->primary, fo->secondary); + + return LandFindLast(rangeReturn, oldRangeReturn, fo->primary, size, findDelete) + || LandFindLast(rangeReturn, oldRangeReturn, fo->secondary, size, findDelete); +} + + +static Bool failoverFindLargest(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Failover fo = MustBeA_CRITICAL(Failover, land); + + AVER_CRITICAL(rangeReturn != NULL); + AVER_CRITICAL(oldRangeReturn != NULL); + AVERT_CRITICAL(FindDelete, findDelete); + + /* . */ + (void)LandFlush(fo->primary, fo->secondary); + + return LandFindLargest(rangeReturn, oldRangeReturn, fo->primary, size, findDelete) + || LandFindLargest(rangeReturn, oldRangeReturn, fo->secondary, size, findDelete); +} + + +static Bool failoverFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high) +{ + Failover fo = MustBeA_CRITICAL(Failover, land); + Bool found = FALSE; + Res res; + + AVER_CRITICAL(FALSE); /* TODO: this code is completely untested! */ + AVER_CRITICAL(foundReturn != NULL); + AVER_CRITICAL(rangeReturn != NULL); + AVER_CRITICAL(oldRangeReturn != NULL); + /* AVERT_CRITICAL(ZoneSet, zoneSet); */ + AVERT_CRITICAL(Bool, high); + + /* . 
/* failoverDescribe -- describe the failover land
 *
 * Uses TESTC/CouldBeA so that an invalid instance or NULL stream is
 * reported via ResPARAM rather than trapped.  Describes the generic
 * land fields first, then the two delegate lands.
 */
static Res failoverDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
{
  Land land = CouldBeA(Land, inst);
  Failover fo = CouldBeA(Failover, land);
  LandClass primaryClass, secondaryClass;
  Res res;

  if (!TESTC(Failover, fo))
    return ResPARAM;
  if (stream == NULL)
    return ResPARAM;

  res = NextMethod(Inst, Failover, describe)(inst, stream, depth);
  if (res != ResOK)
    return res;

  primaryClass = ClassOfPoly(Land, fo->primary);
  secondaryClass = ClassOfPoly(Land, fo->secondary);

  return WriteF(stream, depth + 2,
                "primary = $P ($S)\n",
                (WriteFP)fo->primary,
                (WriteFS)ClassName(primaryClass),
                "secondary = $P ($S)\n",
                (WriteFP)fo->secondary,
                (WriteFS)ClassName(secondaryClass),
                NULL);
}
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/failover.h b/mps/code/failover.h new file mode 100644 index 00000000000..914af793344 --- /dev/null +++ b/mps/code/failover.h @@ -0,0 +1,61 @@ +/* failover.h: FAILOVER ALLOCATOR INTERFACE + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * .source: . 
+ */ + +#ifndef failover_h +#define failover_h + +#include "mpmtypes.h" +#include "mpm.h" +#include "protocol.h" + +typedef struct FailoverStruct *Failover; + +#define FailoverLand(fo) (&(fo)->landStruct) + +extern Bool FailoverCheck(Failover failover); + +DECLARE_CLASS(Land, Failover, Land); + +extern const struct mps_key_s _mps_key_failover_primary; +#define FailoverPrimary (&_mps_key_failover_primary) +#define FailoverPrimary_FIELD p +extern const struct mps_key_s _mps_key_failover_secondary; +#define FailoverSecondary (&_mps_key_failover_secondary) +#define FailoverSecondary_FIELD p + +#endif /* failover.h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/finalcv.c b/mps/code/finalcv.c new file mode 100644 index 00000000000..810e18020ce --- /dev/null +++ b/mps/code/finalcv.c @@ -0,0 +1,278 @@ +/* finalcv.c: FINALIZATION COVERAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * DESIGN + * + * . + * + * DEPENDENCIES + * + * This test uses the dylan object format, but the reliance on this + * particular format is not great and could be removed. 
+ * + * NOTES + * + * This code was created by first copying + */ + +#include "fmtdy.h" +#include "fmtdytst.h" +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpscamc.h" +#include "mpscams.h" +#include "mpscawl.h" +#include "mpsclo.h" +#include "mpslib.h" +#include "mpstd.h" +#include "testlib.h" + +#include /* printf */ + + +#define testArenaSIZE ((size_t)16<<20) +#define rootCOUNT 20 +#define churnFACTOR 10 +#define finalizationRATE 6 +#define gcINTERVAL ((size_t)150 * 1024) +#define collectionCOUNT 3 +#define finalizationCOUNT 3 + +/* 3 words: wrapper | vector-len | first-slot */ +#define vectorSIZE (3*sizeof(mps_word_t)) +#define vectorSLOT 2 + +#define genCOUNT 2 + +/* testChain -- generation parameters for the test */ + +static mps_gen_param_s testChain[genCOUNT] = { + { 150, 0.85 }, { 170, 0.45 } }; + + +/* tags an integer according to dylan format */ +static mps_word_t dylan_int(mps_word_t x) +{ + return (x << 2)|1; +} + + +/* converts a dylan format int to an int (untags) */ +static mps_word_t dylan_int_int(mps_word_t x) +{ + return x >> 2; +} + + +/* note: static, so auto-initialised to NULL */ +static void *root[rootCOUNT]; + + +/* churn -- allocate a lot of stuff (unreachable garbage, so it will */ +/* probably only ever cause a minor collection). 
static void churn(mps_ap_t ap)
{
  int i;
  mps_addr_t p;
  mps_res_t e;

  for (i = 0; i < churnFACTOR; ++i) {
    /* Reserve a block, initialize it as a dylan object, then retry
       the whole sequence until the commit succeeds. */
    do {
      MPS_RESERVE_BLOCK(e, p, ap, 4096);
      die(e, "MPS_RESERVE_BLOCK");
      die(dylan_init(p, 4096, root, 1), "dylan_init");
    } while (!mps_commit(ap, p, 4096));
  }
  /* NOTE(review): presumably drops the last ambiguous stack reference
     so the garbage can die, as with the root clearing elsewhere in
     this test -- confirm. */
  p = NULL;
}
*/ + Insist(mps_definalize(arena, &p) == MPS_RES_FAIL); + + die(mps_finalize(arena, &p), "finalize\n"); + root[i] = p; state[i] = rootSTATE; + } + p = NULL; + + mps_message_type_enable(arena, mps_message_type_finalization()); + mps_message_type_enable(arena, mps_message_type_gc()); + + /* */ + while (finalizations < finalizationCOUNT && collections < collectionCOUNT) { + mps_message_type_t type; + + /* Perhaps cause (minor) collection */ + churn(ap); + + /* Maybe make some objects ready-to-finalize */ + /* */ + for (i = 0; i < rootCOUNT; ++i) { + if (root[i] != NULL && state[i] == rootSTATE) { + if (rnd() % finalizationRATE == 0) { + /* for this object, either... */ + if (rnd() % 2 == 0) { + /* ...definalize it, or */ + die(mps_definalize(arena, &root[i]), "definalize\n"); + state[i] = deadSTATE; + } else { + /* ...expect it to be finalized soon */ + state[i] = finalizableSTATE; + } + /* Drop the root reference to it; this makes it */ + /* non-E-reachable: so either dead, or ready-to-finalize. */ + root[i] = NULL; + } + } + } + + while (mps_message_queue_type(&type, arena)) { + mps_message_t message; + cdie(mps_message_get(&message, arena, type), "message_get"); + if (type == mps_message_type_finalization()) { + /* Check finalized object, and perhaps resurrect it. 
*/ + mps_word_t *obj; + mps_word_t objind; + mps_addr_t objaddr; + + /* */ + cdie(0 == mps_message_clock(arena, message), + "message clock should be 0 (unset) for finalization messages"); + mps_message_finalization_ref(&objaddr, arena, message); + obj = objaddr; + objind = dylan_int_int(obj[vectorSLOT]); + printf("Finalizing: object %"PRIuLONGEST" at %p\n", + (ulongest_t)objind, objaddr); + /* */ + cdie(root[objind] == NULL, "finalized live"); + cdie(state[objind] == finalizableSTATE, "finalized dead"); + state[objind] = finalizedSTATE; + /* sometimes resurrect */ + if (rnd() % 2 == 0) + root[objind] = objaddr; + ++ finalizations; + } else if (type == mps_message_type_gc()) { + ++ collections; + } else { + error("Unexpected message type %lu.", (unsigned long)type); + } + mps_message_discard(arena, message); + } + } + + mps_ap_destroy(ap); + mps_root_destroy(mps_root[1]); + mps_root_destroy(mps_root[0]); + mps_pool_destroy(pool); + mps_chain_destroy(chain); + mps_fmt_destroy(fmt); +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + + testlib_init(argc, argv); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "arena_create\n"); + + test(arena, mps_class_amc()); + test(arena, mps_class_amcz()); + test(arena, mps_class_awl()); + test(arena, mps_class_ams()); + test(arena, mps_class_lo()); + + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/finaltest.c b/mps/code/finaltest.c new file mode 100644 index 00000000000..d262f9a06ab --- /dev/null +++ b/mps/code/finaltest.c @@ -0,0 +1,350 @@ +/* finaltest.c: LARGE-SCALE FINALIZATION TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * DESIGN + * + * .mode: This test has two modes. + * + * .mode.park: In this mode, we use the arena's default generation + * chain, leave the arena parked and call mps_arena_collect. This + * tests that the default generation chain works and that all segments + * get condemned via TraceStartCollectAll. (See job003771 item 4.) + * + * .mode.poll: In this mode, we use our own generation chain (with + * small generations), allocate into generation 1, unclamp the arena, + * and provoke collection by allocating. 
This tests that custom + * generation chains work, and that segments get condemned via + * TracePoll even if there is no allocation into generation 0 of the + * chain. (See job003771 item 5.) + * + * DEPENDENCIES + * + * This test uses the dylan object format, but the reliance on this + * particular format is not great and could be removed. + * + * NOTES + * + * This code was created by first copying + */ + +#include "mpm.h" +#include "testlib.h" +#include "mpslib.h" +#include "mps.h" +#include "mpscamc.h" +#include "mpscams.h" +#include "mpscawl.h" +#include "mpsclo.h" +#include "mpsavm.h" +#include "fmtdy.h" +#include "fmtdytst.h" +#include "mpstd.h" + +#include /* HUGE_VAL */ +#include /* fflush, printf, stdout */ + +enum { + ModePARK, /* .mode.park */ + ModePOLL /* .mode.poll */ +}; + + +#define testArenaSIZE ((size_t)16<<20) +#define rootCOUNT 20 +#define maxtreeDEPTH 9 +#define collectionCOUNT 10 + + +/* global object counter */ + +static mps_word_t object_count = 0; + +static mps_word_t make_numbered_cons(mps_word_t car, mps_word_t cdr, + mps_ap_t ap) +{ + mps_word_t cons; + die(make_dylan_vector(&cons, ap, 3), "make_dylan_vector"); + DYLAN_VECTOR_SLOT(cons, 0) = car; + DYLAN_VECTOR_SLOT(cons, 1) = cdr; + DYLAN_VECTOR_SLOT(cons, 2) = DYLAN_INT(object_count); + ++ object_count; + return cons; +} + +static mps_word_t make_numbered_tree(mps_word_t depth, + mps_ap_t ap) +{ + mps_word_t left, right; + if (depth < 2) { + left = DYLAN_INT(object_count); + right = DYLAN_INT(object_count); + } else { + left = make_numbered_tree(depth-1, ap); + right = make_numbered_tree(depth-1, ap); + } + return make_numbered_cons(left, right, ap); +} + +static void register_numbered_tree(mps_word_t tree, mps_arena_t arena) +{ + /* don't finalize ints */ + if ((tree & 1) == 0) { + mps_addr_t tree_ref = (mps_addr_t)tree; + die(mps_finalize(arena, &tree_ref), "mps_finalize"); + register_numbered_tree(DYLAN_VECTOR_SLOT(tree, 0), arena); + register_numbered_tree(DYLAN_VECTOR_SLOT(tree, 
1), arena); + } +} + +static mps_word_t make_indirect_cons(mps_word_t car, mps_word_t cdr, + mps_ap_t ap) +{ + mps_word_t cons, indirect; + die(make_dylan_vector(&indirect, ap, 1), "make_dylan_vector"); + DYLAN_VECTOR_SLOT(indirect, 0) = DYLAN_INT(object_count); + die(make_dylan_vector(&cons, ap, 3), "make_dylan_vector"); + DYLAN_VECTOR_SLOT(cons, 0) = car; + DYLAN_VECTOR_SLOT(cons, 1) = cdr; + DYLAN_VECTOR_SLOT(cons, 2) = indirect; + ++ object_count; + return cons; +} + +static mps_word_t make_indirect_tree(mps_word_t depth, + mps_ap_t ap) +{ + mps_word_t left, right; + if (depth < 2) { + left = DYLAN_INT(object_count); + right = DYLAN_INT(object_count); + } else { + left = make_indirect_tree(depth-1, ap); + right = make_indirect_tree(depth-1, ap); + } + return make_indirect_cons(left, right, ap); +} + +static void register_indirect_tree(mps_word_t tree, mps_arena_t arena) +{ + /* don't finalize ints */ + if ((tree & 1) == 0) { + mps_word_t indirect = DYLAN_VECTOR_SLOT(tree,2); + mps_addr_t indirect_ref = (mps_addr_t)indirect; + die(mps_finalize(arena, &indirect_ref), "mps_finalize"); + register_indirect_tree(DYLAN_VECTOR_SLOT(tree, 0), arena); + register_indirect_tree(DYLAN_VECTOR_SLOT(tree, 1), arena); + } +} + +static void *root[rootCOUNT]; + +static void test_trees(int mode, const char *name, mps_arena_t arena, + mps_pool_t pool, mps_ap_t ap, + mps_word_t (*make)(mps_word_t, mps_ap_t), + void (*reg)(mps_word_t, mps_arena_t)) +{ + size_t collections = 0; + size_t finals = 0; + size_t i; + int object_alloc; + PoolClass klass = ClassOfPoly(Pool, pool); + + object_count = 0; + + printf("---- Mode %s, pool class %s, %s trees ----\n", + mode == ModePARK ? 
"PARK" : "POLL", + ClassName(klass), name); + mps_arena_park(arena); + mps_message_type_enable(arena, mps_message_type_gc()); + + /* make some trees */ + for(i = 0; i < rootCOUNT; ++i) { + root[i] = (void *)(*make)(maxtreeDEPTH, ap); + (*reg)((mps_word_t)root[i], arena); + } + + /* clean out the roots */ + for(i = 0; i < rootCOUNT; ++i) { + root[i] = 0; + } + + while (finals < object_count && collections < collectionCOUNT) { + mps_message_type_t type; + mps_word_t final_this_time = 0; + switch (mode) { + default: + case ModePARK: + printf("Collecting..."); + (void)fflush(stdout); + die(mps_arena_collect(arena), "collect"); + printf(" Done.\n"); + break; + case ModePOLL: + mps_arena_release(arena); + printf("Allocating..."); + (void)fflush(stdout); + object_alloc = 0; + while (object_alloc < 1000 && !mps_message_poll(arena)) + (void)DYLAN_INT(object_alloc++); + printf(" Done.\n"); + break; + } + { + size_t live_size = (object_count - finals) * sizeof(void *) * 3; + size_t total_size = mps_pool_total_size(pool); + size_t free_size = mps_pool_free_size(pool); + Insist(free_size <= total_size); + Insist(free_size + live_size <= total_size); + } + while (mps_message_queue_type(&type, arena)) { + mps_message_t message; + cdie(mps_message_get(&message, arena, type), "message_get"); + if (type == mps_message_type_finalization()) { + mps_addr_t objaddr; + mps_message_finalization_ref(&objaddr, arena, message); + ++ final_this_time; + } else if (type == mps_message_type_gc()) { + ++ collections; + } else { + error("Unexpected message type %lu.", (unsigned long)type); + } + mps_message_discard(arena, message); + } + finals += final_this_time; + printf("%"PRIuLONGEST" objects finalized: total %"PRIuLONGEST + " of %"PRIuLONGEST"\n", (ulongest_t)final_this_time, + (ulongest_t)finals, (ulongest_t)object_count); + } + + if (finals != object_count) { + PoolClass poolClass = ClassOfPoly(Pool, BufferOfAP(ap)->pool); + error("Not all objects were finalized for %s in mode %s.", + 
ClassName(poolClass), + mode == ModePOLL ? "POLL" : "PARK"); + } + + if (collections > collectionCOUNT) + error("Expected no more than %lu collections but got %lu.", + (unsigned long)collectionCOUNT, (unsigned long)collections); +} + +static void test_pool(int mode, mps_arena_t arena, mps_chain_t chain, + mps_pool_class_t pool_class) +{ + mps_ap_t ap; + mps_fmt_t fmt; + mps_pool_t pool; + mps_root_t mps_root; + + die(mps_fmt_create_A(&fmt, arena, dylan_fmt_A()), "fmt_create\n"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, fmt); + if (mode == ModePOLL) { + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, chain); + MPS_ARGS_ADD(args, MPS_KEY_GEN, 1); + } + die(mps_pool_create_k(&pool, arena, pool_class, args), + "pool_create\n"); + } MPS_ARGS_END(args); + die(mps_root_create_table(&mps_root, arena, mps_rank_exact(), (mps_rm_t)0, + root, (size_t)rootCOUNT), + "root_create\n"); + die(mps_ap_create(&ap, pool, mps_rank_exact()), "ap_create\n"); + + test_trees(mode, "numbered", arena, pool, ap, make_numbered_tree, + register_numbered_tree); + test_trees(mode, "indirect", arena, pool, ap, make_indirect_tree, + register_indirect_tree); + + mps_ap_destroy(ap); + mps_root_destroy(mps_root); + mps_pool_destroy(pool); + mps_fmt_destroy(fmt); +} + + +static void test_mode(int mode, mps_arena_t arena, mps_chain_t chain) +{ + test_pool(mode, arena, chain, mps_class_amc()); + test_pool(mode, arena, chain, mps_class_amcz()); + test_pool(mode, arena, chain, mps_class_ams()); + test_pool(mode, arena, chain, mps_class_awl()); + test_pool(mode, arena, chain, mps_class_lo()); +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_thr_t thread; + mps_chain_t chain; + mps_gen_param_s params[2]; + size_t gens = 2; + size_t i; + + testlib_init(argc, argv); + + MPS_ARGS_BEGIN(args) { + /* Randomize pause time as a regression test for job004007. */ + double t = rnd_double(); + if (t == 0.0) + t = HUGE_VAL; /* Would prefer to use INFINITY but it's not in C89. 
*/ + else + t = 1 / t - 1; + MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, t); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), + "arena_create\n"); + } MPS_ARGS_END(args); + mps_message_type_enable(arena, mps_message_type_finalization()); + die(mps_thread_reg(&thread, arena), "thread_reg\n"); + for (i = 0; i < gens; ++i) { + params[i].mps_capacity = 1; + params[i].mps_mortality = 0.5; + } + die(mps_chain_create(&chain, arena, gens, params), "chain_create\n"); + + test_mode(ModePOLL, arena, chain); + test_mode(ModePARK, arena, NULL); + + mps_arena_park(arena); + mps_chain_destroy(chain); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmtdy.c b/mps/code/fmtdy.c new file mode 100644 index 00000000000..6cfbca681fe --- /dev/null +++ b/mps/code/fmtdy.c @@ -0,0 +1,886 @@ +/* fmtdy.c: DYLAN OBJECT FORMAT IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * .readership: MPS developers, Dylan developers + * + * .layouts: + * + * All objects, B: + * + * B W pointer to wrapper + * B+1 object body + * + * Forwarded (or padding) one-word objects, B: + * + * B N | 0b01 new address | 1 + * + * Forwarded (or padding) multi-word objects, B: + * + * B N | 0b10 new address | 2 + * B+1 L limit of object (addr of end + 1) + * + * Wrappers, W: + * + * W WW pointer to wrapper wrapper + * W+1 class DylanWorks class pointer (traceable) + * W+2 subtype_mask DylanWorks subtype_mask (untraceable) + * W+3 (FL << 2) | FF fixed part length and format + * W+4 (VS << 3) | VF variable part format and element size + * W+5 (WT << 2) | 1 tagged pattern vector length + * W+6 pattern 0 patterns for fixed part fields + * W+6+WT-1 pattern WT-1 + * + * The wrapper wrapper, WW: + * + * WW WW WW is it's own wrapper + * WW+1 class DylanWorks class of wrappers + * WW+2 subtype_mask DylanWorks subtype_mask for WW + * WW+3 (4 << 2) | 2 wrappers have four patterned fields + * WW+4 (0 << 3) | 0 wrappers have a non-traceable vector + * WW+5 (1 << 2) | 1 one pattern word 
follows + * WW+6 0b001 only field 0 is traceable + * + * .improve.abstract.access: There are severe common subexpression + * problems. In particular, code for accessing subfields in the + * fh and vh words is repeated. It should be abstracted into + * macros (or functions). This is particularly bad for the vh + * word which has 4 subfields (version, vb, es, vf). + */ + + +#include "fmtdy.h" +#include "fmtno.h" +#include "mps.h" +#include +#include +#include +#include + + +#define notreached() assert(0) +#define unused(param) ((void)param) + + +#ifdef _MSC_VER + +/* MPS_END causes "constant conditional" warnings. */ +#pragma warning(disable: 4127) + +#endif /* _MSC_VER */ + + +#define ALIGN sizeof(mps_word_t) + +#define FMTDY_WORD_WIDTH (sizeof(mps_word_t) * CHAR_BIT) +#define FMTDY_WORD_SHIFT (FMTDY_WORD_WIDTH == 64 ? 6 : 5) +/* FMTDY_WORD_SHIFT is a bit hacky, but good enough for tests. */ + +#ifdef FMTDY_COUNTING +#define FMTDY_COUNT(x) x +#define FMTDY_FL_LIMIT 16 +static unsigned long dylan_vff_counts[4*8]; +static unsigned long dylan_fl_counts[FMTDY_FL_LIMIT]; +static unsigned long dylan_fl_oversize_count; +static unsigned long dylan_fw_counts[2]; +#else +#define FMTDY_COUNT(x) +#endif /* FMTDY_COUNTING */ + + +int dylan_wrapper_check(mps_word_t *w) +{ + mps_word_t *ww; + mps_word_t vh; + mps_word_t version; + mps_word_t reserved; + mps_word_t klass; + mps_word_t fh, fl, ff; + mps_word_t vb, es, vf; + mps_word_t vt, t; + + assert(w != NULL); + assert(((mps_word_t)w & 3) == 0); + + /* The first word of the wrapper is a pointer to a wrapper wrapper, */ + /* which always has the same contents. Check it. */ + + /* .improve.unique.wrapper: When this becomes part of the Dylan + * run-time, it would be possible to know the address of a unique + * wrapper wrapper and check that instead. 
*/ + + assert(w[WW] != 0); + assert((w[WW] & 3) == 0); /* wrapper wrapper is aligned */ + ww = (mps_word_t *)w[WW]; + assert(ww[WW] == w[WW]); /* wrapper wrapper is own wrapper */ + assert(ww[WC] != 0); /* wrapper class exists */ + assert((ww[WC] & 3) == 0); /* wrapper class is aligned */ + assert(ww[WF] == (((WS - 1) << 2) | 2)); /* fields with patterns */ + assert((ww[WV] & 0x00ffffff) == 0);/* non-traceable vector */ + /* Code in this file only works for version 2 */ + assert(((ww[WV] >> (FMTDY_WORD_WIDTH - 8)) & 0xff) == 2); + assert(ww[WS] == ((1 << 2) | 1)); /* one pattern word in wrapper wrapper */ + /* The first field is traceable, the second field can be traced, */ + /* but doesn't need to be. */ + assert((ww[WP] == 1) || (ww[WP] == 3)); + unused(ww); + + /* Unpack the wrapper. */ + + klass = w[WC]; /* class */ + unused(klass); + fh = w[WF]; /* fixed part header word */ + fl = fh >> 2; /* fixed part length */ + ff = fh & 3; /* fixed part format code */ + vh = w[WV]; /* variable part header */ + version = (vh >> (FMTDY_WORD_WIDTH - 8)) & 0xff; + assert(version == 2); /* Code in this file only works for version 2 */ + unused(version); + reserved = (vh >> 8) & 0xff; + assert(reserved == 0); + unused(reserved); + vb = (vh >> 16) & 0xff; + unused(vb); + es = (vh & 0xff) >> 3;/* element size */ + vf = vh & 7; /* variable part format code */ + vt = w[WS]; /* vector total word (Dylan-tagged) */ + t = vt >> 2; /* vector total length */ + unused(t); + + /* The second word is the class of the wrapped object. */ + /* It would be good to check which pool this is in. */ + + assert(klass != 0); /* class exists */ + assert((klass & 3) == 0); /* class is aligned */ + + /* The third word contains the fixed part format and length. */ + /* The only illegal format is 3. Anything else is possible, although */ + /* we could do some bound checking on the length if we knew more about */ + /* the surroundings of the object. */ + + /* Fixed part format 3 is reserved. 
*/ + assert(ff != 3); + unused(ff); + + /* Zero length fixed part is only legal in format 0. */ + /* Current Dylan run-time does not honour this so I remove it for now */ + /* We probably want this check as then we can scan without having to */ + /* check for 0 fixed length fields as a special case */ + /* assert(ff == 0 || fl != 0); */ + unused(fl); + /* The fourth word contains the variable part format and element */ + /* size. This assumes that DylanWorks is only going to use byte */ + /* vectors in the non-word case. */ + + /* Variable part format 6 is reserved. */ + assert(vf != 6); + unused(vf); + + /* There should be no shift in word vector formats. */ + assert((vf & 6) == 4 || es == 0); + unused(es); + + /* The fifth word is the number of patterns in the pattern */ + /* vector. This can be calculated from the fixed part length. */ + /* The word is also tagged like a DylanWorks integer. */ + + assert((vt & 3) == 1); + + /* The pattern vector in the wrapper should be of non-zero length */ + /* only if there is a patterned fixed part. */ + assert(ff == 2 || t == 0); + + /* The number of patterns is (fixed fields+31)/32. */ + assert(ff != 2 || t == ((fl + FMTDY_WORD_WIDTH - 1) / FMTDY_WORD_WIDTH)); + + /* The patterns are random bits, so we can't check them. However, */ + /* the left-over bits in the last pattern should be zero. */ + + assert(ff != 2 || (w[WS+t] >> ((fh>>2) & (FMTDY_WORD_WIDTH-1))) == 0); + + return 1; +} + + +/* Scan a contiguous array of references in [base, limit). */ +/* This code has been hand-optimised and examined using Metrowerks */ +/* Codewarrior on a 68K and also Microsoft Visual C on a 486. The */ +/* variables in the loop allocate nicely into registers. Alter with */ +/* care. 
 */

/* dylan_scan_contig -- fix every untagged (pointer) word in [base, limit)
 *
 * Words with either of the low two bits set are Dylan immediates
 * (pointers are tagged with 0) and are skipped.  Returns the first
 * non-OK result from MPS_FIX2, or MPS_RES_OK.  The goto structure is
 * deliberate -- see the hand-optimisation note above; do not restyle. */
static mps_res_t dylan_scan_contig(mps_ss_t mps_ss,
                                   mps_addr_t *base, mps_addr_t *limit)
{
  mps_res_t res;
  mps_addr_t *p;        /* reference cursor */
  mps_addr_t r;         /* reference to be fixed */

  MPS_SCAN_BEGIN(mps_ss) {
    p = base;
  loop: if(p >= limit) goto out;
    r = *p++;
    if(((mps_word_t)r&3) != 0)  /* pointers tagged with 0 */
      goto loop;                /* not a pointer */
    if(!MPS_FIX1(mps_ss, r)) goto loop;
    res = MPS_FIX2(mps_ss, p-1);  /* p already advanced: fix p-1 */
    if(res == MPS_RES_OK) goto loop;
    return res;
  out: assert(p == limit);
  } MPS_SCAN_END(mps_ss);

  return MPS_RES_OK;
}

/* dylan_weak_dependent -- returns the linked object, if any.
 *
 * parent must be a weak vector (traceable fixed part with at least one
 * fixed field); its first fixed field (object[1]) is the dependent
 * object.  Both properties are checked by assert below. */
mps_addr_t dylan_weak_dependent(mps_addr_t parent)
{
  mps_word_t *object;
  mps_word_t *wrapper;
  mps_word_t fword;     /* fixed part header word of the wrapper */
  mps_word_t fl;
  mps_word_t ff;

  assert(parent != NULL);
  object = (mps_word_t *)parent;
  wrapper = (mps_word_t *)object[0];  /* header word is the wrapper */
  assert(dylan_wrapper_check(wrapper));
  fword = wrapper[3];
  ff = fword & 3;
  /* traceable fixed part */
  assert(ff == 1);
  unused(ff);
  fl = fword & ~(mps_word_t)3;  /* non-zero iff there are fixed fields */
  /* at least one fixed field */
  assert(fl >= 1);
  unused(fl);
  return (mps_addr_t) object[1];
}


/* Scan weakly a contiguous array of references in [base, limit). */
/* Only required to scan vectors for Dylan Weak Tables. */
/* Depends on the vector length field being scannable (ie a tagged */
/* integer). */
/* When a reference that has been fixed to NULL is detected the */
/* corresponding reference in the associated table (pointed to by the */
/* assoc variable) will be deleted.
 */

/* dylan_scan_contig_weak -- weakly fix [base, limit)
 *
 * Like dylan_scan_contig, but when MPS_FIX2 nulls out a reference that
 * was previously non-null (a splatted weak reference), the entry at the
 * same offset in the associated table `assoc` is deleted (set to 0).
 * `objectBase` is the start of the weak vector, used to compute that
 * offset.  The cursor is not advanced past *p until after the fix,
 * because the fixed slot itself is re-read -- do not restyle. */
static mps_res_t
dylan_scan_contig_weak(mps_ss_t mps_ss,
                       mps_addr_t *base, mps_addr_t *limit,
                       mps_addr_t *objectBase, mps_addr_t *assoc)
{
  mps_addr_t *p;        /* reference cursor */
  mps_res_t res;
  mps_addr_t r;         /* reference to be fixed */

  MPS_SCAN_BEGIN(mps_ss) {
    p = base;
    goto skip_inc;      /* first slot: don't pre-increment */
  loop:
    ++p;
  skip_inc:
    if(p >= limit)
      goto out;
    r = *p;
    if(((mps_word_t)r & 3) != 0)  /* non-pointer */
      goto loop;
    if(!MPS_FIX1(mps_ss, r))
      goto loop;
    res = MPS_FIX2(mps_ss, p);
    if(res == MPS_RES_OK) {
      /* Weak reference splatted (was non-null, now null)? */
      if(*p == 0 && r != 0) {
        if(assoc != NULL) {
          assoc[p-objectBase] = 0;  /* delete corresponding entry */
        }
      }
      goto loop;
    }
    return res;
  out:
    assert(p == limit);
  } MPS_SCAN_END(mps_ss);

  return MPS_RES_OK;
}


/* dylan_scan_pat -- scan according to pattern */

/* Scan an array of words in [base, limit) using the patterns at pats */
/* to determine which words can be fixed. */
/* This code has been hand-optimised and examined using Metrowerks */
/* Codewarrior on a 68K and also Microsoft Visual C on a 486.  The */
/* variables in the loop allocate nicely into registers.  Alter with */
/* care.
 */

/* dylan_scan_pat -- fix the words of [base, limit) selected by bitmaps
 *
 * Each word of pats[] is a bitmap covering FMTDY_WORD_WIDTH fields,
 * least significant bit first: a 1 bit means the field may be a
 * reference and is fixed (if tagged as a pointer).  The inner loop
 * exits early once the remaining pattern bits are all zero.
 * Hand-optimised (see note above); do not restyle. */
static mps_res_t dylan_scan_pat(mps_ss_t mps_ss,
                                mps_addr_t *base, mps_addr_t *limit,
                                mps_word_t *pats, mps_word_t nr_pats)
{
  mps_res_t res;
  mps_word_t *pc = pats;/* pattern cursor */
  mps_word_t pat;       /* pattern register */
  mps_addr_t *p;        /* reference cursor (start of current pattern's span) */
  mps_addr_t *pp;       /* inner loop cursor */
  int b;                /* bit */
  mps_addr_t r;         /* reference to be fixed */

  unused(nr_pats);      /* only used in the assert below */

  MPS_SCAN_BEGIN(mps_ss) {
    p = base;
    goto in;
  pat: p += FMTDY_WORD_WIDTH;  /* advance to the next pattern's span */
    if(p >= limit) goto out;
  in: pp = p;
    pat = *pc++;
  loop: if(pat == 0) goto pat;  /* no more 1 bits in this pattern */
    ++pp;
    b = (int)(pat & 1);
    pat >>= 1;
    if(b == 0) goto loop;
    r = *(pp-1);
    if(((mps_word_t)r&3) != 0)  /* pointers tagged with 0 */
      goto loop;                /* not a pointer */
    if(!MPS_FIX1(mps_ss, r)) goto loop;
    res = MPS_FIX2(mps_ss, pp-1);
    if(res == MPS_RES_OK) goto loop;
    return res;
  out: assert(p < limit + FMTDY_WORD_WIDTH);
    assert(pc == pats + nr_pats);  /* consumed exactly all patterns */
  } MPS_SCAN_END(mps_ss);

  return MPS_RES_OK;
}


/* NONWORD_LENGTH -- number of words occupied by _vt elements of
 * (log2) element size _es: round up when elements are sub-word,
 * multiply when they are larger than a word. */
#define NONWORD_LENGTH(_vt, _es) \
  ((_es) < FMTDY_WORD_SHIFT ? \
   ((_vt) + ((mps_word_t)1 << (FMTDY_WORD_SHIFT - (_es))) - 1) >> \
   (FMTDY_WORD_SHIFT - (_es)) : \
   (_vt) << ((_es) - FMTDY_WORD_SHIFT))


/* dylan_scan1 -- scan (or skip) one object at *object_io
 *
 * On entry, *object_io points at an object's header word.  Forwarded
 * or padding objects (low header bits non-zero, see .layouts) are
 * skipped without scanning.  Otherwise the wrapper is fixed, then the
 * fixed part is scanned according to its format (0 = no tracing,
 * 1 = all traceable, 2 = pattern-driven), then the variable part
 * according to its format.  On success, *object_io is advanced to the
 * next object and MPS_RES_OK returned. */
mps_res_t dylan_scan1(mps_ss_t mps_ss, mps_addr_t *object_io)
{
  mps_addr_t *p;        /* cursor in object */
  mps_addr_t *q;        /* cursor limit for loops */
  mps_word_t h;         /* header word */
  mps_word_t *w;        /* pointer to wrapper */
  mps_word_t fh;        /* fixed part header word */
  mps_word_t fl;        /* fixed part length, in words */
  mps_word_t vh;        /* variable part header */
  mps_word_t vf;        /* variable part format */
  mps_word_t vl;        /* variable part actual length */
  unsigned vb;          /* vector bias */
  unsigned es;          /* variable part element size (log2 of bits) */
  mps_word_t vt;        /* total vector length */
  mps_res_t res;

  assert(object_io != NULL);

  p = (mps_addr_t *)*object_io;
  assert(p != NULL);

  h = (mps_word_t)p[0];         /* load the header word */

  /* If the object is forwarded, simply skip it. */
  if(h & 3) {
    mps_addr_t l;

    if((h & 3) == 1) {
      /* single-word */
      l = (mps_addr_t)(p + 1);
      FMTDY_COUNT(++dylan_fw_counts[0]);
    } else {                    /* multi-word */
      assert((h & 3) == 2);
      l = (mps_addr_t)p[1];     /* second word holds the limit */
      FMTDY_COUNT(++dylan_fw_counts[1]);
    }

    *object_io = l;
    return MPS_RES_OK;
  }

  MPS_SCAN_BEGIN(mps_ss) {
    res = MPS_FIX12(mps_ss, p); /* fix the wrapper */
  } MPS_SCAN_END(mps_ss);
  if (res != MPS_RES_OK)
    return res;
  w = (mps_word_t *)p[0];       /* wrapper is header word */
  assert(dylan_wrapper_check(w));

  ++p;                          /* skip header */

  /* Fixed Part */

  fh = w[WF];
  fl = fh >> 2;                 /* get the fixed part length */

  /* It might be worth inlining common cases here, for example, */
  /* pairs.  This can be done by examining fh as a whole. */

  FMTDY_COUNT(fl < FMTDY_FL_LIMIT ? ++dylan_fl_counts[fl] :
              ++dylan_fl_oversize_count);
  if(fl > 0) {
    q = p + fl;                 /* set q to end of fixed part */
    switch(fh & 3) {            /* switch on the fixed format */
    case 0:                     /* all non-traceable fields */
      p = q;
      break;

    case 1:                     /* all traceable fields */
      res = dylan_scan_contig(mps_ss, p, q);
      if(res) return res;
      break;

    case 2:                     /* patterns */
      res = dylan_scan_pat(mps_ss, p, q, &w[WP], w[WS]>>2);
      if(res) return res;
      break;

    default:                    /* format 3 is reserved */
      notreached();
      break;
    }
    p = q;                      /* advance past fixed part */
  }

  /* Variable Part */
  vh = w[WV];
  vf = vh & 7;                  /* get variable part format */
  FMTDY_COUNT(++dylan_vff_counts[(vf << 2)|(fh&3)]);
  if(vf != 7)                   /* format 7: no variable part */
  {
    vt = *(mps_word_t *)p;      /* total vector length */
    assert((vt & 3) == 1);      /* check Dylan integer tag */
    vt >>= 2;                   /* untag it */
    ++p;

    switch(vf)
    {
    case 0:                     /* non-stretchy non-traceable */
      p += vt;
      break;

    case 1:                     /* stretchy non-traceable */
      p += vt + 1;              /* extra word for current length */
      notreached();             /* Not used by DylanWorks yet */
      break;

    case 2:                     /* non-stretchy traceable */
      q = p + vt;
      res = dylan_scan_contig(mps_ss, p, q);
      if(res) return res;
      p = q;
      break;

    case 3:                     /* stretchy traceable */
      vl = *(mps_word_t *)p;    /* vector length */
      assert((vl & 3) == 1);    /* check Dylan integer tag */
      vl >>= 2;                 /* untag it */
      ++p;
      res = dylan_scan_contig(mps_ss, p, p + vl);
      if(res) return res;
      p += vt;                  /* skip to end of whole vector */
      notreached();             /* DW doesn't create them yet */
      break;

    case 4:                     /* non-word */
      es = (unsigned)(vh & 0xff) >> 3;
      vb = (unsigned)((vh >> 16) & 0xff);
      vt += vb;
      p += NONWORD_LENGTH(vt, es);
      break;

    case 5:                     /* stretchy non-word */
      es = (unsigned)(vh & 0xff) >> 3;
      vb = (unsigned)((vh >> 16) & 0xff);
      vt += vb;
      p += NONWORD_LENGTH(vt, es) + 1;
      notreached();             /* DW doesn't create them yet */
      break;

    default:                    /* format 6 is reserved */
      notreached();
      break;
    }
  }

  *object_io = (mps_addr_t)p;
  return MPS_RES_OK;
}

static mps_res_t dylan_scan(mps_ss_t mps_ss,
                            mps_addr_t base, mps_addr_t limit)
{
  /* Scan every object in [base, limit) with dylan_scan1, asserting
   * that each call makes progress. */
  mps_res_t res;
  mps_addr_t prev = base;       /* for the progress assertion only */

  while(base < limit) {
    prev = base;
    res = dylan_scan1(mps_ss, &base);
    if(res) return res;
    assert(prev < base);        /* dylan_scan1 must advance the cursor */
  }

  assert(base == limit);

  return MPS_RES_OK;
}

/* dylan_class -- return pointer indicating class of object
 *
 * Return wrapper pointer, except for broken hearts or padding
 */

static mps_addr_t dylan_class(mps_addr_t obj)
{
  mps_word_t first_word = ((mps_word_t *)obj)[0];

  if((first_word & 3) != 0)     /* broken heart or padding */
    return NULL;
  else
    return (mps_addr_t)first_word;
}

/* dylan_scan1_weak -- scan one weak vector at *object_io
 *
 * The object must be a weak vector: traceable fixed part (ff == 1)
 * with at least one fixed field -- the first holds the associated
 * table -- and a non-stretchy traceable variable part (vf == 2).
 * Delegates the weak fixing (and deletion of splatted entries in the
 * associated table) to dylan_scan_contig_weak.  On success,
 * *object_io is advanced past the object. */
mps_res_t dylan_scan1_weak(mps_ss_t mps_ss, mps_addr_t *object_io)
{
  mps_addr_t *assoc;            /* the associated table */
  mps_addr_t *base;
  mps_addr_t *p, q;
  mps_res_t res;
  mps_word_t *w;                /* wrapper */
  mps_word_t fword, ff, fl;     /* fixed part header, format, length */
  mps_word_t h;                 /* header word */
  mps_word_t vword, vf, vl;     /* variable part header, format, length */

  assert(object_io != NULL);
  base = (mps_addr_t *)*object_io;
  assert(base != NULL);
  p = base;

  h = (mps_word_t)p[0];
  /* object should not be forwarded (as there is no forwarding method) */
  assert((h & 3) == 0);
  unused(h);

  MPS_SCAN_BEGIN(mps_ss) {
    res = MPS_FIX12(mps_ss, p); /* fix the wrapper */
  } MPS_SCAN_END(mps_ss);
  if (res != MPS_RES_OK)
    return res;

  /* w points to wrapper */
  w = (mps_word_t *)p[0];

  assert(dylan_wrapper_check(w));

  ++p;                          /* skip header */

  fword = w[WF];
  fl = fword >> 2;
  /* weak vectors should have at least one fixed field */
  /* (for assoc field) */
  assert(fl >= 1);

  ff = fword & 3;

  /* weak vectors should have traceable fixed format */
  assert(ff == 1);
  unused(ff);

  assoc = (mps_addr_t *)p[0];   /* first fixed field is the table */

  vword = w[WV];
  vf = vword & 7;
  vl = (mps_word_t)p[fl] >> 2;  /* untag the variable part length */

  /* weak vectors should be non-stretchy traceable */
  assert(vf == 2);
  unused(vf);

  /* q is end of the object.  There are fl fixed fields, vl variable */
  /* fields and another slot that contains the vector length */
  q = p + fl + vl + 1;

  res = dylan_scan_contig_weak(mps_ss, p, q, base, assoc);
  if(res != MPS_RES_OK) {
    return res;
  }

  *object_io = q;
  return MPS_RES_OK;
}


/* dylan_scan_weak -- weakly scan every object in [base, limit) */
static mps_res_t dylan_scan_weak(mps_ss_t mps_ss,
                                 mps_addr_t base, mps_addr_t limit)
{
  mps_res_t res;

  while(base < limit) {
    res = dylan_scan1_weak(mps_ss, &base);
    if(res) return res;
  }

  assert(base == limit);

  return MPS_RES_OK;
}

/* dylan_skip -- return the address just past the object at `object`
 *
 * Handles forwarded/padding objects via the header tag, otherwise
 * computes the size from the wrapper's fixed and variable part
 * descriptors (mirroring the cursor arithmetic in dylan_scan1). */
mps_addr_t dylan_skip(mps_addr_t object)
{
  mps_addr_t *p;        /* cursor in object */
  mps_word_t *w;        /* wrapper cursor */
  mps_word_t h;         /* header word */
  mps_word_t vh;        /* variable part header */
  mps_word_t vf;        /* variable part format */
  mps_word_t vt;        /* total vector length */
  unsigned vb;          /* vector bias */
  unsigned es;          /* variable part element size (log2 of bits) */

  p = (mps_addr_t *)object;
  assert(p != NULL);

  h = (mps_word_t)p[0]; /* load the header word */

  /* If the object is forwarded, simply skip it.
 */
  if(h & 3) {
    mps_addr_t l;

    if((h & 3) == 1)            /* single-word */
      l = (mps_addr_t)(p + 1);
    else {                      /* multi-word */
      assert((h & 3) == 2);
      l = (mps_addr_t)p[1];     /* second word holds the limit */
    }

    return l;
  }

  w = (mps_word_t *)h;          /* load the fixed wrapper */
  assert(dylan_wrapper_check(w));
  ++p;                          /* skip header */

  p += w[WF] >> 2;              /* skip fixed part fields */

  vh = w[WV];
  vf = vh & 7;                  /* get variable part format */
  if(vf != 7)                   /* format 7: no variable part */
  {
    vt = *(mps_word_t *)p;
    assert((vt & 3) == 1);      /* check Dylan integer tag */
    vt = vt >> 2;               /* total length */
    ++p;

    p += vf & 1;                /* stretchy vectors have an extra word */

    if((vf & 6) == 4)           /* non-word */
    {
      es = (unsigned)(vh & 0xff) >> 3;
      vb = (unsigned)((vh >> 16) & 0xff);
      vt += vb;
      p += NONWORD_LENGTH(vt, es);
    }
    else
      p += vt;
  }

  return (mps_addr_t)p;
}

/* dylan_copy -- copy the object at `old` to `new`
 *
 * The length comes from dylan_skip; `new` must be large enough. */
static void dylan_copy(mps_addr_t old, mps_addr_t new)
{
  char *base = (char *)old;
  char *limit = (char *)dylan_skip(old);
  size_t length;
  assert(base < limit);
  length = (size_t)(limit - base);
  assert(dylan_wrapper_check(*(mps_word_t **)old));
  /* .improve.memcpy: Can do better here as we know that new and old
     will be aligned (to MPS_PF_ALIGN) */
  (void)memcpy(new, old, length);
}

/* dylan_isfwd -- if `object` is forwarded, return the new address,
 * otherwise NULL.  The forwarding address is the header with its tag
 * bits (1 or 2, see .layouts) masked off. */
static mps_addr_t dylan_isfwd(mps_addr_t object)
{
  mps_word_t h, tag;

  h = *(mps_word_t *)object;
  tag = h & 3;
  if(tag != 0)
    return (mps_addr_t)(h - tag);
  else
    return NULL;
}

/* dylan_fwd -- forward the object at `old` to `new`
 *
 * Overwrites the header with the tagged new address; multi-word
 * objects also record their limit so dylan_skip still works. */
static void dylan_fwd(mps_addr_t old, mps_addr_t new)
{
  mps_word_t *p;
  mps_addr_t limit;

  assert(dylan_isfwd(old) == NULL);          /* not already forwarded */
  assert((*(mps_word_t *)old & 3) == 0); /* mustn't forward padding objects */
  assert(((mps_word_t)new & 3) == 0);        /* new address is aligned */

  p = (mps_word_t *)old;
  limit = dylan_skip(old);
  if(limit == &p[1])            /* single-word object? */
    p[0] = (mps_word_t)new | 1;
  else {
    p[0] = (mps_word_t)new | 2;
    p[1] = (mps_word_t)limit;
  }
}

/* dylan_pad -- fill [addr, addr+size) with a padding object
 *
 * Padding uses the forwarding tags with a zero address: header 1 for
 * a single word, header 2 plus a limit word otherwise. */
void dylan_pad(mps_addr_t addr, size_t size)
{
  mps_word_t *p;

  p = (mps_word_t *)addr;
  if(size == sizeof(mps_word_t)) /* single-word object? */
    p[0] = 1;
  else {
    p[0] = 2;
    p[1] = (mps_word_t)((char *)addr + size);
  }
}

/* dylan_ispad -- is the object at addr a padding object?
 *
 * A pad's header is exactly 1 or 2 (a forwarded object has a non-zero
 * address in the upper bits, so its header is never just the tag). */
mps_bool_t dylan_ispad(mps_addr_t addr)
{
  mps_word_t *p;

  p = (mps_word_t *)addr;
  return p[0] == 1 || p[0] == 2;
}


/* The dylan format structures */

static struct mps_fmt_A_s dylan_fmt_A_s =
{
  ALIGN,
  dylan_scan,
  dylan_skip,
  dylan_copy,
  dylan_fwd,
  dylan_isfwd,
  dylan_pad
};

static struct mps_fmt_B_s dylan_fmt_B_s =
{
  ALIGN,
  dylan_scan,
  dylan_skip,
  dylan_copy,
  dylan_fwd,
  dylan_isfwd,
  dylan_pad,
  dylan_class
};

/* Functions returning the dylan format structures */

mps_fmt_A_s *dylan_fmt_A(void)
{
  return &dylan_fmt_A_s;
}

mps_fmt_B_s *dylan_fmt_B(void)
{
  return &dylan_fmt_B_s;
}

/* Format variety-independent version that picks the right format
 * variety and creates it. */

mps_res_t dylan_fmt(mps_fmt_t *mps_fmt_o, mps_arena_t arena)
{
  return mps_fmt_create_B(mps_fmt_o, arena, dylan_fmt_B());
}

/* The weak format structures */
/* Weak objects are never copied or forwarded, so those methods are
 * the no-op versions from fmtno. */

static struct mps_fmt_A_s dylan_fmt_A_weak_s =
{
  ALIGN,
  dylan_scan_weak,
  dylan_skip,
  no_copy,
  no_fwd,
  no_isfwd,
  no_pad
};

static struct mps_fmt_B_s dylan_fmt_B_weak_s =
{
  ALIGN,
  dylan_scan_weak,
  dylan_skip,
  no_copy,
  no_fwd,
  no_isfwd,
  no_pad,
  dylan_class
};

/* Functions returning the weak format structures */

mps_fmt_A_s *dylan_fmt_A_weak(void)
{
  return &dylan_fmt_A_weak_s;
}


mps_fmt_B_s *dylan_fmt_B_weak(void)
{
  return &dylan_fmt_B_weak_s;
}


/* Format variety-independent version that picks the right format
 * variety and creates it.
*/ + +mps_res_t dylan_fmt_weak(mps_fmt_t *mps_fmt_o, mps_arena_t arena) +{ + return mps_fmt_create_B(mps_fmt_o, arena, dylan_fmt_B_weak()); +} + + + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmtdy.h b/mps/code/fmtdy.h new file mode 100644 index 00000000000..0bf3dddda4c --- /dev/null +++ b/mps/code/fmtdy.h @@ -0,0 +1,75 @@ +/* fmtdy.h: DYLAN OBJECT FORMAT + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#ifndef fmtdy_h +#define fmtdy_h + +#include "mps.h" + +/* Low-level routines, exposed here so that the with-header format + * can use common code. */ +extern mps_res_t dylan_scan1(mps_ss_t, mps_addr_t *); +extern mps_res_t dylan_scan1_weak(mps_ss_t, mps_addr_t *); + +/* Format */ +extern mps_fmt_A_s *dylan_fmt_A(void); +extern mps_fmt_A_s *dylan_fmt_A_weak(void); +extern mps_fmt_B_s *dylan_fmt_B(void); +extern mps_fmt_B_s *dylan_fmt_B_weak(void); +extern mps_res_t dylan_fmt(mps_fmt_t *, mps_arena_t); +extern mps_res_t dylan_fmt_weak(mps_fmt_t *, mps_arena_t); + +extern mps_addr_t dylan_weak_dependent(mps_addr_t); + +extern mps_addr_t dylan_skip(mps_addr_t); +extern void dylan_pad(mps_addr_t, size_t); +extern mps_bool_t dylan_ispad(mps_addr_t); +extern int dylan_wrapper_check(mps_word_t *); + +/* Constants describing wrappers. Used only for debugging / testing */ +#define WW 0 /* offset of Wrapper-Wrapper */ +#define WC 1 /* offset of Class pointer*/ +#define WM 2 /* offset of subtype Mask */ +#define WF 3 /* offset of Fixed part descriptor */ +#define WV 4 /* offset of Vector part descriptor */ +#define WS 5 /* offset of Size field for pattern vector */ +#define WP 6 /* offset of Pattern 0, if present */ + +#define BASIC_WRAPPER_SIZE (WS + 1) /* size of wrapper with no patterns */ + +#define ALIGN sizeof(mps_word_t) /* alignment for Dylan format */ + +#endif /* fmtdy_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmtdytst.c b/mps/code/fmtdytst.c new file mode 100644 index 00000000000..6a48860fe9e --- /dev/null +++ b/mps/code/fmtdytst.c @@ -0,0 +1,248 @@ +/* fmtdytst.c: DYLAN FORMAT TEST CODE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers, Dylan developers. + */ + +#include "fmtdy.h" +#include "fmtdytst.h" +#include "mps.h" +#include "testlib.h" +#include "mpslib.h" +#include +#include +#include + +#define unused(param) ((void)param) + + +static mps_word_t *ww = NULL; +static mps_word_t *tvw; + + +static mps_word_t dylan_make_WV(mps_word_t version, mps_word_t vb, + mps_word_t es, mps_word_t vf) +{ + assert((version & ((1 << 8) - 1)) == version); + assert((vb & ((1 << 8) - 1)) == vb); + assert((es & ((1 << 5) - 1)) == es); + assert((vf & ((1 << 3) - 1)) == vf); + + /* VERSION- ... 
VB------ reserved ES---VF- */ + return((version << (MPS_WORD_WIDTH - 8)) | + (vb << 16) | + (es << 3) | + vf); +} + +mps_res_t dylan_make_wrappers(void) +{ + if(ww == NULL) { + ww = malloc(sizeof(mps_word_t) * (BASIC_WRAPPER_SIZE + 1)); + if(ww == NULL) return MPS_RES_MEMORY; + tvw = malloc(sizeof(mps_word_t) * BASIC_WRAPPER_SIZE); + if(tvw == NULL) { + free(ww); + return MPS_RES_MEMORY; + } + + /* Build a wrapper wrapper. */ + ww[WW] = (mps_word_t)ww; + ww[WC] = (mps_word_t)ww; /* dummy class */ + ww[WM] = (1 << 2) | 1; /* dummy subtype_mask */ + ww[WF] = ((WS - 1) << 2) | 2; + ww[WV] = dylan_make_WV(2, 0, 0, 0); + ww[WS] = (1 << 2) | 1; + ww[WP] = 1; + + /* Build a wrapper for traceable vectors. */ + tvw[WW] = (mps_word_t)ww; + tvw[WC] = (mps_word_t)ww; /* dummy class */ + tvw[WM] = (1 << 2) | 1; /* dummy subtype_mask */ + tvw[WF] = 0; /* no fixed part */ + tvw[WV] = dylan_make_WV(2, 0, 0, 2); /* traceable variable part */ + tvw[WS] = 1; /* no patterns */ + } + return MPS_RES_OK; +} + +/* dylan_init -- turn raw memory into initialised dylan-vector (or pad) + * + * If the raw memory is large enough, initialises it to a dylan-vector, + * whose slots are initialised to either dylan-ints, or valid refs, at + * random. + * + * Caller must supply an array of valid refs to copy, via the "refs" + * and "nr_refs" arguments. If "nr_refs" is 0, all slots are + * initialized to dylan-ints: this may be useful for making leaf + * objects. + * + * (Makes a pad if the raw memory is too small to hold a dylan-vector) + */ + +mps_res_t dylan_init(mps_addr_t addr, size_t size, + mps_addr_t *refs, size_t nr_refs) +{ + mps_res_t res; + /* Make sure the size is aligned. */ + assert((size & (ALIGN-1)) == 0); + + res = dylan_make_wrappers(); + if (res != MPS_RES_OK) + return res; + + /* If there is enough room, make a vector, otherwise just */ + /* make a padding object. 
*/ + if(size >= sizeof(mps_word_t) * 2) { + mps_word_t *p = (mps_word_t *)addr; + mps_word_t i, t = (size / sizeof(mps_word_t)) - 2; + + p[0] = (mps_word_t)tvw; /* install vector wrapper */ + p[1] = (t << 2) | 1; /* tag the vector length */ + for(i = 0; i < t; ++i) { + mps_word_t r = rnd(); + + if(nr_refs == 0 || (r & 1)) + p[2+i] = ((r & ~(mps_word_t)3) | 1); /* random int */ + else + p[2+i] = (mps_word_t)refs[(r >> 1) % nr_refs]; /* random ptr */ + } + } else { + dylan_pad(addr, size); + } + + return MPS_RES_OK; +} + +mps_res_t make_dylan_vector(mps_word_t *v, mps_ap_t ap, size_t slots) +{ + mps_res_t res; + mps_addr_t addr; + mps_word_t *p; + size_t size; + size_t i; + + res = dylan_make_wrappers(); + if (res != MPS_RES_OK) + return res; + + size = (slots + 2) * sizeof(mps_word_t); + + do { + MPS_RESERVE_BLOCK(res, addr, ap, size); + if (res != MPS_RES_OK) + return res; + + p = (mps_word_t *)addr; + p[0] = (mps_word_t)tvw; /* install vector wrapper */ + p[1] = (slots << 2) | 1; /* tag the vector length */ + /* fill all slots with zero ints. */ + for (i=0; i> 2; + + /* If the object is a vector, update a random entry. */ + if(p[0] == (mps_word_t)tvw && t > 0) { + mps_word_t r = rnd(); + size_t i = 2 + (rnd() % t); + + if(r & 1) + p[i] = ((r & ~(mps_word_t)3) | 1); /* random int */ + else + p[i] = (mps_word_t)refs[(r >> 1) % nr_refs]; /* random ptr */ + } +} + +/* Writes to a dylan object. + Currently just swaps two refs if it can. + This is only used in a certain way by certain tests, it doesn't have + to be very general. */ +void dylan_mutate(mps_addr_t addr) +{ + mps_word_t *p = (mps_word_t *)addr; + + if(p[0] == (mps_word_t)tvw) { + mps_word_t t = p[1] >> 2; + + if(t > 0) { + mps_word_t tmp; + size_t i, j; + + i = 2 + (rnd() % t); + j = 2 + (rnd() % t); + + tmp = p[i]; + p[i] = p[j]; + p[j] = tmp; + } + } + return; +} + +mps_addr_t dylan_read(mps_addr_t addr) +{ + mps_word_t *p = (mps_word_t *)addr; + + /* If the object is a vector, return a random entry. 
*/ + if(p[0] == (mps_word_t)tvw) { + mps_word_t t = p[1] >> 2; + if(t > 0) + return (mps_addr_t)p[2 + (rnd() % t)]; + } + + return addr; +} + +mps_bool_t dylan_check(mps_addr_t addr) +{ + assert(addr != 0); + assert(((mps_word_t)addr & (ALIGN-1)) == 0); + assert(dylan_wrapper_check((mps_word_t *)((mps_word_t *)addr)[0])); + /* .assert.unused: Asserts throw away their conditions */ + /* in hot varieties, so UNUSED is needed. */ + unused(addr); + return 1; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/fmtdytst.h b/mps/code/fmtdytst.h new file mode 100644 index 00000000000..cd74bfe317d --- /dev/null +++ b/mps/code/fmtdytst.h @@ -0,0 +1,70 @@ +/* fmtdytst.h: DYLAN OBJECT FORMAT TESTING + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef fmtdytst_h +#define fmtdytst_h + +#include "mps.h" +#include "testlib.h" + +extern mps_res_t dylan_init(mps_addr_t addr, size_t size, + mps_addr_t *refs, size_t nr_refs); +extern void dylan_write(mps_addr_t addr, + mps_addr_t *refs, size_t nr_refs); +extern void dylan_mutate(mps_addr_t addr); +extern mps_addr_t dylan_read(mps_addr_t addr); +extern mps_bool_t dylan_check(mps_addr_t addr); +extern void dylan_pad(mps_addr_t addr, size_t size); +extern int dylan_wrapper_check(mps_word_t *w); +extern mps_res_t dylan_make_wrappers(void); + +extern mps_res_t make_dylan_vector(mps_word_t *v, mps_ap_t ap, size_t slots); + +#define DYLAN_VECTOR_SLOT(o,n) (((mps_word_t *) (o))[(n)+2]) + +#define DYLAN_INT(n) (((mps_word_t)(n) << 2) | 1) + +#define INT_DYI(n) ( (n) <= DYLAN_UINT_MAX ? DYLAN_INT(n) : (mps_word_t)fail() ) + + +#define DYLAN_INT_INT(d) ((mps_word_t)(d) >> 2) + +#define DYI_INT(d) ( ((d) & 0x3) == 0x1 ? DYLAN_INT_INT(d) : (mps_word_t)fail() ) + +#define DYLAN_UINT_MAX ((mps_word_t)-1 >> 2) +#define DYLAN_UINT_MASK DYLAN_UINT_MAX + +#endif /* fmtdy_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmthe.c b/mps/code/fmthe.c new file mode 100644 index 00000000000..76f7239f69e --- /dev/null +++ b/mps/code/fmthe.c @@ -0,0 +1,229 @@ +/* fmthe.c: DYLAN-LIKE OBJECT FORMAT WITH HEADERS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * Uses for the actual Dylan format, and just adds + * a thin layer to handle the object headers themselves. 
+ */ + +#include "fmtdy.h" +#include "fmtno.h" +#include "fmthe.h" +#include "mps.h" + +#include +#include + +#include "testlib.h" + + +#define assert(cond) Insist(cond) +#define notreached() assert(0) + +#define AddHeader(p) ((mps_addr_t)((char*)(p) + headerSIZE)) + +static mps_fmt_A_s *dylan_format; + +static mps_res_t dylan_header_scan(mps_ss_t mps_ss, + mps_addr_t base, mps_addr_t limit) +{ + mps_res_t res; + mps_addr_t p = base; + + while(p < limit) { + mps_word_t header = (mps_word_t)*(int*)((char*)p - headerSIZE); + switch(headerType(header)) { + case realTYPE: + assert(header == realHeader); + break; + case padTYPE: + p = (mps_addr_t)((char*)p + headerPadSize(header)); + continue; + default: + notreached(); + break; + } + res = dylan_scan1(mps_ss, &p); + if(res) return res; + p = AddHeader(p); + } + + assert(p <= AddHeader(limit)); + + return MPS_RES_OK; +} + + +static mps_res_t dylan_header_scan_weak(mps_ss_t mps_ss, + mps_addr_t base, + mps_addr_t limit) +{ + mps_res_t res; + + while(base < limit) { + mps_word_t header; + header = (mps_word_t)*(int*)((char*)base - headerSIZE); + switch(headerType(header)) { + case realTYPE: + assert(header == realHeader); + break; + case padTYPE: + base = (mps_addr_t)((char*)base + headerPadSize(header)); + continue; + default: + notreached(); + break; + } + + res = dylan_scan1_weak(mps_ss, &base); + if(res) return res; + base = AddHeader(base); + } + + assert(base <= AddHeader(limit)); + + return MPS_RES_OK; +} + +static mps_addr_t dylan_header_skip(mps_addr_t object) +{ + mps_addr_t *p; /* cursor in object */ + mps_word_t header; + header = (mps_word_t)*(int*)((char*)object - headerSIZE); + switch(headerType(header)) { + case realTYPE: + assert(header == realHeader); + break; + case padTYPE: + return (mps_addr_t)((char*)object + headerPadSize(header)); + default: + notreached(); + break; + } + + p = dylan_format->skip(object); + p = AddHeader(p); + return p; +} + + +static mps_addr_t dylan_header_isfwd(mps_addr_t 
object) +{ + mps_word_t header; + + header = (mps_word_t)*(int*)((char*)object - headerSIZE); + if (headerType(header) != realTYPE) + return NULL; + + assert(header == realHeader); + + return dylan_format->isfwd(object); +} + + +static void dylan_header_pad(mps_addr_t addr, size_t fullSize) +{ + *(int*)addr = (int)padHeader(fullSize); +} + + +/* HeaderFormat -- format descriptor for this format */ + +static struct mps_fmt_auto_header_s HeaderFormat = +{ + ALIGN, + dylan_header_scan, + dylan_header_skip, + NULL, /* later overwritten by dylan format forward method */ + dylan_header_isfwd, + dylan_header_pad, + (size_t)headerSIZE +}; + + +/* HeaderWeakFormat -- format descriptor for this format */ + +static struct mps_fmt_auto_header_s HeaderWeakFormat = +{ + ALIGN, + dylan_header_scan_weak, + dylan_header_skip, + no_fwd, + no_isfwd, + no_pad, + (size_t)headerSIZE +}; + + +/* EnsureHeaderFormat -- create a format object for this format */ + +mps_res_t EnsureHeaderFormat(mps_fmt_t *mps_fmt_o, mps_arena_t arena) +{ + dylan_format = dylan_fmt_A(); + HeaderFormat.fwd = dylan_format->fwd; + return mps_fmt_create_auto_header(mps_fmt_o, arena, &HeaderFormat); +} + + +/* EnsureHeaderWeakFormat -- create a format object for the weak format */ + +mps_res_t EnsureHeaderWeakFormat(mps_fmt_t *mps_fmt_o, mps_arena_t arena) +{ + dylan_format = dylan_fmt_A(); + return mps_fmt_create_auto_header(mps_fmt_o, arena, &HeaderWeakFormat); +} + + +/* HeaderFormatCheck -- check an object in this format */ + +mps_res_t HeaderFormatCheck(mps_addr_t addr) +{ + if (addr != 0 && ((mps_word_t)addr & (ALIGN-1)) == 0 + && dylan_wrapper_check((mps_word_t *)((mps_word_t *)addr)[0])) + return MPS_RES_OK; + else + return MPS_RES_FAIL; +} + +/* HeaderWeakFormatCheck -- check an object in this format */ + +mps_res_t HeaderWeakFormatCheck(mps_addr_t addr) +{ + if (addr != 0 && ((mps_word_t)addr & (ALIGN-1)) == 0 + && dylan_wrapper_check((mps_word_t *)((mps_word_t *)addr)[0])) + return MPS_RES_OK; + else + 
return MPS_RES_FAIL; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmthe.h b/mps/code/fmthe.h new file mode 100644 index 00000000000..a500911a343 --- /dev/null +++ b/mps/code/fmthe.h @@ -0,0 +1,58 @@ +/* fmthe.h: HEADERS FOR DYLAN-LIKE OBJECT FORMATS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. 
+ */ + +#ifndef fmthe_h +#define fmthe_h + +#include "mps.h" + +/* Formats */ +extern mps_res_t EnsureHeaderFormat(mps_fmt_t *, mps_arena_t); +extern mps_res_t EnsureHeaderWeakFormat(mps_fmt_t *, mps_arena_t); +extern mps_res_t HeaderFormatCheck(mps_addr_t addr); +extern mps_res_t HeaderWeakFormatCheck(mps_addr_t addr); + +#define headerSIZE (32) +#define headerTypeBits 8 +#define realTYPE 0x33 +#define realHeader (realTYPE + 0x12345600) +#define padTYPE 0xaa +#define headerType(header) ((header) & (((mps_word_t)1 << headerTypeBits) - 1)) +#define headerPadSize(header) ((header) >> headerTypeBits) +/* Parenthesize the macro argument so that expression arguments +   (e.g. padHeader(a + b)) bind correctly under the shift operator. */ +#define padHeader(size) (((size) << headerTypeBits) | padTYPE) + +#endif /* fmthe_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmtno.c b/mps/code/fmtno.c new file mode 100644 index 00000000000..4854e55ed8e --- /dev/null +++ b/mps/code/fmtno.c @@ -0,0 +1,149 @@ +/* fmtno.c: NULL OBJECT FORMAT IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers + */ + + +#include "fmtno.h" +#include "mps.h" +#include +#include +#include + + +#define notreached() assert(0) +#define unused(param) ((void)param) + + +#define ALIGN sizeof(mps_word_t) + +/* Functions for the null format. 
*/ + +mps_res_t no_scan(mps_ss_t mps_ss, + mps_addr_t base, + mps_addr_t limit) +{ + unused(mps_ss); unused(base); unused(limit); + notreached(); + return 0; +} + +mps_addr_t no_skip(mps_addr_t object) +{ + unused(object); + notreached(); + return 0; +} + +void no_copy(mps_addr_t old, + mps_addr_t new) +{ + unused(old); unused(new); + notreached(); +} + +void no_fwd(mps_addr_t old, + mps_addr_t new) +{ + unused(old); unused(new); + notreached(); +} + +mps_addr_t no_isfwd(mps_addr_t object) +{ + unused(object); + notreached(); + return 0; +} + +void no_pad(mps_addr_t addr, + size_t size) +{ + unused(addr); unused(size); + notreached(); +} + +mps_addr_t no_class(mps_addr_t obj) +{ + unused(obj); + notreached(); + return 0; +} + +/* The null format structures */ + +static struct mps_fmt_A_s no_fmt_A_s = +{ + ALIGN, + no_scan, + no_skip, + no_copy, + no_fwd, + no_isfwd, + no_pad +}; + +static struct mps_fmt_B_s no_fmt_B_s = +{ + ALIGN, + no_scan, + no_skip, + no_copy, + no_fwd, + no_isfwd, + no_pad, + no_class +}; + +/* Functions returning the null format structures. */ + +mps_fmt_A_s *no_fmt_A(void) +{ + return &no_fmt_A_s; +} + +mps_fmt_B_s *no_fmt_B(void) +{ + return &no_fmt_B_s; +} + +/* Format variety-independent version that picks the right format + * variety and creates it. */ + +mps_res_t no_fmt(mps_fmt_t *mps_fmt_o, mps_arena_t arena) +{ + return mps_fmt_create_B(mps_fmt_o, arena, no_fmt_B()); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmtno.h b/mps/code/fmtno.h new file mode 100644 index 00000000000..c7afc1de8d9 --- /dev/null +++ b/mps/code/fmtno.h @@ -0,0 +1,54 @@ +/* fmtno.h: NULL OBJECT FORMAT + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef fmtno_h +#define fmtno_h + +#include "mps.h" + +extern mps_res_t no_scan(mps_ss_t, mps_addr_t, mps_addr_t); +extern mps_addr_t no_skip(mps_addr_t); +extern void no_copy(mps_addr_t, mps_addr_t); +extern void no_fwd(mps_addr_t, mps_addr_t); +extern mps_addr_t no_isfwd(mps_addr_t); +extern void no_pad(mps_addr_t, size_t); +extern mps_addr_t no_class(mps_addr_t); + +extern mps_fmt_A_s *no_fmt_A(void); +extern mps_fmt_B_s *no_fmt_B(void); +extern mps_res_t no_fmt(mps_fmt_t *, mps_arena_t); + +#endif /* fmtno_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmtscheme.c b/mps/code/fmtscheme.c new file mode 100644 index 00000000000..08685215b85 --- /dev/null +++ b/mps/code/fmtscheme.c @@ -0,0 +1,488 @@ +/* fmtscheme.c: SCHEME OBJECT FORMAT IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include + +#include "fmtscheme.h" +#include "testlib.h" + + +/* special objects */ + +static obj_t obj_true; /* #t, boolean true */ +static obj_t obj_false; /* #f, boolean false */ + + +/* MPS globals */ + +mps_arena_t scheme_arena; /* the arena */ +mps_pool_t obj_pool; /* pool for ordinary Scheme objects */ +mps_ap_t obj_ap; /* allocation point used to allocate objects */ + + +/* make_* -- object constructors */ + +#define ALIGNMENT sizeof(mps_word_t) + +/* Align size upwards to the next multiple of the word size. */ +#define ALIGN_WORD(size) \ + (((size) + ALIGNMENT - 1) & ~(ALIGNMENT - 1)) + +/* Align size upwards to the next multiple of the word size, and + * additionally ensure that it's big enough to store a forwarding + * pointer. Evaluates its argument twice. */ +#define ALIGN_OBJ(size) \ + (ALIGN_WORD(size) >= ALIGN_WORD(sizeof(fwd_s)) \ + ? ALIGN_WORD(size) \ + : ALIGN_WORD(sizeof(fwd_s))) + +obj_t scheme_make_bool(int condition) +{ + return condition ? obj_true : obj_false; +} + +obj_t scheme_make_pair(mps_ap_t ap, obj_t car, obj_t cdr) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(pair_s)); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_pair"); + obj = addr; + obj->pair.type = TYPE_PAIR; + CAR(obj) = car; + CDR(obj) = cdr; + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_integer(mps_ap_t ap, long integer) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(integer_s)); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_integer"); + obj = addr; + obj->integer.type = TYPE_INTEGER; + obj->integer.integer = integer; + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_symbol(mps_ap_t ap, size_t length, char string[]) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(offsetof(symbol_s, string) + length+1); + do { + mps_res_t res = 
mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_symbol"); + obj = addr; + obj->symbol.type = TYPE_SYMBOL; + obj->symbol.length = length; + memcpy(obj->symbol.string, string, length+1); + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_string(mps_ap_t ap, size_t length, char string[]) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(offsetof(string_s, string) + length+1); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_string"); + obj = addr; + obj->string.type = TYPE_STRING; + obj->string.length = length; + if (string) memcpy(obj->string.string, string, length+1); + else memset(obj->string.string, 0, length+1); + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_special(mps_ap_t ap, char *string) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(special_s)); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_special"); + obj = addr; + obj->special.type = TYPE_SPECIAL; + obj->special.name = string; + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_operator(mps_ap_t ap, char *name, + entry_t entry, obj_t arguments, + obj_t body, obj_t env, obj_t op_env) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(operator_s)); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_operator"); + obj = addr; + obj->operator.type = TYPE_OPERATOR; + obj->operator.name = name; + obj->operator.entry = entry; + obj->operator.arguments = arguments; + obj->operator.body = body; + obj->operator.env = env; + obj->operator.op_env = op_env; + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_port(mps_ap_t ap, obj_t name, FILE *stream) +{ + mps_addr_t port_ref; + obj_t obj; + mps_addr_t addr; + size_t size = 
ALIGN_OBJ(sizeof(port_s)); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_port"); + obj = addr; + obj->port.type = TYPE_PORT; + obj->port.name = name; + obj->port.stream = stream; + } while(!mps_commit(ap, addr, size)); + port_ref = obj; + mps_finalize(scheme_arena, &port_ref); + return obj; +} + +obj_t scheme_make_character(mps_ap_t ap, char c) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(character_s)); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_character"); + obj = addr; + obj->character.type = TYPE_CHARACTER; + obj->character.c = c; + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_vector(mps_ap_t ap, size_t length, obj_t fill) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(offsetof(vector_s, vector) + length * sizeof(obj_t)); + do { + size_t i; + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_vector"); + obj = addr; + obj->vector.type = TYPE_VECTOR; + obj->vector.length = length; + for(i = 0; i < length; ++i) + obj->vector.vector[i] = fill; + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_buckets(mps_ap_t ap, size_t length) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(offsetof(buckets_s, bucket) + length * sizeof(obj->buckets.bucket[0])); + do { + size_t i; + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_buckets"); + obj = addr; + obj->buckets.type = TYPE_BUCKETS; + obj->buckets.length = length; + obj->buckets.used = 0; + obj->buckets.deleted = 0; + for(i = 0; i < length; ++i) { + obj->buckets.bucket[i].key = NULL; + obj->buckets.bucket[i].value = NULL; + } + } while(!mps_commit(ap, addr, size)); + return obj; +} + +obj_t scheme_make_table(mps_ap_t ap, size_t length, hash_t hashf, cmp_t cmpf) +{ + obj_t obj; + mps_addr_t 
addr; + size_t l, size = ALIGN_OBJ(sizeof(table_s)); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in make_table"); + obj = addr; + obj->table.type = TYPE_TABLE; + obj->table.buckets = NULL; + } while(!mps_commit(ap, addr, size)); + obj->table.hash = hashf; + obj->table.cmp = cmpf; + /* round up to next power of 2 */ + for(l = 1; l < length; l *= 2); + obj->table.buckets = scheme_make_buckets(ap, l); + mps_ld_reset(&obj->table.ld, scheme_arena); + return obj; +} + + +/* MPS Format */ + +static mps_res_t obj_scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) +{ +#define FIX(ref) \ + do { \ + mps_addr_t _addr = (ref); /* copy to local to avoid type pun */ \ + mps_res_t res = MPS_FIX12(ss, &_addr); \ + if (res != MPS_RES_OK) return res; \ + (ref) = _addr; \ + } while(0) + + MPS_SCAN_BEGIN(ss) { + while (base < limit) { + obj_t obj = base; + switch (TYPE(obj)) { + case TYPE_PAIR: + case TYPE_PROMISE: + FIX(CAR(obj)); + FIX(CDR(obj)); + base = (char *)base + ALIGN_OBJ(sizeof(pair_s)); + break; + case TYPE_INTEGER: + base = (char *)base + ALIGN_OBJ(sizeof(integer_s)); + break; + case TYPE_SYMBOL: + base = (char *)base + + ALIGN_OBJ(offsetof(symbol_s, string) + obj->symbol.length + 1); + break; + case TYPE_SPECIAL: + base = (char *)base + ALIGN_OBJ(sizeof(special_s)); + break; + case TYPE_OPERATOR: + FIX(obj->operator.arguments); + FIX(obj->operator.body); + FIX(obj->operator.env); + FIX(obj->operator.op_env); + base = (char *)base + ALIGN_OBJ(sizeof(operator_s)); + break; + case TYPE_STRING: + base = (char *)base + + ALIGN_OBJ(offsetof(string_s, string) + obj->string.length + 1); + break; + case TYPE_PORT: + FIX(obj->port.name); + base = (char *)base + ALIGN_OBJ(sizeof(port_s)); + break; + case TYPE_CHARACTER: + base = (char *)base + ALIGN_OBJ(sizeof(character_s)); + break; + case TYPE_VECTOR: + { + size_t i; + for (i = 0; i < obj->vector.length; ++i) + FIX(obj->vector.vector[i]); + } + base = (char *)base + + 
ALIGN_OBJ(offsetof(vector_s, vector) + + obj->vector.length * sizeof(obj->vector.vector[0])); + break; + case TYPE_BUCKETS: + { + size_t i; + for (i = 0; i < obj->buckets.length; ++i) { + FIX(obj->buckets.bucket[i].key); + FIX(obj->buckets.bucket[i].value); + } + } + base = (char *)base + + ALIGN_OBJ(offsetof(buckets_s, bucket) + + obj->buckets.length * sizeof(obj->buckets.bucket[0])); + break; + case TYPE_TABLE: + FIX(obj->table.buckets); + base = (char *)base + ALIGN_OBJ(sizeof(table_s)); + break; + case TYPE_FWD2: + base = (char *)base + ALIGN_WORD(sizeof(fwd2_s)); + break; + case TYPE_FWD: + base = (char *)base + ALIGN_WORD(obj->fwd.size); + break; + case TYPE_PAD1: + base = (char *)base + ALIGN_WORD(sizeof(pad1_s)); + break; + case TYPE_PAD: + base = (char *)base + ALIGN_WORD(obj->pad.size); + break; + default: + error("Unexpected object on the heap\n"); + return MPS_RES_FAIL; + } + } + } MPS_SCAN_END(ss); + return MPS_RES_OK; +} + +static mps_addr_t obj_skip(mps_addr_t base) +{ + obj_t obj = base; + switch (TYPE(obj)) { + case TYPE_PAIR: + case TYPE_PROMISE: + base = (char *)base + ALIGN_OBJ(sizeof(pair_s)); + break; + case TYPE_INTEGER: + base = (char *)base + ALIGN_OBJ(sizeof(integer_s)); + break; + case TYPE_SYMBOL: + base = (char *)base + + ALIGN_OBJ(offsetof(symbol_s, string) + obj->symbol.length + 1); + break; + case TYPE_SPECIAL: + base = (char *)base + ALIGN_OBJ(sizeof(special_s)); + break; + case TYPE_OPERATOR: + base = (char *)base + ALIGN_OBJ(sizeof(operator_s)); + break; + case TYPE_STRING: + base = (char *)base + + ALIGN_OBJ(offsetof(string_s, string) + obj->string.length + 1); + break; + case TYPE_PORT: + base = (char *)base + ALIGN_OBJ(sizeof(port_s)); + break; + case TYPE_CHARACTER: + base = (char *)base + ALIGN_OBJ(sizeof(character_s)); + break; + case TYPE_VECTOR: + base = (char *)base + + ALIGN_OBJ(offsetof(vector_s, vector) + + obj->vector.length * sizeof(obj->vector.vector[0])); + break; + case TYPE_BUCKETS: + base = (char *)base + + 
ALIGN_OBJ(offsetof(buckets_s, bucket) + + obj->buckets.length * sizeof(obj->buckets.bucket[0])); + break; + case TYPE_TABLE: + base = (char *)base + ALIGN_OBJ(sizeof(table_s)); + break; + case TYPE_FWD2: + base = (char *)base + ALIGN_WORD(sizeof(fwd2_s)); + break; + case TYPE_FWD: + base = (char *)base + ALIGN_WORD(obj->fwd.size); + break; + case TYPE_PAD: + base = (char *)base + ALIGN_WORD(obj->pad.size); + break; + case TYPE_PAD1: + base = (char *)base + ALIGN_WORD(sizeof(pad1_s)); + break; + default: + error("Unexpected object on the heap\n"); + return NULL; + } + return base; +} + +static mps_addr_t obj_isfwd(mps_addr_t addr) +{ + obj_t obj = addr; + switch (TYPE(obj)) { + case TYPE_FWD2: + return obj->fwd2.fwd; + case TYPE_FWD: + return obj->fwd.fwd; + default: + return NULL; + } +} + +static void obj_fwd(mps_addr_t old, mps_addr_t new) +{ + obj_t obj = old; + mps_addr_t limit = obj_skip(old); + size_t size = (size_t)((char *)limit - (char *)old); + cdie(size >= ALIGN_WORD(sizeof(fwd2_s)), "bad size in obj_fwd"); + if (size == ALIGN_WORD(sizeof(fwd2_s))) { + TYPE(obj) = TYPE_FWD2; + obj->fwd2.fwd = new; + } else { + TYPE(obj) = TYPE_FWD; + obj->fwd.fwd = new; + obj->fwd.size = size; + } +} + +static void obj_pad(mps_addr_t addr, size_t size) +{ + obj_t obj = addr; + cdie(size >= ALIGN_WORD(sizeof(pad1_s)), "bad size in obj_pad"); + if (size == ALIGN_WORD(sizeof(pad1_s))) { + TYPE(obj) = TYPE_PAD1; + } else { + TYPE(obj) = TYPE_PAD; + obj->pad.size = size; + } +} + +void scheme_fmt(mps_fmt_t *fmt) +{ + mps_res_t res; + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FMT_ALIGN, ALIGNMENT); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, obj_scan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SKIP, obj_skip); + MPS_ARGS_ADD(args, MPS_KEY_FMT_FWD, obj_fwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_ISFWD, obj_isfwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, obj_pad); + res = mps_fmt_create_k(fmt, scheme_arena, args); + } MPS_ARGS_END(args); + if (res != MPS_RES_OK) error("Couldn't create obj 
format"); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fmtscheme.h b/mps/code/fmtscheme.h new file mode 100644 index 00000000000..15230f52141 --- /dev/null +++ b/mps/code/fmtscheme.h @@ -0,0 +1,221 @@ +/* fmtscheme.h: SCHEME OBJECT FORMAT INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#ifndef fmtscheme_h +#define fmtscheme_h + +#include +#include "mps.h" + +typedef union obj_u *obj_t; +typedef obj_t (*entry_t)(obj_t env, obj_t op_env, obj_t operator, obj_t rands); +typedef unsigned long (*hash_t)(obj_t obj, mps_ld_t ld); +typedef int (*cmp_t)(obj_t obj1, obj_t obj2); + +typedef int type_t; +enum { + TYPE_PAIR, + TYPE_INTEGER, + TYPE_SYMBOL, + TYPE_SPECIAL, + TYPE_OPERATOR, + TYPE_STRING, + TYPE_PORT, + TYPE_PROMISE, + TYPE_CHARACTER, + TYPE_VECTOR, + TYPE_TABLE, + TYPE_BUCKETS, + TYPE_FWD2, /* two-word forwarding object */ + TYPE_FWD, /* three words and up forwarding object */ + TYPE_PAD1, /* one-word padding object */ + TYPE_PAD /* two words and up padding object */ +}; + +typedef struct type_s { + type_t type; +} type_s; + +typedef struct pair_s { + type_t type; /* TYPE_PAIR */ + obj_t car, cdr; /* first and second projections */ +} pair_s; + +typedef struct symbol_s { + type_t type; /* TYPE_SYMBOL */ + size_t length; /* length of symbol string (excl. NUL) */ + char string[1]; /* symbol string, NUL terminated */ +} symbol_s; + +typedef struct integer_s { + type_t type; /* TYPE_INTEGER */ + long integer; /* the integer */ +} integer_s; + +typedef struct special_s { + type_t type; /* TYPE_SPECIAL */ + char *name; /* printed representation, NUL terminated */ +} special_s; + +typedef struct operator_s { + type_t type; /* TYPE_OPERATOR */ + char *name; /* printed name, NUL terminated */ + entry_t entry; /* entry point -- see eval() */ + obj_t arguments, body; /* function arguments and code */ + obj_t env, op_env; /* closure environments */ +} operator_s; + +typedef struct string_s { + type_t type; /* TYPE_STRING */ + size_t length; /* number of chars in string */ + char string[1]; /* string, NUL terminated */ +} string_s; + +typedef struct port_s { + type_t type; /* TYPE_PORT */ + obj_t name; /* name of stream */ + FILE *stream; +} port_s; + +typedef struct character_s { + type_t type; /* TYPE_CHARACTER */ + char c; /* the character */ +} 
character_s; + +typedef struct vector_s { + type_t type; /* TYPE_VECTOR */ + size_t length; /* number of elements */ + obj_t vector[1]; /* vector elements */ +} vector_s; + +typedef struct table_s { + type_t type; /* TYPE_TABLE */ + hash_t hash; /* hash function */ + cmp_t cmp; /* comparison function */ + mps_ld_s ld; /* location dependency */ + obj_t buckets; /* hash buckets */ +} table_s; + +typedef struct buckets_s { + type_t type; /* TYPE_BUCKETS */ + size_t length; /* number of buckets */ + size_t used; /* number of buckets in use */ + size_t deleted; /* number of deleted buckets */ + struct bucket_s { + obj_t key, value; + } bucket[1]; /* hash buckets */ +} buckets_s; + + +/* fwd2, fwd, pad1, pad -- MPS forwarding and padding objects */ + +typedef struct fwd2_s { + type_t type; /* TYPE_FWD2 */ + obj_t fwd; /* forwarded object */ +} fwd2_s; + +typedef struct fwd_s { + type_t type; /* TYPE_FWD */ + obj_t fwd; /* forwarded object */ + size_t size; /* total size of this object */ +} fwd_s; + +typedef struct pad1_s { + type_t type; /* TYPE_PAD1 */ +} pad1_s; + +typedef struct pad_s { + type_t type; /* TYPE_PAD */ + size_t size; /* total size of this object */ +} pad_s; + + +typedef union obj_u { + type_s type; /* one of TYPE_* */ + pair_s pair; + symbol_s symbol; + integer_s integer; + special_s special; + operator_s operator; + string_s string; + port_s port; + character_s character; + vector_s vector; + table_s table; + buckets_s buckets; + fwd2_s fwd2; + fwd_s fwd; + pad_s pad; +} obj_s; + + +/* structure macros */ + +#define TYPE(obj) ((obj)->type.type) +#define CAR(obj) ((obj)->pair.car) +#define CDR(obj) ((obj)->pair.cdr) +#define CAAR(obj) CAR(CAR(obj)) +#define CADR(obj) CAR(CDR(obj)) +#define CDAR(obj) CDR(CAR(obj)) +#define CDDR(obj) CDR(CDR(obj)) +#define CADDR(obj) CAR(CDDR(obj)) +#define CDDDR(obj) CDR(CDDR(obj)) +#define CDDAR(obj) CDR(CDAR(obj)) +#define CADAR(obj) CAR(CDAR(obj)) + + +extern obj_t scheme_make_bool(int condition); +extern obj_t 
scheme_make_pair(mps_ap_t ap, obj_t car, obj_t cdr); +extern obj_t scheme_make_integer(mps_ap_t ap, long integer); +extern obj_t scheme_make_symbol(mps_ap_t ap, size_t length, char string[]); +extern obj_t scheme_make_string(mps_ap_t ap, size_t length, char string[]); +extern obj_t scheme_make_special(mps_ap_t ap, char *string); +extern obj_t scheme_make_operator(mps_ap_t ap, char *name, entry_t entry, + obj_t arguments, obj_t body, obj_t env, + obj_t op_env); +extern obj_t scheme_make_port(mps_ap_t ap, obj_t name, FILE *stream); +extern obj_t scheme_make_character(mps_ap_t ap, char c); +extern obj_t scheme_make_vector(mps_ap_t ap, size_t length, obj_t fill); +extern obj_t scheme_make_buckets(mps_ap_t ap, size_t length); +extern obj_t scheme_make_table(mps_ap_t ap, size_t length, hash_t hashf, + cmp_t cmpf); +extern void scheme_fmt(mps_fmt_t *fmt); + +extern mps_arena_t scheme_arena; +extern mps_pool_t obj_pool; +extern mps_ap_t obj_ap; + +#endif /* fmtscheme_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/forktest.c b/mps/code/forktest.c new file mode 100644 index 00000000000..a4a1b98319e --- /dev/null +++ b/mps/code/forktest.c @@ -0,0 +1,219 @@ +/* forktest.c: FORK SAFETY TEST + * + * $Id: //info.ravenbrook.com/project/mps/branch/2018-06-13/fork/code/tagtest.c#1 $ + * Copyright (c) 2018-2020 Ravenbrook Limited. See end of file for license. + * + * .overview: This test case is a regression test for job004062. It + * checks that the MPS correctly runs in the child process after a + * fork() on FreeBSD, Linux or macOS. + * + * .format: This test case uses a trivial object format in which each + * object contains a single reference. 
+ */ + +#include +#include +#include + +#include "mps.h" +#include "mpsavm.h" +#include "mpscamc.h" +#include "testlib.h" + +enum { + TYPE_REF, + TYPE_FWD, + TYPE_PAD +}; + +typedef struct obj_s { + unsigned type; /* One of the TYPE_ enums */ + union { + struct obj_s *ref; /* TYPE_REF */ + mps_addr_t fwd; /* TYPE_FWD */ + size_t pad; /* TYPE_PAD */ + } u; +} obj_s, *obj_t; + +static void obj_fwd(mps_addr_t old, mps_addr_t new) +{ + obj_t obj = old; + obj->type = TYPE_FWD; + obj->u.fwd = new; +} + +static mps_addr_t obj_isfwd(mps_addr_t addr) +{ + obj_t obj = addr; + if (obj->type == TYPE_FWD) { + return obj->u.fwd; + } else { + return NULL; + } +} + +static void obj_pad(mps_addr_t addr, size_t size) +{ + obj_t obj = addr; + obj->type = TYPE_PAD; + obj->u.pad = size; +} + +static mps_addr_t obj_skip(mps_addr_t addr) +{ + obj_t obj = addr; + size_t size; + if (obj->type == TYPE_PAD) { + size = obj->u.pad; + } else { + size = sizeof(obj_s); + } + return (char *)addr + size; +} + +static mps_res_t obj_scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) +{ + MPS_SCAN_BEGIN(ss) { + while (base < limit) { + obj_t obj = base; + if (obj->type == TYPE_REF) { + mps_addr_t p = obj->u.ref; + mps_res_t res = MPS_FIX12(ss, &p); + if (res != MPS_RES_OK) { + return res; + } + obj->u.ref = p; + } + base = obj_skip(base); + } + } MPS_SCAN_END(ss); + return MPS_RES_OK; +} + +int main(int argc, char *argv[]) +{ + void *marker = ▮ + int pid; + mps_arena_t arena; + mps_fmt_t obj_fmt; + mps_pool_t pool; + mps_thr_t thread; + mps_root_t stack_root; + mps_ap_t obj_ap; + size_t i; + obj_t obj, first; + + testlib_init(argc, argv); + + /* Set the pause time to be very small so that the incremental + collector (when it runs) will have to leave a read barrier in + place for us to hit. 
*/ + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, 0.0); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), + "mps_arena_create"); + } MPS_ARGS_END(args); + mps_arena_park(arena); + + die(mps_thread_reg(&thread, arena), "Couldn't register thread"); + die(mps_root_create_thread(&stack_root, arena, thread, marker), + "Couldn't create thread root"); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FMT_ALIGN, sizeof(obj_s)); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, obj_scan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SKIP, obj_skip); + MPS_ARGS_ADD(args, MPS_KEY_FMT_FWD, obj_fwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_ISFWD, obj_isfwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, obj_pad); + die(mps_fmt_create_k(&obj_fmt, arena, args), "Couldn't create obj format"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, obj_fmt); + die(mps_pool_create_k(&pool, arena, mps_class_amc(), args), + "Couldn't create pool"); + } MPS_ARGS_END(args); + + die(mps_ap_create_k(&obj_ap, pool, mps_args_none), + "Couldn't create obj allocation point"); + + /* Create a linked list of objects. */ + first = NULL; + for (i = 0; i < 100000; ++i) { + size_t size = sizeof(obj_s); + mps_addr_t addr; + do { + die(mps_reserve(&addr, obj_ap, size), "Couldn't allocate."); + obj = addr; + obj->type = TYPE_REF; + obj->u.ref = NULL; + } while (!mps_commit(obj_ap, addr, size)); + obj->u.ref = first; + first = obj; + } + + pid = fork(); + cdie(pid >= 0, "fork failed"); + + /* Allow a collection to start, which will cause a read barrier to + be applied to any segment containing live objects that was + scanned. */ + mps_arena_release(arena); + + /* Read all the objects, so that if there is read barrier in place + we will hit it. */ + for (obj = first; obj != NULL; obj = obj->u.ref) { + Insist(obj->type == TYPE_REF); + } + + mps_arena_park(arena); + + if (pid != 0) { + /* Parent: wait for child and check that its exit status is zero. 
*/ + int stat; + cdie(pid == waitpid(pid, &stat, 0), "waitpid failed"); + cdie(WIFEXITED(stat), "child did not exit normally"); + cdie(WEXITSTATUS(stat) == 0, "child exited with nonzero status"); + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + } + + mps_ap_destroy(obj_ap); + mps_pool_destroy(pool); + mps_fmt_destroy(obj_fmt); + mps_root_destroy(stack_root); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2018-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */
diff --git a/mps/code/format.c b/mps/code/format.c
new file mode 100644
index 00000000000..8d513578e7d
--- /dev/null
+++ b/mps/code/format.c
/* format.c: OBJECT FORMATS
 *
 * $Id$
 * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license.
 * Portions copyright (c) 2002 Global Graphics Software.
 *
 * DESIGN
 *
 * See protocol.mps.format.
 */

#include "mpm.h"

SRCID(format, "$Id$");


/* FormatCheck -- check a format
 *
 * Validates the FormatStruct invariants: signature, back-pointer to
 * the arena, serial number within the arena's range, ring node, and
 * that every method slot holds a function pointer. */

Bool FormatCheck(Format format)
{
  CHECKS(Format, format);
  CHECKU(Arena, format->arena);
  CHECKL(format->serial < format->arena->formatSerial);
  CHECKD_NOSIG(Ring, &format->arenaRing);
  CHECKL(AlignCheck(format->alignment));
  /* TODO: Define the concept of the maximum alignment it is possible to
     request from the MPS, document and provide an interface to it, and then
     check that this alignment is not greater than that, as well as all other
     alignments. */
  CHECKL(FUNCHECK(format->scan));
  CHECKL(FUNCHECK(format->skip));
  CHECKL(FUNCHECK(format->move));
  CHECKL(FUNCHECK(format->isMoved));
  CHECKL(FUNCHECK(format->pad));
  CHECKL(FUNCHECK(format->klass));

  return TRUE;
}


/* FormatNo methods -- default values for format keyword arguments
 *
 * These stand in for format methods the client did not supply; all
 * (except the default class method) must never be reached. */

mps_res_t FormatNoScan(mps_ss_t mps_ss, mps_addr_t base, mps_addr_t limit)
{
  UNUSED(mps_ss);
  UNUSED(base);
  UNUSED(limit);
  NOTREACHED;
  return ResFAIL;
}

static mps_addr_t FormatNoSkip(mps_addr_t object)
{
  UNUSED(object);
  NOTREACHED;
  return NULL;
}

static void FormatNoMove(mps_addr_t old, mps_addr_t new)
{
  UNUSED(old);
  UNUSED(new);
  NOTREACHED;
}

static mps_addr_t FormatNoIsMoved(mps_addr_t object)
{
  UNUSED(object);
  NOTREACHED;
  return NULL;
}

static void FormatNoPad(mps_addr_t addr, size_t size)
{
  UNUSED(addr);
  UNUSED(size);
  NOTREACHED;
}

/* Default class method: the object's class is its first word. */
static mps_addr_t FormatDefaultClass(mps_addr_t object)
{
  AVER(object != NULL);

  return ((mps_addr_t *)object)[0];
}


/* FormatCreate -- create a format */

ARG_DEFINE_KEY(FMT_ALIGN, Align);
ARG_DEFINE_KEY(FMT_SCAN, Fun);
ARG_DEFINE_KEY(FMT_SKIP, Fun);
ARG_DEFINE_KEY(FMT_FWD, Fun);
ARG_DEFINE_KEY(FMT_ISFWD, Fun);
ARG_DEFINE_KEY(FMT_PAD, Fun);
ARG_DEFINE_KEY(FMT_HEADER_SIZE, Size);
ARG_DEFINE_KEY(FMT_CLASS, Fun);

/* Allocates a FormatStruct in the arena's control pool, fills in the
 * methods from keyword arguments (falling back to the FMT_*_DEFAULT
 * values above for any that are absent), assigns the next format
 * serial, and appends the format to the arena's format ring. */
Res FormatCreate(Format *formatReturn, Arena arena, ArgList args)
{
  ArgStruct arg;
  Format format;
  Res res;
  void *p;
  Align fmtAlign = FMT_ALIGN_DEFAULT;
  Size fmtHeaderSize = FMT_HEADER_SIZE_DEFAULT;
  mps_fmt_scan_t fmtScan = FMT_SCAN_DEFAULT;
  mps_fmt_skip_t fmtSkip = FMT_SKIP_DEFAULT;
  mps_fmt_fwd_t fmtFwd = FMT_FWD_DEFAULT;
  mps_fmt_isfwd_t fmtIsfwd = FMT_ISFWD_DEFAULT;
  mps_fmt_pad_t fmtPad = FMT_PAD_DEFAULT;
  mps_fmt_class_t fmtClass = FMT_CLASS_DEFAULT;

  AVER(formatReturn != NULL);
  AVERT(Arena, arena);
  AVERT(ArgList, args);

  if (ArgPick(&arg, args, MPS_KEY_FMT_ALIGN))
    fmtAlign = arg.val.align;
  if (ArgPick(&arg, args, MPS_KEY_FMT_HEADER_SIZE))
    fmtHeaderSize = arg.val.size;
  if (ArgPick(&arg, args, MPS_KEY_FMT_SCAN))
    fmtScan = arg.val.fmt_scan;
  if (ArgPick(&arg, args, MPS_KEY_FMT_SKIP))
    fmtSkip = arg.val.fmt_skip;
  if (ArgPick(&arg, args, MPS_KEY_FMT_FWD))
    fmtFwd = arg.val.fmt_fwd;
  if (ArgPick(&arg, args, MPS_KEY_FMT_ISFWD))
    fmtIsfwd = arg.val.fmt_isfwd;
  if (ArgPick(&arg, args, MPS_KEY_FMT_PAD))
    fmtPad = arg.val.fmt_pad;
  if (ArgPick(&arg, args, MPS_KEY_FMT_CLASS))
    fmtClass = arg.val.fmt_class;

  res = ControlAlloc(&p, arena, sizeof(FormatStruct));
  if(res != ResOK)
    return res;
  format = (Format)p; /* avoid pun */

  format->arena = arena;
  RingInit(&format->arenaRing);
  format->poolCount = 0;
  format->alignment = fmtAlign;
  format->headerSize = fmtHeaderSize;
  format->scan = fmtScan;
  format->skip = fmtSkip;
  format->move = fmtFwd;
  format->isMoved = fmtIsfwd;
  format->pad = fmtPad;
  format->klass = fmtClass;

  format->sig = FormatSig;
  format->serial = arena->formatSerial;
  ++arena->formatSerial;

  AVERT(Format, format);

  RingAppend(&arena->formatRing, &format->arenaRing);

  *formatReturn = format;
  return ResOK;
}


/* FormatDestroy -- destroy a format */

void FormatDestroy(Format format)
{
  AVERT(Format, format);
  /* NOTE(review): the design reference inside this comment was lost in
   * extraction -- the format must not be destroyed while any pool is
   * still using it. */
  AVER(format->poolCount == 0); /* (design reference lost) */

  RingRemove(&format->arenaRing);

  format->sig = SigInvalid;

  RingFinish(&format->arenaRing);

  ControlFree(format->arena, format, sizeof(FormatStruct));
}


/* FormatArena -- find the arena of a format
 *
 * Must be thread-safe. (NOTE(review): the design reference that
 * followed here was lost in extraction.) */

Arena FormatArena(Format format)
{
  AVER(TESTT(Format, format));
  return format->arena;
}


/* FormatDescribe -- describe a format
 *
 * Writes a human-readable description of the format to the stream,
 * indented by depth. */

Res FormatDescribe(Format format, mps_lib_FILE *stream, Count depth)
{
  Res res;

  res = WriteF(stream, depth,
               "Format $P ($U) {\n", (WriteFP)format, (WriteFU)format->serial,
               "  arena $P ($U)\n",
               (WriteFP)format->arena, (WriteFU)format->arena->serial,
               "  poolCount $U\n", (WriteFU)format->poolCount,
               "  alignment $W\n", (WriteFW)format->alignment,
               "  scan $F\n", (WriteFF)format->scan,
               "  skip $F\n", (WriteFF)format->skip,
               "  move $F\n", (WriteFF)format->move,
               "  isMoved $F\n", (WriteFF)format->isMoved,
               "  pad $F\n", (WriteFF)format->pad,
               "  headerSize $W\n", (WriteFW)format->headerSize,
               "} Format $P ($U)\n", (WriteFP)format, (WriteFU)format->serial,
               NULL);
  if (res != ResOK)
    return res;

  return ResOK;
}


/* C. COPYRIGHT AND LICENSE
 *
 * Copyright (C) 2001-2020 Ravenbrook Limited <https://www.ravenbrook.com/>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2.
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fotest.c b/mps/code/fotest.c new file mode 100644 index 00000000000..a413ab0c8b4 --- /dev/null +++ b/mps/code/fotest.c @@ -0,0 +1,250 @@ +/* fotest.c: FAIL-OVER TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * This tests fail-over behaviour in low memory situations. The MVFF + * and MVT pool classes normally maintain their list of free blocks in + * a Coalescing Block Structure (CBS), but if the CBS cannot handle a + * request due to running out of memory, they fall back to a Freelist + * (which has zero memory overhead, at some cost in performance). + * + * This is a white box test: it monkey-patches the MFS pool's alloc + * method with a method that always returns a memory error code. 
+ */ + + +#include "mpscmvff.h" +#include "mpscmvt.h" +#include "mpsavm.h" + +#include "testlib.h" + +#include "cbs.h" +#include "mpm.h" +#include "mpmst.h" +#include "mpmtypes.h" +#include "poolmfs.h" + +#include /* printf */ + + +#define testArenaSIZE ((((size_t)3)<<24) - 4) +#define testSetSIZE 200 /* TODO: 10 * arena grain size / sizeof cbs_struct */ +#define testLOOPS 10 + + +/* make -- allocate one object */ + +static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size) +{ + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, *p, ap, size); + if(res != MPS_RES_OK) + return res; + } while(!mps_commit(ap, *p, size)); + + return MPS_RES_OK; +} + + +/* The original alloc method on the MFS pool. */ +static PoolAllocMethod mfs_alloc; + + +/* Are we currently in a part of the test that is allowed to fail in the case + * where we run out of memory? This controls the behaviour of oomAlloc. */ +static Bool simulate_allocation_failure = FALSE; + +/* How many times has oomAlloc failed on purpose. */ +static unsigned long failure_count = 0; + +/* oomAlloc -- allocation function that reliably fails + * + * Returns a randomly chosen memory error code (and increments + * `failure_count`) if `simulate_allocation_failure`. The point is to verify + * that none of these errors affects the caller. */ + +static Res oomAlloc(Addr *pReturn, Pool pool, Size size) +{ + if (simulate_allocation_failure) { + /* Simulate a single failure in order to enforce the fail-over behaviour. */ + ++ failure_count; + simulate_allocation_failure = 0; + switch (rnd() % 3) { + case 0: + return ResRESOURCE; + case 1: + return ResMEMORY; + default: + return ResCOMMIT_LIMIT; + } + } else { + /* Failure here is allowed, so attempt allocation as normal. + * (see job004041 and job004104). 
*/ + return mfs_alloc(pReturn, pool, size); + } +} + + +/* stress -- create an allocation point and allocate in it */ + +static mps_res_t stress(size_t (*size)(unsigned long, mps_align_t), + mps_align_t alignment, mps_pool_t pool) +{ + mps_res_t res = MPS_RES_OK; + mps_ap_t ap; + unsigned long i, k; + int *ps[testSetSIZE]; + size_t ss[testSetSIZE]; + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + + /* allocate a load of objects */ + for (i=0; i= sizeof(ps[i])) + *ps[i] = 1; /* Write something, so it gets swap. */ + } + + failure_count = 0; + + for (k=0; k0 && rnd() % 2) ? mfs_alloc : oomAlloc; + + /* shuffle all the objects */ + for (i=0; i 0); + +allocFail: + mps_ap_destroy(ap); + + return res; +} + + +/* randomSizeAligned -- produce sizes both large and small, + * aligned by platform alignment */ + +static size_t randomSizeAligned(unsigned long i, mps_align_t alignment) +{ + size_t maxSize = 2 * 160 * 0x2000; + /* Reduce by a factor of 2 every 10 cycles. Total allocation about 40 MB. 
*/ + return alignUp(rnd() % max((maxSize >> (i / 10)), 2) + 1, alignment); +} + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_pool_t pool; + mps_align_t alignment; + + testlib_init(argc, argv); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + mfs_alloc = CLASS_STATIC(MFSPool).alloc; + alignment = sizeof(void *) << (rnd() % 4); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, (64 + rnd() % 64) * 1024); + MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, (1 + rnd() % 8) * 8); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, alignment); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, rnd() % 2); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, rnd() % 2); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, rnd() % 2); + die(mps_pool_create_k(&pool, arena, mps_class_mvff(), args), "create MVFF"); + } MPS_ARGS_END(args); + die(stress(randomSizeAligned, alignment, pool), "stress MVFF"); + mps_pool_destroy(pool); + mps_arena_destroy(arena); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + alignment = sizeof(void *) << (rnd() % 4); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, alignment); + MPS_ARGS_ADD(args, MPS_KEY_MIN_SIZE, (1 + rnd() % 4) * 4); + MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, (1 + rnd() % 8) * 16); + MPS_ARGS_ADD(args, MPS_KEY_MAX_SIZE, (1 + rnd() % 4) * 1024); + MPS_ARGS_ADD(args, MPS_KEY_MVT_RESERVE_DEPTH, (1 + rnd() % 64) * 16); + MPS_ARGS_ADD(args, MPS_KEY_MVT_FRAG_LIMIT, (rnd() % 101) / 100.0); + die(mps_pool_create_k(&pool, arena, mps_class_mvt(), args), "create MVT"); + } MPS_ARGS_END(args); + die(stress(randomSizeAligned, alignment, pool), "stress MVT"); + mps_pool_destroy(pool); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/freelist.c b/mps/code/freelist.c new file mode 100644 index 00000000000..8d70780da22 --- /dev/null +++ b/mps/code/freelist.c @@ -0,0 +1,830 @@ +/* freelist.c: FREE LIST ALLOCATOR IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * .sources: . 
 */

#include "freelist.h"
#include "mpm.h"
#include "range.h"

SRCID(freelist, "$Id$");


/* freelistAlignment -- the alignment of the land underlying this freelist */
#define freelistAlignment(fl) LandAlignment(FreelistLand(fl))


/* FreelistBlockUnion -- in-place representation of a free block.
 *
 * Free blocks are stored in the free memory itself, so a block that is
 * exactly one alignment grain long ("small") has room only for the
 * next pointer; its limit is implicit. Larger blocks also record their
 * limit. The low bit of the next pointer tags small blocks. */
typedef union FreelistBlockUnion {
  struct FreelistBlockSmall {
    FreelistBlock next;  /* tagged with low bit 1 */
    /* limit is (char *)this + freelistAlignment(fl) */
  } small;
  struct FreelistBlockLarge {
    FreelistBlock next;  /* not tagged (low bit 0) */
    Addr limit;
  } large;
} FreelistBlockUnion;


/* freelistEND -- the end of a list
 *
 * The end of a list should not be represented with NULL, as this is
 * ambiguous. However, freelistEND is in fact a null pointer, for
 * performance. To check whether you have it right, try temporarily
 * defining freelistEND as ((FreelistBlock)2) or similar (it must be
 * an even number because of the use of a tag).
 */

#define freelistEND ((FreelistBlock)0)


/* freelistTag -- return the tag of word */

#define freelistTag(word) ((word) & 1)


/* freelistTagSet -- return word updated with the tag set */

#define freelistTagSet(word) ((FreelistBlock)((Word)(word) | 1))


/* freelistTagReset -- return word updated with the tag reset */

#define freelistTagReset(word) ((FreelistBlock)((Word)(word) & ~(Word)1))


/* freelistTagCopy -- return 'to' updated to have the same tag as 'from' */

#define freelistTagCopy(to, from) ((FreelistBlock)((Word)(to) | freelistTag((Word)(from))))


/* freelistBlockIsSmall -- return true if block is small, false if large */

#define freelistBlockIsSmall(block) freelistTag((Word)((block)->small.next))


/* freelistBlockBase -- return the base of a block. */

#define freelistBlockBase(block) ((Addr)(block))


/* freelistBlockNext -- return the next block in the list, or
 * freelistEND if there are no more blocks.
 */

#define freelistBlockNext(block) freelistTagReset((block)->small.next)


/* freelistBlockLimit -- return the limit of a block. */

static Addr freelistBlockLimit(Freelist fl, FreelistBlock block)
{
  AVERT(Freelist, fl);
  if (freelistBlockIsSmall(block)) {
    /* Small blocks are exactly one alignment grain long. */
    return AddrAdd(freelistBlockBase(block), freelistAlignment(fl));
  } else {
    return block->large.limit;
  }
}


/* FreelistBlockCheck -- check a block. */

ATTRIBUTE_UNUSED
static Bool FreelistBlockCheck(FreelistBlock block)
{
  CHECKL(block != NULL);
  /* block list is address-ordered */
  CHECKL(freelistBlockNext(block) == freelistEND
         || block < freelistBlockNext(block));
  CHECKL(freelistBlockIsSmall(block) || (Addr)block < block->large.limit);
  /* Would like to CHECKL(!freelistBlockIsSmall(block) ||
   * freelistBlockSize(fl, block) == freelistAlignment(fl)) but we
   * don't have 'fl' here. This is checked in freelistBlockSetLimit. */

  return TRUE;
}


/* freelistBlockSize -- return the size of a block. */

#define freelistBlockSize(fl, block) \
  AddrOffset(freelistBlockBase(block), freelistBlockLimit(fl, block))


/* freelistBlockSetNext -- update the next block in the list */

static void freelistBlockSetNext(FreelistBlock block, FreelistBlock next)
{
  AVERT(FreelistBlock, block);
  /* Preserve the small/large tag while replacing the pointer. */
  block->small.next = freelistTagCopy(next, block->small.next);
}


/* freelistBlockSetLimit -- update the limit of a block */

static void freelistBlockSetLimit(Freelist fl, FreelistBlock block, Addr limit)
{
  Size size;

  AVERT(Freelist, fl);
  AVERT(FreelistBlock, block);
  AVER(AddrIsAligned(limit, freelistAlignment(fl)));
  AVER(freelistBlockBase(block) < limit);

  size = AddrOffset(block, limit);
  if (size >= sizeof(block->large)) {
    /* Room for an explicit limit: make it a large block. */
    block->large.next = freelistTagReset(block->large.next);
    block->large.limit = limit;
  } else {
    /* One grain only: make it a small block with implicit limit. */
    AVER(size >= sizeof(block->small));
    block->small.next = freelistTagSet(block->small.next);
    AVER(freelistBlockSize(fl, block) == freelistAlignment(fl));
  }
  AVER(freelistBlockLimit(fl, block) == limit);
}


/* freelistBlockInit -- initialize block storing the range [base, limit). */

static FreelistBlock freelistBlockInit(Freelist fl, Addr base, Addr limit)
{
  FreelistBlock block;

  AVERT(Freelist, fl);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, freelistAlignment(fl)));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, freelistAlignment(fl)));

  block = (FreelistBlock)base;
  block->small.next = freelistTagSet(freelistEND);
  freelistBlockSetLimit(fl, block, limit);
  AVERT(FreelistBlock, block);
  return block;
}


Bool FreelistCheck(Freelist fl)
{
  Land land;
  CHECKS(Freelist, fl);
  land = FreelistLand(fl);
  CHECKD(Land, land);
  CHECKL(AlignCheck(FreelistMinimumAlignment));
  CHECKL(sizeof(struct FreelistBlockSmall) < sizeof(struct FreelistBlockLarge));
  CHECKL(sizeof(struct FreelistBlockSmall) <= freelistAlignment(fl));
  /* NOTE(review): the design reference inside this comment was lost in
   * extraction. */
  CHECKL(AlignIsAligned(freelistAlignment(fl), FreelistMinimumAlignment));
  CHECKL((fl->list == freelistEND) == (fl->listSize == 0));
  CHECKL((fl->list == freelistEND) == (fl->size == 0));
  CHECKL(SizeIsAligned(fl->size, freelistAlignment(fl)));

  return TRUE;
}


static Res freelistInit(Land land, Arena arena, Align alignment, ArgList args)
{
  Freelist fl;
  Res res;

  AVER(land != NULL);
  res = NextMethod(Land, Freelist, init)(land, arena, alignment, args);
  if (res != ResOK)
    return res;
  fl = CouldBeA(Freelist, land);

  /* NOTE(review): the design reference inside this comment was lost in
   * extraction. */
  AVER(AlignIsAligned(LandAlignment(land), FreelistMinimumAlignment));

  fl->list = freelistEND;
  fl->listSize = 0;
  fl->size = 0;

  SetClassOfPoly(land, CLASS(Freelist));
  fl->sig = FreelistSig;
  AVERC(Freelist, fl);

  return ResOK;
}


static void freelistFinish(Inst inst)
{
  Land land = MustBeA(Land, inst);
  Freelist fl = MustBeA(Freelist, land);
  fl->sig = SigInvalid;
  fl->list = freelistEND;
  NextMethod(Inst, Freelist, finish)(inst);
}


/* freelistSize -- total free space recorded in the freelist, in bytes */
static Size freelistSize(Land land)
{
  Freelist fl = MustBeA(Freelist, land);
  return fl->size;
}


/* freelistBlockSetPrevNext -- update list of blocks
 *
 * If prev and next are both freelistEND, make the block list empty.
 * Otherwise, if prev is freelistEND, make next the first block in the list.
 * Otherwise, if next is freelistEND, make prev the last block in the list.
 * Otherwise, make next follow prev in the list.
 * Update the count of blocks by 'delta'.
 *
 * It is tempting to try to simplify this code by putting a
 * FreelistBlockUnion into the FreelistStruct and so avoiding the
 * special case on prev. But the problem with that idea is that we
 * can't guarantee that such a sentinel would respect the isolated
 * range invariant (it would have to be at a lower address than the
 * first block in the free list, which the MPS has no mechanism to
 * enforce), and so it would still have to be special-cased.
 */

static void freelistBlockSetPrevNext(Freelist fl, FreelistBlock prev,
                                     FreelistBlock next, int delta)
{
  AVERT(Freelist, fl);

  if (prev == freelistEND) {
    fl->list = next;
  } else {
    /* Isolated range invariant (design reference lost in extraction). */
    AVER(next == freelistEND
         || freelistBlockLimit(fl, prev) < freelistBlockBase(next));
    freelistBlockSetNext(prev, next);
  }
  if (delta < 0) {
    AVER(fl->listSize >= (Count)-delta);
    fl->listSize -= (Count)-delta;
  } else {
    fl->listSize += (Count)delta;
  }
}


/* freelistInsert -- add an aligned range to the freelist, coalescing
 * with adjacent blocks where possible; fails if the range overlaps
 * any existing block. */
static Res freelistInsert(Range rangeReturn, Land land, Range range)
{
  Freelist fl = MustBeA(Freelist, land);
  FreelistBlock prev, cur, next, new;
  Addr base, limit;
  Bool coalesceLeft, coalesceRight;

  AVER(rangeReturn != NULL);
  AVERT(Range, range);
  AVER(RangeIsAligned(range, freelistAlignment(fl)));

  base = RangeBase(range);
  limit = RangeLimit(range);

  prev = freelistEND;
  cur = fl->list;
  while (cur != freelistEND) {
    if (base < freelistBlockLimit(fl, cur) && freelistBlockBase(cur) < limit)
      return ResFAIL; /* range overlaps with cur */
    if (limit <= freelistBlockBase(cur))
      break;
    next = freelistBlockNext(cur);
    if (next != freelistEND)
      /* Isolated range invariant (design reference lost in extraction). */
      AVER(freelistBlockLimit(fl, cur) < freelistBlockBase(next));
    prev = cur;
    cur = next;
  }

  /* Now we know that range does not overlap with any block, and if it
   * coalesces then it does so with prev on the left, and cur on the
   * right.
   */
  coalesceLeft = (prev != freelistEND && base == freelistBlockLimit(fl, prev));
  coalesceRight = (cur != freelistEND && limit == freelistBlockBase(cur));

  if (coalesceLeft && coalesceRight) {
    base = freelistBlockBase(prev);
    limit = freelistBlockLimit(fl, cur);
    freelistBlockSetLimit(fl, prev, limit);
    freelistBlockSetPrevNext(fl, prev, freelistBlockNext(cur), -1);

  } else if (coalesceLeft) {
    base = freelistBlockBase(prev);
    freelistBlockSetLimit(fl, prev, limit);

  } else if (coalesceRight) {
    next = freelistBlockNext(cur);
    limit = freelistBlockLimit(fl, cur);
    cur = freelistBlockInit(fl, base, limit);
    freelistBlockSetNext(cur, next);
    freelistBlockSetPrevNext(fl, prev, cur, 0);

  } else {
    /* failed to coalesce: add new block */
    new = freelistBlockInit(fl, base, limit);
    freelistBlockSetNext(new, cur);
    freelistBlockSetPrevNext(fl, prev, new, +1);
  }

  fl->size += RangeSize(range);
  RangeInit(rangeReturn, base, limit);
  return ResOK;
}


/* freelistDeleteFromBlock -- delete range from block
 *
 * range must be a subset of block. Update rangeReturn to be the
 * original range of block and update the block list accordingly: prev
 * is on the list just before block, or freelistEND if block is the
 * first block on the list.
 */

static void freelistDeleteFromBlock(Range rangeReturn, Freelist fl,
                                    Range range, FreelistBlock prev,
                                    FreelistBlock block)
{
  FreelistBlock next, new;
  Addr base, limit, blockBase, blockLimit;

  AVER(rangeReturn != NULL);
  AVERT(Freelist, fl);
  AVERT(Range, range);
  AVER(RangeIsAligned(range, freelistAlignment(fl)));
  AVER(prev == freelistEND || freelistBlockNext(prev) == block);
  AVERT(FreelistBlock, block);
  AVER(freelistBlockBase(block) <= RangeBase(range));
  AVER(RangeLimit(range) <= freelistBlockLimit(fl, block));

  base = RangeBase(range);
  limit = RangeLimit(range);
  blockBase = freelistBlockBase(block);
  blockLimit = freelistBlockLimit(fl, block);
  next = freelistBlockNext(block);

  if (base == blockBase && limit == blockLimit) {
    /* No fragment at left; no fragment at right. */
    freelistBlockSetPrevNext(fl, prev, next, -1);

  } else if (base == blockBase) {
    /* No fragment at left; block at right. */
    block = freelistBlockInit(fl, limit, blockLimit);
    freelistBlockSetNext(block, next);
    freelistBlockSetPrevNext(fl, prev, block, 0);

  } else if (limit == blockLimit) {
    /* Block at left; no fragment at right. */
    freelistBlockSetLimit(fl, block, base);

  } else {
    /* Block at left; block at right.
*/ + freelistBlockSetLimit(fl, block, base); + new = freelistBlockInit(fl, limit, blockLimit); + freelistBlockSetNext(new, next); + freelistBlockSetPrevNext(fl, block, new, +1); + } + + AVER(fl->size >= RangeSize(range)); + fl->size -= RangeSize(range); + RangeInit(rangeReturn, blockBase, blockLimit); +} + + +static Res freelistDelete(Range rangeReturn, Land land, Range range) +{ + Freelist fl = MustBeA(Freelist, land); + FreelistBlock prev, cur, next; + Addr base, limit; + + AVER(rangeReturn != NULL); + AVERT(Range, range); + + base = RangeBase(range); + limit = RangeLimit(range); + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + Addr blockBase, blockLimit; + blockBase = freelistBlockBase(cur); + blockLimit = freelistBlockLimit(fl, cur); + + if (limit <= blockBase) + return ResFAIL; /* not found */ + if (base <= blockLimit) { + if (base < blockBase || blockLimit < limit) + return ResFAIL; /* partially overlapping */ + freelistDeleteFromBlock(rangeReturn, fl, range, prev, cur); + return ResOK; + } + + next = freelistBlockNext(cur); + prev = cur; + cur = next; + } + + /* Range not found in block list. */ + return ResFAIL; +} + + +static Bool freelistIterate(Land land, LandVisitor visitor, + void *closure) +{ + Freelist fl = MustBeA(Freelist, land); + FreelistBlock cur, next; + + AVER(FUNCHECK(visitor)); + /* closure arbitrary */ + + for (cur = fl->list; cur != freelistEND; cur = next) { + RangeStruct range; + Bool cont; + /* .next.first: Take next before calling the visitor, in case the + * visitor touches the block. 
*/ + next = freelistBlockNext(cur); + RangeInit(&range, freelistBlockBase(cur), freelistBlockLimit(fl, cur)); + cont = (*visitor)(land, &range, closure); + if (!cont) + return FALSE; + } + return TRUE; +} + + +static Bool freelistIterateAndDelete(Land land, LandDeleteVisitor visitor, + void *closure) +{ + Freelist fl = MustBeA(Freelist, land); + FreelistBlock prev, cur, next; + + AVER(FUNCHECK(visitor)); + /* closure arbitrary */ + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + Bool delete = FALSE; + RangeStruct range; + Bool cont; + Size size; + next = freelistBlockNext(cur); /* See .next.first. */ + size = freelistBlockSize(fl, cur); + RangeInit(&range, freelistBlockBase(cur), freelistBlockLimit(fl, cur)); + cont = (*visitor)(&delete, land, &range, closure); + if (delete) { + freelistBlockSetPrevNext(fl, prev, next, -1); + AVER(fl->size >= size); + fl->size -= size; + } else { + prev = cur; + } + if (!cont) + return FALSE; + cur = next; + } + return TRUE; +} + + +/* freelistFindDeleteFromBlock -- delete size bytes from block + * + * Find a chunk of size bytes in block (which is known to be at least + * that big) and possibly delete that chunk according to the + * instruction in findDelete. Return the range of that chunk in + * rangeReturn. Return the original range of the block in + * oldRangeReturn. Update the block list accordingly, using prev, + * which is previous in list or freelistEND if block is the first + * block in the list. 
+ */ + +static void freelistFindDeleteFromBlock(Range rangeReturn, Range oldRangeReturn, + Freelist fl, Size size, + FindDelete findDelete, + FreelistBlock prev, FreelistBlock block) +{ + Bool callDelete = TRUE; + Addr base, limit; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Freelist, fl); + AVER(SizeIsAligned(size, freelistAlignment(fl))); + AVERT(FindDelete, findDelete); + AVER(prev == freelistEND || freelistBlockNext(prev) == block); + AVERT(FreelistBlock, block); + AVER(freelistBlockSize(fl, block) >= size); + + base = freelistBlockBase(block); + limit = freelistBlockLimit(fl, block); + + switch (findDelete) { + case FindDeleteNONE: + callDelete = FALSE; + break; + + case FindDeleteLOW: + limit = AddrAdd(base, size); + break; + + case FindDeleteHIGH: + base = AddrSub(limit, size); + break; + + case FindDeleteENTIRE: + /* do nothing */ + break; + + default: + NOTREACHED; + break; + } + + RangeInit(rangeReturn, base, limit); + if (callDelete) { + freelistDeleteFromBlock(oldRangeReturn, fl, rangeReturn, prev, block); + } else { + RangeInit(oldRangeReturn, base, limit); + } +} + + +static Bool freelistFindFirst(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) +{ + Freelist fl = MustBeA(Freelist, land); + FreelistBlock prev, cur, next; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVER(SizeIsAligned(size, freelistAlignment(fl))); + AVERT(FindDelete, findDelete); + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + if (freelistBlockSize(fl, cur) >= size) { + freelistFindDeleteFromBlock(rangeReturn, oldRangeReturn, fl, size, + findDelete, prev, cur); + return TRUE; + } + next = freelistBlockNext(cur); + prev = cur; + cur = next; + } + + return FALSE; +} + + +static Bool freelistFindLast(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) +{ + Freelist fl = MustBeA(Freelist, land); + Bool found = FALSE; + FreelistBlock prev, 
cur, next; + FreelistBlock foundPrev = freelistEND, foundCur = freelistEND; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVER(SizeIsAligned(size, freelistAlignment(fl))); + AVERT(FindDelete, findDelete); + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + if (freelistBlockSize(fl, cur) >= size) { + found = TRUE; + foundPrev = prev; + foundCur = cur; + } + next = freelistBlockNext(cur); + prev = cur; + cur = next; + } + + if (found) + freelistFindDeleteFromBlock(rangeReturn, oldRangeReturn, fl, size, + findDelete, foundPrev, foundCur); + + return found; +} + + +static Bool freelistFindLargest(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) +{ + Freelist fl = MustBeA(Freelist, land); + Bool found = FALSE; + FreelistBlock prev, cur, next; + FreelistBlock bestPrev = freelistEND, bestCur = freelistEND; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(FindDelete, findDelete); + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + if (freelistBlockSize(fl, cur) >= size) { + found = TRUE; + size = freelistBlockSize(fl, cur); + bestPrev = prev; + bestCur = cur; + } + next = freelistBlockNext(cur); + prev = cur; + cur = next; + } + + if (found) + freelistFindDeleteFromBlock(rangeReturn, oldRangeReturn, fl, size, + findDelete, bestPrev, bestCur); + + return found; +} + + +static Res freelistFindInZones(Bool *foundReturn, Range rangeReturn, + Range oldRangeReturn, Land land, Size size, + ZoneSet zoneSet, Bool high) +{ + Freelist fl = MustBeA(Freelist, land); + LandFindMethod landFind; + RangeInZoneSet search; + Bool found = FALSE; + FreelistBlock prev, cur, next; + FreelistBlock foundPrev = freelistEND, foundCur = freelistEND; + RangeStruct foundRange; + + AVER(FALSE); /* TODO: this code is completely untested! */ + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + /* AVERT(ZoneSet, zoneSet); */ + AVERT(Bool, high); + + landFind = high ? 
freelistFindLast : freelistFindFirst; + search = high ? RangeInZoneSetLast : RangeInZoneSetFirst; + + if (zoneSet == ZoneSetEMPTY) + goto fail; + if (zoneSet == ZoneSetUNIV) { + FindDelete fd = high ? FindDeleteHIGH : FindDeleteLOW; + *foundReturn = (*landFind)(rangeReturn, oldRangeReturn, land, size, fd); + return ResOK; + } + if (ZoneSetIsSingle(zoneSet) && size > ArenaStripeSize(LandArena(land))) + goto fail; + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + Addr base, limit; + if ((*search)(&base, &limit, freelistBlockBase(cur), + freelistBlockLimit(fl, cur), + LandArena(land), zoneSet, size)) + { + found = TRUE; + foundPrev = prev; + foundCur = cur; + RangeInit(&foundRange, base, limit); + if (!high) + break; + } + next = freelistBlockNext(cur); + prev = cur; + cur = next; + } + + if (!found) + goto fail; + + freelistDeleteFromBlock(oldRangeReturn, fl, &foundRange, foundPrev, foundCur); + RangeCopy(rangeReturn, &foundRange); + *foundReturn = TRUE; + return ResOK; + +fail: + *foundReturn = FALSE; + return ResOK; +} + + +/* freelistDescribeVisitor -- visitor method for freelistDescribe + * + * Writes a description of the range into the stream pointed to by + * closure. 
+ */ + +typedef struct FreelistDescribeClosureStruct { + mps_lib_FILE *stream; + Count depth; +} FreelistDescribeClosureStruct, *FreelistDescribeClosure; + +static Bool freelistDescribeVisitor(Land land, Range range, + void *closure) +{ + Res res; + FreelistDescribeClosure my = closure; + + if (!TESTT(Land, land)) + return FALSE; + if (!RangeCheck(range)) + return FALSE; + if (my->stream == NULL) + return FALSE; + + res = WriteF(my->stream, my->depth, + "[$P,", (WriteFP)RangeBase(range), + "$P)", (WriteFP)RangeLimit(range), + " {$U}\n", (WriteFU)RangeSize(range), + NULL); + + return res == ResOK; +} + + +static Res freelistDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Land land = CouldBeA(Land, inst); + Freelist fl = CouldBeA(Freelist, land); + Res res; + Bool b; + FreelistDescribeClosureStruct closure; + + if (!TESTC(Freelist, fl)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, Freelist, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "listSize $U\n", (WriteFU)fl->listSize, + "size $U\n", (WriteFU)fl->size, + NULL); + + closure.stream = stream; + closure.depth = depth + 2; + b = LandIterate(land, freelistDescribeVisitor, &closure); + if (!b) + return ResFAIL; + + return res; +} + + +DEFINE_CLASS(Land, Freelist, klass) +{ + INHERIT_CLASS(klass, Freelist, Land); + klass->instClassStruct.describe = freelistDescribe; + klass->instClassStruct.finish = freelistFinish; + klass->size = sizeof(FreelistStruct); + klass->init = freelistInit; + klass->sizeMethod = freelistSize; + klass->insert = freelistInsert; + klass->insertSteal = freelistInsert; /* doesn't need to allocate */ + klass->delete = freelistDelete; + klass->deleteSteal = freelistDelete; /* doesn't need to allocate */ + klass->iterate = freelistIterate; + klass->iterateAndDelete = freelistIterateAndDelete; + klass->findFirst = freelistFindFirst; + klass->findLast = freelistFindLast; + klass->findLargest 
= freelistFindLargest; + klass->findInZones = freelistFindInZones; + AVERT(LandClass, klass); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/freelist.h b/mps/code/freelist.h new file mode 100644 index 00000000000..7eeeb853b0e --- /dev/null +++ b/mps/code/freelist.h @@ -0,0 +1,57 @@ +/* freelist.h: FREE LIST ALLOCATOR INTERFACE + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * .source: . 
+ */ + +#ifndef freelist_h +#define freelist_h + +#include "mpmtypes.h" +#include "mpm.h" +#include "protocol.h" + +typedef struct FreelistStruct *Freelist; + +#define FreelistLand(fl) (&(fl)->landStruct) + +extern Bool FreelistCheck(Freelist freelist); + +/* */ +#define FreelistMinimumAlignment ((Align)sizeof(FreelistBlock)) + +DECLARE_CLASS(Land, Freelist, Land); + +#endif /* freelist.h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/fri3gc.gmk b/mps/code/fri3gc.gmk new file mode 100644 index 00000000000..a7817cc765f --- /dev/null +++ b/mps/code/fri3gc.gmk @@ -0,0 +1,61 @@ +# -*- makefile -*- +# +# fri3gc.gmk: BUILD FOR FreeBSD/i386/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = fri3gc + +MPMPF = \ + lockix.c \ + prmcanan.c \ + prmcfri3.c \ + prmcix.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -pthread + +include gc.gmk + +# For SQLite3. +LINKFLAGS += -L/usr/local/lib +CFLAGSCOMPILER += -I/usr/local/include + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/mps/code/fri3ll.gmk b/mps/code/fri3ll.gmk new file mode 100644 index 00000000000..2ea6d13b3cb --- /dev/null +++ b/mps/code/fri3ll.gmk @@ -0,0 +1,61 @@ +# -*- makefile -*- +# +# fri3ll.gmk: BUILD FOR FreeBSD/i386/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = fri3ll + +MPMPF = \ + lockix.c \ + prmcanan.c \ + prmcfri3.c \ + prmcix.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -pthread + +include ll.gmk + +# For SQLite3. +LINKFLAGS += -L/usr/local/lib +CFLAGSCOMPILER += -I/usr/local/include + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/mps/code/fri6gc.gmk b/mps/code/fri6gc.gmk new file mode 100644 index 00000000000..6bb4d2c81da --- /dev/null +++ b/mps/code/fri6gc.gmk @@ -0,0 +1,61 @@ +# -*- makefile -*- +# +# fri6gc.gmk: BUILD FOR FreeBSD/x86-64/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = fri6gc + +MPMPF = \ + lockix.c \ + prmcanan.c \ + prmcfri6.c \ + prmcix.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -pthread + +include gc.gmk + +# For SQLite3. +LINKFLAGS += -L/usr/local/lib +CFLAGSCOMPILER += -I/usr/local/include + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/mps/code/fri6ll.gmk b/mps/code/fri6ll.gmk new file mode 100644 index 00000000000..9c21fb6c547 --- /dev/null +++ b/mps/code/fri6ll.gmk @@ -0,0 +1,61 @@ +# -*- makefile -*- +# +# fri6ll.gmk: BUILD FOR FreeBSD/x86-64/Clang PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = fri6ll + +MPMPF = \ + lockix.c \ + prmcanan.c \ + prmcfri6.c \ + prmcix.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -pthread + +include ll.gmk + +# For SQLite3. +LINKFLAGS += -L/usr/local/lib +CFLAGSCOMPILER += -I/usr/local/include + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/mps/code/gc.gmk b/mps/code/gc.gmk new file mode 100644 index 00000000000..1ab9fbd2d66 --- /dev/null +++ b/mps/code/gc.gmk @@ -0,0 +1,79 @@ +# -*- makefile -*- +# +# gc.gmk: GNUMAKEFILE FRAGMENT FOR GNU CC +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# This file is included by platform makefiles that use the GNU CC +# compiler. It defines the compiler-specific variables that the +# common makefile fragment () requires. + +CC = gcc +CFLAGSDEBUG = -O -g3 +CFLAGSOPT = -O2 -g3 + +# Warnings that might be enabled by clients . 
+CFLAGSCOMPILER := \ +-Waggregate-return \ +-Wall \ +-Wcast-qual \ +-Werror \ +-Wextra \ +-Winline \ +-Wmissing-prototypes \ +-Wnested-externs \ +-Wpointer-arith \ +-Wshadow \ +-Wstrict-aliasing=2 \ +-Wstrict-prototypes \ +-Wswitch-default \ +-Wwrite-strings +CFLAGSCOMPILERSTRICT := -std=c89 -pedantic + +# A different set of compiler flags for less strict compilation, for +# instance when we need to #include a third-party header file that +# won't fly with -ansi -pedantic. Use sparingly! +CFLAGSCOMPILERLAX := + +# gcc -MM generates a dependency line of the form: +# thing.o : thing.c ... +# The sed line converts this into: +# //thing.o //thing.d : thing.c ... +# If interrupted, this is liable to leave a zero-length file behind. + +define gendep +$(SHELL) -ec "$(CC) $(CFLAGSSTRICT) -MM $< | \ + sed '/:/s!$*.o!$(@D)/& $(@D)/$*.d!' > $@" +[ -s $@ ] || rm -f $@ +endef + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/gcbench.c b/mps/code/gcbench.c new file mode 100644 index 00000000000..07fdac6f37e --- /dev/null +++ b/mps/code/gcbench.c @@ -0,0 +1,526 @@ +/* gcbench.c -- "GC" Benchmark on ANSI C library + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * This is an allocation stress benchmark test for gc pools + */ + +#include "mps.c" +#include "testlib.h" +#include "testthr.h" +#include "fmtdy.h" +#include "fmtdytst.h" +#include "mpm.h" + +#ifdef MPS_OS_W3 +#include "getopt.h" +#else +#include +#endif + +#include /* fprintf, printf, putchars, sscanf, stderr, stdout */ +#include /* alloca, exit, EXIT_FAILURE, EXIT_SUCCESS, strtoul */ +#include /* clock, CLOCKS_PER_SEC */ + +#define RESMUST(expr) \ + do { \ + mps_res_t res = (expr); \ + if (res != MPS_RES_OK) { \ + fprintf(stderr, #expr " returned %d\n", res); \ + exit(EXIT_FAILURE); \ + } \ + } while(0) + +static mps_arena_t arena; +static mps_pool_t pool; +static mps_fmt_t format; +static mps_chain_t chain; + +/* objNULL needs to be odd so that it's ignored in exactRoots. 
*/ +#define objNULL ((obj_t)MPS_WORD_CONST(0xDECEA5ED)) +#define genLIMIT 100 + +static rnd_state_t seed = 0; /* random number seed */ +static unsigned nthreads = 1; /* threads */ +static unsigned niter = 5; /* iterations */ +static unsigned npass = 10; /* passes over tree */ +static size_t width = 2; /* width of tree nodes */ +static unsigned depth = 20; /* depth of tree */ +static double preuse = 0.2; /* probability of reuse */ +static double pupdate = 0.1; /* probability of update */ +static unsigned ngen = 0; /* number of generations specified */ +static mps_gen_param_s gen[genLIMIT]; /* generation parameters */ +static size_t arena_size = 256ul * 1024 * 1024; /* arena size */ +static size_t arena_grain_size = 1; /* arena grain size */ +static unsigned pinleaf = FALSE; /* are leaf objects pinned at start */ +static mps_bool_t zoned = TRUE; /* arena allocates using zones */ +static double pause_time = ARENA_DEFAULT_PAUSE_TIME; /* maximum pause time */ +static double spare = ARENA_SPARE_DEFAULT; /* spare commit fraction */ + +typedef struct gcthread_s *gcthread_t; + +typedef void *(*gcthread_fn_t)(gcthread_t thread); + +struct gcthread_s { + testthr_t thread; + mps_thr_t mps_thread; + mps_root_t reg_root; + mps_ap_t ap; + gcthread_fn_t fn; +}; + +typedef mps_word_t obj_t; + +static obj_t mkvector(mps_ap_t ap, size_t n) +{ + mps_word_t v; + RESMUST(make_dylan_vector(&v, ap, n)); + return v; +} + +static obj_t aref(obj_t v, size_t i) +{ + return DYLAN_VECTOR_SLOT(v, i); +} + +static void aset(obj_t v, size_t i, obj_t val) +{ + DYLAN_VECTOR_SLOT(v, i) = val; +} + +/* mktree - make a tree of nodes with depth d. 
*/ +static obj_t mktree(mps_ap_t ap, unsigned d, obj_t leaf) +{ + obj_t tree; + size_t i; + if (d <= 0) + return leaf; + tree = mkvector(ap, width); + for (i = 0; i < width; ++i) { + aset(tree, i, mktree(ap, d - 1, leaf)); + } + return tree; +} + +static obj_t random_subtree(obj_t tree, unsigned levels) +{ + while(tree != objNULL && levels > 0) { + tree = aref(tree, rnd() % width); + --levels; + } + return tree; +} + +/* new_tree - Make a new tree from an old tree. + * The new tree is the same depth as the old tree and + * reuses old nodes with probability preuse. + * NOTE: If a new node is reused multiple times, the total size + * will be smaller. + * NOTE: Changing preuse will dramatically change how much work + * is done. In particular, if preuse==1, the old tree is returned + * unchanged. */ +static obj_t new_tree(mps_ap_t ap, obj_t oldtree, unsigned d) +{ + obj_t subtree; + size_t i; + if (rnd_double() < preuse) { + subtree = random_subtree(oldtree, depth - d); + } else { + if (d == 0) + return objNULL; + subtree = mkvector(ap, width); + for (i = 0; i < width; ++i) { + aset(subtree, i, new_tree(ap, oldtree, d - 1)); + } + } + return subtree; +} + +/* Update tree to be identical tree but with nodes reallocated + * with probability pupdate. This avoids writing to vector slots + * if unnecessary. */ +static obj_t update_tree(mps_ap_t ap, obj_t oldtree, unsigned d) +{ + obj_t tree; + size_t i; + if (oldtree == objNULL || d == 0) + return oldtree; + if (rnd_double() < pupdate) { + tree = mkvector(ap, width); + for (i = 0; i < width; ++i) { + aset(tree, i, update_tree(ap, aref(oldtree, i), d - 1)); + } + } else { + tree = oldtree; + for (i = 0; i < width; ++i) { + obj_t oldsubtree = aref(oldtree, i); + obj_t subtree = update_tree(ap, oldsubtree, d - 1); + if (subtree != oldsubtree) { + aset(tree, i, subtree); + } + } + } + return tree; +} + +static void *gc_tree(gcthread_t thread) +{ + unsigned i, j; + mps_ap_t ap = thread->ap; + obj_t leaf = pinleaf ? 
mktree(ap, 1, objNULL) : objNULL; + for (i = 0; i < niter; ++i) { + obj_t tree = mktree(ap, depth, leaf); + for (j = 0 ; j < npass; ++j) { + if (preuse < 1.0) + tree = new_tree(ap, tree, depth); + if (pupdate > 0.0) + tree = update_tree(ap, tree, depth); + } + } + return NULL; +} + +/* start -- start routine for each thread */ +static void *start(void *p) +{ + gcthread_t thread = p; + void *marker; + RESMUST(mps_thread_reg(&thread->mps_thread, arena)); + RESMUST(mps_root_create_thread(&thread->reg_root, arena, + thread->mps_thread, &marker)); + RESMUST(mps_ap_create_k(&thread->ap, pool, mps_args_none)); + thread->fn(thread); + mps_ap_destroy(thread->ap); + mps_root_destroy(thread->reg_root); + mps_thread_dereg(thread->mps_thread); + return NULL; +} + +static void weave(gcthread_fn_t fn) +{ + gcthread_t threads = alloca(sizeof(threads[0]) * nthreads); + unsigned t; + + for (t = 0; t < nthreads; ++t) { + gcthread_t thread = &threads[t]; + thread->fn = fn; + testthr_create(&thread->thread, start, thread); + } + + for (t = 0; t < nthreads; ++t) + testthr_join(&threads[t].thread, NULL); +} + +static void weave1(gcthread_fn_t fn) +{ + gcthread_t thread = alloca(sizeof(thread[0])); + + thread->fn = fn; + start(thread); +} + + +static void watch(gcthread_fn_t fn, const char *name) +{ + clock_t begin, end; + + begin = clock(); + if (nthreads == 1) + weave1(fn); + else + weave(fn); + end = clock(); + + printf("%s: %g\n", name, (double)(end - begin) / CLOCKS_PER_SEC); +} + + +/* Setup MPS arena and call benchmark. 
*/ + +static void arena_setup(gcthread_fn_t fn, + mps_pool_class_t pool_class, + const char *name) +{ + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arena_size); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, zoned); + MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, pause_time); + MPS_ARGS_ADD(args, MPS_KEY_SPARE, spare); + RESMUST(mps_arena_create_k(&arena, mps_arena_class_vm(), args)); + } MPS_ARGS_END(args); + RESMUST(dylan_fmt(&format, arena)); + /* Make wrappers now to avoid race condition. */ + /* dylan_make_wrappers() uses malloc. */ + RESMUST(dylan_make_wrappers()); + if (ngen > 0) + RESMUST(mps_chain_create(&chain, arena, ngen, gen)); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, format); + if (ngen > 0) + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, chain); + RESMUST(mps_pool_create_k(&pool, arena, pool_class, args)); + } MPS_ARGS_END(args); + watch(fn, name); + mps_arena_park(arena); + mps_pool_destroy(pool); + mps_fmt_destroy(format); + if (ngen > 0) + mps_chain_destroy(chain); + mps_arena_destroy(arena); +} + + +/* Command-line options definitions. See getopt_long(3). 
*/ + +static struct option longopts[] = { + {"help", no_argument, NULL, 'h'}, + {"nthreads", required_argument, NULL, 't'}, + {"niter", required_argument, NULL, 'i'}, + {"npass", required_argument, NULL, 'p'}, + {"gen", required_argument, NULL, 'g'}, + {"arena-size", required_argument, NULL, 'm'}, + {"arena-grain-size", required_argument, NULL, 'a'}, + {"width", required_argument, NULL, 'w'}, + {"depth", required_argument, NULL, 'd'}, + {"preuse", required_argument, NULL, 'r'}, + {"pupdate", required_argument, NULL, 'u'}, + {"pin-leaf", no_argument, NULL, 'l'}, + {"seed", required_argument, NULL, 'x'}, + {"arena-unzoned", no_argument, NULL, 'z'}, + {"pause-time", required_argument, NULL, 'P'}, + {"spare", required_argument, NULL, 'S'}, + {NULL, 0, NULL, 0 } +}; + + +static struct { + const char *name; + gcthread_fn_t fn; + mps_pool_class_t (*pool_class)(void); +} pools[] = { + {"amc", gc_tree, mps_class_amc}, + {"ams", gc_tree, mps_class_ams}, + {"awl", gc_tree, mps_class_awl}, +}; + + +/* Command-line driver */ + +int main(int argc, char *argv[]) +{ + int ch; + unsigned i; + mps_bool_t seed_specified = FALSE; + + seed = rnd_seed(); + + while ((ch = getopt_long(argc, argv, "ht:i:p:g:m:a:w:d:r:u:lx:zP:S:", + longopts, NULL)) != -1) + switch (ch) { + case 't': + nthreads = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'i': + niter = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'p': + npass = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'g': + if (ngen >= genLIMIT) { + fprintf(stderr, "exceeded genLIMIT\n"); + return EXIT_FAILURE; + } + { + char *p; + size_t cap = 0; + double mort = 0.0; + cap = (size_t)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': cap <<= 20; p++; break; + case 'M': cap <<= 10; p++; break; + case 'K': p++; break; + default: cap = 0; break; + } + if (sscanf(p, ",%lg", &mort) != 1 || cap == 0) { + fprintf(stderr, "Bad gen format '%s'\n" + "Each gen option has format --gen=capacity[KMG],mortality\n" + "where 
capacity is a size specified in kilobytes, megabytes or gigabytes\n" + "and mortality is a number between 0 and 1\n" + "e.g.: --gen=500K,0.85 --gen=20M,0.45\n", optarg); + return EXIT_FAILURE; + } + gen[ngen].mps_capacity = cap; + gen[ngen].mps_mortality = mort; + ngen++; + } + break; + case 'm': { + char *p; + arena_size = (unsigned)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': arena_size <<= 30; break; + case 'M': arena_size <<= 20; break; + case 'K': arena_size <<= 10; break; + case '\0': break; + default: + fprintf(stderr, "Bad arena size %s\n", optarg); + return EXIT_FAILURE; + } + } + break; + case 'a': { + char *p; + arena_grain_size = (unsigned)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': arena_grain_size <<= 30; break; + case 'M': arena_grain_size <<= 20; break; + case 'K': arena_grain_size <<= 10; break; + case '\0': break; + default: + fprintf(stderr, "Bad arena grain size %s\n", optarg); + return EXIT_FAILURE; + } + } + break; + case 'w': + width = (size_t)strtoul(optarg, NULL, 10); + break; + case 'd': + depth = (unsigned)strtoul(optarg, NULL, 10); + break; + case 'r': + preuse = strtod(optarg, NULL); + break; + case 'u': + pupdate = strtod(optarg, NULL); + break; + case 'l': + pinleaf = TRUE; + break; + case 'x': + seed = strtoul(optarg, NULL, 10); + seed_specified = TRUE; + break; + case 'z': + zoned = FALSE; + break; + case 'P': + pause_time = strtod(optarg, NULL); + break; + case 'S': + spare = strtod(optarg, NULL); + break; + default: + /* This is printed in parts to keep within the 509 character + limit for string literals in portable standard C. */ + fprintf(stderr, + "Usage: %s [option...] 
[test...]\n" + "Options:\n" + " -m n, --arena-size=n[KMG]?\n" + " Initial size of arena (default %lu)\n" + " -a n, --arena-grain-size=n[KMG]?\n" + " Arena grain size (default %lu)\n" + " -t n, --nthreads=n\n" + " Launch n threads each running the test (default %u)\n" + " -i n, --niter=n\n" + " Iterate each test n times (default %u)\n" + " -p n, --npass=n\n" + " Pass over the tree n times (default %u)\n", + argv[0], + (unsigned long)arena_size, + (unsigned long)arena_grain_size, + nthreads, + niter, + npass); + fprintf(stderr, + " -g c,m, --gen=c[KMG],m\n" + " Generation with capacity c (in Kb) and mortality m\n" + " Use multiple times for multiple generations\n" + " -w n, --width=n\n" + " Width of tree nodes made (default %lu)\n" + " -d n, --depth=n\n" + " Depth of tree made (default %u)\n" + " -r p, --preuse=p\n" + " Probability of reusing a node (default %g)\n" + " -u p, --pupdate=p\n" + " Probability of updating a node (default %g)\n" + " -l --pin-leaf\n" + " Make a pinned object to use for leaves\n" + " -x n, --seed=n\n" + " Random number seed (default from entropy)\n", + (unsigned long)width, + depth, + preuse, + pupdate); + fprintf(stderr, + " -z, --arena-unzoned\n" + " Disable zoned allocation in the arena\n" + " -P t, --pause-time\n" + " Maximum pause time in seconds (default %f)\n" + " -S f, --spare\n" + " Maximum spare committed fraction (default %f)\n" + "Tests:\n" + " amc pool class AMC\n" + " ams pool class AMS\n" + " awl pool class AWL\n", + pause_time, + spare); + return EXIT_FAILURE; + } + argc -= optind; + argv += optind; + + if (!seed_specified) { + printf("seed: %lu\n", seed); + (void)fflush(stdout); + } + + while (argc > 0) { + for (i = 0; i < NELEMS(pools); ++i) + if (strcmp(argv[0], pools[i].name) == 0) + goto found; + fprintf(stderr, "unknown pool test \"%s\"\n", argv[0]); + return EXIT_FAILURE; + found: + (void)mps_lib_assert_fail_install(assert_die); + rnd_state_set(seed); + arena_setup(pools[i].fn, pools[i].pool_class(), pools[i].name); + 
--argc; + ++argv; + } + + return EXIT_SUCCESS; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/getopt.h b/mps/code/getopt.h new file mode 100644 index 00000000000..ca1ae21db87 --- /dev/null +++ b/mps/code/getopt.h @@ -0,0 +1,89 @@ +/* getopt.h -- get options from command-line argument list + * + * $Id$ + * + * Adapted from FreeBSD getopt_long(3) to ensure cross-platform availability + * in the Memory Pool System test programs. 
+ */ + +/* $NetBSD: getopt.h,v 1.4 2000/07/07 10:43:54 ad Exp $ */ +/* $FreeBSD: src/include/getopt.h,v 1.6.30.1.8.1 2012/03/03 06:15:13 kensmith Exp $ */ + +/*- + * Copyright (c) 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Dieter Baron and Thomas Klausner. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _GETOPT_H_ +#define _GETOPT_H_ + +/* + * GNU-like getopt_long()/getopt_long_only() with 4.4BSD optreset extension. + * getopt() is declared here too for GNU programs. + */ +#define no_argument 0 +#define required_argument 1 +#define optional_argument 2 + +struct option { + /* name of long option */ + const char *name; + /* + * one of no_argument, required_argument, and optional_argument: + * whether option takes an argument + */ + int has_arg; + /* if not NULL, set *flag to val when option found */ + int *flag; + /* if flag not NULL, value to set *flag to; else return value */ + int val; +}; + +int getopt_long(int, char * const *, const char *, + const struct option *, int *); +int getopt_long_only(int, char * const *, const char *, + const struct option *, int *); +#ifndef _GETOPT_DECLARED +#define _GETOPT_DECLARED +int getopt(int, char * const [], const char *); + +extern char *optarg; /* getopt(3) external variables */ +extern int optind, opterr, optopt; +#endif +#ifndef _OPTRESET_DECLARED +#define _OPTRESET_DECLARED +extern int optreset; /* getopt(3) external variable */ +#endif + +#endif /* !_GETOPT_H_ */ diff --git a/mps/code/getoptl.c b/mps/code/getoptl.c new file mode 100644 index 00000000000..3564362ed6d --- /dev/null +++ b/mps/code/getoptl.c @@ -0,0 +1,635 @@ +/* getoptl.c -- get long options from command line argument list + * + * $Id$ + * + * Adapted from FreeBSD getopt_long(3) to ensure cross-platform 
availability + * in the Memory Pool System test programs. + */ + +/* $OpenBSD: getopt_long.c,v 1.21 2006/09/22 17:22:05 millert Exp $ */ +/* $NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $ */ + +/* + * Copyright (c) 2002 Todd C. Miller + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Sponsored in part by the Defense Advanced Research Projects + * Agency (DARPA) and Air Force Research Laboratory, Air Force + * Materiel Command, USAF, under agreement number F39502-99-1-0512. + */ +/*- + * Copyright (c) 2000 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Dieter Baron and Thomas Klausner. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "getopt.h" + +#include +#include +#include +#include +#include +#include + +#define GNU_COMPATIBLE /* Be more compatible, configure's use us! */ + +int opterr = 1; /* if error message should be printed */ +int optind = 1; /* index into parent argv vector */ +int optopt = '?'; /* character checked for validity */ +int optreset; /* reset getopt */ +char *optarg; /* argument associated with option */ + +#define PRINT_ERROR ((opterr) && (*options != ':')) + +#define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */ +#define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */ +#define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */ + +/* return values */ +#define BADCH (int)'?' +#define BADARG ((*options == ':') ? 
(int)':' : (int)'?') +#define INORDER (int)1 + +#define EMSG "" + +#ifdef GNU_COMPATIBLE +#define NO_PREFIX (-1) +#define D_PREFIX 0 +#define DD_PREFIX 1 +#define W_PREFIX 2 +#endif + +static int getopt_internal(int, char * const *, const char *, + const struct option *, int *, int); +static int parse_long_options(char * const *, const char *, + const struct option *, int *, int, int); +static int gcd(int, int); +static void permute_args(int, int, int, char * const *); + +static char emsg[] = EMSG; +static char *place = emsg; /* option letter processing */ + +/* XXX: set optreset to 1 rather than these two */ +static int nonopt_start = -1; /* first non option argument (for permute) */ +static int nonopt_end = -1; /* first option after non options (for permute) */ + +/* Error messages */ +static const char recargchar[] = "option requires an argument -- %c"; +static const char illoptchar[] = "illegal option -- %c"; /* From P1003.2 */ +#ifdef GNU_COMPATIBLE +static int dash_prefix = NO_PREFIX; +static const char gnuoptchar[] = "invalid option -- %c"; + +static const char recargstring[] = "option `%s%s' requires an argument"; +static const char ambig[] = "option `%s%.*s' is ambiguous"; +static const char noarg[] = "option `%s%.*s' doesn't allow an argument"; +static const char illoptstring[] = "unrecognized option `%s%s'"; +#else +static const char recargstring[] = "option requires an argument -- %s"; +static const char ambig[] = "ambiguous option -- %.*s"; +static const char noarg[] = "option doesn't take an argument -- %.*s"; +static const char illoptstring[] = "unknown option -- %s"; +#endif + +static void +warnx(const char *fmt, ...) +{ + va_list varargs; + va_start(varargs, fmt); + vfprintf(stderr, fmt, varargs); + fputc('\n', stderr); + va_end(varargs); +} + +/* + * Compute the greatest common divisor of a and b. 
+ */ +static int +gcd(int a, int b) +{ + int c; + + c = a % b; + while (c != 0) { + a = b; + b = c; + c = a % b; + } + + return (b); +} + +/* + * Exchange the block from nonopt_start to nonopt_end with the block + * from nonopt_end to opt_end (keeping the same order of arguments + * in each block). + */ +static void +permute_args(int panonopt_start, int panonopt_end, int opt_end, + char * const *nargv) +{ + int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos; + char *swap; + + /* + * compute lengths of blocks and number and size of cycles + */ + nnonopts = panonopt_end - panonopt_start; + nopts = opt_end - panonopt_end; + ncycle = gcd(nnonopts, nopts); + cyclelen = (opt_end - panonopt_start) / ncycle; + + for (i = 0; i < ncycle; i++) { + cstart = panonopt_end+i; + pos = cstart; + for (j = 0; j < cyclelen; j++) { + if (pos >= panonopt_end) + pos -= nnonopts; + else + pos += nopts; + swap = nargv[pos]; + /* LINTED const cast */ + ((char **) nargv)[pos] = nargv[cstart]; + /* LINTED const cast */ + ((char **)nargv)[cstart] = swap; + } + } +} + +/* + * parse_long_options -- + * Parse long options in argc/argv argument vector. + * Returns -1 if short_too is set and the option does not match long_options. 
+ */ +static int +parse_long_options(char * const *nargv, const char *options, + const struct option *long_options, int *idx, int short_too, int flags) +{ + char *current_argv, *has_equal; +#ifdef GNU_COMPATIBLE + const char *current_dash; +#endif + size_t current_argv_len; + int i, match, exact_match, second_partial_match; + + current_argv = place; +#ifdef GNU_COMPATIBLE + switch (dash_prefix) { + case D_PREFIX: + current_dash = "-"; + break; + case DD_PREFIX: + current_dash = "--"; + break; + case W_PREFIX: + current_dash = "-W "; + break; + default: + current_dash = ""; + break; + } +#endif + match = -1; + exact_match = 0; + second_partial_match = 0; + + optind++; + + if ((has_equal = strchr(current_argv, '=')) != NULL) { + /* argument found (--option=arg) */ + assert(has_equal > current_argv); + current_argv_len = (size_t)(has_equal - current_argv); + has_equal++; + } else + current_argv_len = strlen(current_argv); + + for (i = 0; long_options[i].name; i++) { + /* find matching long option */ + if (strncmp(current_argv, long_options[i].name, + current_argv_len)) + continue; + + if (strlen(long_options[i].name) == current_argv_len) { + /* exact match */ + match = i; + exact_match = 1; + break; + } + /* + * If this is a known short option, don't allow + * a partial match of a single character. 
+ */ + if (short_too && current_argv_len == 1) + continue; + + if (match == -1) /* first partial match */ + match = i; + else if ((flags & FLAG_LONGONLY) || + long_options[i].has_arg != + long_options[match].has_arg || + long_options[i].flag != long_options[match].flag || + long_options[i].val != long_options[match].val) + second_partial_match = 1; + } + if (!exact_match && second_partial_match) { + /* ambiguous abbreviation */ + if (PRINT_ERROR) + fprintf(stderr, + ambig, +#ifdef GNU_COMPATIBLE + current_dash, +#endif + (int)current_argv_len, + current_argv); + optopt = 0; + return (BADCH); + } + if (match != -1) { /* option found */ + if (long_options[match].has_arg == no_argument + && has_equal) { + if (PRINT_ERROR) + warnx(noarg, +#ifdef GNU_COMPATIBLE + current_dash, +#endif + (int)current_argv_len, + current_argv); + /* + * XXX: GNU sets optopt to val regardless of flag + */ + if (long_options[match].flag == NULL) + optopt = long_options[match].val; + else + optopt = 0; +#ifdef GNU_COMPATIBLE + return (BADCH); +#else + return (BADARG); +#endif + } + if (long_options[match].has_arg == required_argument || + long_options[match].has_arg == optional_argument) { + if (has_equal) + optarg = has_equal; + else if (long_options[match].has_arg == + required_argument) { + /* + * optional argument doesn't use next nargv + */ + optarg = nargv[optind++]; + } + } + if ((long_options[match].has_arg == required_argument) + && (optarg == NULL)) { + /* + * Missing argument; leading ':' indicates no error + * should be generated. 
+ */ + if (PRINT_ERROR) + warnx(recargstring, +#ifdef GNU_COMPATIBLE + current_dash, +#endif + current_argv); + /* + * XXX: GNU sets optopt to val regardless of flag + */ + if (long_options[match].flag == NULL) + optopt = long_options[match].val; + else + optopt = 0; + --optind; + return (BADARG); + } + } else { /* unknown option */ + if (short_too) { + --optind; + return (-1); + } + if (PRINT_ERROR) + warnx(illoptstring, +#ifdef GNU_COMPATIBLE + current_dash, +#endif + current_argv); + optopt = 0; + return (BADCH); + } + if (idx) + *idx = match; + if (long_options[match].flag) { + *long_options[match].flag = long_options[match].val; + return (0); + } else + return (long_options[match].val); +} + +/* + * getopt_internal -- + * Parse argc/argv argument vector. Called by user level routines. + */ +static int +getopt_internal(int nargc, char * const *nargv, const char *options, + const struct option *long_options, int *idx, int flags) +{ + char *oli; /* option letter list index */ + int optchar, short_too; + int posixly_correct; /* no static, can be changed on the fly */ + + if (options == NULL) + return (-1); + + /* + * Disable GNU extensions if POSIXLY_CORRECT is set or options + * string begins with a '+'. + */ + posixly_correct = (getenv("POSIXLY_CORRECT") != NULL); +#ifdef GNU_COMPATIBLE + if (*options == '-') + flags |= FLAG_ALLARGS; + else if (posixly_correct || *options == '+') + flags &= ~FLAG_PERMUTE; +#else + if (posixly_correct || *options == '+') + flags &= ~FLAG_PERMUTE; + else if (*options == '-') + flags |= FLAG_ALLARGS; +#endif + if (*options == '+' || *options == '-') + options++; + + /* + * XXX Some GNU programs (like cvs) set optind to 0 instead of + * XXX using optreset. Work around this braindamage. 
+ */ + if (optind == 0) + optind = optreset = 1; + + optarg = NULL; + if (optreset) + nonopt_start = nonopt_end = -1; +start: + if (optreset || !*place) { /* update scanning pointer */ + optreset = 0; + if (optind >= nargc) { /* end of argument vector */ + place = emsg; + if (nonopt_end != -1) { + /* do permutation, if we have to */ + permute_args(nonopt_start, nonopt_end, + optind, nargv); + optind -= nonopt_end - nonopt_start; + } + else if (nonopt_start != -1) { + /* + * If we skipped non-options, set optind + * to the first of them. + */ + optind = nonopt_start; + } + nonopt_start = nonopt_end = -1; + return (-1); + } + if (*(place = nargv[optind]) != '-' || +#ifdef GNU_COMPATIBLE + place[1] == '\0') { +#else + (place[1] == '\0' && strchr(options, '-') == NULL)) { +#endif + place = emsg; /* found non-option */ + if (flags & FLAG_ALLARGS) { + /* + * GNU extension: + * return non-option as argument to option 1 + */ + optarg = nargv[optind++]; + return (INORDER); + } + if (!(flags & FLAG_PERMUTE)) { + /* + * If no permutation wanted, stop parsing + * at first non-option. + */ + return (-1); + } + /* do permutation */ + if (nonopt_start == -1) + nonopt_start = optind; + else if (nonopt_end != -1) { + permute_args(nonopt_start, nonopt_end, + optind, nargv); + nonopt_start = optind - + (nonopt_end - nonopt_start); + nonopt_end = -1; + } + optind++; + /* process next argument */ + goto start; + } + if (nonopt_start != -1 && nonopt_end == -1) + nonopt_end = optind; + + /* + * If we have "-" do nothing, if "--" we are done. + */ + if (place[1] != '\0' && *++place == '-' && place[1] == '\0') { + optind++; + place = emsg; + /* + * We found an option (--), so if we skipped + * non-options, we have to permute. 
+ */ + if (nonopt_end != -1) { + permute_args(nonopt_start, nonopt_end, + optind, nargv); + optind -= nonopt_end - nonopt_start; + } + nonopt_start = nonopt_end = -1; + return (-1); + } + } + + /* + * Check long options if: + * 1) we were passed some + * 2) the arg is not just "-" + * 3) either the arg starts with -- we are getopt_long_only() + */ + if (long_options != NULL && place != nargv[optind] && + (*place == '-' || (flags & FLAG_LONGONLY))) { + short_too = 0; +#ifdef GNU_COMPATIBLE + dash_prefix = D_PREFIX; +#endif + if (*place == '-') { + place++; /* --foo long option */ +#ifdef GNU_COMPATIBLE + dash_prefix = DD_PREFIX; +#endif + } else if (*place != ':' && strchr(options, *place) != NULL) + short_too = 1; /* could be short option too */ + + optchar = parse_long_options(nargv, options, long_options, + idx, short_too, flags); + if (optchar != -1) { + place = emsg; + return (optchar); + } + } + + if ((optchar = (int)*place++) == (int)':' || + (optchar == (int)'-' && *place != '\0') || + (oli = strchr(options, optchar)) == NULL) { + /* + * If the user specified "-" and '-' isn't listed in + * options, return -1 (non-option) as per POSIX. + * Otherwise, it is an unknown option character (or ':'). + */ + if (optchar == (int)'-' && *place == '\0') + return (-1); + if (!*place) + ++optind; +#ifdef GNU_COMPATIBLE + if (PRINT_ERROR) + warnx(posixly_correct ? 
illoptchar : gnuoptchar, + optchar); +#else + if (PRINT_ERROR) + warnx(illoptchar, optchar); +#endif + optopt = optchar; + return (BADCH); + } + if (long_options != NULL && optchar == 'W' && oli[1] == ';') { + /* -W long-option */ + if (*place) /* no space */ + /* NOTHING */; + else if (++optind >= nargc) { /* no arg */ + place = emsg; + if (PRINT_ERROR) + warnx(recargchar, optchar); + optopt = optchar; + return (BADARG); + } else /* white space */ + place = nargv[optind]; +#ifdef GNU_COMPATIBLE + dash_prefix = W_PREFIX; +#endif + optchar = parse_long_options(nargv, options, long_options, + idx, 0, flags); + place = emsg; + return (optchar); + } + if (*++oli != ':') { /* doesn't take argument */ + if (!*place) + ++optind; + } else { /* takes (optional) argument */ + optarg = NULL; + if (*place) /* no white space */ + optarg = place; + else if (oli[1] != ':') { /* arg not optional */ + if (++optind >= nargc) { /* no arg */ + place = emsg; + if (PRINT_ERROR) + warnx(recargchar, optchar); + optopt = optchar; + return (BADARG); + } else + optarg = nargv[optind]; + } + place = emsg; + ++optind; + } + /* dump back option letter */ + return (optchar); +} + +#ifdef REPLACE_GETOPT +/* + * getopt -- + * Parse argc/argv argument vector. + * + * [eventually this will replace the BSD getopt] + */ +int +getopt(int nargc, char * const *nargv, const char *options) +{ + + /* + * We don't pass FLAG_PERMUTE to getopt_internal() since + * the BSD getopt(3) (unlike GNU) has never done this. + * + * Furthermore, since many privileged programs call getopt() + * before dropping privileges it makes sense to keep things + * as simple (and bug-free) as possible. + */ + return (getopt_internal(nargc, nargv, options, NULL, NULL, 0)); +} +#endif /* REPLACE_GETOPT */ + +/* + * getopt_long -- + * Parse argc/argv argument vector. 
+ */ +int +getopt_long(int nargc, char * const *nargv, const char *options, + const struct option *long_options, int *idx) +{ + + return (getopt_internal(nargc, nargv, options, long_options, idx, + FLAG_PERMUTE)); +} + +/* + * getopt_long_only -- + * Parse argc/argv argument vector. + */ +int +getopt_long_only(int nargc, char * const *nargv, const char *options, + const struct option *long_options, int *idx) +{ + + return (getopt_internal(nargc, nargv, options, long_options, idx, + FLAG_PERMUTE|FLAG_LONGONLY)); +} diff --git a/mps/code/global.c b/mps/code/global.c new file mode 100644 index 00000000000..6c7cd39b941 --- /dev/null +++ b/mps/code/global.c @@ -0,0 +1,1148 @@ +/* global.c: ARENA-GLOBAL INTERFACES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .sources: . is relevant + * to the functions ArenaEnter and ArenaLeave in this file. + * + * + * TRANSGRESSIONS + * + * .static: Static data is used in ArenaAccess (in order to find the + * appropriate arena) and GlobalsInit. It's checked in GlobalsCheck. + * . + * + * .non-mod: The Globals structure has many fields which properly belong + * to other modules ; GlobalsInit contains code which + * breaks the usual module abstractions. Such instances are documented + * with a tag to the relevant module implementation. Most of the + * functions should be in some other module, they just ended up here by + * confusion over naming. */ + +#include "bt.h" +#include "poolmrg.h" +#include "mps.h" /* finalization */ +#include "mpm.h" + +SRCID(global, "$Id$"); + + +/* All static data objects are declared here. See .static */ + +/* */ +static Bool arenaRingInit = FALSE; +static RingStruct arenaRing; /* */ +static Serial arenaSerial; /* */ + + +/* arenaClaimRingLock, arenaReleaseRingLock -- lock/release the arena ring + * + * . 
*/ + +static void arenaClaimRingLock(void) +{ + LockClaimGlobal(); /* claim the global lock to protect arenaRing */ +} + +static void arenaReleaseRingLock(void) +{ + LockReleaseGlobal(); /* release the global lock protecting arenaRing */ +} + + +/* GlobalsClaimAll -- claim all MPS locks + * + */ + +void GlobalsClaimAll(void) +{ + LockClaimGlobalRecursive(); + arenaClaimRingLock(); + GlobalsArenaMap(ArenaEnter); +} + +/* GlobalsReleaseAll -- release all MPS locks. GlobalsClaimAll must + * previously have been called. */ + +void GlobalsReleaseAll(void) +{ + GlobalsArenaMap(ArenaLeave); + arenaReleaseRingLock(); + LockReleaseGlobalRecursive(); +} + +/* arenaReinitLock -- reinitialize the lock for an arena */ + +static void arenaReinitLock(Arena arena) +{ + AVERT(Arena, arena); + ShieldLeave(arena); + LockInit(ArenaGlobals(arena)->lock); +} + +/* GlobalsReinitializeAll -- reinitialize all MPS locks, and leave the + * shield for all arenas. GlobalsClaimAll must previously have been + * called. */ + +void GlobalsReinitializeAll(void) +{ + GlobalsArenaMap(arenaReinitLock); + LockInitGlobal(); +} + + +/* arenaAnnounce -- add a new arena into the global ring of arenas + * + * On entry, the arena must not be locked (there should be no need, + * because other threads can't know about it). On exit, it will be. */ + +static void arenaAnnounce(Arena arena) +{ + Globals arenaGlobals; + + /* arena checked in ArenaEnter */ + + arenaClaimRingLock(); + ArenaEnter(arena); + arenaGlobals = ArenaGlobals(arena); + AVERT(Globals, arenaGlobals); + RingAppend(&arenaRing, &arenaGlobals->globalRing); + arenaReleaseRingLock(); +} + + +/* arenaDenounce -- remove an arena from the global ring of arenas + * + * After this, no other thread can access the arena through ArenaAccess. + * On entry, the arena should be locked. On exit, it will still be, but + * the lock has been released and reacquired in the meantime, so callers + * should not assume anything about the state of the arena. 
*/ + +static void arenaDenounce(Arena arena) +{ + Globals arenaGlobals; + + AVERT(Arena, arena); + + /* Temporarily give up the arena lock to avoid deadlock, */ + /* see . */ + ArenaLeave(arena); + + /* Detach the arena from the global list. */ + arenaClaimRingLock(); + ArenaEnter(arena); + arenaGlobals = ArenaGlobals(arena); + AVERT(Globals, arenaGlobals); + RingRemove(&arenaGlobals->globalRing); + arenaReleaseRingLock(); +} + + +/* GlobalsArenaMap -- map a function over the arenas. The caller must + * have acquired the ring lock. */ + +void GlobalsArenaMap(void (*func)(Arena arena)) +{ + Ring node, nextNode; + AVERT(Ring, &arenaRing); + RING_FOR(node, &arenaRing, nextNode) { + Globals arenaGlobals = RING_ELT(Globals, globalRing, node); + Arena arena = GlobalsArena(arenaGlobals); + func(arena); + } +} + + +/* GlobalsCheck -- check the arena globals */ + +Bool GlobalsCheck(Globals arenaGlobals) +{ + Arena arena; + TraceId ti; + Trace trace; + Rank rank; + + CHECKS(Globals, arenaGlobals); + arena = GlobalsArena(arenaGlobals); + CHECKL(arena->serial < arenaSerial); + CHECKD_NOSIG(Ring, &arenaGlobals->globalRing); + + CHECKL(MPSVersion() == arenaGlobals->mpsVersionString); + + if (arenaGlobals->lock != NULL) + CHECKD_NOSIG(Lock, arenaGlobals->lock); + + /* no check possible on pollThreshold */ + CHECKL(BoolCheck(arenaGlobals->insidePoll)); + CHECKL(BoolCheck(arenaGlobals->clamped)); + CHECKL(arenaGlobals->fillMutatorSize >= 0.0); + CHECKL(arenaGlobals->emptyMutatorSize >= 0.0); + CHECKL(arenaGlobals->allocMutatorSize >= 0.0); + CHECKL(arenaGlobals->fillMutatorSize - arenaGlobals->emptyMutatorSize + >= arenaGlobals->allocMutatorSize); + CHECKL(arenaGlobals->fillInternalSize >= 0.0); + CHECKL(arenaGlobals->emptyInternalSize >= 0.0); + + CHECKL(BoolCheck(arenaGlobals->bufferLogging)); + CHECKD_NOSIG(Ring, &arenaGlobals->poolRing); + CHECKD_NOSIG(Ring, &arenaGlobals->rootRing); + CHECKD_NOSIG(Ring, &arena->formatRing); + CHECKD_NOSIG(Ring, &arena->messageRing); + if 
(arena->enabledMessageTypes != NULL) + CHECKD_NOSIG(BT, arena->enabledMessageTypes); + CHECKL(BoolCheck(arena->isFinalPool)); + if (arena->isFinalPool) { + CHECKD(Pool, arena->finalPool); + } else { + CHECKL(arena->finalPool == NULL); + } + + CHECKD_NOSIG(Ring, &arena->threadRing); + CHECKD_NOSIG(Ring, &arena->deadRing); + + CHECKD(Shield, ArenaShield(arena)); + + CHECKL(TraceSetCheck(arena->busyTraces)); + CHECKL(TraceSetCheck(arena->flippedTraces)); + CHECKL(TraceSetSuper(arena->busyTraces, arena->flippedTraces)); + + TRACE_SET_ITER(ti, trace, TraceSetUNIV, arena) + /* */ + if (TraceSetIsMember(arena->busyTraces, trace)) { + CHECKD(Trace, trace); + } else { + /* */ + CHECKL(trace->sig == SigInvalid); + } + /* */ + CHECKL(TraceIdMessagesCheck(arena, ti)); + TRACE_SET_ITER_END(ti, trace, TraceSetUNIV, arena); + + for(rank = RankMIN; rank < RankLIMIT; ++rank) + CHECKD_NOSIG(Ring, &arena->greyRing[rank]); + CHECKD_NOSIG(Ring, &arena->chainRing); + + CHECKL(arena->tracedWork >= 0.0); + CHECKL(arena->tracedTime >= 0.0); + /* no check for arena->lastWorldCollect (Clock) */ + + /* can't write a check for arena->epoch */ + CHECKD(History, ArenaHistory(arena)); + + /* we also check the statics now. */ + CHECKL(BoolCheck(arenaRingInit)); + /* Can't CHECKD_NOSIG here because &arenaRing is never NULL and GCC + * will warn about a constant comparison. */ + CHECKL(RingCheck(&arenaRing)); + + CHECKL(BoolCheck(arena->emergency)); + /* .emergency.invariant: There can only be an emergency when a trace + * is busy. */ + CHECKL(!arena->emergency || arena->busyTraces != TraceSetEMPTY); + + if (arenaGlobals->defaultChain != NULL) + CHECKD(Chain, arenaGlobals->defaultChain); + + /* can't check arena->stackWarm */ + + return TRUE; +} + + +/* GlobalsInit -- initialize the globals of the arena */ + +Res GlobalsInit(Globals arenaGlobals) +{ + Arena arena; + Rank rank; + TraceId ti; + + /* This is one of the first things that happens, */ + /* so check static consistency here. 
*/ + AVER(MPMCheck()); + + arenaClaimRingLock(); + /* Ensure static things are initialized. */ + if (!arenaRingInit) { + /* there isn't an arena ring yet */ + /* */ + arenaRingInit = TRUE; + RingInit(&arenaRing); + arenaSerial = (Serial)0; + /* The setup functions call pthread_atfork (on the appropriate + platforms) and so must be called in the correct order. Here we + require the locks to be taken first in the "prepare" case and + released last in the "parent" and "child" cases. */ + ThreadSetup(); + ProtSetup(); + LockSetup(); + } + arena = GlobalsArena(arenaGlobals); + /* Ensure updates to arenaSerial do not race by doing the update + * while the ring lock is claimed. */ + arena->serial = arenaSerial; + ++ arenaSerial; + arenaReleaseRingLock(); + + RingInit(&arenaGlobals->globalRing); + + arenaGlobals->lock = NULL; + + arenaGlobals->pollThreshold = 0.0; + arenaGlobals->insidePoll = FALSE; + arenaGlobals->clamped = FALSE; + arenaGlobals->fillMutatorSize = 0.0; + arenaGlobals->emptyMutatorSize = 0.0; + arenaGlobals->allocMutatorSize = 0.0; + arenaGlobals->fillInternalSize = 0.0; + arenaGlobals->emptyInternalSize = 0.0; + + arenaGlobals->mpsVersionString = MPSVersion(); + arenaGlobals->bufferLogging = FALSE; + RingInit(&arenaGlobals->poolRing); + arenaGlobals->poolSerial = (Serial)0; + /* The system pools are: + 1. arena->freeCBSBlockPoolStruct + 2. arena->controlPoolStruct + 3. 
arena->controlPoolStruct.cbsBlockPoolStruct */ + arenaGlobals->systemPools = (Count)3; + RingInit(&arenaGlobals->rootRing); + arenaGlobals->rootSerial = (Serial)0; + + RingInit(&arena->threadRing); + RingInit(&arena->deadRing); + arena->threadSerial = (Serial)0; + RingInit(&arena->formatRing); + arena->formatSerial = (Serial)0; + RingInit(&arena->messageRing); + arena->enabledMessageTypes = NULL; + arena->droppedMessages = 0; + arena->isFinalPool = FALSE; + arena->finalPool = NULL; + arena->busyTraces = TraceSetEMPTY; /* */ + arena->flippedTraces = TraceSetEMPTY; /* */ + arena->tracedWork = 0.0; + arena->tracedTime = 0.0; + arena->lastWorldCollect = ClockNow(); + ShieldInit(ArenaShield(arena)); + + for (ti = 0; ti < TraceLIMIT; ++ti) { + /* */ + arena->trace[ti].sig = SigInvalid; + /* ti must be valid so that TraceSetIsMember etc. always work */ + arena->trace[ti].ti = ti; + /* */ + arena->tsMessage[ti] = NULL; + arena->tMessage[ti] = NULL; + } + + for(rank = RankMIN; rank < RankLIMIT; ++rank) + RingInit(&arena->greyRing[rank]); + RingInit(&arena->chainRing); + + HistoryInit(ArenaHistory(arena)); + + arena->emergency = FALSE; + + arena->stackWarm = NULL; + + arenaGlobals->defaultChain = NULL; + + arenaGlobals->sig = GlobalsSig; + AVERT(Globals, arenaGlobals); + return ResOK; +} + + +/* GlobalsCompleteCreate -- complete creating the globals of the arena + * + * This is like the final initializations in a Create method, except + * there's no separate GlobalsCreate. 
*/ + +Res GlobalsCompleteCreate(Globals arenaGlobals) +{ + Arena arena; + Res res; + void *p; + TraceId ti; + Trace trace; + + AVERT(Globals, arenaGlobals); + arena = GlobalsArena(arenaGlobals); + + /* initialize the message stuff, */ + { + void *v; + + res = ControlAlloc(&v, arena, BTSize(MessageTypeLIMIT)); + if (res != ResOK) + return res; + arena->enabledMessageTypes = v; + BTResRange(arena->enabledMessageTypes, 0, MessageTypeLIMIT); + } + + TRACE_SET_ITER(ti, trace, TraceSetUNIV, arena) + /* */ + res = TraceIdMessagesCreate(arena, ti); + if(res != ResOK) + return res; + TRACE_SET_ITER_END(ti, trace, TraceSetUNIV, arena); + + res = ControlAlloc(&p, arena, LockSize()); + if (res != ResOK) + return res; + arenaGlobals->lock = (Lock)p; + LockInit(arenaGlobals->lock); + + /* Create the arena's default generation chain. */ + { + GenParamStruct params[] = ChainDEFAULT; + res = ChainCreate(&arenaGlobals->defaultChain, arena, NELEMS(params), params); + if (res != ResOK) + goto failChainCreate; + } + + /* Label generations in default generation chain, for telemetry. 
*/ + { + Chain chain = arenaGlobals->defaultChain; + char label[] = "DefGen-0"; + char *gen_index = &label[(sizeof label) - 2]; + size_t i; + AVER(*gen_index == '0'); + for (i = 0; i < chain->genCount; ++i) { + *gen_index = "0123456789ABCDEF"[i % 16]; + EventLabelPointer(&chain->gens[i], EventInternString(label)); + } + } + + arenaAnnounce(arena); + + return ResOK; + +failChainCreate: + return res; +} + + +/* GlobalsFinish -- finish the globals of the arena */ + +void GlobalsFinish(Globals arenaGlobals) +{ + Arena arena; + Rank rank; + + arena = GlobalsArena(arenaGlobals); + AVERT(Globals, arenaGlobals); + + arenaGlobals->sig = SigInvalid; + + ShieldFinish(ArenaShield(arena)); + HistoryFinish(ArenaHistory(arena)); + RingFinish(&arena->formatRing); + RingFinish(&arena->chainRing); + RingFinish(&arena->messageRing); + RingFinish(&arena->threadRing); + RingFinish(&arena->deadRing); + for(rank = RankMIN; rank < RankLIMIT; ++rank) + RingFinish(&arena->greyRing[rank]); + RingFinish(&arenaGlobals->rootRing); + RingFinish(&arenaGlobals->poolRing); + RingFinish(&arenaGlobals->globalRing); +} + +/* GlobalsPrepareToDestroy -- prepare to destroy the globals of the arena + * + * This is like the final initializations in a Destroy method, except + * there's no separate GlobalsDestroy. */ + +void GlobalsPrepareToDestroy(Globals arenaGlobals) +{ + Arena arena; + TraceId ti; + Trace trace; + Chain defaultChain; + Rank rank; + + AVERT(Globals, arenaGlobals); + + /* Park the arena before destroying the default chain, to ensure + * that there are no traces using that chain. 
*/ + ArenaPark(arenaGlobals); + + arena = GlobalsArena(arenaGlobals); + + arenaDenounce(arena); + + defaultChain = arenaGlobals->defaultChain; + arenaGlobals->defaultChain = NULL; + ChainDestroy(defaultChain); + + LockRelease(arenaGlobals->lock); + /* Theoretically, another thread could grab the lock here, but it's */ + /* not worth worrying about, since an attempt after the lock has been */ + /* destroyed would lead to a crash just the same. */ + LockFinish(arenaGlobals->lock); + arenaGlobals->lock = NULL; + + TRACE_SET_ITER(ti, trace, TraceSetUNIV, arena) + /* */ + TraceIdMessagesDestroy(arena, ti); + TRACE_SET_ITER_END(ti, trace, TraceSetUNIV, arena); + + /* report dropped messages */ + if(arena->droppedMessages > 0) + EVENT1(MessagesDropped, arena->droppedMessages); + + /* .message.queue.empty: Empty the queue of messages before */ + /* proceeding to finish the arena. It is important that this */ + /* is done before destroying the finalization pool as otherwise */ + /* the message queue would have dangling pointers to messages */ + /* whose memory has been unmapped. */ + if(MessagePoll(arena)) + EVENT0(MessagesExist); + MessageEmpty(arena); + + /* throw away the BT used by messages */ + if (arena->enabledMessageTypes != NULL) { + ControlFree(arena, (void *)arena->enabledMessageTypes, + BTSize(MessageTypeLIMIT)); + arena->enabledMessageTypes = NULL; + } + + /* destroy the final pool */ + if (arena->isFinalPool) { + /* All this subtlety is because PoolDestroy will call */ + /* ArenaCheck several times. The invariant on finalPool */ + /* and isFinalPool should hold before, after, and during */ + /* the PoolDestroy call */ + Pool pool = arena->finalPool; + + arena->isFinalPool = FALSE; + arena->finalPool = NULL; + PoolDestroy(pool); + } + + ShieldDestroyQueue(ArenaShield(arena), arena); + + /* Check that the tear-down is complete: that the client has + * destroyed all data structures associated with the arena. 
We do + * this here rather than in GlobalsFinish because by the time that + * is called, the control pool has been destroyed and so the address + * space containing all these rings has potentially been unmapped, + * and so RingCheck dereferences a pointer into that unmapped memory + * and we get a crash instead of an assertion. See job000652. + */ + AVER(RingIsSingle(&arena->formatRing)); /* */ + AVER(RingIsSingle(&arena->chainRing)); /* */ + AVER(RingIsSingle(&arena->messageRing)); + AVER(RingIsSingle(&arena->threadRing)); /* */ + AVER(RingIsSingle(&arena->deadRing)); + AVER(RingIsSingle(&arenaGlobals->rootRing)); /* */ + for(rank = RankMIN; rank < RankLIMIT; ++rank) + AVER(RingIsSingle(&arena->greyRing[rank])); + AVER(RingLength(&arenaGlobals->poolRing) == arenaGlobals->systemPools); /* */ +} + + +/* ArenaEnter -- enter the state where you can look at the arena */ + +void ArenaEnter(Arena arena) +{ + ArenaEnterLock(arena, FALSE); +} + +/* The recursive argument specifies whether to claim the lock + recursively or not. */ +void ArenaEnterLock(Arena arena, Bool recursive) +{ + Lock lock; + + /* This check is safe to do outside the lock. Unless the client + is also calling ArenaDestroy, but that's a protocol violation by + the client if so. */ + AVER(TESTT(Arena, arena)); + + /* It's critical that the stack probe is outside the lock, because + * the stack probe may cause arbitrary code to run (via a signal or + * exception handler) and that code may enter the MPS. If we took + * the lock first then this would deadlock. */ + StackProbe(StackProbeDEPTH); + lock = ArenaGlobals(arena)->lock; + if(recursive) { + LockClaimRecursive(lock); + } else { + LockClaim(lock); + } + AVERT(Arena, arena); /* can't AVERT it until we've got the lock */ + if(recursive) { + /* already in shield */ + } else { + ShieldEnter(arena); + } +} + +/* Same as ArenaEnter, but for the few functions that need to be + reentrant with respect to some part of the MPS. + For example, mps_arena_has_addr. 
*/ + +void ArenaEnterRecursive(Arena arena) +{ + ArenaEnterLock(arena, TRUE); +} + +/* ArenaLeave -- leave the state where you can look at MPM data structures */ + +void ArenaLeave(Arena arena) +{ + AVERT(Arena, arena); + ArenaLeaveLock(arena, FALSE); +} + +void ArenaLeaveLock(Arena arena, Bool recursive) +{ + Lock lock; + + AVERT(Arena, arena); + + lock = ArenaGlobals(arena)->lock; + + if(recursive) { + /* no need to leave shield */ + } else { + ShieldLeave(arena); + } + ProtSync(arena); /* */ + if(recursive) { + LockReleaseRecursive(lock); + } else { + LockRelease(lock); + } +} + +void ArenaLeaveRecursive(Arena arena) +{ + ArenaLeaveLock(arena, TRUE); +} + +Bool ArenaBusy(Arena arena) +{ + return LockIsHeld(ArenaGlobals(arena)->lock); +} + + +/* ArenaAccess -- deal with an access fault + * + * This is called when a protected address is accessed. The mode + * corresponds to which mode flags need to be cleared in order for the + * access to continue. */ + +Bool ArenaAccess(Addr addr, AccessSet mode, MutatorContext context) +{ + Seg seg; + Ring node, nextNode; + Res res; + + arenaClaimRingLock(); /* */ + AVERT(Ring, &arenaRing); + + RING_FOR(node, &arenaRing, nextNode) { + Globals arenaGlobals = RING_ELT(Globals, globalRing, node); + Arena arena = GlobalsArena(arenaGlobals); + Root root; + + ArenaEnter(arena); /* */ + EVENT3(ArenaAccessBegin, arena, addr, mode); + + /* @@@@ The code below assumes that Roots and Segs are disjoint. */ + /* It will fall over (in TraceSegAccess probably) if there is a */ + /* protected root on a segment. */ + /* It is possible to overcome this restriction. */ + if (SegOfAddr(&seg, arena, addr)) { + arenaReleaseRingLock(); + /* An access in a different thread (or even in the same thread, + * via a signal or exception handler) may have already caused + * the protection to be cleared. This avoids calling TraceAccess + * on protection that has already been cleared on a separate + * thread. 
*/ + mode &= SegPM(seg); + if (mode != AccessSetEMPTY) { + res = SegAccess(seg, arena, addr, mode, context); + AVER(res == ResOK); /* Mutator can't continue unless this succeeds */ + } else { + /* Protection was already cleared, for example by another thread + or a fault in a nested exception handler: nothing to do now. */ + } + EVENT1(ArenaAccessEnd, arena); + ArenaLeave(arena); + return TRUE; + } else if (RootOfAddr(&root, arena, addr)) { + arenaReleaseRingLock(); + mode &= RootPM(root); + if (mode != AccessSetEMPTY) + RootAccess(root, mode); + EVENT1(ArenaAccessEnd, arena); + ArenaLeave(arena); + return TRUE; + } else { + /* No segment or root was found at the address: this must mean + * that activity in another thread (or even in the same thread, + * via a signal or exception handler) caused the segment or root + * to go away. So there's nothing to do now. */ + EVENT1(ArenaAccessEnd, arena); + ArenaLeave(arena); + } + } + + arenaReleaseRingLock(); + return FALSE; +} + + +/* ArenaPoll -- trigger periodic actions + * + * Poll all background activities to see if they need to do anything. + * ArenaPoll does nothing if the amount of committed memory is less than + * the arena poll threshold. This means that actions are taken as the + * memory demands increase. + * + * @@@@ This is where time is "stolen" from the mutator in addition + * to doing what it asks and servicing accesses. This is where the + * amount of time should be controlled, perhaps by passing time + * limits to the various other activities. + * + * @@@@ Perhaps this should be based on a process table rather than a + * series of manual steps for looking around. This might be worthwhile + * if we introduce background activities other than tracing. 
*/ + +void (ArenaPoll)(Globals globals) +{ + Arena arena; + Clock start; + Bool worldCollected = FALSE; + Bool moreWork, workWasDone = FALSE; + Work tracedWork; + + AVERT(Globals, globals); + + if (globals->clamped) + return; + if (globals->insidePoll) + return; + arena = GlobalsArena(globals); + if (!PolicyPoll(arena)) + return; + + globals->insidePoll = TRUE; + + /* fillMutatorSize has advanced; call TracePoll enough to catch up. */ + start = ClockNow(); + + EVENT1(ArenaPollBegin, arena); + + do { + moreWork = TracePoll(&tracedWork, &worldCollected, globals, + !worldCollected); + if (moreWork) { + workWasDone = TRUE; + } + } while (PolicyPollAgain(arena, start, moreWork, tracedWork)); + + /* Don't count time spent checking for work, if there was no work to do. */ + if (workWasDone) { + ArenaAccumulateTime(arena, start, ClockNow()); + } + + EVENT2(ArenaPollEnd, arena, BOOLOF(workWasDone)); + + globals->insidePoll = FALSE; +} + + +/* ArenaStep -- use idle time for collection work */ + +Bool ArenaStep(Globals globals, double interval, double multiplier) +{ + Bool workWasDone = FALSE; + Clock start, intervalEnd, availableEnd, now; + Clock clocks_per_sec; + Arena arena; + + AVERT(Globals, globals); + AVER(interval >= 0.0); + AVER(multiplier >= 0.0); + + arena = GlobalsArena(globals); + clocks_per_sec = ClocksPerSec(); + + start = now = ClockNow(); + intervalEnd = start + (Clock)(interval * (double)clocks_per_sec); + AVER(intervalEnd >= start); + availableEnd = start + (Clock)(interval * multiplier * (double)clocks_per_sec); + AVER(availableEnd >= start); + + /* loop while there is work to do and time on the clock. */ + do { + Trace trace; + if (arena->busyTraces != TraceSetEMPTY) { + trace = ArenaTrace(arena, (TraceId)0); + } else { + /* No traces are running: consider collecting the world. 
*/ + if (PolicyShouldCollectWorld(arena, (double)(availableEnd - now), now, + clocks_per_sec)) + { + Res res; + res = TraceStartCollectAll(&trace, arena, TraceStartWhyOPPORTUNISM); + if (res != ResOK) + break; + arena->lastWorldCollect = now; + } else { + /* Not worth collecting the world; consider starting a trace. */ + Bool worldCollected; + if (!PolicyStartTrace(&trace, &worldCollected, arena, FALSE)) + break; + } + } + TraceAdvance(trace); + if (trace->state == TraceFINISHED) + TraceDestroyFinished(trace); + workWasDone = TRUE; + now = ClockNow(); + } while (now < intervalEnd); + + if (workWasDone) { + ArenaAccumulateTime(arena, start, now); + } + + return workWasDone; +} + +/* ArenaFinalize -- registers an object for finalization + * + * . */ + +Res ArenaFinalize(Arena arena, Ref obj) +{ + Res res; + Pool refpool; + + AVERT(Arena, arena); + AVER(PoolOfAddr(&refpool, arena, (Addr)obj)); + AVER(PoolHasAttr(refpool, AttrGC)); + + if (!arena->isFinalPool) { + Pool finalpool; + + res = PoolCreate(&finalpool, arena, PoolClassMRG(), argsNone); + if (res != ResOK) + return res; + arena->finalPool = finalpool; + arena->isFinalPool = TRUE; + } + + res = MRGRegister(arena->finalPool, obj); + return res; +} + + +/* ArenaDefinalize -- removes one finalization registration of an object + * + * . */ + +Res ArenaDefinalize(Arena arena, Ref obj) +{ + Res res; + + AVERT(Arena, arena); + AVER(ArenaHasAddr(arena, (Addr)obj)); + + if (!arena->isFinalPool) { + return ResFAIL; + } + res = MRGDeregister(arena->finalPool, obj); + return res; +} + + +/* ArenaPeek -- read a single reference, possibly through a barrier */ + +Ref ArenaPeek(Arena arena, Ref *p) +{ + Seg seg; + Ref ref; + + AVERT(Arena, arena); + /* Can't check p as it is arbitrary */ + + if (SegOfAddr(&seg, arena, (Addr)p)) + ref = ArenaPeekSeg(arena, seg, p); + else + ref = *p; + return ref; +} + +/* ArenaPeekSeg -- as ArenaPeek, but p must be in seg. 
*/ + +Ref ArenaPeekSeg(Arena arena, Seg seg, Ref *p) +{ + Ref ref; + Rank rank; + + AVERT(Arena, arena); + AVERT(Seg, seg); + AVER(PoolArena(SegPool(seg)) == arena); + AVER(SegBase(seg) <= (Addr)p); + AVER((Addr)p < SegLimit(seg)); + /* TODO: Consider checking p's alignment using seg->pool->alignment */ + + /* .read.flipped: We AVER that the reference that we are reading */ + /* refers to an object for which all the traces that the object is */ + /* white for are also flipped. This is because we don't have any */ + /* write-barrier (in the sense of write-barrier collectors) */ + /* mechanism in place for reading (strictly speaking, writing */ + /* it somewhere after having read it) references that are white. */ + AVER(TraceSetSub(SegWhite(seg), arena->flippedTraces)); + + /* .read.conservative: Scan according to rank phase-of-trace, */ + /* See */ + /* If the segment isn't grey it doesn't need scanning, and in fact it + would be wrong to even ask what rank to scan it at, since there might + not be any traces running. */ + if (TraceSetInter(SegGrey(seg), arena->flippedTraces) != TraceSetEMPTY) { + rank = TraceRankForAccess(arena, seg); + TraceScanSingleRef(arena->flippedTraces, rank, arena, seg, p); + } + + /* We don't need to update the Seg Summary as in PoolSingleAccess + * because we are not changing it after it has been scanned. */ + + ShieldExpose(arena, seg); + ref = *p; + ShieldCover(arena, seg); + return ref; +} + +/* ArenaPoke -- write a single reference, possibly through a barrier */ + +void ArenaPoke(Arena arena, Ref *p, Ref ref) +{ + Seg seg; + + AVERT(Arena, arena); + /* Can't check p as it is arbitrary */ + /* Can't check ref as it is arbitrary */ + + if (SegOfAddr(&seg, arena, (Addr)p)) + ArenaPokeSeg(arena, seg, p, ref); + else + *p = ref; +} + +/* ArenaPokeSeg -- as ArenaPoke, but p must be in seg. 
*/ + +void ArenaPokeSeg(Arena arena, Seg seg, Ref *p, Ref ref) +{ + RefSet summary; + + AVERT(Arena, arena); + AVERT(Seg, seg); + AVER(PoolArena(SegPool(seg)) == arena); + AVER(SegBase(seg) <= (Addr)p); + AVER((Addr)p < SegLimit(seg)); + /* TODO: Consider checking p's alignment using seg->pool->alignment */ + /* ref is arbitrary and can't be checked */ + + ShieldExpose(arena, seg); + *p = ref; + summary = SegSummary(seg); + summary = RefSetAdd(arena, summary, (Addr)ref); + SegSetSummary(seg, summary); + ShieldCover(arena, seg); +} + +/* ArenaRead -- like ArenaPeek, but reference known to be owned by arena */ + +Ref ArenaRead(Arena arena, Ref *p) +{ + Bool b; + Seg seg = NULL; /* suppress "may be used uninitialized" */ + + AVERT(Arena, arena); + + b = SegOfAddr(&seg, arena, (Addr)p); + AVER(b == TRUE); + + return ArenaPeekSeg(arena, seg, p); +} + +/* ArenaWrite -- like ArenaPoke, but reference known to be owned by arena */ + +void ArenaWrite(Arena arena, Ref *p, Ref ref) +{ + Bool b; + Seg seg = NULL; /* suppress "may be used uninitialized" */ + + AVERT(Arena, arena); + + b = SegOfAddr(&seg, arena, (Addr)p); + AVER(b == TRUE); + + ArenaPokeSeg(arena, seg, p, ref); +} + + +/* GlobalsDescribe -- describe the arena globals */ + +Res GlobalsDescribe(Globals arenaGlobals, mps_lib_FILE *stream, Count depth) +{ + Res res; + Arena arena; + Ring node, nextNode; + TraceId ti; + Trace trace; + + if (!TESTT(Globals, arenaGlobals)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, "Globals\n", NULL); + if (res != ResOK) + return res; + + arena = GlobalsArena(arenaGlobals); + res = WriteF(stream, depth + 2, + "mpsVersion $S\n", (WriteFS)arenaGlobals->mpsVersionString, + "lock $P\n", (WriteFP)arenaGlobals->lock, + "pollThreshold $U\n", (WriteFU)arenaGlobals->pollThreshold, + arenaGlobals->insidePoll ? "inside" : "outside", " poll\n", + arenaGlobals->clamped ? 
"clamped\n" : "released\n", + "fillMutatorSize $U\n", (WriteFU)arenaGlobals->fillMutatorSize, + "emptyMutatorSize $U\n", (WriteFU)arenaGlobals->emptyMutatorSize, + "allocMutatorSize $U\n", (WriteFU)arenaGlobals->allocMutatorSize, + "fillInternalSize $U\n", (WriteFU)arenaGlobals->fillInternalSize, + "emptyInternalSize $U\n", (WriteFU)arenaGlobals->emptyInternalSize, + "poolSerial $U\n", (WriteFU)arenaGlobals->poolSerial, + "rootSerial $U\n", (WriteFU)arenaGlobals->rootSerial, + "formatSerial $U\n", (WriteFU)arena->formatSerial, + "threadSerial $U\n", (WriteFU)arena->threadSerial, + "busyTraces $B\n", (WriteFB)arena->busyTraces, + "flippedTraces $B\n", (WriteFB)arena->flippedTraces, + NULL); + if (res != ResOK) + return res; + + res = HistoryDescribe(ArenaHistory(arena), stream, depth + 2); + if (res != ResOK) + return res; + + res = ShieldDescribe(ArenaShield(arena), stream, depth + 2); + if (res != ResOK) + return res; + + res = RootsDescribe(arenaGlobals, stream, depth + 2); + if (res != ResOK) + return res; + + RING_FOR(node, &arenaGlobals->poolRing, nextNode) { + Pool pool = RING_ELT(Pool, arenaRing, node); + res = PoolDescribe(pool, stream, depth + 2); + if (res != ResOK) + return res; + } + + RING_FOR(node, &arena->formatRing, nextNode) { + Format format = RING_ELT(Format, arenaRing, node); + res = FormatDescribe(format, stream, depth + 2); + if (res != ResOK) + return res; + } + + RING_FOR(node, &arena->threadRing, nextNode) { + Thread thread = ThreadRingThread(node); + res = ThreadDescribe(thread, stream, depth + 2); + if (res != ResOK) + return res; + } + + RING_FOR(node, &arena->chainRing, nextNode) { + Chain chain = RING_ELT(Chain, chainRing, node); + res = ChainDescribe(chain, stream, depth + 2); + if (res != ResOK) + return res; + } + + TRACE_SET_ITER(ti, trace, TraceSetUNIV, arena) + if (TraceSetIsMember(arena->busyTraces, trace)) { + res = TraceDescribe(trace, stream, depth + 2); + if (res != ResOK) + return res; + } + TRACE_SET_ITER_END(ti, trace, 
TraceSetUNIV, arena); + + /* @@@@ What about grey rings? */ + return res; +} + + +/* ArenaSetEmergency -- move the arena into emergency mode + * + * Emergency mode is set when garbage collection cannot make progress because + * it can't allocate memory. + * + * Emergency mode affects the choice of PoolFixMethod in new ScanStates. + * See ScanStateInit. + * + * If the traces aren't normal GC traces, and have their fix method + * set to something other than PoolFix, then this won't affect the choice + * of fix method in ScanStateInit and so won't have any effect. Whatever + * caused the first failure will likely repeat. + */ + +void ArenaSetEmergency(Arena arena, Bool emergency) +{ + AVERT(Arena, arena); + AVERT(Bool, emergency); + + EVENT2(ArenaSetEmergency, arena, BOOLOF(emergency)); + + arena->emergency = emergency; +} + +Bool ArenaEmergency(Arena arena) +{ + AVERT(Arena, arena); + return arena->emergency; +} + + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/gp.gmk b/mps/code/gp.gmk new file mode 100644 index 00000000000..1b22a9fd55f --- /dev/null +++ b/mps/code/gp.gmk @@ -0,0 +1,63 @@ +# -*- makefile -*- +# +# gp.gmk: GNUMAKEFILE FRAGMENT FOR GNU CC/GPROF +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# This file is included by platform makefiles that use the GNU CC +# compiler with gprof. It defines the compiler specific variables +# that the common makefile fragment () requires. + + +CC = gcc +CFLAGSCOMPILER = \ + -ansi -pedantic -Wall -Wextra -Werror -Wpointer-arith \ + -Wstrict-prototypes -Wmissing-prototypes \ + -Winline -Waggregate-return -Wnested-externs \ + -Wcast-qual -Wshadow -Wwrite-strings -pg +CFLAGSDEBUG = -g -ggdb3 +CFLAGSOPT = -O -g -ggdb3 + +# gcc -MM generates a dependency line of the form: +# thing.o : thing.c ... +# The sed line converts this into: +# //thing.o //thing.d : thing.c ... +# @@ This sequence is vulnerable to interrupts (for some reason) + +define gendep + $(SHELL) -ec "gcc -c $(CFLAGS) -MM $< | \ + sed '/:/s!$*.o!$(@D)/& $(@D)/$*.d!' > $@" +endef + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/land.c b/mps/code/land.c new file mode 100644 index 00000000000..6d7a0ae00d8 --- /dev/null +++ b/mps/code/land.c @@ -0,0 +1,646 @@ +/* land.c: LAND (COLLECTION OF ADDRESS RANGES) IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * .design: + * + * .critical.macros: In manual-allocation-bound programs using MVFF, + * the Land generic functions are on the critical path via mps_free. + * In non-checking varieties we provide macro alternatives (in mpm.h) + * to these functions that call the underlying methods directly, + * giving a few percent improvement in performance but skipping the + * re-entrancy checking provided by landEnter and landLeave. 
+ */ + +#include "mpm.h" +#include "range.h" + +SRCID(land, "$Id$"); + + +/* Forward declarations */ + +static Res landNoInsert(Range rangeReturn, Land land, Range range); +static Res landNoDelete(Range rangeReturn, Land land, Range range); + + +/* FindDeleteCheck -- check method for a FindDelete value */ + +Bool FindDeleteCheck(FindDelete findDelete) +{ + CHECKL(findDelete == FindDeleteNONE + || findDelete == FindDeleteLOW + || findDelete == FindDeleteHIGH + || findDelete == FindDeleteENTIRE); + UNUSED(findDelete); /* */ + + return TRUE; +} + + +/* landEnter, landLeave -- Avoid re-entrance + * + * .enter-leave: The visitor functions passed to LandIterate and + * LandIterateAndDelete are not allowed to call methods of that land. + * These functions enforce this. + * + * .enter-leave.simple: Some simple queries are fine to call from + * visitor functions. These are marked with the tag of this comment. + */ + +static void landEnter(Land land) +{ + /* Don't need to check as always called from interface function. */ + AVER(!land->inLand); + land->inLand = TRUE; +} + +static void landLeave(Land land) +{ + /* Don't need to check as always called from interface function. 
*/ + AVER(land->inLand); + land->inLand = FALSE; +} + + +/* LandCheck -- check land */ + +Bool LandCheck(Land land) +{ + LandClass klass; + /* .enter-leave.simple */ + CHECKS(Land, land); + CHECKC(Land, land); + klass = ClassOfPoly(Land, land); + CHECKD(LandClass, klass); + CHECKU(Arena, land->arena); + CHECKL(AlignCheck(land->alignment)); + CHECKL(BoolCheck(land->inLand)); + return TRUE; +} + +static Res LandAbsInit(Land land, Arena arena, Align alignment, ArgList args) +{ + AVER(land != NULL); + AVERT(Arena, arena); + AVERT(Align, alignment); + UNUSED(args); + + /* Superclass init */ + InstInit(CouldBeA(Inst, land)); + + land->inLand = TRUE; + land->alignment = alignment; + land->arena = arena; + + SetClassOfPoly(land, CLASS(Land)); + land->sig = LandSig; + AVERC(Land, land); + + return ResOK; +} + +static void LandAbsFinish(Inst inst) +{ + Land land = MustBeA(Land, inst); + AVERC(Land, land); + land->sig = SigInvalid; + NextMethod(Inst, Land, finish)(inst); +} + + +/* LandInit -- initialize land + * + * + */ + +Res LandInit(Land land, LandClass klass, Arena arena, Align alignment, void *owner, ArgList args) +{ + Res res; + + AVER(land != NULL); + AVERT(LandClass, klass); + AVERT(Align, alignment); + + res = klass->init(land, arena, alignment, args); + if (res != ResOK) + return res; + + EVENT2(LandInit, land, owner); + landLeave(land); + return ResOK; +} + + +/* LandFinish -- finish land + * + * + */ + +void LandFinish(Land land) +{ + AVERC(Land, land); + landEnter(land); + + Method(Inst, land, finish)(MustBeA(Inst, land)); +} + + +/* LandSize -- return the total size of ranges in land + * + * + */ + +Size (LandSize)(Land land) +{ + /* .enter-leave.simple */ + AVERC(Land, land); + + return LandSizeMacro(land); +} + + +/* LandInsert -- insert range of addresses into land + * + * + */ + +Res (LandInsert)(Range rangeReturn, Land land, Range range) +{ + Res res; + + AVER(rangeReturn != NULL); + AVERC(Land, land); + AVERT(Range, range); + AVER(RangeIsAligned(range, 
land->alignment)); + AVER(!RangeIsEmpty(range)); + landEnter(land); + + res = LandInsertMacro(rangeReturn, land, range); + + landLeave(land); + return res; +} + + +/* LandInsertSteal -- insert range of addresses into land, possibly + * stealing some of the inserted memory to allocate internal data + * structures. + * + * + */ + +Res LandInsertSteal(Range rangeReturn, Land land, Range rangeIO) +{ + Res res; + + AVER(rangeReturn != NULL); + AVERC(Land, land); + AVER(rangeReturn != rangeIO); + AVERT(Range, rangeIO); + AVER(RangeIsAligned(rangeIO, land->alignment)); + AVER(!RangeIsEmpty(rangeIO)); + + landEnter(land); + + res = Method(Land, land, insertSteal)(rangeReturn, land, rangeIO); + + landLeave(land); + return res; +} + + +/* LandDelete -- delete range of addresses from land + * + * + */ + +Res (LandDelete)(Range rangeReturn, Land land, Range range) +{ + Res res; + + AVER(rangeReturn != NULL); + AVERC(Land, land); + AVERT(Range, range); + AVER(!RangeIsEmpty(range)); + AVER(RangeIsAligned(range, land->alignment)); + landEnter(land); + + res = LandDeleteMacro(rangeReturn, land, range); + + landLeave(land); + return res; +} + + +/* LandDeleteSteal -- delete range of addresses from land, possibly + * stealing some memory from the land to allocate internal data + * structures. 
+ * + * + */ + +Res LandDeleteSteal(Range rangeReturn, Land land, Range range) +{ + Res res; + + AVER(rangeReturn != NULL); + AVERC(Land, land); + AVERT(Range, range); + AVER(!RangeIsEmpty(range)); + AVER(RangeIsAligned(range, land->alignment)); + landEnter(land); + + res = Method(Land, land, deleteSteal)(rangeReturn, land, range); + + landLeave(land); + return res; +} + + +/* LandIterate -- iterate over isolated ranges of addresses in land + * + * + */ + +Bool (LandIterate)(Land land, LandVisitor visitor, void *closure) +{ + Bool b; + AVERC(Land, land); + AVER(FUNCHECK(visitor)); + landEnter(land); + + b = LandIterateMacro(land, visitor, closure); + + landLeave(land); + return b; +} + + +/* LandIterateAndDelete -- iterate over isolated ranges of addresses + * in land, deleting some of them + * + * + */ + +Bool (LandIterateAndDelete)(Land land, LandDeleteVisitor visitor, void *closure) +{ + Bool b; + AVERC(Land, land); + AVER(FUNCHECK(visitor)); + landEnter(land); + + b = LandIterateAndDeleteMacro(land, visitor, closure); + + landLeave(land); + return b; +} + + +/* LandFindFirst -- find first range of given size + * + * + */ + +Bool (LandFindFirst)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Bool b; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERC(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + AVERT(FindDelete, findDelete); + landEnter(land); + + b = LandFindFirstMacro(rangeReturn, oldRangeReturn, land, size, findDelete); + + landLeave(land); + return b; +} + + +/* LandFindLast -- find last range of given size + * + * + */ + +Bool (LandFindLast)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Bool b; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERC(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + AVERT(FindDelete, findDelete); + landEnter(land); + + b = LandFindLastMacro(rangeReturn, oldRangeReturn, land, 
size, findDelete); + + landLeave(land); + return b; +} + + +/* LandFindLargest -- find largest range of at least given size + * + * + */ + +Bool (LandFindLargest)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Bool b; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERC(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + AVERT(FindDelete, findDelete); + landEnter(land); + + b = LandFindLargestMacro(rangeReturn, oldRangeReturn, land, size, findDelete); + + landLeave(land); + return b; +} + + +/* LandFindInSize -- find range of given size in set of zones + * + * + */ + +Res (LandFindInZones)(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high) +{ + Res res; + + AVER(foundReturn != NULL); + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERC(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + /* AVER(ZoneSet, zoneSet); */ + AVERT(Bool, high); + landEnter(land); + + res = LandFindInZonesMacro(foundReturn, rangeReturn, oldRangeReturn, + land, size, zoneSet, high); + + landLeave(land); + return res; +} + + +/* LandDescribe -- describe land for debugging + * + * + */ + +Res LandDescribe(Land land, mps_lib_FILE *stream, Count depth) +{ + return Method(Inst, land, describe)(MustBeA(Inst, land), stream, depth); +} + + +/* landFlushVisitor -- visitor for LandFlush. + * + * closure argument is the destination Land. Attempt to insert the + * range into the destination. + * + * .flush.critical: In manual-allocation-bound programs using MVFF + * this is on the critical paths via mps_alloc (and then PoolAlloc, + * MVFFAlloc, failoverFind*, LandFlush) and mps_free (and then + * MVFFFree, failoverInsert, LandFlush). 
+ */ +Bool LandFlushVisitor(Bool *deleteReturn, Land land, Range range, + void *closure) +{ + Res res; + RangeStruct newRange; + Land dest; + + AVER_CRITICAL(deleteReturn != NULL); + AVERC_CRITICAL(Land, land); + AVERT_CRITICAL(Range, range); + AVER_CRITICAL(closure != NULL); + + dest = MustBeA_CRITICAL(Land, closure); + res = LandInsert(&newRange, dest, range); + if (res == ResOK) { + *deleteReturn = TRUE; + return TRUE; + } else { + *deleteReturn = FALSE; + return FALSE; + } +} + + +/* LandFlush -- move ranges from src to dest + * + * + */ + +Bool (LandFlush)(Land dest, Land src) +{ + AVERC(Land, dest); + AVERC(Land, src); + + return LandFlushMacro(dest, src); +} + + +/* LandClassCheck -- check land class */ + +Bool LandClassCheck(LandClass klass) +{ + CHECKL(InstClassCheck(&klass->instClassStruct)); + CHECKL(klass->size >= sizeof(LandStruct)); + CHECKL(FUNCHECK(klass->init)); + CHECKL(FUNCHECK(klass->insert)); + CHECKL(FUNCHECK(klass->delete)); + CHECKL(FUNCHECK(klass->findFirst)); + CHECKL(FUNCHECK(klass->findLast)); + CHECKL(FUNCHECK(klass->findLargest)); + CHECKL(FUNCHECK(klass->findInZones)); + + /* Check that land classes override sets of related methods. 
*/ + CHECKL((klass->init == LandAbsInit) + == (klass->instClassStruct.finish == LandAbsFinish)); + CHECKL((klass->insert == landNoInsert) == (klass->delete == landNoDelete)); + + CHECKS(LandClass, klass); + return TRUE; +} + + +static Size landNoSize(Land land) +{ + UNUSED(land); + NOTREACHED; + return 0; +} + +/* LandSlowSize -- generic size method but slow */ + +static Bool landSizeVisitor(Land land, Range range, + void *closure) +{ + Size *size; + + AVERC(Land, land); + AVERT(Range, range); + AVER(closure != NULL); + + size = closure; + *size += RangeSize(range); + + return TRUE; +} + +Size LandSlowSize(Land land) +{ + Size size = 0; + Bool b = LandIterate(land, landSizeVisitor, &size); + AVER(b); + return size; +} + +static Res landNoInsert(Range rangeReturn, Land land, Range range) +{ + AVER(rangeReturn != NULL); + AVERC(Land, land); + AVERT(Range, range); + return ResUNIMPL; +} + +static Res landNoDelete(Range rangeReturn, Land land, Range range) +{ + AVER(rangeReturn != NULL); + AVERC(Land, land); + AVERT(Range, range); + return ResUNIMPL; +} + +static Bool landNoIterate(Land land, LandVisitor visitor, void *closure) +{ + AVERC(Land, land); + AVER(visitor != NULL); + UNUSED(closure); + return FALSE; +} + +static Bool landNoIterateAndDelete(Land land, LandDeleteVisitor visitor, void *closure) +{ + AVERC(Land, land); + AVER(visitor != NULL); + UNUSED(closure); + return FALSE; +} + +static Bool landNoFind(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERC(Land, land); + UNUSED(size); + AVERT(FindDelete, findDelete); + return ResUNIMPL; +} + +static Res landNoFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high) +{ + AVER(foundReturn != NULL); + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERC(Land, land); + UNUSED(size); + UNUSED(zoneSet); + AVERT(Bool, high); + return 
ResUNIMPL; +} + +static Res LandAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Land land = CouldBeA(Land, inst); + LandClass klass; + Res res; + + if (!TESTC(Land, land)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, Land, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + klass = ClassOfPoly(Land, land); + return WriteF(stream, depth + 2, + "class $P (\"$S\")\n", + (WriteFP)klass, (WriteFS)ClassName(klass), + "arena $P\n", (WriteFP)land->arena, + "align $U\n", (WriteFU)land->alignment, + "inLand $S\n", WriteFYesNo(land->inLand), + NULL); +} + +DEFINE_CLASS(Inst, LandClass, klass) +{ + INHERIT_CLASS(klass, LandClass, InstClass); + AVERT(InstClass, klass); +} + +DEFINE_CLASS(Land, Land, klass) +{ + INHERIT_CLASS(&klass->instClassStruct, Land, Inst); + klass->instClassStruct.describe = LandAbsDescribe; + klass->instClassStruct.finish = LandAbsFinish; + klass->size = sizeof(LandStruct); + klass->init = LandAbsInit; + klass->sizeMethod = landNoSize; + klass->insert = landNoInsert; + klass->insertSteal = landNoInsert; + klass->delete = landNoDelete; + klass->deleteSteal = landNoDelete; + klass->iterate = landNoIterate; + klass->iterateAndDelete = landNoIterateAndDelete; + klass->findFirst = landNoFind; + klass->findLast = landNoFind; + klass->findLargest = landNoFind; + klass->findInZones = landNoFindInZones; + klass->sig = LandClassSig; + AVERT(LandClass, klass); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/landtest.c b/mps/code/landtest.c new file mode 100644 index 00000000000..b4edc8f4249 --- /dev/null +++ b/mps/code/landtest.c @@ -0,0 +1,680 @@ +/* landtest.c: LAND TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * Test all three Land implementations against duplicate operations on + * a bit-table. + * + * Test the "steal" operations on a CBS. + */ + +#include "cbs.h" +#include "failover.h" +#include "freelist.h" +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpstd.h" +#include "poolmfs.h" +#include "testlib.h" + +#include /* printf */ + +SRCID(landtest, "$Id$"); + + +#define ArraySize ((Size)123456) + +/* CBS is much faster than Freelist, so we apply more operations to + * the former. 
*/ +#define nCBSOperations ((Size)125000) +#define nFLOperations ((Size)12500) +#define nFOOperations ((Size)12500) + +static Count NAllocateTried, NAllocateSucceeded, NDeallocateTried, + NDeallocateSucceeded; + +static int verbose = 0; + +typedef struct TestStateStruct { + Align align; + BT allocTable; + Addr block; + Size size; + Land land; +} TestStateStruct, *TestState; + +typedef struct CheckTestClosureStruct { + TestState state; + Addr limit; + Addr oldLimit; +} CheckTestClosureStruct, *CheckTestClosure; + + +static Addr (addrOfIndex)(TestState state, Index i) +{ + return AddrAdd(state->block, (i * state->align)); +} + + +static Index (indexOfAddr)(TestState state, Addr a) +{ + return (Index)(AddrOffset(state->block, a) / state->align); +} + + +static void describe(TestState state) +{ + die(LandDescribe(state->land, mps_lib_get_stdout(), 0), "LandDescribe"); +} + + +static Bool checkVisitor(Land land, Range range, void *closure) +{ + Addr base, limit; + CheckTestClosure cl = closure; + + testlib_unused(land); + Insist(cl != NULL); + + base = RangeBase(range); + limit = RangeLimit(range); + + if (base > cl->oldLimit) { + Insist(BTIsSetRange(cl->state->allocTable, + indexOfAddr(cl->state, cl->oldLimit), + indexOfAddr(cl->state, base))); + } else { /* must be at start of table */ + Insist(base == cl->oldLimit); + Insist(cl->oldLimit == cl->state->block); + } + + Insist(BTIsResRange(cl->state->allocTable, + indexOfAddr(cl->state, base), + indexOfAddr(cl->state, limit))); + + cl->oldLimit = limit; + + return TRUE; +} + +static void check(TestState state) +{ + CheckTestClosureStruct closure; + Bool b; + + closure.state = state; + closure.limit = addrOfIndex(state, state->size); + closure.oldLimit = state->block; + + b = LandIterate(state->land, checkVisitor, &closure); + Insist(b); + + if (closure.oldLimit == state->block) + Insist(BTIsSetRange(state->allocTable, 0, + indexOfAddr(state, closure.limit))); + else if (closure.limit > closure.oldLimit) + 
Insist(BTIsSetRange(state->allocTable, + indexOfAddr(state, closure.oldLimit), + indexOfAddr(state, closure.limit))); + else + Insist(closure.oldLimit == closure.limit); +} + + +static Word fbmRnd(Word limit) +{ + /* Not very uniform, but never mind. */ + return (Word)rnd() % limit; +} + + +/* nextEdge -- Finds the next transition in the bit table + * + * Returns the index greater than such that the + * range [, ) has the same value in the bit table, + * and has a different value or does not exist. + */ + +static Index nextEdge(BT bt, Size size, Index base) +{ + Index end; + Bool baseValue; + + Insist(bt != NULL); + Insist(base < size); + + baseValue = BTGet(bt, base); + + for(end = base + 1; end < size && BTGet(bt, end) == baseValue; end++) + NOOP; + + return end; +} + + +/* lastEdge -- Finds the previous transition in the bit table + * + * Returns the index less than such that the range + * [, ] has the same value in the bit table, + * and -1 has a different value or does not exist. + */ + +static Index lastEdge(BT bt, Size size, Index base) +{ + Index end; + Bool baseValue; + + Insist(bt != NULL); + Insist(base < size); + + baseValue = BTGet(bt, base); + + for(end = base; end > (Index)0 && BTGet(bt, end - 1) == baseValue; end--) + NOOP; + + return end; +} + + +/* randomRange -- picks random range within table + * + * The function first picks a uniformly distributed within the table. + * + * It then scans forward a binary exponentially distributed number of + * "edges" in the table (that is, transitions between set and reset) + * to get . Note that there is a 50% chance that will be + * the next edge, a 25% chance it will be the edge after, etc., until + * the end of the table. + * + * Finally it picks a uniformly distributed in the range + * [base+1, limit]. + * + * Hence there is a somewhat better than 50% chance that the range will be + * all either set or reset. 
+ */ + +static void randomRange(Addr *baseReturn, Addr *limitReturn, TestState state) +{ + Index base; /* the start of our range */ + Index end; /* an edge (i.e. different from its predecessor) */ + /* after base */ + Index limit; /* a randomly chosen value in (base, limit]. */ + + base = fbmRnd(state->size); + + do { + end = nextEdge(state->allocTable, state->size, base); + } while (end < state->size && fbmRnd(2) == 0); /* p=0.5 exponential */ + + Insist(end > base); + + limit = base + 1 + fbmRnd(end - base); + + *baseReturn = addrOfIndex(state, base); + *limitReturn = addrOfIndex(state, limit); +} + + +static void allocate(TestState state, Addr base, Addr limit) +{ + Res res; + Index ib, il; /* Indexed for base and limit */ + Bool isFree; + RangeStruct range, oldRange; + Addr outerBase, outerLimit; /* interval containing [ib, il) */ + + ib = indexOfAddr(state, base); + il = indexOfAddr(state, limit); + + isFree = BTIsResRange(state->allocTable, ib, il); + + NAllocateTried++; + + if (isFree) { + outerBase = + addrOfIndex(state, lastEdge(state->allocTable, state->size, ib)); + outerLimit = + addrOfIndex(state, nextEdge(state->allocTable, state->size, il - 1)); + } else { + outerBase = outerLimit = NULL; + } + + RangeInit(&range, base, limit); + res = LandDelete(&oldRange, state->land, &range); + + if (verbose) { + printf("allocate: [%p,%p) -- %s\n", + (void *)base, (void *)limit, isFree ? 
"succeed" : "fail"); + describe(state); + } + + if (!isFree) { + die_expect((mps_res_t)res, MPS_RES_FAIL, + "Succeeded in deleting allocated block"); + } else { /* isFree */ + die_expect((mps_res_t)res, MPS_RES_OK, + "failed to delete free block"); + Insist(RangeBase(&oldRange) == outerBase); + Insist(RangeLimit(&oldRange) == outerLimit); + NAllocateSucceeded++; + BTSetRange(state->allocTable, ib, il); + } +} + + +static void deallocate(TestState state, Addr base, Addr limit) +{ + Res res; + Index ib, il; + Bool isAllocated; + Addr outerBase = base, outerLimit = limit; /* interval containing [ib, il) */ + RangeStruct range, freeRange; /* interval returned by the manager */ + + ib = indexOfAddr(state, base); + il = indexOfAddr(state, limit); + + isAllocated = BTIsSetRange(state->allocTable, ib, il); + + NDeallocateTried++; + + if (isAllocated) { + /* Find the free blocks adjacent to the allocated block */ + if (ib > 0 && !BTGet(state->allocTable, ib - 1)) { + outerBase = + addrOfIndex(state, lastEdge(state->allocTable, state->size, ib - 1)); + } else { + outerBase = base; + } + + if (il < state->size && !BTGet(state->allocTable, il)) { + outerLimit = + addrOfIndex(state, nextEdge(state->allocTable, state->size, il)); + } else { + outerLimit = limit; + } + } + + RangeInit(&range, base, limit); + res = LandInsert(&freeRange, state->land, &range); + + if (verbose) { + printf("deallocate: [%p,%p) -- %s\n", + (void *)base, (void *)limit, isAllocated ? 
"succeed" : "fail"); + describe(state); + } + + if (!isAllocated) { + die_expect((mps_res_t)res, MPS_RES_FAIL, + "succeeded in inserting non-allocated block"); + } else { /* isAllocated */ + die_expect((mps_res_t)res, MPS_RES_OK, + "failed to insert allocated block"); + + NDeallocateSucceeded++; + BTResRange(state->allocTable, ib, il); + Insist(RangeBase(&freeRange) == outerBase); + Insist(RangeLimit(&freeRange) == outerLimit); + } +} + + +static void find(TestState state, Size size, Bool high, FindDelete findDelete) +{ + Bool expected, found; + Index expectedBase, expectedLimit; + RangeStruct foundRange, oldRange; + Addr origBase, origLimit; + + origBase = origLimit = NULL; + expected = (high ? BTFindLongResRangeHigh : BTFindLongResRange) + (&expectedBase, &expectedLimit, state->allocTable, + (Index)0, (Index)state->size, (Count)size); + + if (expected) { + origBase = addrOfIndex(state, expectedBase); + origLimit = addrOfIndex(state, expectedLimit); + + switch(findDelete) { + case FindDeleteNONE: + /* do nothing */ + break; + case FindDeleteENTIRE: + break; + case FindDeleteLOW: + expectedLimit = expectedBase + size; + break; + case FindDeleteHIGH: + expectedBase = expectedLimit - size; + break; + default: + cdie(0, "invalid findDelete"); + break; + } + } + + found = (high ? LandFindLast : LandFindFirst) + (&foundRange, &oldRange, state->land, size * state->align, findDelete); + + if (verbose) { + printf("find %s %lu: ", high ? 
"last" : "first", + (unsigned long)(size * state->align)); + if (expected) { + printf("expecting [%p,%p)\n", + (void *)addrOfIndex(state, expectedBase), + (void *)addrOfIndex(state, expectedLimit)); + } else { + printf("expecting this not to be found\n"); + } + if (found) { + printf(" found [%p,%p)\n", (void *)RangeBase(&foundRange), + (void *)RangeLimit(&foundRange)); + } else { + printf(" not found\n"); + } + } + + Insist(found == expected); + + if (found) { + Insist(expectedBase == indexOfAddr(state, RangeBase(&foundRange))); + Insist(expectedLimit == indexOfAddr(state, RangeLimit(&foundRange))); + + if (findDelete != FindDeleteNONE) { + Insist(RangeBase(&oldRange) == origBase); + Insist(RangeLimit(&oldRange) == origLimit); + BTSetRange(state->allocTable, expectedBase, expectedLimit); + } + } +} + +static void test(TestState state, unsigned n, unsigned operations) +{ + Addr base, limit; + unsigned i; + Size size; + Bool high; + FindDelete findDelete = FindDeleteNONE; + + BTSetRange(state->allocTable, 0, state->size); /* Initially all allocated */ + check(state); + for(i = 0; i < n; i++) { + switch (fbmRnd(operations)) { + case 0: + randomRange(&base, &limit, state); + allocate(state, base, limit); + break; + case 1: + randomRange(&base, &limit, state); + deallocate(state, base, limit); + break; + case 2: + size = fbmRnd(state->size / 10) + 1; + high = fbmRnd(2) ? 
TRUE : FALSE; + switch(fbmRnd(6)) { + default: findDelete = FindDeleteNONE; break; + case 3: findDelete = FindDeleteLOW; break; + case 4: findDelete = FindDeleteHIGH; break; + case 5: findDelete = FindDeleteENTIRE; break; + } + find(state, size, high, findDelete); + break; + default: + cdie(0, "invalid operation"); + return; + } + if ((i + 1) % 1000 == 0) + check(state); + } +} + +#define testArenaSIZE (((size_t)4)<<20) + +static void test_land(void) +{ + static const struct { + LandClass (*klass)(void); + unsigned operations; + } cbsConfig[] = { + {CBSClassGet, 2}, + {CBSFastClassGet, 3}, + {CBSZonedClassGet, 3}, + }; + mps_arena_t mpsArena; + Arena arena; + TestStateStruct state; + void *p; + MFSStruct blockPool; + CBSStruct cbsStruct; + FreelistStruct flStruct; + FailoverStruct foStruct; + Land cbs = CBSLand(&cbsStruct); + Land fl = FreelistLand(&flStruct); + Land fo = FailoverLand(&foStruct); + Pool mfs = MFSPool(&blockPool); + size_t i; + + state.size = ArraySize; + state.align = (1 << rnd() % 4) * MPS_PF_ALIGN; + + NAllocateTried = NAllocateSucceeded = NDeallocateTried = + NDeallocateSucceeded = 0; + + die(mps_arena_create(&mpsArena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + arena = (Arena)mpsArena; /* avoid pun */ + + die((mps_res_t)BTCreate(&state.allocTable, arena, state.size), + "failed to create alloc table"); + + die((mps_res_t)ControlAlloc(&p, arena, (state.size + 1) * state.align), + "failed to allocate block"); + state.block = AddrAlignUp(p, state.align); + + if (verbose) { + printf("Allocated block [%p,%p)\n", (void *)state.block, + (void *)AddrAdd(state.block, state.size)); + } + + /* 1. Test CBS */ + + for (i = 0; i < NELEMS(cbsConfig); ++i) { + MPS_ARGS_BEGIN(args) { + die((mps_res_t)LandInit(cbs, cbsConfig[i].klass(), arena, state.align, + NULL, args), + "failed to initialise CBS"); + } MPS_ARGS_END(args); + state.land = cbs; + test(&state, nCBSOperations, cbsConfig[i].operations); + LandFinish(cbs); + } + + /* 2. 
Test Freelist */ + + die((mps_res_t)LandInit(fl, CLASS(Freelist), arena, state.align, + NULL, mps_args_none), + "failed to initialise Freelist"); + state.land = fl; + test(&state, nFLOperations, 3); + LandFinish(fl); + + /* 3. Test CBS-failing-over-to-Freelist (always failing over on + * first iteration, never failing over on second; see fotest.c for a + * test case that randomly switches fail-over on and off) + */ + + for (i = 0; i < 2; ++i) { + MPS_ARGS_BEGIN(piArgs) { + MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(CBSFastBlockStruct)); + MPS_ARGS_ADD(piArgs, MPS_KEY_EXTEND_BY, ArenaGrainSize(arena)); + MPS_ARGS_ADD(piArgs, MFSExtendSelf, i != 0); + die(PoolInit(mfs, arena, PoolClassMFS(), piArgs), "PoolInit"); + } MPS_ARGS_END(piArgs); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, CBSBlockPool, mfs); + die((mps_res_t)LandInit(cbs, CLASS(CBSFast), arena, state.align, + NULL, args), + "failed to initialise CBS"); + } MPS_ARGS_END(args); + + die((mps_res_t)LandInit(fl, CLASS(Freelist), arena, state.align, + NULL, mps_args_none), + "failed to initialise Freelist"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, FailoverPrimary, cbs); + MPS_ARGS_ADD(args, FailoverSecondary, fl); + die((mps_res_t)LandInit(fo, CLASS(Failover), arena, state.align, + NULL, args), + "failed to initialise Failover"); + } MPS_ARGS_END(args); + + state.land = fo; + test(&state, nFOOperations, 3); + LandFinish(fo); + LandFinish(fl); + LandFinish(cbs); + PoolFinish(mfs); + } + + ControlFree(arena, p, (state.size + 1) * state.align); + mps_arena_destroy(arena); + + printf("Number of allocations attempted: %"PRIuLONGEST"\n", + (ulongest_t)NAllocateTried); + printf("Number of allocations succeeded: %"PRIuLONGEST"\n", + (ulongest_t)NAllocateSucceeded); + printf("Number of deallocations attempted: %"PRIuLONGEST"\n", + (ulongest_t)NDeallocateTried); + printf("Number of deallocations succeeded: %"PRIuLONGEST"\n", + (ulongest_t)NDeallocateSucceeded); +} + +static void shuffle(Addr *addr, size_t 
n) +{ + size_t i; + for (i = 0; i < n; ++i) { + size_t j = rnd() % (n - i); + Addr tmp = addr[j]; + addr[j] = addr[i]; + addr[i] = tmp; + } +} + +static void test_steal(void) +{ + mps_arena_t mpsArena; + Arena arena; + MFSStruct mfs; /* stores blocks for the CBS */ + Pool pool = MFSPool(&mfs); + CBSStruct cbs; /* allocated memory land */ + Land land = CBSLand(&cbs); + Addr base; + Addr addr[4096]; + Size grainSize; + size_t i, n = NELEMS(addr), stolenInsert = 0, missingDelete = 0; + + MPS_ARGS_BEGIN(args) { + die(mps_arena_create_k(&mpsArena, mps_arena_class_vm(), args), "arena"); + } MPS_ARGS_END(args); + arena = (Arena)mpsArena; /* avoid pun */ + grainSize = ArenaGrainSize(arena); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, sizeof(RangeTreeStruct)); + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, grainSize); + MPS_ARGS_ADD(args, MFSExtendSelf, FALSE); + die(PoolInit(pool, arena, CLASS(MFSPool), args), "pool"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, CBSBlockPool, pool); + die(LandInit(land, CLASS(CBS), arena, grainSize, NULL, args), + "land"); + } MPS_ARGS_END(args); + + /* Allocate a range of grains. */ + die(ArenaAlloc(&base, LocusPrefDefault(), grainSize * n, pool), "alloc"); + for (i = 0; i < n; ++i) + addr[i] = AddrAdd(base, i * grainSize); + + /* Shuffle the grains. */ + shuffle(addr, n); + + /* Insert grains into the land in shuffled order. */ + for (i = 0; i < n; ++i) { + RangeStruct range, origRange, containingRange; + RangeInitSize(&range, addr[i], grainSize); + RangeCopy(&origRange, &range); + die(LandInsertSteal(&containingRange, land, &range), "steal"); + if (!RangesEqual(&origRange, &range)) + ++ stolenInsert; + } + + /* Shuffle grains again. */ + shuffle(addr, n); + + /* Delete unstolen grains from the land in shuffled order. 
*/ + for (i = 0; i < n; ++i) { + RangeStruct range, containingRange; + Res res; + RangeInitSize(&range, addr[i], grainSize); + res = LandDeleteSteal(&containingRange, land, &range); + if (res == ResOK) { + ArenaFree(addr[i], grainSize, pool); + } else { + Insist(res == ResFAIL); /* grain was stolen */ + ++ missingDelete; + } + } + + Insist(LandSize(land) == 0); + LandFinish(land); + Insist(PoolFreeSize(pool) == PoolTotalSize(pool)); + PoolFinish(pool); + mps_arena_destroy(arena); + Insist(stolenInsert <= missingDelete); + Insist(missingDelete < n); + printf("Stolen on insert: %"PRIuLONGEST"\n", (ulongest_t)stolenInsert); + printf("Missing on delete: %"PRIuLONGEST"\n", (ulongest_t)missingDelete); +} + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + test_land(); + test_steal(); + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ld.c b/mps/code/ld.c new file mode 100644 index 00000000000..d9bc69c1ff4 --- /dev/null +++ b/mps/code/ld.c @@ -0,0 +1,352 @@ +/* ld.c: LOCATION DEPENDENCY IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .def: A location dependency records the fact that the bit-patterns + * of some references will be used directly (most likely for + * hashing), and provides a protocol for finding out whether that + * dependency has become stale because a reference has been changed (by + * a moving memory manager). + * + * .rationale: The client may build hash-tables using pointer hashing. + * The collector may change the values of the pointers transparently, + * by fixing them and moving the objects. The hash function will no + * longer return the same value, and the object can't be found in + * the expected bucket. When the client can't find an object in a + * hashtable it must check to see if any of the references in the table + * have moved, and rehash if they have. Location dependency provides + * a reasonably accurate way of determining whether this has happened. + * + * .impl: A location dependency consists of an epoch (monotonically + * increasing notion of time) and a reference set. The epoch records + * when the location dependency started, and the reference set + * accumulates an approximation to the set of references which are + * depended on. 
The client can check to see if any of these + * references have moved since the epoch. + * + * .history: The current epoch, and a history of object movement + * are recorded in the arena. Each slot in the history contains a + * summary of all the movement since an earlier epoch (maintained by + * LDAge). To see if a dependency has become stale all that + * is needed is to see whether its reference set intersects with the + * movement since its epoch. + * + * .mod: LDHistoryLENGTH is used as a modulus to calculate the offset + * of an epoch in the history, so it's best if this is a power of two. + * + * + * .epoch-size: The epoch should probably be a longer integer to avoid + * the possibility of overflow. + * (32 bits only gives 50 days at 1ms frequency) + * + * .ld.access: Accesses (reads and writes) to the ld structure must be + * "wrapped" with an ShieldExpose/Cover pair if and only if the access + * is taking place inside the arena. Currently this is only the case for + * LDReset. + */ + +#include "mpm.h" + +SRCID(ld, "$Id$"); + + +void HistoryInit(History history) +{ + Index i; + + AVER(history != NULL); + + history->epoch = 0; + history->prehistory = RefSetEMPTY; + for (i = 0; i < LDHistoryLENGTH; ++i) + history->history[i] = RefSetEMPTY; + + history->sig = HistorySig; + AVERT(History, history); +} + +Bool HistoryCheck(History history) +{ + Index i; + RefSet rs; + + CHECKS(History, history); + + /* check that each history entry is a subset of the next oldest */ + rs = RefSetEMPTY; + /* note this loop starts from 1; there is no history age 0 */ + for (i = 1; i <= LDHistoryLENGTH; ++i) { + /* check history age 'i'; 'j' is the history index. 
*/ + Index j = (history->epoch + LDHistoryLENGTH - i) % LDHistoryLENGTH; + CHECKL(RefSetSub(rs, history->history[j])); + rs = history->history[j]; + } + /* the oldest history entry must be a subset of the prehistory */ + CHECKL(RefSetSub(rs, history->prehistory)); + + return TRUE; +} + +void HistoryFinish(History history) +{ + AVERT(History, history); + history->sig = SigInvalid; +} + +Res HistoryDescribe(History history, mps_lib_FILE *stream, Count depth) +{ + Res res; + Index i; + + if (!TESTT(History, history)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = WriteF(stream, depth, + "History $P {\n", (WriteFP)history, + " epoch = $U\n", (WriteFU)history->epoch, + " prehistory = $B\n", (WriteFB)history->prehistory, + " history {\n", + " [note: indices are raw, not rotated]\n", + NULL); + if (res != ResOK) + return res; + + for (i = 0; i < LDHistoryLENGTH; ++i) { + res = WriteF(stream, depth + 4, + "[$U] = $B\n", (WriteFU)i, (WriteFB)history->history[i], + NULL); + if (res != ResOK) + return res; + } + + res = WriteF(stream, depth, + " }\n", + "} History $P\n", (WriteFP)history, + NULL); + if (res != ResOK) + return res; + + return ResOK; +} + + +/* LDReset -- reset a dependency to empty + * + * .reset.sync: This does not need to be synchronized with LDAge + * because if the epoch advances after it is read the dependency + * will simply include movement for more time than necessary. + */ +void LDReset(mps_ld_t ld, Arena arena) +{ + Bool b; + Seg seg; + + AVER(ld != NULL); + AVERT(Arena, arena); + + b = SegOfAddr(&seg, arena, (Addr)ld); + if (b) + ShieldExpose(arena, seg); /* .ld.access */ + ld->_epoch = ArenaHistory(arena)->epoch; + ld->_rs = RefSetEMPTY; + if (b) + ShieldCover(arena, seg); +} + + +/* LDAdd -- add a reference to a dependency + * + * .add.lock-free: This function is thread safe with respect to the + * (rest of the) mps. It is unnecessary to claim locks before calling + * this function. 
+ * + * .add.user-serial: + * However, this function is _not_ thread safe with respect to itself. + * Users should ensure that calls to LDAdd operating on the same LD are + * serialized. + * + * .add.sync: Add must take place _before_ the location of the reference + * is depended on. If the reference changes between adding and + * depending it will show up as moved because the movement will have + * occurred since the epoch recorded in the dependency. If the location + * were used first only the new location of the reference would end up + * in the set. + * + * .add.no-arena-check: Add does not check that the address belongs to + * the arena because this would require taking the arena lock. We + * would rather that this function be lock-free even if some errors + * are not detected. + * + * .add.no-align-check: Add does not check that the address is + * aligned, for the same reason as .add.no-arena-check: it can't find out which + * pool the address belongs to without taking the lock. + */ +void LDAdd(mps_ld_t ld, Arena arena, Addr addr) +{ + AVER(ld != NULL); + AVER(TESTT(Arena, arena)); /* see .add.lock-free */ + AVER(ld->_epoch <= ArenaHistory(arena)->epoch); + + ld->_rs = RefSetAdd(arena, ld->_rs, addr); +} + + +/* LDIsStaleAny -- check whether any dependency is stale + * + * .stale.thread-safe: This function is thread safe. It will return a + * correct (but possibly conservative) answer regardless of the number + * of calls to LDAge anywhere during the function. Update with care. + * + * .stale.current: If the dependency's epoch is the current epoch, + * nothing can have moved since it was initialized. + * + * .stale.recent: If the dependency is recent, see if it intersects + * with everything which has moved since it was initialized. + * + * .stale.recent.conservative: The refset from the history table is + * loaded before we check whether ld->_epoch is "recent" with respect to + * the current epoch. 
This means that we may (conservatively) decide + * to use the prehistory instead. + * + * .stale.old: Otherwise, if the dependency is older than the length + * of the history, check it against all movement that has ever occurred. + */ +Bool LDIsStaleAny(mps_ld_t ld, Arena arena) +{ + History history; + RefSet rs; + + AVER(ld != NULL); + AVER(TESTT(Arena, arena)); /* .stale.thread-safe */ + history = ArenaHistory(arena); + AVER(ld->_epoch <= history->epoch); + + if (history->epoch == ld->_epoch) /* .stale.current */ + return FALSE; + + /* Load the history refset, _then_ check to see if it's recent. + * This may in fact load an okay refset, which we decide to throw + * away and use the pre-history instead. */ + rs = history->history[ld->_epoch % LDHistoryLENGTH]; + /* .stale.recent */ + /* .stale.recent.conservative */ + if (history->epoch - ld->_epoch > LDHistoryLENGTH) { + rs = history->prehistory; /* .stale.old */ + } + + return RefSetInter(ld->_rs, rs) != RefSetEMPTY; +} + + +/* LDIsStale -- check whether a particular dependency is stale + * + * .stale.conservative: In fact we just ignore the address and test if + * any dependency is stale. This is conservatively correct (no false + * negatives) but provides a hook for future improvement. + * + * .stale.no-arena-check: See .add.no-arena-check. + * + * .stale.no-align-check: See .add.no-align-check. + */ +Bool LDIsStale(mps_ld_t ld, Arena arena, Addr addr) +{ + UNUSED(addr); + return LDIsStaleAny(ld, arena); +} + + +/* LDAge -- age the arena by adding a moved set + * + * This stores the fact that a set of references has changed in + * the history in the arena structure, and increments the epoch. + * + * This is only called during a 'flip', because it must be atomic + * w.r.t. the mutator (and therefore w.r.t. LDIsStale). This is + * because it updates the notion of the 'current' and 'oldest' history + * entries. 
+ */ +void LDAge(Arena arena, RefSet rs) +{ + History history; + Size i; + + AVERT(Arena, arena); + history = ArenaHistory(arena); + AVER(rs != RefSetEMPTY); + + /* Replace the entry for epoch - LDHistoryLENGTH by an empty */ + /* set which will become the set which has moved since the */ + /* current epoch. */ + history->history[history->epoch % LDHistoryLENGTH] = RefSetEMPTY; + + /* Record the fact that the moved set has moved, by adding it */ + /* to all the sets in the history, including the set for the */ + /* current epoch. */ + for(i = 0; i < LDHistoryLENGTH; ++i) + history->history[i] = RefSetUnion(history->history[i], rs); + + /* This is the union of all movement since time zero. */ + history->prehistory = RefSetUnion(history->prehistory, rs); + + /* Advance the epoch by one. */ + ++history->epoch; + AVER(history->epoch != 0); /* .epoch-size */ +} + + +/* LDMerge -- merge two location dependencies + * + * .merge.lock-free: This function is thread-safe with respect to the + * (rest of the) MPS. It is unnecessary to claim locks before calling + * this function. + */ +void LDMerge(mps_ld_t ld, Arena arena, mps_ld_t from) +{ + AVER(ld != NULL); + AVER(TESTT(Arena, arena)); /* .merge.lock-free */ + AVER(ld->_epoch <= ArenaHistory(arena)->epoch); + AVER(from != NULL); + AVER(from->_epoch <= ArenaHistory(arena)->epoch); + + /* If a reference has been added since epoch e1 then I've */ + /* certainly added since epoch e0 where e0 < e1. Therefore */ + /* the epoch of the merged ld is the minimum. */ + if (from->_epoch < ld->_epoch) + ld->_epoch = from->_epoch; + + /* The set of references added is the union of the two. */ + ld->_rs = RefSetUnion(ld->_rs, from->_rs); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/lia6gc.gmk b/mps/code/lia6gc.gmk new file mode 100644 index 00000000000..1c6437f68a0 --- /dev/null +++ b/mps/code/lia6gc.gmk @@ -0,0 +1,55 @@ +# -*- makefile -*- +# +# lia6gc.gmk: BUILD FOR LINUX/ARM64/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2021 Ravenbrook Limited. See end of file for license. + +PFM = lia6gc + +MPMPF = \ + lockix.c \ + prmcanan.c \ + prmcix.c \ + prmclia6.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -lpthread + +include gc.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2021 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/lia6ll.gmk b/mps/code/lia6ll.gmk new file mode 100644 index 00000000000..9e329ee98a4 --- /dev/null +++ b/mps/code/lia6ll.gmk @@ -0,0 +1,55 @@ +# -*- makefile -*- +# +# lia6ll.gmk: BUILD FOR LINUX/ARM64/Clang PLATFORM +# +# $Id$ +# Copyright (c) 2001-2021 Ravenbrook Limited. See end of file for license. + +PFM = lia6ll + +MPMPF = \ + lockix.c \ + prmcanan.c \ + prmcix.c \ + prmclia6.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -lpthread + +include ll.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2021 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/lii3gc.gmk b/mps/code/lii3gc.gmk new file mode 100644 index 00000000000..8ad1b103e89 --- /dev/null +++ b/mps/code/lii3gc.gmk @@ -0,0 +1,55 @@ +# -*- makefile -*- +# +# lii3gc.gmk: BUILD FOR LINUX/x86/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = lii3gc + +MPMPF = \ + lockix.c \ + prmci3.c \ + prmcix.c \ + prmclii3.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -lpthread + +include gc.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/lii6gc.gmk b/mps/code/lii6gc.gmk new file mode 100644 index 00000000000..9b73752ec16 --- /dev/null +++ b/mps/code/lii6gc.gmk @@ -0,0 +1,55 @@ +# -*- makefile -*- +# +# lii6gc.gmk: BUILD FOR LINUX/x64/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = lii6gc + +MPMPF = \ + lockix.c \ + prmci6.c \ + prmcix.c \ + prmclii6.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -lpthread + +include gc.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/lii6ll.gmk b/mps/code/lii6ll.gmk new file mode 100644 index 00000000000..cf3e70ace07 --- /dev/null +++ b/mps/code/lii6ll.gmk @@ -0,0 +1,55 @@ +# -*- makefile -*- +# +# lii6ll.gmk: BUILD FOR LINUX/x64/Clang PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = lii6ll + +MPMPF = \ + lockix.c \ + prmci6.c \ + prmcix.c \ + prmclii6.c \ + protix.c \ + protsgix.c \ + pthrdext.c \ + span.c \ + thix.c \ + vmix.c + +LIBS = -lm -lpthread + +include ll.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/ll.gmk b/mps/code/ll.gmk new file mode 100644 index 00000000000..16ebf0e7874 --- /dev/null +++ b/mps/code/ll.gmk @@ -0,0 +1,83 @@ +# -*- makefile -*- +# +# ll.gmk: GNUMAKEFILE FRAGMENT FOR CLANG/LLVM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# This file is included by platform makefiles that use the Clang/LLVM +# compiler. It defines the compiler-specific variables that the +# common makefile fragment () requires. + +CC = clang +CFLAGSDEBUG = -O0 -g3 +CFLAGSOPT = -O2 -g3 + +# Warnings that might be enabled by clients . +# TODO: add -Wcomma when all our continuous integration platforms support it. 
+CFLAGSCOMPILER := \ + -Waggregate-return \ + -Wall \ + -Wcast-qual \ + -Wconversion \ + -Wduplicate-enum \ + -Werror \ + -Wextra \ + -Winline \ + -Wmissing-prototypes \ + -Wmissing-variable-declarations \ + -Wnested-externs \ + -Wpointer-arith \ + -Wshadow \ + -Wstrict-aliasing=2 \ + -Wstrict-prototypes \ + -Wunreachable-code \ + -Wwrite-strings +CFLAGSCOMPILERSTRICT := -std=c89 -pedantic + +# A different set of compiler flags for less strict compilation, for +# instance when we need to #include a third-party header file that +# won't fly with -ansi -pedantic. Use sparingly! +CFLAGSCOMPILERLAX := + +# clang -MM generates a dependency line of the form: +# thing.o : thing.c ... +# The sed line converts this into: +# //thing.o //thing.d : thing.c ... +# If interrupted, this is liable to leave a zero-length file behind. + +define gendep + $(SHELL) -ec "$(CC) $(CFLAGSSTRICT) -MM $< | \ + sed '/:/s!$*.o!$(@D)/& $(@D)/$*.d!' > $@" + [ -s $@ ] || rm -f $@ +endef + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/locbwcss.c b/mps/code/locbwcss.c new file mode 100644 index 00000000000..7cb17e11c0e --- /dev/null +++ b/mps/code/locbwcss.c @@ -0,0 +1,239 @@ +/* locbwcss.c: LOCUS BACKWARDS COMPATIBILITY STRESS TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mpscmvff.h" +#include "mpslib.h" +#include "mpsavm.h" +#include "testlib.h" +#include "mpslib.h" +#include "mps.h" + +#include /* printf */ + + +/* some constants */ + +#define TRUE 1 +#define FALSE 0 + +#define iterationCount 30 /* number of iterations */ +#define allocsPerIteration 8 /* number of allocs each iteration */ +#define chunkSize ((size_t)65536) /* our allocation chunk size */ + +#define testArenaSIZE \ + ((size_t)(chunkSize * iterationCount * allocsPerIteration * 3)) + + +#define AddressOffset(b, l) \ + ((size_t)((char *)(l) - (char *)(b))) + + +/* PoolStat -- maintain data about contiguous allocations */ + +typedef struct PoolStatStruct *PoolStat; + +typedef struct PoolStatStruct { + mps_pool_t pool; /* the pool being measured */ + size_t objSize; /* size of each allocation */ + mps_addr_t min; /* lowest address lock allocated to the pool */ + mps_addr_t max; /* highest address lock allocated to the pool */ + int ncCount; /* count of non-contiguous allocations */ + int aCount; /* count of allocations */ + int fCount; /* count of frees */ +} PoolStatStruct; + + + +static mps_addr_t 
allocObject(mps_pool_t pool, size_t size) +{ + mps_addr_t addr; + die(mps_alloc(&addr, pool, size), + "Allocate Object"); + return addr; +} + + +static void recordNewObjectStat(PoolStat stat, mps_addr_t obj) +{ + stat->aCount++; + if (obj < stat->min) { + if (AddressOffset(obj, stat->min) > stat->objSize) { + stat->ncCount++; + } + stat->min = obj; + } else if (obj > stat->max) { + if (AddressOffset(stat->max, obj) > stat->objSize) { + stat->ncCount++; + } + stat->max = obj; + } +} + +static void recordFreedObjectStat(PoolStat stat) +{ + stat->fCount++; +} + + +static void poolStatInit(PoolStat stat, mps_pool_t pool, size_t objSize) +{ + mps_addr_t s1, s2, s3; + + stat->pool = pool; + stat->objSize = objSize; + stat->ncCount = 0; + stat->aCount = 0; + stat->fCount = 0; + + /* allocate 3 half-size sentinel objects, freeing the middle one */ + /* to leave a bit of space for the control pool */ + s1 = allocObject(pool, objSize / 2); + stat->min = s1; + stat->max = s1; + stat->aCount++; + + s2 = allocObject(pool, objSize / 2); + recordNewObjectStat(stat, s2); + s3 = allocObject(pool, objSize / 2); + recordNewObjectStat(stat, s3); + + mps_free(pool, s2, objSize / 2); + recordFreedObjectStat(stat); + +} + + +static void allocMultiple(PoolStat stat) +{ + mps_addr_t objects[allocsPerIteration]; + size_t i; + + /* allocate a few objects, and record stats for them */ + for (i = 0; i < allocsPerIteration; i++) { + mps_addr_t obj = allocObject(stat->pool, stat->objSize); + recordNewObjectStat(stat, obj); + objects[i] = obj; + } + + /* free one of the objects, to make the test more interesting */ + i = rnd() % allocsPerIteration; + mps_free(stat->pool, objects[i], stat->objSize); + recordFreedObjectStat(stat); + +} + + +/* reportResults - print a report on a PoolStat */ + +static void reportResults(PoolStat stat, const char *name) +{ + printf("\nResults for %s\n", name); + printf("\n"); + printf(" Allocated %"PRIuLONGEST" objects\n", (ulongest_t)stat->aCount); + printf(" Freed 
%"PRIuLONGEST" objects\n", (ulongest_t)stat->fCount); + printf(" There were %lu non-contiguous allocations\n", + (unsigned long)stat->ncCount); + printf(" Address range from %p to %p\n", + (void *)stat->min, (void *)stat->max); + printf("\n"); +} + + +static void testInArena(mps_arena_t arena) +{ + mps_pool_t lopool, hipool; + PoolStatStruct lostruct; /* stats about lopool */ + PoolStatStruct histruct; /* stats about lopool */ + PoolStat lostat = &lostruct; + PoolStat histat = &histruct; + int i; + + die(mps_pool_create(&hipool, arena, mps_class_mvff(), + chunkSize, chunkSize, + (mps_align_t)1024, + TRUE, TRUE, TRUE), + "Create HI MFFV"); + + die(mps_pool_create(&lopool, arena, mps_class_mvff(), + chunkSize, chunkSize, + (mps_align_t)1024, + FALSE, FALSE, TRUE), + "Create LO MFFV"); + + poolStatInit(lostat, lopool, chunkSize); + poolStatInit(histat, hipool, chunkSize); + + /* iterate, allocating objects */ + for (i=0; imax > histat->min) { + error("\nFOUND PROBLEM - low range overlaps high\n"); + } else { + printf("\nNo problems detected.\n"); + } + + mps_pool_destroy(hipool); + mps_pool_destroy(lopool); +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + + testlib_init(argc, argv); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), + "mps_arena_create"); + } MPS_ARGS_END(args); + + testInArena(arena); + + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/lock.h b/mps/code/lock.h new file mode 100644 index 00000000000..87f554cadb2 --- /dev/null +++ b/mps/code/lock.h @@ -0,0 +1,171 @@ +/* lock.h: RECURSIVE LOCKS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef lock_h +#define lock_h + +#include "mpm.h" + + +#define LockSig ((Sig)0x51970CC9) /* SIGnature LOCK */ + + +/* LockSize -- Return the size of a LockStruct + * + * Supports allocation of locks. + */ + +extern size_t LockSize(void); + + +/* LockInitGlobal -- initialize global locks */ + +extern void LockInitGlobal(void); + + +/* LockInit/Finish + * + * lock points to the allocated lock structure. A lock has no + * owner after initialisation. + */ + +extern void LockInit(Lock lock); +extern void LockFinish(Lock lock); + + +/* LockClaimRecursive + * + * This is called to increase the number of claims on the lock. 
+ * LockClaimRecursive will wait until the lock is not owned by another + * thread and return with the lock owned. + * This can be called recursively. + */ + +extern void LockClaimRecursive(Lock lock); + + +/* LockReleaseRecursive + * + * This is called to reduce the number of claims on the lock. + * If the number of claims drops to zero, ownership is relinquished. + * This must not be called without possession of the lock. + */ + +extern void LockReleaseRecursive(Lock lock); + + +/* LockClaim + * + * This may only be used when the lock is not already owned by + * the calling thread. + * When used it behaves like LockClaimRecursive, but must be + * matched by a call to LockRelease. + */ + +extern void LockClaim(Lock lock); + + +/* LockRelease + * + * This must only be used to release a Lock symmetrically + * with LockClaim. It therefore should only be called with + * a single claim. + */ + +extern void LockRelease(Lock lock); + + +/* LockCheck -- Validation */ + +extern Bool LockCheck(Lock lock); + + +/* LockIsHeld -- test whether lock is held by any thread */ + +extern Bool LockIsHeld(Lock lock); + + +/* == Global locks == */ + + +/* LockClaimGlobalRecursive + * + * This is called to increase the number of claims on the recursive + * global lock. LockClaimRecursive will wait until the lock is not + * owned by another thread and return with the lock owned. + * This can be called recursively. + */ + +extern void LockClaimGlobalRecursive(void); + + +/* LockReleaseGlobalRecursive + * + * This is called to reduce the number of claims on the recursive + * global lock. If the number of claims drops to zero, ownership + * is relinquished. This must not be called without possession of + * the lock. + */ + +extern void LockReleaseGlobalRecursive(void); + + +/* LockClaimGlobal + * + * This is called to claim the binary global lock, and may only be + * used if that lock is not already owned by the calling thread. + * It must be matched by a call to LockReleaseGlobal. 
+ */ + +extern void LockClaimGlobal(void); + + +/* LockReleaseGlobal + * + * This must only be used to release the binary global lock + * symmetrically with LockClaimGlobal. + * It therefore should only be called with a single claim. + */ + +extern void LockReleaseGlobal(void); + + +/* LockSetup -- one-time lock initialization */ + +extern void LockSetup(void); + + +#endif /* lock_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/lockan.c b/mps/code/lockan.c new file mode 100644 index 00000000000..0d1de6b7b8a --- /dev/null +++ b/mps/code/lockan.c @@ -0,0 +1,170 @@ +/* lockan.c: ANSI RECURSIVE LOCKS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: This is a trivial implementation of recursive locks + * that assumes we are not running in a multi-threaded environment. + * This provides stubs for the locking code where locking is not + * applicable. The stubs provide some amount of checking. + * + * .limit: The limit on the number of recursive claims is ULONG_MAX. + */ + +#include "lock.h" +#include "mpmtypes.h" + +SRCID(lockan, "$Id$"); + + +typedef struct LockStruct { /* ANSI fake lock structure */ + Sig sig; /* design.mps.sig.field */ + unsigned long claims; /* # claims held by owner */ +} LockStruct; + + +size_t (LockSize)(void) +{ + return sizeof(LockStruct); +} + +Bool (LockCheck)(Lock lock) +{ + CHECKS(Lock, lock); + return TRUE; +} + + +void (LockInit)(Lock lock) +{ + AVER(lock != NULL); + lock->claims = 0; + lock->sig = LockSig; + AVERT(Lock, lock); +} + +void (LockFinish)(Lock lock) +{ + AVERT(Lock, lock); + AVER(lock->claims == 0); + lock->sig = SigInvalid; +} + + +void (LockClaim)(Lock lock) +{ + AVERT(Lock, lock); + AVER(lock->claims == 0); + lock->claims = 1; +} + +void (LockRelease)(Lock lock) +{ + AVERT(Lock, lock); + AVER(lock->claims == 1); + lock->claims = 0; +} + +void (LockClaimRecursive)(Lock lock) +{ + AVERT(Lock, lock); + ++lock->claims; + AVER(lock->claims>0); +} + +void (LockReleaseRecursive)(Lock lock) +{ + AVERT(Lock, lock); + AVER(lock->claims > 0); + --lock->claims; +} + +Bool (LockIsHeld)(Lock lock) +{ + AVERT(Lock, lock); + return lock->claims > 0; +} + + +/* Global locking is performed by normal locks. + * A separate lock structure is used for recursive and + * non-recursive locks so that each may be differently ordered + * with respect to client-allocated locks. 
+ */ + +static LockStruct globalLockStruct = { + LockSig, + 0 +}; + +static LockStruct globalRecursiveLockStruct = { + LockSig, + 0 +}; + +static Lock globalLock = &globalLockStruct; + +static Lock globalRecLock = &globalRecursiveLockStruct; + +void LockInitGlobal(void) +{ + globalLock->claims = 0; + LockInit(globalLock); + globalRecLock->claims = 0; + LockInit(globalRecLock); +} + +void (LockClaimGlobalRecursive)(void) +{ + LockClaimRecursive(globalRecLock); +} + +void (LockReleaseGlobalRecursive)(void) +{ + LockReleaseRecursive(globalRecLock); +} + +void (LockClaimGlobal)(void) +{ + LockClaim(globalLock); +} + +void (LockReleaseGlobal)(void) +{ + LockRelease(globalLock); +} + +void LockSetup(void) +{ + /* Nothing to do as ANSI platform does not have fork(). */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/lockcov.c b/mps/code/lockcov.c new file mode 100644 index 00000000000..0e0d462ef9d --- /dev/null +++ b/mps/code/lockcov.c @@ -0,0 +1,111 @@ +/* lockcov.c: LOCK COVERAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mps.h" +#include "mpsavm.h" +#include "mpscmfs.h" +#include "mpm.h" +#include "testlib.h" +#include "mpslib.h" + +#include /* printf */ + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_pool_t pool; + mps_addr_t p; + Lock a, b; + + testlib_init(argc, argv); + + die(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none), + "arena_create"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, LockSize()); + die(mps_pool_create_k(&pool, arena, mps_class_mfs(), args), "pool_create"); + } MPS_ARGS_END(args); + + die(mps_alloc(&p, pool, LockSize()), "alloc a"); + a = p; + die(mps_alloc(&p, pool, LockSize()), "alloc b"); + b = p; + + Insist(a != NULL); + Insist(b != NULL); + + LockInit(a); + Insist(!LockIsHeld(a)); + LockInit(b); + Insist(!LockIsHeld(b)); + LockClaimGlobal(); + LockClaim(a); + Insist(LockIsHeld(a)); + LockClaimRecursive(b); + Insist(LockIsHeld(b)); + LockClaimGlobalRecursive(); + LockReleaseGlobal(); + LockClaimGlobal(); + LockRelease(a); + Insist(!LockIsHeld(a)); + LockClaimGlobalRecursive(); + LockReleaseGlobal(); + LockClaimRecursive(b); + Insist(LockIsHeld(b)); + 
LockFinish(a); + LockReleaseRecursive(b); + Insist(LockIsHeld(b)); + LockReleaseRecursive(b); + Insist(!LockIsHeld(b)); + LockFinish(b); + LockInit(a); + LockClaim(a); + LockClaimRecursive(a); + LockReleaseGlobalRecursive(); + LockReleaseRecursive(a); + LockRelease(a); + LockFinish(a); + LockReleaseGlobalRecursive(); + + mps_free(pool, a, LockSize()); + mps_free(pool, b, LockSize()); + mps_pool_destroy(pool); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/lockix.c b/mps/code/lockix.c new file mode 100644 index 00000000000..a6555f5bb36 --- /dev/null +++ b/mps/code/lockix.c @@ -0,0 +1,306 @@ +/* lockix.c: RECURSIVE LOCKS FOR POSIX SYSTEMS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .posix: The implementation uses a POSIX interface, and should be reusable + * for many Unix-like operating systems. + * + * .freebsd: This implementation supports FreeBSD (platform + * MPS_OS_FR). + * + * .darwin: This implementation supports Darwin (macOS) (platform + * MPS_OS_XC). + * + * .design: These locks are implemented using mutexes. + * + * .recursive: Mutexes support both non-recursive and recursive + * locking, but only at initialization time. This doesn't match the + * API of MPS Lock module, which chooses at locking time, so all locks + * are made (non-recursive) errorchecking. Recursive locks are + * implemented by checking the error code. + * + * .claims: During use the claims field is updated to remember the + * number of claims acquired on a lock. This field must only be + * modified while we hold the mutex. + * + * .from: This was copied from the FreeBSD implementation (lockfr.c) + * which was itself a cleaner version of the LinuxThreads + * implementation (lockli.c). + */ + +#include "mpm.h" + +#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC) +#error "lockix.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC" +#endif + +#include "lock.h" + +#include /* see .feature.li in config.h */ +#include +#include + +SRCID(lockix, "$Id$"); + +#if defined(LOCK) + +/* LockStruct -- the MPS lock structure + * + * .lock.posix: Posix lock structure; uses a mutex. 
+ */ + +typedef struct LockStruct { + Sig sig; /* design.mps.sig.field */ + unsigned long claims; /* # claims held by owner */ + pthread_mutex_t mut; /* the mutex itself */ +} LockStruct; + + +/* LockSize -- size of a LockStruct */ + +size_t (LockSize)(void) +{ + return sizeof(LockStruct); +} + + +/* LockCheck -- check a lock */ + +Bool (LockCheck)(Lock lock) +{ + CHECKS(Lock, lock); + /* While claims can't be very large, I don't dare to put a limit on it. */ + /* There's no way to test the mutex, or check if it's held by somebody. */ + return TRUE; +} + + +/* LockInit -- initialize a lock */ + +void (LockInit)(Lock lock) +{ + pthread_mutexattr_t attr; + int res; + + AVER(lock != NULL); + lock->claims = 0; + res = pthread_mutexattr_init(&attr); + AVER(res == 0); + res = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + AVER(res == 0); + res = pthread_mutex_init(&lock->mut, &attr); + AVER(res == 0); + res = pthread_mutexattr_destroy(&attr); + AVER(res == 0); + lock->sig = LockSig; + AVERT(Lock, lock); +} + + +/* LockFinish -- finish a lock */ + +void (LockFinish)(Lock lock) +{ + int res; + + AVERT(Lock, lock); + /* Lock should not be finished while held */ + AVER(lock->claims == 0); + res = pthread_mutex_destroy(&lock->mut); + AVER(res == 0); + lock->sig = SigInvalid; +} + + +/* LockClaim -- claim a lock (non-recursive) */ + +void (LockClaim)(Lock lock) +{ + int res; + + AVERT(Lock, lock); + + res = pthread_mutex_lock(&lock->mut); + /* pthread_mutex_lock will error if we own the lock already. */ + AVER(res == 0); /* */ + + /* This should be the first claim. Now we own the mutex */ + /* it is ok to check this. 
*/ + AVER(lock->claims == 0); + lock->claims = 1; +} + + +/* LockRelease -- release a lock (non-recursive) */ + +void (LockRelease)(Lock lock) +{ + int res; + + AVERT(Lock, lock); + AVER(lock->claims == 1); /* The lock should only be held once */ + lock->claims = 0; /* Must set this before releasing the lock */ + res = pthread_mutex_unlock(&lock->mut); + /* pthread_mutex_unlock will error if we didn't own the lock. */ + AVER(res == 0); +} + + +/* LockClaimRecursive -- claim a lock (recursive) */ + +void (LockClaimRecursive)(Lock lock) +{ + int res; + + AVERT(Lock, lock); + + res = pthread_mutex_lock(&lock->mut); + /* pthread_mutex_lock will return: */ + /* 0 if we have just claimed the lock */ + /* EDEADLK if we own the lock already. */ + AVER((res == 0) == (lock->claims == 0)); + AVER((res == EDEADLK) == (lock->claims > 0)); + + ++lock->claims; + AVER(lock->claims > 0); +} + + +/* LockReleaseRecursive -- release a lock (recursive) */ + +void (LockReleaseRecursive)(Lock lock) +{ + int res; + + AVERT(Lock, lock); + AVER(lock->claims > 0); + --lock->claims; + if (lock->claims == 0) { + res = pthread_mutex_unlock(&lock->mut); + /* pthread_mutex_unlock will error if we didn't own the lock. */ + AVER(res == 0); + } +} + + +/* LockIsHeld -- test whether lock is held */ + +Bool (LockIsHeld)(Lock lock) +{ + AVERT(Lock, lock); + if (pthread_mutex_trylock(&lock->mut) == 0) { + Bool claimed = lock->claims > 0; + int res = pthread_mutex_unlock(&lock->mut); + AVER(res == 0); + return claimed; + } + return TRUE; +} + + +/* Global locks + * + * .global: The two "global" locks are statically allocated normal locks. 
+ */ + +static LockStruct globalLockStruct; +static LockStruct globalRecLockStruct; +static Lock globalLock = &globalLockStruct; +static Lock globalRecLock = &globalRecLockStruct; +static pthread_once_t isGlobalLockInit = PTHREAD_ONCE_INIT; + +void LockInitGlobal(void) +{ + LockInit(globalLock); + LockInit(globalRecLock); +} + + +/* LockClaimGlobalRecursive -- claim the global recursive lock */ + +void (LockClaimGlobalRecursive)(void) +{ + int res; + + /* Ensure the global lock has been initialized */ + res = pthread_once(&isGlobalLockInit, LockInitGlobal); + AVER(res == 0); + LockClaimRecursive(globalRecLock); +} + + +/* LockReleaseGlobalRecursive -- release the global recursive lock */ + +void (LockReleaseGlobalRecursive)(void) +{ + LockReleaseRecursive(globalRecLock); +} + + +/* LockClaimGlobal -- claim the global non-recursive lock */ + +void (LockClaimGlobal)(void) +{ + int res; + + /* Ensure the global lock has been initialized */ + res = pthread_once(&isGlobalLockInit, LockInitGlobal); + AVER(res == 0); + LockClaim(globalLock); +} + + +/* LockReleaseGlobal -- release the global non-recursive lock */ + +void (LockReleaseGlobal)(void) +{ + LockRelease(globalLock); +} + + +/* LockSetup -- one-time lock initialization */ + +void LockSetup(void) +{ + /* Claim all locks before a fork; release in the parent; + reinitialize in the child */ + pthread_atfork(GlobalsClaimAll, GlobalsReleaseAll, GlobalsReinitializeAll); +} + + +#elif defined(LOCK_NONE) +#include "lockan.c" +#else +#error "No lock configuration." +#endif + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/lockut.c b/mps/code/lockut.c new file mode 100644 index 00000000000..9eee88fb5f9 --- /dev/null +++ b/mps/code/lockut.c @@ -0,0 +1,145 @@ +/* lockut.c: LOCK UTILIZATION TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "mps.h" +#include "mpsavm.h" +#include "mpscmfs.h" +#include "mpm.h" +#include "testlib.h" +#include "testthr.h" + +#include /* printf */ + + +#define nTHREADS 4 + +static Lock lock; +static unsigned long shared, tmp; + + +static void incR(unsigned long i) +{ + LockClaimRecursive(lock); + if (i < 100) { + while(i--) { + tmp = shared; + shared = tmp + 1; + } + } else { + incR(i >> 1); + incR( (i+1) >> 1); + } + LockReleaseRecursive(lock); +} + + +static void inc(unsigned long i) +{ + incR( (i+1) >>1); + i >>= 1; + while (i) { + LockClaim(lock); + if (i > 10000) { + incR(5000); + i -= 5000; + } + tmp = shared; + shared = tmp+1; + i--; + LockRelease(lock); + } +} + + +#define COUNT 100000l +static void *thread0(void *p) +{ + unsigned i; + testlib_unused(p); + LockClaimGlobal(); + LockReleaseGlobal(); + for (i = 0; i < COUNT; ++i) + LockClaimGlobalRecursive(); + for (i = 0; i < COUNT; ++i) + LockReleaseGlobalRecursive(); + inc(COUNT); + return NULL; +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_pool_t pool; + mps_addr_t p; + testthr_t t[10]; + unsigned i; + + testlib_init(argc, argv); + + die(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none), + "arena_create"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, LockSize()); + die(mps_pool_create_k(&pool, arena, mps_class_mfs(), args), "pool_create"); + } MPS_ARGS_END(args); + + die(mps_alloc(&p, pool, LockSize()), "alloc"); + lock = p; + Insist(lock != NULL); + + LockInit(lock); + UNUSED(argc); + + shared = 0; + + for(i = 0; i < nTHREADS; i++) + testthr_create(&t[i], thread0, NULL); + + for(i = 0; i < nTHREADS; i++) + testthr_join(&t[i], NULL); + + Insist(shared == nTHREADS*COUNT); + + LockFinish(lock); + + mps_free(pool, lock, LockSize()); + mps_pool_destroy(pool); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/lockw3.c b/mps/code/lockw3.c new file mode 100644 index 00000000000..1945f0ab0aa --- /dev/null +++ b/mps/code/lockw3.c @@ -0,0 +1,232 @@ +/* lockw3.c: RECURSIVE LOCKS IN WIN32 + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .design: These are implemented using critical sections. + * See the section titled "Synchronization functions" in the Groups + * chapter of the Microsoft Win32 API Programmer's Reference. + * The "Synchronization" section of the Overview is also relevant. 
+ * + * Critical sections support recursive locking, so the implementation + * could be trivial. This implementation counts the claims to provide + * extra checking. + * + * The limit on the number of recursive claims is the max of + * ULONG_MAX and the limit imposed by critical sections, which + * is believed to be about UCHAR_MAX. + * + * During use the claims field is updated to remember the number of + * claims acquired on a lock. This field must only be modified + * while we are inside the critical section. + */ + +#include "mpm.h" + +#if !defined(MPS_OS_W3) +#error "lockw3.c is specific to MPS_OS_W3" +#endif + +#include "mpswin.h" + +SRCID(lockw3, "$Id$"); + +#if defined(LOCK) + +/* .lock.win32: Win32 lock structure; uses CRITICAL_SECTION */ +typedef struct LockStruct { + Sig sig; /* design.mps.sig.field */ + unsigned long claims; /* # claims held by the owning thread */ + CRITICAL_SECTION cs; /* Win32's recursive lock thing */ +} LockStruct; + + +size_t (LockSize)(void) +{ + return sizeof(LockStruct); +} + +Bool (LockCheck)(Lock lock) +{ + CHECKS(Lock, lock); + return TRUE; +} + +void (LockInit)(Lock lock) +{ + AVER(lock != NULL); + lock->claims = 0; + InitializeCriticalSection(&lock->cs); + lock->sig = LockSig; + AVERT(Lock, lock); +} + +void (LockFinish)(Lock lock) +{ + AVERT(Lock, lock); + /* Lock should not be finished while held */ + AVER(lock->claims == 0); + DeleteCriticalSection(&lock->cs); + lock->sig = SigInvalid; +} + +void (LockClaim)(Lock lock) +{ + AVERT(Lock, lock); + EnterCriticalSection(&lock->cs); + /* This should be the first claim. Now we are inside the + * critical section it is ok to check this. 
*/ + AVER(lock->claims == 0); /* */ + lock->claims = 1; +} + +void (LockRelease)(Lock lock) +{ + AVERT(Lock, lock); + AVER(lock->claims == 1); /* The lock should only be held once */ + lock->claims = 0; /* Must set this before leaving CS */ + LeaveCriticalSection(&lock->cs); +} + +void (LockClaimRecursive)(Lock lock) +{ + AVERT(Lock, lock); + EnterCriticalSection(&lock->cs); + ++lock->claims; + AVER(lock->claims > 0); +} + +void (LockReleaseRecursive)(Lock lock) +{ + AVERT(Lock, lock); + AVER(lock->claims > 0); + --lock->claims; + LeaveCriticalSection(&lock->cs); +} + +Bool (LockIsHeld)(Lock lock) +{ + if (TryEnterCriticalSection(&lock->cs)) { + Bool claimed = lock->claims > 0; + LeaveCriticalSection(&lock->cs); + return claimed; + } + return TRUE; +} + + +/* Global locking is performed by normal locks. + * A separate lock structure is used for recursive and + * non-recursive locks so that each may be differently ordered + * with respect to client-allocated locks. + */ + +static LockStruct globalLockStruct; +static LockStruct globalRecLockStruct; +static Lock globalLock = &globalLockStruct; +static Lock globalRecLock = &globalRecLockStruct; +static Bool globalLockInit = FALSE; /* TRUE iff initialized */ + +void LockInitGlobal(void) +{ + globalLock->claims = 0; + LockInit(globalLock); + globalRecLock->claims = 0; + LockInit(globalRecLock); + globalLockInit = TRUE; +} + +/* lockEnsureGlobalLock -- one-time initialization of global locks + * + * InitOnceExecuteOnce ensures that only one thread can be running the + * callback at a time, which allows to safely check globalLockInit. See + * + * but note that at time of writing (2018-06-27) the documentation has + * the arguments the wrong way round (parameter comes before context). 
+ */ + +static BOOL CALLBACK lockEnsureGlobalLockCallback(INIT_ONCE *init_once, void *parameter, void **context) +{ + UNUSED(init_once); + AVER(parameter == UNUSED_POINTER); + UNUSED(context); + if (!globalLockInit) { + LockInitGlobal(); + } + return TRUE; +} + +static void lockEnsureGlobalLock(void) +{ + static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT; + BOOL b = InitOnceExecuteOnce(&init_once, lockEnsureGlobalLockCallback, + UNUSED_POINTER, NULL); + AVER(b); +} + +void (LockClaimGlobalRecursive)(void) +{ + lockEnsureGlobalLock(); + AVER(globalLockInit); + LockClaimRecursive(globalRecLock); +} + +void (LockReleaseGlobalRecursive)(void) +{ + AVER(globalLockInit); + LockReleaseRecursive(globalRecLock); +} + +void (LockClaimGlobal)(void) +{ + lockEnsureGlobalLock(); + AVER(globalLockInit); + LockClaim(globalLock); +} + +void (LockReleaseGlobal)(void) +{ + AVER(globalLockInit); + LockRelease(globalLock); +} + +void LockSetup(void) +{ + /* Nothing to do as MPS does not support fork() on Windows. */ +} + +#elif defined(LOCK_NONE) +#include "lockan.c" +#else +#error "No lock configuration." +#endif + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/locus.c b/mps/code/locus.c new file mode 100644 index 00000000000..d4fb3a86c64 --- /dev/null +++ b/mps/code/locus.c @@ -0,0 +1,930 @@ +/* locus.c: LOCUS MANAGER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * DESIGN + * + * and for basic locus stuff. + * for chains. for the + * collection strategy. + */ + +#include "locus.h" +#include "ring.h" +#include "mpm.h" +#include "mpstd.h" +#include /* for DBL_MAX */ + +SRCID(locus, "$Id$"); + + +/* LocusPrefCheck -- check the consistency of a locus preference */ + +Bool LocusPrefCheck(LocusPref pref) +{ + CHECKS(LocusPref, pref); + CHECKL(BoolCheck(pref->high)); + /* zones can't be checked because it's arbitrary. */ + /* avoid can't be checked because it's arbitrary. 
*/ + return TRUE; +} + + +/* LocusPrefDefault -- return a locus preference representing the defaults */ + +static LocusPrefStruct locusPrefDefault = LocusPrefDEFAULT; + +LocusPref LocusPrefDefault(void) +{ + return &locusPrefDefault; +} + +/* LocusPrefInit -- initialise a locus preference to the defaults */ + +void LocusPrefInit(LocusPref pref) +{ + (void)mps_lib_memcpy(pref, &locusPrefDefault, sizeof(LocusPrefStruct)); +} + + +/* LocusPrefExpress -- express a locus preference */ + +void LocusPrefExpress(LocusPref pref, LocusPrefKind kind, void *p) +{ + AVERT(LocusPref, pref); + AVER(pref != &locusPrefDefault); + + switch(kind) { + case LocusPrefHIGH: + AVER(p == NULL); + pref->high = TRUE; + break; + + case LocusPrefLOW: + AVER(p == NULL); + pref->high = FALSE; + break; + + case LocusPrefZONESET: + AVER(p != NULL); + pref->zones = *(ZoneSet *)p; + break; + + default: + /* Unknown kinds are ignored for binary compatibility. */ + break; + } +} + + +/* LocusPrefDescribe -- describe a locus preference */ + +Res LocusPrefDescribe(LocusPref pref, mps_lib_FILE *stream, Count depth) +{ + Res res; + + if (!TESTT(LocusPref, pref)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "LocusPref $P {\n", (WriteFP)pref, + " high $S\n", WriteFYesNo(pref->high), + " zones $B\n", (WriteFB)pref->zones, + " avoid $B\n", (WriteFB)pref->avoid, + "} LocusPref $P\n", (WriteFP)pref, + NULL); + return res; +} + + +/* GenDescCheck -- check a GenDesc */ + +ATTRIBUTE_UNUSED +Bool GenDescCheck(GenDesc gen) +{ + CHECKS(GenDesc, gen); + /* nothing to check for zones */ + CHECKL(gen->capacity > 0); + CHECKL(gen->mortality >= 0.0); + CHECKL(gen->mortality <= 1.0); + CHECKD_NOSIG(Ring, &gen->locusRing); + CHECKD_NOSIG(Ring, &gen->segRing); + return TRUE; +} + + +/* GenParamCheck -- check consistency of generation parameters */ + +ATTRIBUTE_UNUSED +static Bool GenParamCheck(GenParamStruct *params) +{ + CHECKL(params != NULL); + CHECKL(params->capacity > 0); + 
CHECKL(params->capacity <= SizeMAX / 1024); + CHECKL(params->mortality >= 0.0); + CHECKL(params->mortality <= 1.0); + return TRUE; +} + + +/* GenDescInit -- initialize a generation in a chain */ + +static void GenDescInit(Arena arena, GenDesc gen, GenParamStruct *params) +{ + TraceId ti; + + AVER(arena != NULL); /* might not be initialized yet. */ + AVER(gen != NULL); + AVER(GenParamCheck(params)); + + gen->serial = arena->genSerial; + ++ arena->genSerial; + gen->zones = ZoneSetEMPTY; + gen->capacity = params->capacity * 1024; + gen->mortality = params->mortality; + RingInit(&gen->locusRing); + RingInit(&gen->segRing); + gen->activeTraces = TraceSetEMPTY; + for (ti = 0; ti < TraceLIMIT; ++ti) + RingInit(&gen->trace[ti].traceRing); + gen->sig = GenDescSig; + AVERT(GenDesc, gen); + EVENT5(GenInit, arena, gen, gen->serial, gen->capacity, gen->mortality); +} + + +/* GenDescFinish -- finish a generation in a chain */ + +static void GenDescFinish(Arena arena, GenDesc gen) +{ + TraceId ti; + + AVER(arena != NULL); /* might be being finished */ + AVERT(GenDesc, gen); + + EVENT3(GenFinish, arena, gen, gen->serial); + gen->sig = SigInvalid; + RingFinish(&gen->locusRing); + RingFinish(&gen->segRing); + AVER(gen->activeTraces == TraceSetEMPTY); /* */ + for (ti = 0; ti < TraceLIMIT; ++ti) + RingFinish(&gen->trace[ti].traceRing); +} + + +/* GenDescNewSize -- return effective size of generation */ + +Size GenDescNewSize(GenDesc gen) +{ + Size size = 0; + Ring node, nextNode; + + AVERT(GenDesc, gen); + + RING_FOR(node, &gen->locusRing, nextNode) { + PoolGen pgen = RING_ELT(PoolGen, genRing, node); + AVERT(PoolGen, pgen); + size += pgen->newSize; + } + return size; +} + + +/* genDescTraceStart -- notify generation of start of a trace */ + +void GenDescStartTrace(GenDesc gen, Trace trace) +{ + GenTrace genTrace; + + AVERT(GenDesc, gen); + AVERT(Trace, trace); + + AVER(!TraceSetIsMember(gen->activeTraces, trace)); + gen->activeTraces = TraceSetAdd(gen->activeTraces, trace); + 
genTrace = &gen->trace[trace->ti]; + AVER(RingIsSingle(&genTrace->traceRing)); + RingAppend(&trace->genRing, &genTrace->traceRing); + genTrace->condemned = 0; + genTrace->forwarded = 0; + genTrace->preservedInPlace = 0; +} + + +/* genDescEndTrace -- notify generation of end of a trace */ + +void GenDescEndTrace(GenDesc gen, Trace trace) +{ + GenTrace genTrace; + Size survived; + + AVERT(GenDesc, gen); + AVERT(Trace, trace); + + AVER(TraceSetIsMember(gen->activeTraces, trace)); + gen->activeTraces = TraceSetDel(gen->activeTraces, trace); + genTrace = &gen->trace[trace->ti]; + RingRemove(&genTrace->traceRing); + survived = genTrace->forwarded + genTrace->preservedInPlace; + AVER(survived <= genTrace->condemned); + + if (genTrace->condemned > 0) { + double mortality = 1.0 - (double)survived / (double)genTrace->condemned; + double alpha = LocusMortalityALPHA; + gen->mortality = gen->mortality * (1 - alpha) + mortality * alpha; + EVENT8(TraceEndGen, trace->arena, trace, gen, genTrace->condemned, + genTrace->forwarded, genTrace->preservedInPlace, mortality, + gen->mortality); + } +} + + +/* GenDescCondemned -- memory in a generation was condemned for a trace */ + +void GenDescCondemned(GenDesc gen, Trace trace, Size size) +{ + GenTrace genTrace; + + AVERT(GenDesc, gen); + AVERT(Trace, trace); + + genTrace = &gen->trace[trace->ti]; + genTrace->condemned += size; + trace->condemned += size; +} + + +/* GenDescSurvived -- memory in a generation survived a trace */ + +void GenDescSurvived(GenDesc gen, Trace trace, Size forwarded, + Size preservedInPlace) +{ + GenTrace genTrace; + + AVERT(GenDesc, gen); + AVERT(Trace, trace); + + genTrace = &gen->trace[trace->ti]; + genTrace->forwarded += forwarded; + genTrace->preservedInPlace += preservedInPlace; + trace->forwardedSize += forwarded; + trace->preservedInPlaceSize += preservedInPlace; +} + + +/* GenDescTotalSize -- return total size of generation */ + +Size GenDescTotalSize(GenDesc gen) +{ + Size size = 0; + Ring node, 
nextNode; + + AVERT(GenDesc, gen); + + RING_FOR(node, &gen->locusRing, nextNode) { + PoolGen pgen = RING_ELT(PoolGen, genRing, node); + AVERT(PoolGen, pgen); + size += pgen->totalSize; + } + return size; +} + + +/* GenDescDescribe -- describe a generation in a chain */ + +Res GenDescDescribe(GenDesc gen, mps_lib_FILE *stream, Count depth) +{ + Index i; + Res res; + Ring node, nextNode; + + if (!TESTT(GenDesc, gen)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "GenDesc $P {\n", (WriteFP)gen, + " zones $B\n", (WriteFB)gen->zones, + " capacity $U\n", (WriteFW)gen->capacity, + " mortality $D\n", (WriteFD)gen->mortality, + " activeTraces $B\n", (WriteFB)gen->activeTraces, + NULL); + if (res != ResOK) + return res; + + for (i = 0; i < NELEMS(gen->trace); ++i) { + GenTrace genTrace = &gen->trace[i]; + res = WriteF(stream, depth + 2, + "trace $U {\n", (WriteFW)i, + " condemned $U\n", (WriteFW)genTrace->condemned, + " forwarded $U\n", (WriteFW)genTrace->forwarded, + " preservedInPlace $U\n", (WriteFW)genTrace->preservedInPlace, + "}\n", NULL); + if (res != ResOK) + return res; + } + + RING_FOR(node, &gen->locusRing, nextNode) { + PoolGen pgen = RING_ELT(PoolGen, genRing, node); + res = PoolGenDescribe(pgen, stream, depth + 2); + if (res != ResOK) + return res; + } + + res = WriteF(stream, depth, "} GenDesc $P\n", (WriteFP)gen, NULL); + return res; +} + + +/* ChainInit -- initialize a generation chain */ + +static void ChainInit(ChainStruct *chain, Arena arena, GenDescStruct *gens, + Count genCount) +{ + AVER(chain != NULL); + AVERT(Arena, arena); + AVER(gens != NULL); + AVER(genCount > 0); + + chain->arena = arena; + RingInit(&chain->chainRing); + chain->genCount = genCount; + chain->gens = gens; + chain->sig = ChainSig; + + AVERT(Chain, chain); + + RingAppend(&arena->chainRing, &chain->chainRing); +} + + +/* ChainCreate -- create a generation chain */ + +Res ChainCreate(Chain *chainReturn, Arena arena, size_t genCount, + 
GenParamStruct *params) +{ + size_t i; + Size size; + Chain chain; + GenDescStruct *gens; + Res res; + void *p; + + AVER(chainReturn != NULL); + AVERT(Arena, arena); + AVER(genCount > 0); + AVER(params != NULL); + + size = sizeof(ChainStruct) + genCount * sizeof(GenDescStruct); + res = ControlAlloc(&p, arena, size); + if (res != ResOK) + return res; + chain = p; + gens = PointerAdd(p, sizeof(ChainStruct)); + + for (i = 0; i < genCount; ++i) + GenDescInit(arena, &gens[i], &params[i]); + ChainInit(chain, arena, gens, genCount); + + *chainReturn = chain; + return ResOK; +} + + +/* ChainCheck -- check a chain */ + +Bool ChainCheck(Chain chain) +{ + size_t i; + + CHECKS(Chain, chain); + CHECKU(Arena, chain->arena); + CHECKD_NOSIG(Ring, &chain->chainRing); + CHECKL(chain->genCount > 0); + for (i = 0; i < chain->genCount; ++i) { + CHECKD(GenDesc, &chain->gens[i]); + } + return TRUE; +} + + +/* ChainDestroy -- destroy a chain */ + +void ChainDestroy(Chain chain) +{ + Arena arena; + Size size; + size_t genCount; + size_t i; + + AVERT(Chain, chain); + + arena = chain->arena; + genCount = chain->genCount; + RingRemove(&chain->chainRing); + chain->sig = SigInvalid; + for (i = 0; i < genCount; ++i) + GenDescFinish(arena, &chain->gens[i]); + + RingFinish(&chain->chainRing); + + size = sizeof(ChainStruct) + genCount * sizeof(GenDescStruct); + ControlFree(arena, chain, size); +} + + +/* ChainGens -- return the number of generations in a chain */ + +size_t ChainGens(Chain chain) +{ + AVERT(Chain, chain); + return chain->genCount; +} + + +/* ChainGen -- return a generation in a chain, or the arena top generation */ + +GenDesc ChainGen(Chain chain, Index gen) +{ + AVERT(Chain, chain); + AVER(gen <= chain->genCount); + + if (gen < chain->genCount) + return &chain->gens[gen]; + else + return &chain->arena->topGen; +} + + +/* ChainDeferral -- time until next ephemeral GC for this chain */ + +double ChainDeferral(Chain chain) +{ + double time = DBL_MAX; + size_t i; + + AVERT(Chain, chain); + + 
for (i = 0; i < chain->genCount; ++i) { + double genTime; + GenDesc gen = &chain->gens[i]; + if (gen->activeTraces != TraceSetEMPTY) + return DBL_MAX; + genTime = (double)gen->capacity - (double)GenDescNewSize(&chain->gens[i]); + if (genTime < time) + time = genTime; + } + + return time; +} + + +/* ChainDescribe -- describe a chain */ + +Res ChainDescribe(Chain chain, mps_lib_FILE *stream, Count depth) +{ + Res res; + size_t i; + + if (!TESTT(Chain, chain)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "Chain $P {\n", (WriteFP)chain, + " arena $P\n", (WriteFP)chain->arena, + NULL); + if (res != ResOK) + return res; + + for (i = 0; i < chain->genCount; ++i) { + res = GenDescDescribe(&chain->gens[i], stream, depth + 2); + if (res != ResOK) + return res; + } + + res = WriteF(stream, depth, + "} Chain $P\n", (WriteFP)chain, + NULL); + return res; +} + + +/* PoolGenInit -- initialize a PoolGen */ + +Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool) +{ + /* Can't check pgen, because it's not been initialized. 
*/ + AVER(pgen != NULL); + AVERT(GenDesc, gen); + AVERT(Pool, pool); + AVER(PoolHasAttr(pool, AttrGC)); + + pgen->pool = pool; + pgen->gen = gen; + RingInit(&pgen->genRing); + pgen->segs = 0; + pgen->totalSize = 0; + pgen->freeSize = 0; + pgen->bufferedSize = 0; + pgen->newSize = 0; + pgen->oldSize = 0; + pgen->newDeferredSize = 0; + pgen->oldDeferredSize = 0; + pgen->sig = PoolGenSig; + AVERT(PoolGen, pgen); + + RingAppend(&gen->locusRing, &pgen->genRing); + return ResOK; +} + + +/* PoolGenFinish -- finish a PoolGen */ + +void PoolGenFinish(PoolGen pgen) +{ + AVERT(PoolGen, pgen); + AVER(pgen->segs == 0); + AVER(pgen->totalSize == 0); + AVER(pgen->freeSize == 0); + AVER(pgen->bufferedSize == 0); + AVER(pgen->newSize == 0); + AVER(pgen->newDeferredSize == 0); + AVER(pgen->oldSize == 0); + AVER(pgen->oldDeferredSize == 0); + + pgen->sig = SigInvalid; + RingRemove(&pgen->genRing); +} + + +/* PoolGenCheck -- check a PoolGen */ + +Bool PoolGenCheck(PoolGen pgen) +{ + CHECKS(PoolGen, pgen); + /* nothing to check about serial */ + CHECKU(Pool, pgen->pool); + CHECKU(GenDesc, pgen->gen); + CHECKD_NOSIG(Ring, &pgen->genRing); + CHECKL((pgen->totalSize == 0) == (pgen->segs == 0)); + CHECKL(pgen->totalSize >= pgen->segs * ArenaGrainSize(PoolArena(pgen->pool))); + CHECKL(pgen->totalSize == pgen->freeSize + pgen->bufferedSize + + pgen->newSize + pgen->oldSize + + pgen->newDeferredSize + pgen->oldDeferredSize); + return TRUE; +} + + +/* PoolGenAccountForAlloc -- accounting for allocation of a segment */ + +static void PoolGenAccountForAlloc(PoolGen pgen, Size size) +{ + pgen->totalSize += size; + ++ pgen->segs; + pgen->freeSize += size; +} + + +/* PoolGenAlloc -- allocate a segment in a pool generation + * + * Allocate a segment belong to klass (which must be GCSegClass or a + * subclass), attach it to the generation, and update the accounting. 
+ */ + +Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass klass, Size size, + ArgList args) +{ + LocusPrefStruct pref; + Res res; + Seg seg; + ZoneSet zones, moreZones; + Arena arena; + GenDesc gen; + + AVER(segReturn != NULL); + AVERT(PoolGen, pgen); + AVERT(SegClass, klass); + AVER(IsSubclass(klass, GCSeg)); + AVER(size > 0); + AVERT(ArgList, args); + + arena = PoolArena(pgen->pool); + gen = pgen->gen; + zones = gen->zones; + + LocusPrefInit(&pref); + pref.high = FALSE; + pref.zones = zones; + pref.avoid = ZoneSetBlacklist(arena); + res = SegAlloc(&seg, klass, &pref, size, pgen->pool, args); + if (res != ResOK) + return res; + + RingAppend(&gen->segRing, &SegGCSeg(seg)->genRing); + + moreZones = ZoneSetUnion(zones, ZoneSetOfSeg(arena, seg)); + gen->zones = moreZones; + + if (!ZoneSetSuper(zones, moreZones)) { + /* Tracking the whole zoneset for each generation gives more + * understandable telemetry than just reporting the added + * zones. */ + EVENT3(GenZoneSet, arena, gen, moreZones); + } + + PoolGenAccountForAlloc(pgen, SegSize(seg)); + + *segReturn = seg; + return ResOK; +} + + +/* PoolGenAccountForFill -- accounting for allocation within a segment + * + * Call this when the pool allocates memory to the client program via + * BufferFill. + * + * + */ + +void PoolGenAccountForFill(PoolGen pgen, Size size) +{ + AVERT(PoolGen, pgen); + + AVER(pgen->freeSize >= size); + pgen->freeSize -= size; + pgen->bufferedSize += size; +} + + +/* PoolGenAccountForEmpty -- accounting for emptying a buffer + * + * Call this when the client program returns memory to the pool via + * BufferEmpty. The deferred flag indicates whether the accounting of + * the used memory (for the purpose of scheduling collections) should + * be deferred until later. 
+ * + * + */ + +void PoolGenAccountForEmpty(PoolGen pgen, Size used, Size unused, Bool deferred) +{ + AVERT(PoolGen, pgen); + AVERT(Bool, deferred); + + AVER(pgen->bufferedSize >= used + unused); + pgen->bufferedSize -= used + unused; + if (deferred) { + pgen->newDeferredSize += used; + } else { + pgen->newSize += used; + } + pgen->freeSize += unused; +} + + +/* PoolGenAccountForAge -- accounting for condemning + * + * Call this when memory is condemned via PoolWhiten, or when + * artificially ageing memory in PoolGenFree. The size parameter + * should be the amount of memory that is being condemned for the + * first time. The deferred flag is as for PoolGenAccountForEmpty. + * + * + */ + +void PoolGenAccountForAge(PoolGen pgen, Size wasBuffered, Size wasNew, + Bool deferred) +{ + AVERT(PoolGen, pgen); + AVERT(Bool, deferred); + + AVER(pgen->bufferedSize >= wasBuffered); + pgen->bufferedSize -= wasBuffered; + if (deferred) { + AVER(pgen->newDeferredSize >= wasNew); + pgen->newDeferredSize -= wasNew; + pgen->oldDeferredSize += wasBuffered + wasNew; + } else { + AVER(pgen->newSize >= wasNew); + pgen->newSize -= wasNew; + pgen->oldSize += wasBuffered + wasNew; + } +} + + +/* PoolGenAccountForReclaim -- accounting for reclaiming + * + * Call this when reclaiming memory, passing the amount of memory that + * was reclaimed. The deferred flag is as for PoolGenAccountForEmpty. 
+ * + * + */ + +void PoolGenAccountForReclaim(PoolGen pgen, Size reclaimed, Bool deferred) +{ + AVERT(PoolGen, pgen); + AVERT(Bool, deferred); + + if (deferred) { + AVER(pgen->oldDeferredSize >= reclaimed); + pgen->oldDeferredSize -= reclaimed; + } else { + AVER(pgen->oldSize >= reclaimed); + pgen->oldSize -= reclaimed; + } + pgen->freeSize += reclaimed; +} + + +/* PoolGenUndefer -- finish deferring accounting + * + * Call this when exiting ramp mode, passing the amount of old + * (condemned at least once) and new (never condemned) memory whose + * accounting was deferred (for example, during a ramp). + * + * + */ + +void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize) +{ + AVERT(PoolGen, pgen); + AVER(pgen->oldDeferredSize >= oldSize); + pgen->oldDeferredSize -= oldSize; + pgen->oldSize += oldSize; + AVER(pgen->newDeferredSize >= newSize); + pgen->newDeferredSize -= newSize; + pgen->newSize += newSize; +} + + +/* PoolGenAccountForSegSplit -- accounting for splitting a segment */ + +void PoolGenAccountForSegSplit(PoolGen pgen) +{ + AVERT(PoolGen, pgen); + AVER(pgen->segs >= 1); /* must be at least one segment to split */ + ++ pgen->segs; +} + + +/* PoolGenAccountForSegMerge -- accounting for merging a segment */ + +void PoolGenAccountForSegMerge(PoolGen pgen) +{ + AVERT(PoolGen, pgen); + AVER(pgen->segs >= 2); /* must be at least two segments to merge */ + -- pgen->segs; +} + + +/* PoolGenAccountForFree -- accounting for the freeing of a segment */ + +static void PoolGenAccountForFree(PoolGen pgen, Size size, + Size oldSize, Size newSize, + Bool deferred) +{ + /* Pretend to age and reclaim the contents of the segment to ensure + * that the entire segment is accounted as free. 
*/ + PoolGenAccountForAge(pgen, 0, newSize, deferred); + PoolGenAccountForReclaim(pgen, oldSize + newSize, deferred); + + AVER(pgen->totalSize >= size); + pgen->totalSize -= size; + AVER(pgen->segs > 0); + -- pgen->segs; + AVER(pgen->freeSize >= size); + pgen->freeSize -= size; +} + + +/* PoolGenFree -- free a segment and update accounting + * + * Pass the amount of memory in the segment that is accounted as free, + * old, or new, respectively. The deferred flag is as for + * PoolGenAccountForEmpty. + * + * + */ + +void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize, + Size newSize, Bool deferred) +{ + Size size; + + AVERT(PoolGen, pgen); + AVERT(Seg, seg); + + size = SegSize(seg); + AVER(freeSize + oldSize + newSize == size); + + PoolGenAccountForFree(pgen, size, oldSize, newSize, deferred); + + RingRemove(&SegGCSeg(seg)->genRing); + + SegFree(seg); +} + + +/* PoolGenDescribe -- describe a PoolGen */ + +Res PoolGenDescribe(PoolGen pgen, mps_lib_FILE *stream, Count depth) +{ + Res res; + PoolClass poolClass; + + if (!TESTT(PoolGen, pgen)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + poolClass = ClassOfPoly(Pool, pgen->pool); + + res = WriteF(stream, depth, + "PoolGen $P {\n", (WriteFP)pgen, + " pool $P ($U) \"$S\"\n", + (WriteFP)pgen->pool, (WriteFU)pgen->pool->serial, + (WriteFS)ClassName(poolClass), + " segs $U\n", (WriteFU)pgen->segs, + " totalSize $U\n", (WriteFU)pgen->totalSize, + " freeSize $U\n", (WriteFU)pgen->freeSize, + " bufferedSize $U\n", (WriteFU)pgen->bufferedSize, + " oldSize $U\n", (WriteFU)pgen->oldSize, + " oldDeferredSize $U\n", (WriteFU)pgen->oldDeferredSize, + " newSize $U\n", (WriteFU)pgen->newSize, + " newDeferredSize $U\n", (WriteFU)pgen->newDeferredSize, + "} PoolGen $P\n", (WriteFP)pgen, + NULL); + return res; +} + + +/* LocusInit -- initialize the locus module */ + +void LocusInit(Arena arena) +{ + GenParamStruct params; + + AVER(arena != NULL); /* not initialized yet. 
*/ + + params.capacity = 1; /* unused since top generation is not on any chain */ + params.mortality = 0.5; + + GenDescInit(arena, &arena->topGen, &params); + EventLabelPointer(&arena->topGen, EventInternString("TopGen")); +} + + +/* LocusFinish -- finish the locus module */ + +void LocusFinish(Arena arena) +{ + /* Can't check arena, because it's being finished. */ + AVER(arena != NULL); + GenDescFinish(arena, &arena->topGen); +} + + +/* LocusCheck -- check the locus module */ + +Bool LocusCheck(Arena arena) +{ + /* Can't check arena, because this is part of ArenaCheck. */ + CHECKD(GenDesc, &arena->topGen); + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/locus.h b/mps/code/locus.h new file mode 100644 index 00000000000..10867596d77 --- /dev/null +++ b/mps/code/locus.h @@ -0,0 +1,158 @@ +/* locus.h: GENERATION CHAINS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef locus_h +#define locus_h + +#include "mpmtypes.h" +#include "ring.h" + + +/* GenParamStruct -- structure for specifying generation parameters */ +/* .gen-param: This structure must match . 
*/ + +typedef struct GenParamStruct *GenParam; + +typedef struct GenParamStruct { + Size capacity; /* capacity in kB */ + double mortality; /* predicted mortality */ +} GenParamStruct; + + +/* GenTrace -- per-generation per-trace structure */ + +typedef struct GenTraceStruct *GenTrace; + +typedef struct GenTraceStruct { + RingStruct traceRing; /* link in ring of generations condemned by trace */ + Size condemned; /* size of objects condemned by the trace */ + Size forwarded; /* size of objects that were forwarded by the trace */ + Size preservedInPlace; /* size of objects preserved in place by the trace */ +} GenTraceStruct; + + +/* GenDesc -- descriptor of a generation in a chain */ + +typedef struct GenDescStruct *GenDesc; + +#define GenDescSig ((Sig)0x5199E4DE) /* SIGnature GEN DEsc */ + +typedef struct GenDescStruct { + Sig sig; /* design.mps.sig.field */ + Serial serial; /* serial number within arena */ + ZoneSet zones; /* zoneset for this generation */ + Size capacity; /* capacity in bytes */ + double mortality; /* moving average mortality */ + RingStruct locusRing; /* Ring of all PoolGen's in this GenDesc (locus) */ + RingStruct segRing; /* Ring of GCSegs in this generation */ + TraceSet activeTraces; /* set of traces collecting this generation */ + GenTraceStruct trace[TraceLIMIT]; +} GenDescStruct; + + +/* PoolGen -- descriptor of a generation in a pool */ + +#define PoolGenSig ((Sig)0x519B009E) /* SIGnature POOl GEn */ + +typedef struct PoolGenStruct { + Sig sig; /* design.mps.sig.field */ + Pool pool; /* pool this belongs to */ + GenDesc gen; /* generation this belongs to */ + /* link in ring of all PoolGen's in this GenDesc (locus) */ + RingStruct genRing; + + /* Accounting of memory in this generation for this pool */ + Size segs; /* number of segments */ + Size totalSize; /* total (sum of segment sizes) */ + Size freeSize; /* unused (free or lost to fragmentation) */ + Size bufferedSize; /* held in buffers but not condemned yet */ + Size newSize; /* 
allocated since last collection */ + Size oldSize; /* allocated prior to last collection */ + Size newDeferredSize; /* new (but deferred) */ + Size oldDeferredSize; /* old (but deferred) */ +} PoolGenStruct; + + +/* Chain -- a generation chain */ + +#define ChainSig ((Sig)0x519C8A14) /* SIGnature CHAIN */ + +typedef struct mps_chain_s { + Sig sig; /* design.mps.sig.field */ + Arena arena; + RingStruct chainRing; /* list of chains in the arena */ + size_t genCount; /* number of generations */ + GenDesc gens; /* the array of generations */ +} ChainStruct; + + +extern Bool GenDescCheck(GenDesc gen); +extern Size GenDescNewSize(GenDesc gen); +extern Size GenDescTotalSize(GenDesc gen); +extern void GenDescStartTrace(GenDesc gen, Trace trace); +extern void GenDescEndTrace(GenDesc gen, Trace trace); +extern void GenDescCondemned(GenDesc gen, Trace trace, Size size); +extern void GenDescSurvived(GenDesc gen, Trace trace, Size forwarded, Size preservedInPlace); +extern Res GenDescDescribe(GenDesc gen, mps_lib_FILE *stream, Count depth); +#define GenDescOfTraceRing(node, tr) PARENT(GenDescStruct, trace, RING_ELT(GenTrace, traceRing, node) - (tr)->ti) + +extern Res ChainCreate(Chain *chainReturn, Arena arena, size_t genCount, + GenParam params); +extern void ChainDestroy(Chain chain); +extern Bool ChainCheck(Chain chain); + +extern double ChainDeferral(Chain chain); +extern size_t ChainGens(Chain chain); +extern GenDesc ChainGen(Chain chain, Index gen); +extern Res ChainDescribe(Chain chain, mps_lib_FILE *stream, Count depth); + +extern Bool PoolGenCheck(PoolGen pgen); +extern Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool); +extern void PoolGenFinish(PoolGen pgen); +extern Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass klass, + Size size, ArgList args); +extern void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize, + Size newSize, Bool deferred); +extern void PoolGenAccountForFill(PoolGen pgen, Size size); +extern void 
PoolGenAccountForEmpty(PoolGen pgen, Size used, Size unused, Bool deferred); +extern void PoolGenAccountForAge(PoolGen pgen, Size wasBuffered, Size wasNew, Bool deferred); +extern void PoolGenAccountForReclaim(PoolGen pgen, Size reclaimed, Bool deferred); +extern void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize); +extern void PoolGenAccountForSegSplit(PoolGen pgen); +extern void PoolGenAccountForSegMerge(PoolGen pgen); +extern Res PoolGenDescribe(PoolGen gen, mps_lib_FILE *stream, Count depth); + +#endif /* locus_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/locusss.c b/mps/code/locusss.c new file mode 100644 index 00000000000..8c8e3a25a26 --- /dev/null +++ b/mps/code/locusss.c @@ -0,0 +1,277 @@ +/* locusss.c: LOCUS STRESS TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mpscmvff.h" +#include "mpslib.h" +#include "mpsavm.h" +#include "testlib.h" +#include "mpslib.h" +#include "mps.h" + +#include <stdio.h> /* printf */ + + +/* some constants */ + +#define TRUE 1 +#define FALSE 0 + +#define iterationCount 30 /* number of iterations */ +#define contigAllocs 8 /* number of allocs each iteration */ +#define chunkSize ((size_t)65536) /* our allocation chunk size */ + +#define smallArenaSize \ + ((size_t)(chunkSize * iterationCount * contigAllocs * 2)) + + +#define AddressOffset(b, l) \ + ((size_t)((char *)(l) - (char *)(b))) + + +/* PoolStat -- maintain data about contiguous allocations */ + +typedef struct PoolStatStruct *PoolStat; + +typedef struct PoolStatStruct { + mps_pool_t pool; /* the pool being measured */ + size_t objSize; /* size of each allocation */ + mps_addr_t min; /* lowest address block allocated to the pool */ + mps_addr_t max; /* highest address block allocated to the pool */ + int ncCount; /* count of non-contiguous allocations */ + int aCount; /* count of allocations */ + int fCount; /* count of frees */ +} PoolStatStruct; + + + +static mps_addr_t allocObject(mps_pool_t pool, size_t size) +{ + mps_addr_t addr; + die(mps_alloc(&addr, pool, size), + "Allocate Object"); + return addr; +} + +static void recordNewObjectStat(PoolStat stat, mps_addr_t obj) +{ + stat->aCount++; + if (obj < stat->min) { + if (AddressOffset(obj, stat->min) > stat->objSize) { + stat->ncCount++; + } + stat->min = obj; + } else if (obj > stat->max) { + if (AddressOffset(stat->max, obj) > stat->objSize) { + stat->ncCount++; + } + stat->max = obj; + } +} + +static void recordFreedObjectStat(PoolStat stat) +{ + stat->fCount++; +} + + +static void 
poolStatInit(PoolStat stat, mps_pool_t pool, size_t objSize) +{ + mps_addr_t s1, s2, s3; + + stat->pool = pool; + stat->objSize = objSize; + stat->ncCount = 0; + stat->aCount = 0; + stat->fCount = 0; + + /* allocate 3 half-size sentinel objects, freeing the middle one */ + /* to leave a bit of space for the control pool */ + s1 = allocObject(pool, objSize / 2); + stat->min = s1; + stat->max = s1; + stat->aCount++; + + s2 = allocObject(pool, objSize / 2); + recordNewObjectStat(stat, s2); + s3 = allocObject(pool, objSize / 2); + recordNewObjectStat(stat, s3); + + mps_free(pool, s2, objSize / 2); + recordFreedObjectStat(stat); + +} + + +static mps_res_t allocMultiple(PoolStat stat) +{ + mps_addr_t objects[contigAllocs]; + size_t i; + + /* allocate a few objects, and record stats for them */ + for (i = 0; i < contigAllocs; i++) { + mps_addr_t obj; + mps_res_t res = mps_alloc(&obj, stat->pool, stat->objSize); + if (res != MPS_RES_OK) + return res; + recordNewObjectStat(stat, obj); + objects[i] = obj; + } + + /* free one of the objects, to make the test more interesting */ + i = rnd() % contigAllocs; + mps_free(stat->pool, objects[i], stat->objSize); + recordFreedObjectStat(stat); + + return MPS_RES_OK; +} + + +/* reportResults - print a report on a PoolStat */ + +static void reportResults(PoolStat stat, const char *name) +{ + printf("\nResults for "); + printf("%s", name); + printf("\n"); + printf(" Allocated %"PRIuLONGEST" objects\n", (ulongest_t)stat->aCount); + printf(" Freed %"PRIuLONGEST" objects\n", (ulongest_t)stat->fCount); + printf(" There were %lu non-contiguous allocations\n", + (unsigned long)stat->ncCount); + printf(" Address range from %p to %p\n", stat->min, stat->max); + printf("\n"); +} + + +static void testInArena(mps_arena_t arena, + mps_bool_t failcase, + mps_bool_t usefulFailcase) +{ + mps_pool_t lopool, hipool, temppool; + PoolStatStruct lostruct; /* stats about lopool */ + PoolStatStruct histruct; /* stats about hipool */ + PoolStatStruct 
tempstruct; /* stats about temppool */ + PoolStat lostat = &lostruct; + PoolStat histat = &histruct; + PoolStat tempstat = &tempstruct; + int i; + + die(mps_pool_create(&hipool, arena, mps_class_mvff(), + chunkSize, chunkSize, (size_t)1024, + TRUE, TRUE, TRUE), + "Create HI MFFV"); + + die(mps_pool_create(&lopool, arena, mps_class_mvff(), + chunkSize, chunkSize, (size_t)1024, + FALSE, FALSE, TRUE), + "Create LO MFFV"); + + die(mps_pool_create_k(&temppool, arena, mps_class_mvff(), + mps_args_none), + "Create TEMP"); + + if(failcase) { + if(usefulFailcase) { + /* describe a useful failure case */ + } else { + /* describe a misleading failure case */ + } + } + + poolStatInit(lostat, lopool, chunkSize); + poolStatInit(histat, hipool, chunkSize); + poolStatInit(tempstat, temppool, chunkSize); + + /* iterate, allocating objects */ + for (i=0; i. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/locv.c b/mps/code/locv.c new file mode 100644 index 00000000000..9a0627c1b25 --- /dev/null +++ b/mps/code/locv.c @@ -0,0 +1,222 @@ +/* locv.c: LEAF OBJECT POOL CLASS COVERAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This is (not much of) a coverage test for the Leaf Object + * pool (PoolClassLO). + */ + +#include "testlib.h" +#include "mpslib.h" +#include "mps.h" +#include "mpsclo.h" +#include "mpsavm.h" + +#include <stdio.h> /* printf */ + + +#define testArenaSIZE ((size_t)16<<20) + +static mps_res_t scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit); +static mps_addr_t skip(mps_addr_t object); +static void move(mps_addr_t object, mps_addr_t to); +static mps_addr_t isMoved(mps_addr_t object); +static void copy(mps_addr_t old, mps_addr_t new); +static void pad(mps_addr_t base, size_t size); + +static void stepper(mps_addr_t addr, mps_fmt_t fmt, mps_pool_t pool, + void *p, size_t s); + +static mps_fmt_A_s locv_fmt = + { + (mps_align_t)0, /* .fmt.align.delayed: to be filled in */ + scan, + skip, + copy, + move, + isMoved, + pad + }; + +static mps_addr_t roots[4]; + + +/* area_scan -- area scanning function for mps_pool_walk */ + +static mps_res_t area_scan(mps_ss_t ss, void *base, void *limit, void *closure) +{ + unsigned long *count = closure; + testlib_unused(ss); + while (base < limit) { + mps_addr_t prev = base; + ++ *count; + base = skip(base); + 
Insist(prev < base); + } + Insist(base == limit); + return MPS_RES_OK; +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_pool_t pool; + mps_fmt_t format; + mps_ap_t ap; + mps_addr_t p; + mps_root_t root; + + testlib_init(argc, argv); + + locv_fmt.align = sizeof(void *); /* .fmt.align.delayed */ + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + die(mps_root_create_table(&root, arena, mps_rank_exact(), + (mps_rm_t)0, + roots, (sizeof(roots)/sizeof(*roots))), + "RootCreate"); + + die(mps_fmt_create_A(&format, arena, &locv_fmt), "FormatCreate"); + + die(mps_pool_create(&pool, arena, mps_class_lo(), format), "LOCreate"); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "APCreate"); + + die(mps_reserve(&p, ap, sizeof(void *)), "mps_reserve min"); + *(mps_word_t *)p = sizeof(void *); + cdie(mps_commit(ap, p, sizeof(void *)), "commit min"); + + die(mps_reserve(&roots[1], ap, 2*sizeof(void *)), "mps_reserve 2*min"); + p = roots[1]; + *(mps_word_t *)p = 2*sizeof(void *); + cdie(mps_commit(ap, p, 2*sizeof(void *)), "commit 2*min"); + + die(mps_reserve(&p, ap, (size_t)4096), "mps_reserve 4096"); + *(mps_word_t *)p = 4096; + cdie(mps_commit(ap, p, (size_t)4096), "commit 4096"); + + die(mps_reserve(&p, ap, sizeof(void *)), "mps_reserve last"); + *(mps_word_t *)p = sizeof(void *); + cdie(mps_commit(ap, p, sizeof(void *)), "commit last"); + + mps_arena_park(arena); + { + size_t count = 0; + mps_arena_formatted_objects_walk(arena, stepper, &count, 0); + cdie(count == 4, "stepped 4 objects"); + } + { + size_t count = 0; + die(mps_pool_walk(pool, area_scan, &count), "mps_pool_walk"); + cdie(count == 4, "walk 4 objects"); + } + + mps_ap_destroy(ap); + mps_pool_destroy(pool); + mps_fmt_destroy(format); + mps_root_destroy(root); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +static mps_res_t scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) +{ + 
testlib_unused(ss); + testlib_unused(base); + testlib_unused(limit); + die(MPS_RES_FAIL, "Error in Test, scan called unexpectedly"); + return MPS_RES_FAIL; +} + + +static mps_addr_t skip(mps_addr_t object) +{ + size_t bytes; + + bytes = (size_t)(*(mps_word_t *)object); + + return (mps_addr_t)((char *)object + bytes); +} + + +static void move(mps_addr_t object, mps_addr_t to) +{ + testlib_unused(object); + testlib_unused(to); + cdie(0, "move"); +} + + +static mps_addr_t isMoved(mps_addr_t object) +{ + testlib_unused(object); + cdie(0, "isMoved"); + return (mps_addr_t)NULL; +} + + +static void copy(mps_addr_t old, mps_addr_t new) +{ + testlib_unused(old); + testlib_unused(new); + cdie(0, "copy"); +} + + +static void pad(mps_addr_t base, size_t size) +{ + testlib_unused(base); + testlib_unused(size); + cdie(0, "pad"); +} + +static void stepper(mps_addr_t addr, mps_fmt_t fmt, mps_pool_t pool, + void *p, size_t s) +{ + size_t *pcount; + + testlib_unused(addr); + testlib_unused(fmt); + testlib_unused(pool); + testlib_unused(s); + + pcount = p; + *pcount += 1; +} + + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/message.c b/mps/code/message.c new file mode 100644 index 00000000000..476127db99f --- /dev/null +++ b/mps/code/message.c @@ -0,0 +1,455 @@ +/* message.c: MPS/CLIENT MESSAGES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * DESIGN + * + * .design: (it really exists). + * + * PURPOSE + * + * .purpose: Provide the generic part of the MPS / Client message + * interface. Messages are instances of Message Classes; much of the + * "real work" goes on in the modules that provide the actual messages. + * + * TODO: Consider using protocol classes for messages. + */ + +#include "bt.h" +#include "mpm.h" + +SRCID(message, "$Id$"); + + +/* Maps from a Ring pointer to the message */ +#define MessageNodeMessage(node) \ + PARENT(MessageStruct, queueRing, node) + + +/* forward declarations */ +static Bool MessageTypeEnabled(Arena arena, MessageType type); +static void MessageDelete(Message message); + + +/* Internal (MPM) Interface -- functions for message originator + * + */ + + +Bool MessageTypeCheck(MessageType type) +{ + CHECKL(type < MessageTypeLIMIT); + UNUSED(type); /* */ + + return TRUE; +} + +/* See .message.clocked. Currently finalization messages are the */ +/* only ones that can be numerous. 
*/ +#define MessageIsClocked(message) \ + ((message)->klass->type != MessageTypeFINALIZATION) + +Bool MessageCheck(Message message) +{ + CHECKS(Message, message); + CHECKU(Arena, message->arena); + CHECKD(MessageClass, message->klass); + CHECKD_NOSIG(Ring, &message->queueRing); + /* postedClock is uncheckable for clocked message types, */ + /* but must be 0 for unclocked message types: */ + CHECKL(MessageIsClocked(message) || (message->postedClock == 0)); + + return TRUE; +} + +Bool MessageClassCheck(MessageClass klass) +{ + CHECKS(MessageClass, klass); + CHECKL(klass->name != NULL); + CHECKL(MessageTypeCheck(klass->type)); + CHECKL(FUNCHECK(klass->delete)); + CHECKL(FUNCHECK(klass->finalizationRef)); + CHECKL(FUNCHECK(klass->gcLiveSize)); + CHECKL(FUNCHECK(klass->gcCondemnedSize)); + CHECKL(FUNCHECK(klass->gcNotCondemnedSize)); + CHECKL(FUNCHECK(klass->gcStartWhy)); + CHECKL(klass->endSig == MessageClassSig); + + return TRUE; +} + +void MessageInit(Arena arena, Message message, MessageClass klass, + MessageType type) +{ + AVERT(Arena, arena); + AVER(message != NULL); + AVERT(MessageClass, klass); + AVERT(MessageType, type); + + message->arena = arena; + message->klass = klass; + RingInit(&message->queueRing); + message->postedClock = 0; + message->sig = MessageSig; + + AVERT(Message, message); + AVER(MessageGetType(message) == type); +} + +void MessageFinish(Message message) +{ + AVERT(Message, message); + AVER(RingIsSingle(&message->queueRing)); + + message->sig = SigInvalid; + RingFinish(&message->queueRing); +} + +Arena MessageArena(Message message) +{ + AVERT(Message, message); + + return message->arena; +} + +Bool MessageOnQueue(Message message) +{ + AVERT(Message, message); + + /* message is on queue if and only if its ring is not a singleton. 
*/ + return !RingIsSingle(&message->queueRing); +} + +/* Post a message to the arena's queue of pending messages */ +void MessagePost(Arena arena, Message message) +{ + AVERT(Arena, arena); + AVERT(Message, message); + + /* queueRing field must be a singleton, see */ + /* */ + AVER(!MessageOnQueue(message)); + if(MessageTypeEnabled(arena, MessageGetType(message))) { + /* .message.clocked: Reading the clock with ClockNow() */ + /* involves an mpslib call, so we avoid it for message */ + /* types that may be numerous. */ + if(MessageIsClocked(message)) { + message->postedClock = ClockNow(); + } + RingAppend(&arena->messageRing, &message->queueRing); + } else { + /* discard message immediately if client hasn't enabled that type */ + MessageDiscard(arena, message); + } +} + +/* Return the message at the head of the arena's queue */ +static Message MessageHead(Arena arena) +{ + AVERT(Arena, arena); + AVER(!RingIsSingle(&arena->messageRing)); + + return MessageNodeMessage(RingNext(&arena->messageRing)); +} + +/* Delete the message at the head of the queue (helper function). */ +static void MessageDeleteHead(Arena arena) +{ + Message message; + + AVERT(Arena, arena); + AVER(!RingIsSingle(&arena->messageRing)); + + message = MessageHead(arena); + AVERT(Message, message); + RingRemove(&message->queueRing); + MessageDelete(message); +} + +/* Empty the queue by discarding all messages */ +void MessageEmpty(Arena arena) +{ + AVERT(Arena, arena); + + while(!RingIsSingle(&arena->messageRing)) { + MessageDeleteHead(arena); + } +} + + +/* Delivery (Client) Interface -- functions for recipient + * + * Most of these functions are exposed through the external MPS + * interface. 
+ */ + + +static Bool MessageTypeEnabled(Arena arena, MessageType type) +{ + AVERT(Arena, arena); + AVERT(MessageType, type); + + return BTGet(arena->enabledMessageTypes, type); +} + +void MessageTypeEnable(Arena arena, MessageType type) +{ + AVERT(Arena, arena); + AVERT(MessageType, type); + + BTSet(arena->enabledMessageTypes, type); +} + +void MessageTypeDisable(Arena arena, MessageType type) +{ + Message message; + + AVERT(Arena, arena); + AVERT(MessageType, type); + + /* Flush existing messages of this type */ + while(MessageGet(&message, arena, type)) { + MessageDelete(message); + } + + BTRes(arena->enabledMessageTypes, type); +} + +/* Any messages on the queue? */ +Bool MessagePoll(Arena arena) +{ + AVERT(Arena, arena); + + if(RingIsSingle(&arena->messageRing)) { + return FALSE; + } else { + return TRUE; + } +} + +/* Return the type of the message at the head of the queue, if any */ +Bool MessageQueueType(MessageType *typeReturn, Arena arena) +{ + Message message; + MessageType type; + + AVER(typeReturn != NULL); + AVERT(Arena, arena); + + if(!MessagePoll(arena)) { + return FALSE; + } + message = MessageHead(arena); + type = MessageGetType(message); + *typeReturn = type; + + return TRUE; +} + +/* Get next message of specified type, removing it from the queue */ +Bool MessageGet(Message *messageReturn, Arena arena, MessageType type) +{ + Ring node, next; + + AVER(messageReturn != NULL); + AVERT(Arena, arena); + AVERT(MessageType, type); + + RING_FOR(node, &arena->messageRing, next) { + Message message = RING_ELT(Message, queueRing, node); + if(MessageGetType(message) == type) { + RingRemove(&message->queueRing); + *messageReturn = message; + return TRUE; + } + } + return FALSE; +} + +/* Discard a message (recipient has finished using it). 
*/ +void MessageDiscard(Arena arena, Message message) +{ + AVERT(Arena, arena); + AVERT(Message, message); + + AVER(!MessageOnQueue(message)); + + MessageDelete(message); +} + + +/* Message Methods, Generic + * + * (Some of these dispatch on message->klass). + */ + + +/* Return the type of a message */ +MessageType MessageGetType(Message message) +{ + MessageClass klass; + AVERT(Message, message); + + klass = message->klass; + AVERT(MessageClass, klass); + + return klass->type; +} + +/* Return the class of a message */ +MessageClass MessageGetClass(Message message) +{ + AVERT(Message, message); + + return message->klass; +} + +Clock MessageGetClock(Message message) +{ + AVERT(Message, message); + + return message->postedClock; +} + +static void MessageDelete(Message message) +{ + AVERT(Message, message); + + (*message->klass->delete)(message); +} + + +/* Message Method Dispatchers, Type-specific + * + */ + + +void MessageFinalizationRef(Ref *refReturn, Arena arena, + Message message) +{ + AVER(refReturn != NULL); + AVERT(Arena, arena); + AVERT(Message, message); + AVER(MessageGetType(message) == MessageTypeFINALIZATION); + + (*message->klass->finalizationRef)(refReturn, arena, message); +} + +Size MessageGCLiveSize(Message message) +{ + AVERT(Message, message); + AVER(MessageGetType(message) == MessageTypeGC); + + return (*message->klass->gcLiveSize)(message); +} + +Size MessageGCCondemnedSize(Message message) +{ + AVERT(Message, message); + AVER(MessageGetType(message) == MessageTypeGC); + + return (*message->klass->gcCondemnedSize)(message); +} + +Size MessageGCNotCondemnedSize(Message message) +{ + AVERT(Message, message); + AVER(MessageGetType(message) == MessageTypeGC); + + return (*message->klass->gcNotCondemnedSize)(message); +} + +const char *MessageGCStartWhy(Message message) +{ + AVERT(Message, message); + AVER(MessageGetType(message) == MessageTypeGCSTART); + + return (*message->klass->gcStartWhy)(message); +} + + +/* Message Method Stubs, Type-specific 
+ * + */ + + +void MessageNoFinalizationRef(Ref *refReturn, Arena arena, + Message message) +{ + AVER(refReturn != NULL); + AVERT(Arena, arena); + AVERT(Message, message); + + NOTREACHED; +} + +Size MessageNoGCLiveSize(Message message) +{ + AVERT(Message, message); + UNUSED(message); + + NOTREACHED; + + return (Size)0; +} + +Size MessageNoGCCondemnedSize(Message message) +{ + AVERT(Message, message); + UNUSED(message); + + NOTREACHED; + + return (Size)0; +} + +Size MessageNoGCNotCondemnedSize(Message message) +{ + AVERT(Message, message); + UNUSED(message); + + NOTREACHED; + + return (Size)0; +} + +const char *MessageNoGCStartWhy(Message message) +{ + AVERT(Message, message); + UNUSED(message); + + NOTREACHED; + + return NULL; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/messtest.c b/mps/code/messtest.c new file mode 100644 index 00000000000..02337d3e746 --- /dev/null +++ b/mps/code/messtest.c @@ -0,0 +1,304 @@ +/* messtest.c: MESSAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mpm.h" +#include "mpsavm.h" +#include "mps.h" +#include "testlib.h" +#include "mpslib.h" + +#include /* printf */ + +SRCID(messtest, "$Id$"); + + +/* Basic infrastructure for creating dummy messages */ + +static void dfMessageDelete(Message message) +{ + Arena arena; + arena = MessageArena(message); + ControlFree(arena, (void *)message, sizeof(MessageStruct)); +} + + +/* DFMessageClassStruct -- dummy finalization message class */ + +static MessageClassStruct DFMessageClassStruct = { + MessageClassSig, /* sig */ + "DummyFinal", /* name */ + MessageTypeFINALIZATION, /* Message Type */ + dfMessageDelete, /* Delete */ + MessageNoFinalizationRef, /* FinalizationRef */ + MessageNoGCLiveSize, /* GCLiveSize */ + MessageNoGCCondemnedSize, /* GCCondemnedSize */ + MessageNoGCNotCondemnedSize, /* GCNotCondemnedSize */ + MessageNoGCStartWhy, /* GCStartWhy */ + MessageClassSig /* */ +}; + + +/* DGCMessageClassStruct -- dummy GC message class */ + +static MessageClassStruct DGCMessageClassStruct = { + MessageClassSig, /* sig */ + "DummyGC", /* name */ + MessageTypeGC, /* Message Type */ + dfMessageDelete, /* Delete */ + MessageNoFinalizationRef, 
/* FinalizationRef */ + MessageNoGCLiveSize, /* GCLiveSize */ + MessageNoGCCondemnedSize, /* GCCondemnedSize */ + MessageNoGCNotCondemnedSize, /* GCNoteCondemnedSize */ + MessageNoGCStartWhy, /* GCStartWhy */ + MessageClassSig /* */ +}; + + +static void checkNoMessages(Arena arena) +{ + cdie(!MessagePoll(arena), "Queue not empty"); +} + + +static void topMessageType(MessageType *typeReturn, Arena arena) +{ + cdie(MessageQueueType(typeReturn, arena), "Queue empty"); +} + + +/* postDummyMessage -- post a dummy message */ + +static void postDummyMessage(Arena arena, MessageClass klass, + MessageType type) +{ + void *p; + Message message; + + die((mps_res_t)ControlAlloc(&p, arena, sizeof(MessageStruct)), + "AllocMessage"); + message = (Message)p; + MessageInit(arena, message, klass, type); + MessagePost(arena, message); +} + + +/* postFinalizationMessage -- post dummy finalization message */ + +static void postFinalizationMessage(Arena arena) +{ + postDummyMessage(arena, &DFMessageClassStruct, MessageTypeFINALIZATION); +} + +/* postGCMessage -- post dummy GC message */ + +static void postGCMessage(Arena arena) +{ + postDummyMessage(arena, &DGCMessageClassStruct, MessageTypeGC); +} + + +/* postInterleavedMessages -- post a couple of each message type */ + +static void postInterleavedMessages(Arena arena) +{ + postFinalizationMessage(arena); + postGCMessage(arena); + postFinalizationMessage(arena); + postGCMessage(arena); +} + + +/* eatMessageOfType -- get a message of a specified type + * + * There must be at least 1 message of that type on the queue. + */ + +static void eatMessageOfType(Arena arena, MessageType type) +{ + Message message; + cdie(MessageGet(&message, arena, type), "No message"); + MessageDiscard(arena, message); +} + + +/* eatHiddenMessage -- get a message which isn't at top of queue + * + * Assumes there is at least 1 message of each of Finalization + * and GC types. 
+ */ + +static void eatHiddenMessage(Arena arena) +{ + MessageType type, eatType; + + topMessageType(&type, arena); + if (type != MessageTypeGC) { + eatType = MessageTypeGC; + } else { + eatType = MessageTypeFINALIZATION; + } + eatMessageOfType(arena, eatType); +} + + +/* eatTopMessageOfType -- get a message which is at top of queue + * + * The message must be of the specified type. + * Assumes there is at least 1 message on the queue. + */ + +static void eatTopMessageOfType(Arena arena, MessageType type) +{ + MessageType topType; + + topMessageType(&topType, arena); + cdie((topType == type), "Unexpected type"); + eatMessageOfType(arena, type); +} + + +/* eatTopMessage -- get a message which is at top of queue + * + * Assumes there is at least 1 message on the queue. + */ + +static void eatTopMessage(Arena arena) +{ + MessageType type; + + topMessageType(&type, arena); + eatMessageOfType(arena, type); +} + + + +/* testInterleaving -- test interleaving messages of different types + * + * See request.dylan.160204_ + * must be able to retrieve a message even if a message of + * another type is at the head of the queue. + * + * .. _request.dylan.160204: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160204 + */ + +static void testInterleaving(Arena arena) +{ + MessageEmpty(arena); + + /* enable both types of message */ + MessageTypeEnable(arena, MessageTypeGC); + MessageTypeEnable(arena, MessageTypeFINALIZATION); + + /* post a couple of interleaved messages of each type */ + postInterleavedMessages(arena); + + /* check that we can pull out 2 messages not at the head */ + eatHiddenMessage(arena); + eatHiddenMessage(arena); + + /* check that we can pull out 2 messages which are at the head */ + eatTopMessage(arena); + eatTopMessage(arena); +} + + +/* testDisabling -- test message types can be disabled + * + * See request.dylan.160204_ + * .. 
_request.dylan.160204: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160204 + */ + +static void testDisabling(Arena arena) +{ + MessageEmpty(arena); + + /* enable both types of message */ + MessageTypeEnable(arena, MessageTypeGC); + MessageTypeEnable(arena, MessageTypeFINALIZATION); + + /* post a couple of interleaved messages of each type */ + postInterleavedMessages(arena); + + /* Disable one of the types */ + MessageTypeDisable(arena, MessageTypeFINALIZATION); + + /* check that we can pull out 2 messages of the other type */ + eatTopMessageOfType(arena, MessageTypeGC); + eatTopMessageOfType(arena, MessageTypeGC); + + /* check that the queue is empty */ + checkNoMessages(arena); + + /* Post a disabled message */ + postFinalizationMessage(arena); + + /* check that the queue is still empty */ + checkNoMessages(arena); +} + + +/* testGetEmpty -- test we don't AVER when getting a non-existent message */ + +static void testGetEmpty(Arena arena) +{ + Message message; + + MessageEmpty(arena); + checkNoMessages(arena); + cdie(!MessageGet(&message, arena, MessageTypeGC), "Got non-existent message"); +} + + +#define testArenaSIZE (((size_t)64)<<20) + +int main(int argc, char *argv[]) +{ + mps_arena_t mpsArena; + Arena arena; + + testlib_init(argc, argv); + + die(mps_arena_create(&mpsArena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + arena = (Arena)mpsArena; + + testGetEmpty(arena); + testInterleaving(arena); + testDisabling(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/meter.c b/mps/code/meter.c new file mode 100644 index 00000000000..58bf26f0fad --- /dev/null +++ b/mps/code/meter.c @@ -0,0 +1,126 @@ +/* meter.c: METERS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "meter.h" +#include "mpm.h" + +SRCID(meter, "$Id$"); + + +/* MeterInit -- initialize a meter */ + +void MeterInit(Meter meter, const char *name, void *owner) +{ + Word sym; + + meter->name = name; + meter->count = 0; + meter->total = 0.0; + meter->meanSquared = 0.0; + meter->max = 0; + meter->min = (Size)-1; + + sym = EventInternString(name); + EventLabelPointer(meter, sym); + EVENT2(MeterInit, meter, owner); +} + + +/* MeterAccumulate -- accumulate another data point in the meter */ + +void MeterAccumulate(Meter meter, Size amount) +{ + Count count = meter->count + 1; + double total = meter->total; + double meanSquared = meter->meanSquared; + double dcount = (double)count; + + /* .limitation.variance: This computation accumulates a running + * mean^2, minimizing overflow, but sacrificing numerical stability + * for small variances. For more accuracy, the data set should be + * emitted using a telemetry stream and analyzed off-line. + .stddev: stddev = sqrt(meanSquared - mean^2). 
+ */ + meter->count = count; + meter->total = total + (double)amount; + meter->meanSquared = + meanSquared / dcount * (dcount - 1.0) + + (double)amount / dcount * (double)amount; + if (amount > meter->max) + meter->max = amount; + if (amount < meter->min) + meter->min = amount; +} + + +/* MeterWrite -- describe method for meters */ + +Res MeterWrite(Meter meter, mps_lib_FILE *stream, Count depth) +{ + Res res = ResOK; + + res = WriteF(stream, depth, + "meter \"$S\" {", (WriteFS)meter->name, + "count: $U", (WriteFU)meter->count, + NULL); + if (res != ResOK) + return res; + if (meter->count > 0) { + double mean = meter->total / (double)meter->count; + + res = WriteF(stream, 0, + ", total $D", (WriteFD)meter->total, + ", max $U", (WriteFU)meter->max, + ", min $U", (WriteFU)meter->min, + ", mean $D", (WriteFD)mean, + ", meanSquared $D", (WriteFD)meter->meanSquared, + NULL); + if (res != ResOK) + return res; + } + res = WriteF(stream, 0, "}\n", NULL); + + return res; +} + + +/* MeterEmit -- emit an event with the current data from the meter */ + +void MeterEmit(Meter meter) +{ + EVENT6(MeterValues, meter, meter->total, meter->meanSquared, + meter->count, meter->max, meter->min); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/meter.h b/mps/code/meter.h new file mode 100644 index 00000000000..97b61ee2c4c --- /dev/null +++ b/mps/code/meter.h @@ -0,0 +1,90 @@ +/* meter.h: METER INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .sources: mps.design.metrics. + * + * .purpose: Defines an interface for creating "meters" that accumulate + * the number, total and mean^2 of a set of data points. These + * accumulators can be used to report on the number, total, average, and + * variance of the data set. 
+ */ + +#ifndef meter_h +#define meter_h + +#include "mpmtypes.h" +#include "config.h" +#include "misc.h" +#include "mpslib.h" + + +typedef struct MeterStruct *Meter; + +typedef struct MeterStruct +{ + const char *name; + Count count; + double total; + double meanSquared; + Size min; + Size max; +} MeterStruct; + + +extern void MeterInit(Meter meter, const char *name, void *owner); +extern void MeterAccumulate(Meter meter, Size amount); +extern Res MeterWrite(Meter meter, mps_lib_FILE *stream, Count depth); +extern void MeterEmit(Meter meter); + +#define METER_DECL(meter) STATISTIC_DECL(struct MeterStruct meter) +#define METER_INIT(meter, init, owner) \ + BEGIN STATISTIC(MeterInit(&(meter), init, owner)); UNUSED(owner); END +/* Hack: owner is typically only used for MeterInit */ +#define METER_ACC(meter, delta) \ + STATISTIC(MeterAccumulate(&(meter), delta)) +#if defined(STATISTICS) +#define METER_WRITE(meter, stream, depth) BEGIN \ + Res _res = MeterWrite(&(meter), (stream), (depth)); \ + if (_res != ResOK) return _res; \ + END +#elif defined(STATISTICS_NONE) +#define METER_WRITE(meter, stream, depth) NOOP +#else +#error "No statistics configured." +#endif +#define METER_EMIT(meter) STATISTIC(MeterEmit(meter)) + + +#endif /* meter_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/misc.h b/mps/code/misc.h new file mode 100644 index 00000000000..b434c4acdcf --- /dev/null +++ b/mps/code/misc.h @@ -0,0 +1,286 @@ +/* misc.h: MISCELLANEOUS DEFINITIONS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2001 Global Graphics Software. + * + * Small general things which are useful for C but aren't part of the + * memory manager itself. The only reason that this file exists is + * that these things are too small and trivial to be put in their own + * headers. If they ever become non-trivial they should be moved out. + */ + +#ifndef misc_h +#define misc_h + + +typedef int Bool; /* */ +enum BoolEnum { + FALSE = 0, + TRUE = 1 +}; + + +typedef int Compare; +enum CompareEnum { + CompareLESS = -1, + CompareEQUAL = 0, + CompareGREATER = 1 +}; + + +/* SrcId -- source identification + * + * Every C source file should start with a SRCID declaration to + * create a local static source identification structure. This + * is used by other macros (particularly assertions) and can be + * used to reverse engineer binary deliverables. 
+ */ + +typedef const struct SrcIdStruct *SrcId; +typedef const struct SrcIdStruct { + const char *file; + const char *scmid; + const char *build_date; + const char *build_time; +} SrcIdStruct; + +#define SRCID(id, scmid) \ + static SrcIdStruct id ## FileSrcIdStruct = \ + {__FILE__, (scmid), __DATE__, __TIME__}; \ + extern SrcId id ## SrcId; \ + SrcId id ## SrcId = &id ## FileSrcIdStruct + + +/* BEGIN and END -- statement brackets + * + * BEGIN and END can be used to bracket multi-statement blocks which + * will be followed by a semicolon, such as multi-statement macros. + * BEGIN and END should be used to bracket ALL multi-statement macros. + * The block, with its semicolon, still counts as a single statement. + * This ensures that such macros can be used in all statement contexts, + * including in the first branch of an if() statement which has an else + * clause. + */ + +#define BEGIN do { +#define END } while(0) + + + +/* RVALUE -- for method-style macros + * + * RVALUE is used to enclose the expansion of a macro that must not be + * used as an lvalue, e.g. a getter method. + */ + +#define RVALUE(expr) ((void)0, (expr)) + +/* NOOP -- null statement + * + * Do not be tempted to use NULL, or just semicolon as the null + * statement. These items are dangerously ambiguous and could cause + * subtle bugs if misplaced. NOOP is a macro which is guaranteed to + * cause an error if it is not used in a statement context. + */ + +#define NOOP do {} while(0) + + +/* STR -- expands into a string of the expansion of the argument + * + * E.g., if we have: + * #define a b + * STR(a) will expand into "b". + */ + +#define STR_(x) #x +#define STR(x) STR_(x) + +/* NELEMS -- counts number of elements in an array + * + * NELEMS(a) expands into an expression that is the number + * of elements in the array a. + * + * WARNING: expands a more than once (but only in the context of + * sizeof, so does not cause double evaluation). 
+ */ + +#define NELEMS(a) (sizeof(a)/sizeof((a)[0])) + + +/* DISCARD_EXP -- discard an expression, but check syntax + * + * .discard: DISCARD_EXP uses sizeof so that the expression is not + * evaluated and yet the compiler will check that it is a valid + * expression. The conditional is compared with zero so it can + * designate a bitfield object. + */ + +#define DISCARD_EXP(expr) ((void)sizeof((expr)!=0)) + + +/* DISCARD -- discards an expression in statement context, but checks syntax + * + * The argument is an expression; the expansion followed by a semicolon + * is syntactically a statement (to avoid it being used in computation). + */ + +#define DISCARD(expr) \ + BEGIN \ + DISCARD_EXP(expr); \ + END + + +/* DISCARD_STAT -- discards a statement, but checks syntax + * + * The argument is a statement; the expansion followed by a semicolon + * is syntactically a statement. + */ + +#define DISCARD_STAT(stat) \ + BEGIN \ + if (0) stat; \ + END + + +/* UNUSED -- declare parameter unused + * + * This macro suppresses warnings about unused parameters. It should be + * applied to the parameter at the beginning of the body of the + * procedure. + * + * The cast to void appears to work for GCC, MSVC, and CodeWarrior. + * It's a shame there's no way to ensure that the parameter won't be + * used. We could scramble it, but that's undesirable in release + * versions. + */ + +#define UNUSED(param) ((void)param) + + +/* UNUSED_POINTER, UNUSED_SIZE -- values for unused arguments + * + * Use these values for unused pointer, size closure arguments and + * check them in the callback or visitor. + * + * Ensure that they have high bits set on 64-bit platforms for maximum + * unusability. 
+ */ +#define UNUSED_POINTER (Pointer)((Word)~0xFFFFFFFF | (Word)0xB60405ED) /* PointeR UNUSED */ +#define UNUSED_SIZE ((Size)~0xFFFFFFFF | (Size)0x520405ED) /* SiZe UNUSED */ + + +/* PARENT -- parent structure + * + * Given a pointer to a field of a structure this returns a pointer to + * the main structure. PARENT(foo_t, x, &(foo->x)) == foo. + * + * This macro is thread-safe, see . + * + * That intermediate (void *) is required to stop some compilers complaining + * about alignment of 'type *' being greater than that of 'char *'. Which + * is true, but not a bug, since the result really is a pointer to a 'type' + * struct. + */ + +#define PARENT(type, field, p) \ + ((type *)(void *)((char *)(p) - offsetof(type, field))) + + + +/* BOOLFIELD -- declare a Boolean bitfield + * + * A Boolean bitfield needs to be unsigned (not Bool), so that its + * values are 0 and 1 (not 0 and -1), in order to avoid a sign + * conversion (which would be a compiler error) when assigning TRUE to + * the field. + * + * + */ +#define BOOLFIELD(name) unsigned name : 1 + + +/* BITFIELD -- coerce a value into a bitfield + * + * This coerces value to the given width and type in a way that avoids + * warnings from gcc -Wconversion about possible loss of data. + */ + +#define BITFIELD(type, value, width) ((type)value & (((type)1 << (width)) - 1)) +#define BOOLOF(v) BITFIELD(unsigned, (v), 1) + + +/* Bit Sets -- sets of integers in [0,N-1]. + * + * Can be used on any unsigned integral type, ty. These definitions + * are _syntactic_, hence macroid, hence upper case + * . 
+ */ + +#define BS_EMPTY(ty) ((ty)0) +#define BS_COMP(s) (~(s)) +#define BS_UNIV(ty) BS_COMP(BS_EMPTY(ty)) +#define BS_SINGLE(ty, i) ((ty)1 << (i)) +#define BS_IS_MEMBER(s, i) (((s) >> (i)) & 1) +#define BS_UNION(s1, s2) ((s1) | (s2)) +#define BS_ADD(ty, s, i) BS_UNION((s), BS_SINGLE(ty, (i))) +#define BS_INTER(s1, s2) ((s1) & (s2)) +#define BS_DIFF(s1, s2) BS_INTER((s1), BS_COMP(s2)) +#define BS_DEL(ty, s, i) BS_DIFF((s), BS_SINGLE(ty, (i))) +#define BS_SUPER(s1, s2) (BS_INTER((s1), (s2)) == (s2)) +#define BS_SUB(s1, s2) BS_SUPER((s2), (s1)) +#define BS_IS_SINGLE(s) ( ((s) != 0) && (((s) & ((s)-1)) == 0) ) +#define BS_SYM_DIFF(s1, s2) ((s1) ^ (s2)) +#define BS_BITFIELD(ty, s) BITFIELD(ty ## Set, (s), ty ## LIMIT) + + +/* Save and restore errno. See + * + * These macros must be used after #include so that errno is + * defined. + */ + +#define ERRNO_SAVE BEGIN int _saved_errno = errno; BEGIN +#define ERRNO_RESTORE END; errno = _saved_errno; END + + +/* Iterate over two expressions in parallel, avoiding warnings from + * clang -Wcomma -std=c89, while clearly expressing intention. See + * . + */ + +#define ITER_PARALLEL(expr1, expr2) ((void)(expr1), expr2) + + +#endif /* misc_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpm.c b/mps/code/mpm.c new file mode 100644 index 00000000000..d30edda93d7 --- /dev/null +++ b/mps/code/mpm.c @@ -0,0 +1,827 @@ +/* mpm.c: GENERAL MPM SUPPORT + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Miscellaneous support for the implementation of the MPM + * and pool classes. + * + * .sources: */ + +#include "check.h" +#include "misc.h" +#include "mpm.h" +#include "vm.h" + +#include +/* Get some floating constants for WriteDouble */ +#include +#include + +SRCID(mpm, "$Id$"); + + +#if defined(AVER_AND_CHECK) + + +/* CheckLevel -- Control check level + * + * This controls the behaviour of Check methods (see check.h). + */ + +#ifdef CHECKLEVEL_DYNAMIC +unsigned CheckLevel = CHECKLEVEL_DYNAMIC; +#endif + + +/* MPMCheck -- test MPM assumptions */ + +Bool MPMCheck(void) +{ + CHECKL(sizeof(Word) * CHAR_BIT == MPS_WORD_WIDTH); + CHECKL((Word)1 << MPS_WORD_SHIFT == MPS_WORD_WIDTH); + CHECKL(AlignCheck(MPS_PF_ALIGN)); + /* Check that trace ids will fit in the TraceId type. 
*/ + CHECKL(TraceLIMIT <= UINT_MAX); + /* Check that there are enough bits in */ + /* a TraceSet to store all possible trace ids. */ + CHECKL(sizeof(TraceSet) * CHAR_BIT >= TraceLIMIT); + + CHECKL((SizeAlignUp(0, 2048) == 0)); + CHECKL(!SizeIsAligned(64, (unsigned) -1)); + CHECKL(SizeIsAligned(0, 32)); + CHECKL((SizeAlignUp(1024, 16) == 1024)); + /* .prime: 31051 is prime */ + CHECKL(SizeIsAligned(SizeAlignUp(31051, 256), 256)); + CHECKL(SizeIsAligned(SizeAlignUp(31051, 512), 512)); + CHECKL(!SizeIsAligned(31051, 1024)); + CHECKL(!SizeIsP2(0)); + CHECKL(SizeIsP2(128)); + CHECKL(SizeLog2((Size)1) == 0); + CHECKL(SizeLog2((Size)256) == 8); + CHECKL(SizeLog2((Size)65536) == 16); + CHECKL(SizeLog2((Size)131072) == 17); + + /* .check.writef: We check that various types will fit in a Word; */ + /* See .writef.check. Don't need to check WriteFS or WriteFF as they */ + /* should not be cast to Word. */ + CHECKL(sizeof(WriteFA) <= sizeof(Word)); + CHECKL(sizeof(WriteFP) <= sizeof(Word)); + CHECKL(sizeof(WriteFW) <= sizeof(Word)); /* Should be trivial*/ + CHECKL(sizeof(WriteFU) <= sizeof(Word)); + CHECKL(sizeof(WriteFB) <= sizeof(Word)); + CHECKL(sizeof(WriteFC) <= sizeof(Word)); + /* .check.write.double: See .write.double.check */ + { + int e, DBL_EXP_DIG = 1; + for (e = DBL_MAX_10_EXP; e > 0; e /= 10) + DBL_EXP_DIG++; + CHECKL(DBL_EXP_DIG < DBL_DIG); + CHECKL(-(DBL_MIN_10_EXP) <= DBL_MAX_10_EXP); + } + + /* The granularity of memory mapping must be a multiple of the + * granularity of protection (or we might not be able to protect an + * arena grain). */ + CHECKL(PageSize() % ProtGranularity() == 0); + + /* StackProbe mustn't skip over the stack guard page. See + * . */ + CHECKL(StackProbeDEPTH * sizeof(Word) < PageSize()); + + /* Check these values will fit in their bitfield. 
*/ + CHECKL(WB_DEFER_INIT <= ((1ul << WB_DEFER_BITS) - 1)); + CHECKL(WB_DEFER_DELAY <= ((1ul << WB_DEFER_BITS) - 1)); + CHECKL(WB_DEFER_HIT <= ((1ul << WB_DEFER_BITS) - 1)); + + return TRUE; +} + + +/* FunCheck -- check that a function pointer is valid */ + +Bool FunCheck(Fun f) +{ + CHECKL(f != NULL); + /* Could assert various platform-specific things here. */ + UNUSED(f); /* see .check.unused */ + return TRUE; +} + + +/* ShiftCheck -- check that a shift is valid */ + +Bool ShiftCheck(Shift shift) +{ + CHECKL(shift < MPS_WORD_WIDTH); /* standard.ansic 6.3.7 */ + UNUSED(shift); /* see .check.unused */ + return TRUE; +} + + +/* AttrCheck -- check that a set of pool attributes are valid */ + +Bool AttrCheck(Attr attr) +{ + CHECKL((attr & ~AttrMASK) == 0); + /* Could check for legal combinations of attributes. */ + UNUSED(attr); /* see .check.unused */ + return TRUE; +} + + +/* AlignCheck -- check that an alignment is valid */ + +Bool AlignCheck(Align align) +{ + CHECKL(align > 0); + CHECKL((align & (align - 1)) == 0); + /* .check.unused: Check methods for signatureless types don't use */ + /* their argument in hot varieties, so UNUSED is needed. */ + UNUSED(align); + return TRUE; +} + + +/* AccessSetCheck -- check that an access set is valid */ + +Bool AccessSetCheck(AccessSet mode) +{ + CHECKL(mode < ((ULongest)1 << AccessLIMIT)); + UNUSED(mode); /* see .check.unused */ + return TRUE; +} + + +#endif /* defined(AVER_AND_CHECK) */ + + +/* WordIsAligned -- test whether a word is aligned */ + +Bool (WordIsAligned)(Word word, Align align) +{ + AVERT(Align, align); + return WordIsAligned(word, align); +} + + +/* WordAlignUp -- round a word up to the nearest aligned value */ + +Word (WordAlignUp)(Word word, Align align) +{ + AVERT(Align, align); + return WordAlignUp(word, align); +} + +/* WordRoundUp -- round word up to round. + * + * .wordroundup.arg.word: The word arg is quantity to be rounded. 
+ * .wordroundup.arg.round: The modulus argument is not necessarily an
+ * alignment (i.e., not a power of two).
+ *
+ * .wordroundup.result: Let m be congruent to 0 mod r (m == 0(r)), and
+ * let m be the least m >= w. If w+r-1 (!) is representable in Word
+ * then result is m. Otherwise result is 0. Wittily. (NB. Result may
+ * be 0 even if m is representable.) */
+
+Word (WordRoundUp)(Word word, Size modulus)
+{
+  AVER(modulus > 0);
+  return WordRoundUp(word, modulus);
+}
+
+
+/* WordAlignDown -- round a word down to the nearest aligned value */
+
+Word (WordAlignDown)(Word word, Align alignment)
+{
+  AVERT(Align, alignment);
+  return WordAlignDown(word, alignment);
+}
+
+
+/* SizeIsP2 -- test whether a size is a power of two */
+
+Bool (SizeIsP2)(Size size)
+{
+  return SizeIsP2(size);
+}
+
+/* WordIsP2 -- test whether a word is a power of two */
+
+Bool (WordIsP2)(Word word)
+{
+  return WordIsP2(word);
+}
+
+
+/* Logarithms */
+
+Shift SizeFloorLog2(Size size)
+{
+  Shift l = 0;
+
+  AVER(size != 0);
+  while(size > 1) {
+    ++l;
+    size >>= 1;
+  }
+  return l;
+}
+
+Shift SizeLog2(Size size)
+{
+  AVER(SizeIsP2(size));
+  return SizeFloorLog2(size);
+}
+
+
+/* AddrAlignDown -- round an address down to the nearest aligned value */
+
+Addr (AddrAlignDown)(Addr addr, Align alignment)
+{
+  AVERT(Align, alignment);
+  return AddrAlignDown(addr, alignment);
+}
+
+
+/* ResIsAllocFailure
+ *
+ * Test whether a result code is in the set of allocation failure codes. */
+
+Bool ResIsAllocFailure(Res res)
+{
+  return (res == ResMEMORY || res == ResRESOURCE || res == ResCOMMIT_LIMIT);
+}
+
+
+/* WriteULongest -- output a textual representation of an integer to a stream
+ *
+ * Output as an unsigned value in the given base (2-16), padded to the
+ * given width.
*/ + +static Res WriteULongest(mps_lib_FILE *stream, ULongest w, unsigned base, + unsigned width) +{ + static const char digit[16 + 1] = "0123456789ABCDEF"; + /* + 1 for terminator: unused, but prevents compiler warning */ + static const char pad = '0'; /* padding character */ + char buf[MPS_WORD_WIDTH + 1]; /* enough for binary, */ + /* plus one for terminator */ + unsigned i; + int r; + + AVER(stream != NULL); + AVER(2 <= base); + AVER(base <= 16); + AVER(width <= MPS_WORD_WIDTH); + + /* Add digits to the buffer starting at the right-hand end, so that */ + /* the buffer forms a string representing the number. A do...while */ + /* loop is used to ensure that at least one digit (zero) is written */ + /* when the number is zero. */ + i = MPS_WORD_WIDTH; + buf[i] = '\0'; + do { + --i; + buf[i] = digit[w % base]; + w /= base; + } while(w > 0); + + /* If the number is not as wide as the requested field, pad out the */ + /* buffer with zeros. */ + while(i > MPS_WORD_WIDTH - width) { + --i; + buf[i] = pad; + } + + r = mps_lib_fputs(&buf[i], stream); + if (r == mps_lib_EOF) + return ResIO; + + return ResOK; +} + + +/* WriteDouble -- write a double float to a stream + * + * Cf.: Guy L. Steele, Jr. and Jon L. White, "How to print + * floating-point numbers accurately", ACM SIGPLAN Notices, Vol. 25, + * No. 6 (Jun. 1990), Pages 112-126 + * + * .write.double.limitation: Only the "simple" printer is implemented + * here. + * + * .write.double.check: There being no DBL_EXP_DIG, we assume that it is + * less than DBL_DIG. */ + +static Res WriteDouble(mps_lib_FILE *stream, double d) +{ + double F = d; + int E = 0, i, x = 0; + /* Largest exponent that will print in %f style. Larger will use %e */ + /* style. DBL_DIG is chosen for use of doubles as extra-large integers. */ + int expmax = DBL_DIG; + /* Smallest exponent that will print in %f style. Smaller will use */ + /* %e style. -4 is chosen because it is the %g default. 
*/ + int expmin = -4; + /* Epsilon defines how many digits will be printed. Using DBL_EPSILON */ + /* prints all the significant digits. To print fewer digits, set */ + /* epsilon to 10 ^ - N, where N is the desired number of digits. */ + double epsilon = DBL_EPSILON / 2; + char digits[] = "0123456789"; + /* sign, DBL_DIG, '0.', 'e', '+/-', log10(DBL_MAX_10_EXP), */ + /* terminator. See .write.double.check. */ + char buf[1+DBL_DIG+2+1+1+DBL_DIG+1]; + int j = 0; + + if (F == 0.0) { + if (mps_lib_fputs("0", stream) == mps_lib_EOF) + return ResIO; + return ResOK; + } + + if (F < 0) { + buf[j] = '-'; + j++; + F = - F; + } + + /* This scaling operation could introduce rounding errors. */ + for ( ; F >= 1.0 ; F /= 10.0) { + E++; + if (E > DBL_MAX_10_EXP) { + if (mps_lib_fputs("Infinity", stream) == mps_lib_EOF) + return ResIO; + return ResOK; + } + } + for ( ; F < 0.1; F *= 10) + E--; + + /* See if %e notation is required */ + if (E > expmax || E <= expmin) { + x = E - 1; + E = 1; + } + + /* Insert leading 0's */ + if (E <= 0) { + buf[j] = '0'; + j++; + } + if (E < 0) { + buf[j] = '.'; + j++; + } + for (i = -E; i > 0; i--) { + buf[j] = '0'; + j++; + } + + /* Convert the fraction to base 10, inserting a decimal according to */ + /* the exponent. This is Steele and White's FP3 algorithm. */ + do { + int U; + + if (E == 0) { + buf[j] = '.'; + j++; + } + F *= 10.0; + U = (int)F; + F = F - U; + epsilon *= 10.0; + E--; + if (F < epsilon || F > 1.0 - epsilon) { + if (F < 0.5) + buf[j] = digits[U]; + else + buf[j] = digits[U + 1]; + j++; + break; + } + buf[j] = digits[U]; + j++; + } while (1); + + /* Insert trailing 0's */ + for (i = E; i > 0; i--) { + buf[j] = '0'; + j++; + } + + /* If %e notation is selected, append the exponent indicator and sign. */ + if (x != 0) { + buf[j] = 'e'; + j++; + if (x < 0) { + buf[j] = '-'; + j++; + x = - x; + } + else { + buf[j] = '+'; + j++; + } + + /* Format the exponent to at least two digits. 
*/ + for (i = 100; i <= x; ) + i *= 10; + i /= 10; + do { + buf[j] = digits[x / i]; + j++; + x %= i; + i /= 10; + } while (i > 0); + } + buf[j] = '\0'; /* arnold */ + + if (mps_lib_fputs(buf, stream) == mps_lib_EOF) + return ResIO; + return ResOK; +} + + +/* WriteF -- write formatted output + * + * .writef.des: , also + * + * .writef.p: There is an assumption that void * fits in Word in + * the case of $P, and ULongest for $U and $B. This is checked in + * MPMCheck. + * + * .writef.div: Although MPS_WORD_WIDTH/4 appears three times, there + * are effectively three separate decisions to format at this width. + * + * .writef.check: See .check.writef. + */ + +Res WriteF(mps_lib_FILE *stream, Count depth, ...) +{ + Res res; + va_list args; + + va_start(args, depth); + res = WriteF_v(stream, depth, args); + va_end(args); + return res; +} + +Res WriteF_v(mps_lib_FILE *stream, Count depth, va_list args) +{ + const char *firstformat; + Res res; + + firstformat = va_arg(args, const char *); + res = WriteF_firstformat_v(stream, depth, firstformat, args); + return res; +} + +Res WriteF_firstformat_v(mps_lib_FILE *stream, Count depth, + const char *firstformat, va_list args) +{ + const char *format; + int r; + size_t i; + Res res; + Bool start_of_line = TRUE; + + AVER(stream != NULL); + + format = firstformat; + + for(;;) { + if (format == NULL) + break; + + while(*format != '\0') { + if (start_of_line) { + for (i = 0; i < depth; ++i) { + mps_lib_fputc(' ', stream); + } + start_of_line = FALSE; + } + if (*format != '$') { + r = mps_lib_fputc(*format, stream); /* Could be more efficient */ + if (r == mps_lib_EOF) + return ResIO; + if (*format == '\n') { + start_of_line = TRUE; + } + } else { + ++format; + AVER(*format != '\0'); + + switch(*format) { + case 'A': { /* address */ + WriteFA addr = va_arg(args, WriteFA); + res = WriteULongest(stream, (ULongest)addr, 16, + (sizeof(WriteFA) * CHAR_BIT + 3) / 4); + if (res != ResOK) + return res; + } break; + + case 'P': { /* pointer, 
see .writef.p */ + WriteFP p = va_arg(args, WriteFP); + res = WriteULongest(stream, (ULongest)p, 16, + (sizeof(WriteFP) * CHAR_BIT + 3)/ 4); + if (res != ResOK) + return res; + } break; + + case 'F': { /* function */ + WriteFF f = va_arg(args, WriteFF); + Byte *b = (Byte *)&f; + /* ISO C forbids casting function pointers to integer, so + decode bytes (see design.writef.f). + TODO: Be smarter about endianness. */ + for(i=0; i < sizeof(WriteFF); i++) { + res = WriteULongest(stream, (ULongest)(b[i]), 16, + (CHAR_BIT + 3) / 4); + if (res != ResOK) + return res; + } + } break; + + case 'S': { /* string */ + WriteFS s = va_arg(args, WriteFS); + r = mps_lib_fputs((const char *)s, stream); + if (r == mps_lib_EOF) + return ResIO; + } break; + + case 'C': { /* character */ + WriteFC c = va_arg(args, WriteFC); /* promoted */ + r = mps_lib_fputc((int)c, stream); + if (r == mps_lib_EOF) + return ResIO; + } break; + + case 'W': { /* word */ + WriteFW w = va_arg(args, WriteFW); + res = WriteULongest(stream, (ULongest)w, 16, + (sizeof(WriteFW) * CHAR_BIT + 3) / 4); + if (res != ResOK) + return res; + } break; + + case 'U': { /* decimal, see .writef.p */ + WriteFU u = va_arg(args, WriteFU); + res = WriteULongest(stream, (ULongest)u, 10, 0); + if (res != ResOK) + return res; + } break; + + case '3': { /* decimal for thousandths */ + WriteFU u = va_arg(args, WriteFU); + res = WriteULongest(stream, (ULongest)u, 10, 3); + if (res != ResOK) + return res; + } break; + + case 'B': { /* binary, see .writef.p */ + WriteFB b = va_arg(args, WriteFB); + res = WriteULongest(stream, (ULongest)b, 2, sizeof(WriteFB) * CHAR_BIT); + if (res != ResOK) + return res; + } break; + + case '$': { /* dollar char */ + r = mps_lib_fputc('$', stream); + if (r == mps_lib_EOF) + return ResIO; + } break; + + case 'D': { /* double */ + WriteFD d = va_arg(args, WriteFD); + res = WriteDouble(stream, d); + if (res != ResOK) + return res; + } break; + + default: + NOTREACHED; + } + } + + ++format; + } + + format = 
va_arg(args, const char *); + } + + return ResOK; +} + + +/* StringLength -- slow substitute for strlen */ + +size_t StringLength(const char *s) +{ + size_t i = 0; + + AVER(s != NULL); + + while (s[i] != '\0') + ++i; + + return i; +} + + +#if 0 /* This code is currently not in use in the MPS */ + +/* StringEqual -- slow substitute for (strcmp == 0) */ + +Bool StringEqual(const char *s1, const char *s2) +{ + Index i; + + AVER(s1); + AVER(s2); + + for(i = 0; ; i++) { + if(s1[i] != s2[i]) + return FALSE; + if(s1[i] == '\0') { + AVER(s2[i] == '\0'); + break; + } + } + return TRUE; +} + +#endif /* not currently in use */ + + +/* Random -- a random number generator + * + * TODO: This is a copy of the generator from testlib.c, which has + * extensive notes and verification tests. The notes need to go to a + * design document, and the tests to a test. + */ + +static unsigned RandomSeed = 1; +#define Random_m 2147483647UL +#define Random_a 48271UL +unsigned Random32(void) +{ + /* requires m == 2^31-1, a < 2^16 */ + unsigned bot = Random_a * (RandomSeed & 0x7FFF); + unsigned top = Random_a * (RandomSeed >> 15); + AVER(UINT_MAX >= 4294967295U); + RandomSeed = bot + ((top & 0xFFFF) << 15) + (top >> 16); + if (RandomSeed > Random_m) + RandomSeed -= Random_m; + return RandomSeed; +} + +Word RandomWord(void) +{ + Word word = 0; + Index i; + for (i = 0; i < MPS_WORD_WIDTH; i += 31) + word = (word << 31) | Random32(); + return word; +} + + +/* QuickSort -- non-recursive bounded sort + * + * We can't rely on the standard library's qsort, which might have + * O(n) stack usage. This version does not recurse. 
+ */ + +#ifdef QUICKSORT_DEBUG +static Bool quickSorted(void *array[], Count length, + QuickSortCompare compare, void *closure) +{ + Index i; + if (length > 0) { + for (i = 0; i < length - 1; ++i) { + if (compare(array[i], array[i+1], closure) == CompareGREATER) + return FALSE; + } + } + return TRUE; +} +#endif + +void QuickSort(void *array[], Count length, + QuickSortCompare compare, void *closure, + SortStruct *sortStruct) +{ + Index left, right, sp, lo, hi, leftLimit, rightBase; + void *pivot, *temp; + + AVER(array != NULL); + /* can't check length */ + AVER(FUNCHECK(compare)); + /* can't check closure */ + AVER(sortStruct != NULL); + + sp = 0; + left = 0; + right = length; + + for (;;) { + while (right - left > 1) { /* only need to sort if two or more */ + /* Pick a random pivot. */ + pivot = array[left + RandomWord() % (right - left)]; + + /* Hoare partition: scan from left to right, dividing it into + elements less than the pivot and elements greater or + equal. */ + lo = left; + hi = right; + for (;;) { + while (compare(array[lo], pivot, closure) == CompareLESS) + ++lo; + do + --hi; + while (compare(pivot, array[hi], closure) == CompareLESS); + if (lo >= hi) + break; + temp = array[hi]; + array[hi] = array[lo]; + array[lo] = temp; + ++lo; /* step over what we just swapped */ + } + + /* After partition, if we ended up at a pivot, then it is in its + final position and we must skip it to ensure termination. + This handles the case where the pivot is at the start of the + array, and one of the partitions is the whole array, for + example. */ + if (lo == hi) { + AVER_CRITICAL(array[hi] == pivot); /* and it's in place */ + leftLimit = lo; + rightBase = lo + 1; + } else { + AVER_CRITICAL(lo == hi + 1); + leftLimit = lo; + rightBase = lo; + } + + /* Sort the smaller part now, so that we're sure to use at most + log2 length stack levels. Push the larger part on the stack + for later. 
*/ + AVER_CRITICAL(sp < sizeof sortStruct->stack / sizeof sortStruct->stack[0]); + if (leftLimit - left < right - rightBase) { + sortStruct->stack[sp].left = rightBase; + sortStruct->stack[sp].right = right; + ++sp; + right = leftLimit; + } else { + sortStruct->stack[sp].left = left; + sortStruct->stack[sp].right = leftLimit; + ++sp; + left = rightBase; + } + } + + if (sp == 0) + break; + + --sp; + left = sortStruct->stack[sp].left; + right = sortStruct->stack[sp].right; + AVER_CRITICAL(left < right); /* we will have done a zero-length part first */ + } + +#ifdef QUICKSORT_DEBUG + AVER(quickSorted(array, length, compare, closure)); +#endif +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpm.h b/mps/code/mpm.h new file mode 100644 index 00000000000..e072a7a1ea7 --- /dev/null +++ b/mps/code/mpm.h @@ -0,0 +1,1077 @@ +/* mpm.h: MEMORY POOL MANAGER DEFINITIONS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .trans.bufferinit: The Buffer data structure has an Init field and + * an Init method, there's a name clash. We resolve this by calling the + * accessor BufferGetInit. + * + * .critical.macros: In manual-allocation-bound programs using MVFF, + * PoolFree and the Land generic functions are on the critical path + * via mps_free. In non-checking varieties we provide macro + * alternatives to these functions that call the underlying methods + * directly, giving a few percent improvement in performance. 
+ */ + +#ifndef mpm_h +#define mpm_h + +#include "config.h" +#include "misc.h" +#include "check.h" + +#include "event.h" +#include "lock.h" +#include "prmc.h" +#include "prot.h" +#include "sp.h" +#include "th.h" +#include "ss.h" +#include "mpslib.h" +#include "ring.h" +#include "tract.h" /* only for certain Seg macros */ +#include "arg.h" +#include "mpmtypes.h" +#include "mpmst.h" + + +/* MPMCheck -- check MPM assumptions */ + +extern Bool MPMCheck(void); + + +/* Miscellaneous Checks -- see */ + +/* */ +#define BoolCheck(b) ((unsigned)(b) <= 1) + +extern Bool FunCheck(Fun f); +#define FUNCHECK(f) (FunCheck((Fun)f)) + +extern Bool ShiftCheck(Shift shift); +extern Bool AttrCheck(Attr attr); +extern Bool RootVarCheck(RootVar rootVar); +extern Bool AccessSetCheck(AccessSet mode); + + +/* Address/Size Interface -- see */ + +extern Bool AlignCheck(Align align); + +extern Bool (WordIsAligned)(Word word, Align align); +#define WordIsAligned(w, a) (((w) & ((a) - 1)) == 0) + +extern Word (WordAlignUp)(Word word, Align align); +#define WordAlignUp(w, a) (((w) + (a) - 1) & ~((Word)(a) - 1)) + +/* Rounds w up to a multiple of r, see for exact behaviour */ +extern Word (WordRoundUp)(Word word, Size round); +#define WordRoundUp(w, r) (((w)+(r)-1) - ((w)+(r)-1)%(r)) + +extern Word (WordAlignDown)(Word word, Align align); +#define WordAlignDown(w, a) ((w) & ~((Word)(a) - 1)) + +#define size_tAlignUp(s, a) ((size_t)WordAlignUp((Word)(s), a)) + +#define PointerAdd(p, s) ((void *)((char *)(p) + (s))) +#define PointerSub(p, s) ((void *)((char *)(p) - (s))) + +#define PointerOffset(base, limit) \ + ((size_t)((char *)(limit) - (char *)(base))) + +#define PointerAlignUp(p, s) \ + ((void *)WordAlignUp((Word)(p), (Align)(s))) + +#define AddrAdd(p, s) ((Addr)PointerAdd((void *)(p), s)) +#define AddrSub(p, s) ((Addr)PointerSub((void *)(p), s)) + +#define AddrOffset(b, l) \ + ((Size)(PointerOffset((void *)(b), (void *)(l)))) + +extern Addr (AddrAlignDown)(Addr addr, Align align); +#define 
AddrAlignDown(p, a) ((Addr)WordAlignDown((Word)(p), a)) + +#define AlignWord(s) ((Word)(s)) + +#define AddrIsAligned(p, a) WordIsAligned((Word)(p), a) +#define AddrAlignUp(p, a) ((Addr)WordAlignUp((Word)(p), a)) +#define AddrRoundUp(p, r) ((Addr)WordRoundUp((Word)(p), r)) + +#define ReadonlyAddrAdd(p, s) ((ReadonlyAddr)((const char *)(p) + (s))) + +#define SizeIsAligned(s, a) WordIsAligned((Word)(s), a) +#define SizeAlignUp(s, a) ((Size)WordAlignUp((Word)(s), a)) +#define SizeAlignDown(s, a) ((Size)WordAlignDown((Word)(s), a)) +/* r not required to be a power of 2 */ +#define SizeRoundUp(s, r) ((Size)WordRoundUp((Word)(s), (Size)(r))) + +#define IndexIsAligned(s, a) WordIsAligned((Word)(s), a) +#define IndexAlignUp(s, a) ((Index)WordAlignUp((Word)(s), a)) +#define IndexAlignDown(s, a) ((Index)WordAlignDown((Word)(s), a)) + +#define AlignIsAligned(a1, a2) WordIsAligned((Word)(a1), a2) + + +extern Addr (AddrSet)(Addr target, Byte value, Size size); +/* This is one of the places that implements Addr, so it's allowed to */ +/* convert to void *, see . */ +#define AddrSet(target, value, size) \ + mps_lib_memset(target, (int)(value), size) + +extern Addr (AddrCopy)(Addr target, Addr source, Size size); +#define AddrCopy(target, source, size) \ + mps_lib_memcpy(target, source, size) + +extern int (AddrComp)(Addr a, Addr b, Size size); +#define AddrComp(a, b, size) \ + mps_lib_memcmp(a, b, size) + + +/* ADDR_PTR -- turns an Addr into a pointer to the given type */ + +#define ADDR_PTR(type, addr) ((type *)(addr)) + + +/* Clock */ + +#define ClockNow() ((Clock)mps_clock()) +#define ClocksPerSec() ((Clock)mps_clocks_per_sec()) + + +/* Result codes */ + +extern Bool ResIsAllocFailure(Res res); + + +/* Logs and Powers + * + * SizeIsP2 returns TRUE if and only if size is a non-negative integer + * power of 2, and FALSE otherwise. + * + * SizeLog2 returns the logarithm in base 2 of size. size must be a + * power of 2. 
+ * + * SizeFloorLog2 returns the floor of the logarithm in base 2 of size. + * size can be any positive non-zero value. */ + +extern Bool (SizeIsP2)(Size size); +#define SizeIsP2(size) WordIsP2((Word)size) +extern Shift SizeLog2(Size size); +extern Shift SizeFloorLog2(Size size); + +extern Bool (WordIsP2)(Word word); +#define WordIsP2(word) ((word) > 0 && ((word) & ((word) - 1)) == 0) + +/* Formatted Output -- see , */ + +extern Res WriteF(mps_lib_FILE *stream, Count depth, ...); +extern Res WriteF_v(mps_lib_FILE *stream, Count depth, va_list args); +extern Res WriteF_firstformat_v(mps_lib_FILE *stream, Count depth, + const char *firstformat, va_list args); +#define WriteFYesNo(condition) ((WriteFS)((condition) ? "YES" : "NO")) + + +/* Miscellaneous support -- see */ + +extern size_t StringLength(const char *s); +extern Bool StringEqual(const char *s1, const char *s2); + +extern unsigned Random32(void); +extern Word RandomWord(void); + +typedef Compare QuickSortCompare(void *left, void *right, + void *closure); +extern void QuickSort(void *array[], Count length, + QuickSortCompare compare, void *closure, + SortStruct *sortStruct); + + +/* Version Determination + * + * . */ + +extern char *MPSVersion(void); + + +/* Pool Interface -- see */ + +extern Res PoolInit(Pool pool, Arena arena, PoolClass klass, ArgList args); +extern void PoolFinish(Pool pool); +extern Bool PoolClassCheck(PoolClass klass); +extern Bool PoolCheck(Pool pool); +extern Res PoolDescribe(Pool pool, mps_lib_FILE *stream, Count depth); + +/* Must be thread-safe. . 
*/ +#define PoolArena(pool) ((pool)->arena) +#define PoolAlignment(pool) ((pool)->alignment) +#define PoolSegRing(pool) (&(pool)->segRing) +#define PoolArenaRing(pool) (&(pool)->arenaRing) +#define PoolOfArenaRing(node) RING_ELT(Pool, arenaRing, node) +#define PoolHasAttr(pool, Attr) ((ClassOfPoly(Pool, pool)->attr & (Attr)) != 0) +#define PoolSizeGrains(pool, size) ((size) >> (pool)->alignShift) +#define PoolGrainsSize(pool, grains) ((grains) << (pool)->alignShift) +#define PoolIndexOfAddr(base, pool, p) \ + (AddrOffset((base), (p)) >> (pool)->alignShift) +#define PoolAddrOfIndex(base, pool, i) AddrAdd(base, PoolGrainsSize(pool, i)) + +extern Bool PoolFormat(Format *formatReturn, Pool pool); + +extern double PoolMutatorAllocSize(Pool pool); + +extern Bool PoolOfAddr(Pool *poolReturn, Arena arena, Addr addr); +extern Bool PoolOfRange(Pool *poolReturn, Arena arena, Addr base, Addr limit); +extern Bool PoolHasAddr(Pool pool, Addr addr); +extern Bool PoolHasRange(Pool pool, Addr base, Addr limit); + +extern Res PoolCreate(Pool *poolReturn, Arena arena, PoolClass klass, + ArgList args); +extern void PoolDestroy(Pool pool); +extern BufferClass PoolDefaultBufferClass(Pool pool); +extern Res PoolAlloc(Addr *pReturn, Pool pool, Size size); +extern void (PoolFree)(Pool pool, Addr old, Size size); +extern PoolGen PoolSegPoolGen(Pool pool, Seg seg); +extern Res PoolTraceBegin(Pool pool, Trace trace); +extern void PoolFreeWalk(Pool pool, FreeBlockVisitor f, void *p); +extern Size PoolTotalSize(Pool pool); +extern Size PoolFreeSize(Pool pool); +extern Res PoolAddrObject(Addr *pReturn, Pool pool, Addr addr); + +extern Res PoolAbsInit(Pool pool, Arena arena, PoolClass klass, ArgList arg); +extern void PoolAbsFinish(Inst inst); +extern Res PoolNoAlloc(Addr *pReturn, Pool pool, Size size); +extern Res PoolTrivAlloc(Addr *pReturn, Pool pool, Size size); +extern void PoolNoFree(Pool pool, Addr old, Size size); +extern void PoolTrivFree(Pool pool, Addr old, Size size); +extern PoolGen 
PoolNoSegPoolGen(Pool pool, Seg seg); +extern Res PoolNoBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size); +extern Res PoolTrivBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size); +extern void PoolNoBufferEmpty(Pool pool, Buffer buffer); +extern void PoolSegBufferEmpty(Pool pool, Buffer buffer); +extern void PoolTrivBufferEmpty(Pool pool, Buffer buffer); +extern Res PoolAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth); +extern Res PoolNoTraceBegin(Pool pool, Trace trace); +extern Res PoolTrivTraceBegin(Pool pool, Trace trace); +extern Res PoolNoScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg); +extern void PoolNoRampBegin(Pool pool, Buffer buf, Bool collectAll); +extern void PoolTrivRampBegin(Pool pool, Buffer buf, Bool collectAll); +extern void PoolNoRampEnd(Pool pool, Buffer buf); +extern void PoolTrivRampEnd(Pool pool, Buffer buf); +extern Res PoolNoFramePush(AllocFrame *frameReturn, Pool pool, Buffer buf); +extern Res PoolTrivFramePush(AllocFrame *frameReturn, Pool pool, Buffer buf); +extern Res PoolNoFramePop(Pool pool, Buffer buf, AllocFrame frame); +extern Res PoolTrivFramePop(Pool pool, Buffer buf, AllocFrame frame); +extern void PoolTrivFreeWalk(Pool pool, FreeBlockVisitor f, void *p); +extern PoolDebugMixin PoolNoDebugMixin(Pool pool); +extern BufferClass PoolNoBufferClass(void); +extern Size PoolNoSize(Pool pool); +extern Res PoolTrivAddrObject(Addr *pReturn, Pool pool, Addr addr); + +/* See .critical.macros. 
*/ +#define PoolFreeMacro(pool, old, size) Method(Pool, pool, free)(pool, old, size) +#if !defined(AVER_AND_CHECK_ALL) +#define PoolFree(pool, old, size) PoolFreeMacro(pool, old, size) +#endif /* !defined(AVER_AND_CHECK_ALL) */ + +/* Abstract Pool Classes Interface -- see */ +extern void PoolClassMixInBuffer(PoolClass klass); +extern void PoolClassMixInCollect(PoolClass klass); +DECLARE_CLASS(Inst, PoolClass, InstClass); +DECLARE_CLASS(Pool, AbstractPool, Inst); +DECLARE_CLASS(Pool, AbstractBufferPool, AbstractPool); +DECLARE_CLASS(Pool, AbstractSegBufPool, AbstractBufferPool); +typedef Pool AbstractCollectPool; +#define AbstractCollectPoolCheck PoolCheck +DECLARE_CLASS(Pool, AbstractCollectPool, AbstractSegBufPool); + + +/* Message Interface -- see */ +/* -- Internal (MPM) Interface -- functions for message originator */ +extern Bool MessageCheck(Message message); +extern Bool MessageClassCheck(MessageClass klass); +extern Bool MessageTypeCheck(MessageType type); +extern void MessageInit(Arena arena, Message message, + MessageClass klass, MessageType type); +extern void MessageFinish(Message message); +extern Arena MessageArena(Message message); +extern Bool MessageOnQueue(Message message); +extern void MessagePost(Arena arena, Message message); +extern void MessageEmpty(Arena arena); +/* -- Delivery (Client) Interface -- functions for recipient */ +extern void MessageTypeEnable(Arena arena, MessageType type); +extern void MessageTypeDisable(Arena arena, MessageType type); +extern Bool MessagePoll(Arena arena); +extern Bool MessageQueueType(MessageType *typeReturn, Arena arena); +extern Bool MessageGet(Message *messageReturn, Arena arena, + MessageType type); +extern void MessageDiscard(Arena arena, Message message); +/* -- Message Methods, Generic */ +extern MessageType MessageGetType(Message message); +extern MessageClass MessageGetClass(Message message); +extern Clock MessageGetClock(Message message); +/* -- Message Method Dispatchers, Type-specific */ +extern 
void MessageFinalizationRef(Ref *refReturn, + Arena arena, Message message); +extern Size MessageGCLiveSize(Message message); +extern Size MessageGCCondemnedSize(Message message); +extern Size MessageGCNotCondemnedSize(Message message); +extern const char *MessageGCStartWhy(Message message); +/* -- Message Method Stubs, Type-specific */ +extern void MessageNoFinalizationRef(Ref *refReturn, + Arena arena, Message message); +extern Size MessageNoGCLiveSize(Message message); +extern Size MessageNoGCCondemnedSize(Message message); +extern Size MessageNoGCNotCondemnedSize(Message message); +extern const char *MessageNoGCStartWhy(Message message); + + +/* Trace Interface -- see */ + +#define TraceSetSingle(trace) BS_SINGLE(TraceSet, (trace)->ti) +#define TraceSetIsSingle(ts) BS_IS_SINGLE(ts) +#define TraceSetIsMember(ts, trace) BS_IS_MEMBER(ts, (trace)->ti) +#define TraceSetAdd(ts, trace) BS_ADD(TraceSet, ts, (trace)->ti) +#define TraceSetDel(ts, trace) BS_DEL(TraceSet, ts, (trace)->ti) +#define TraceSetUnion(ts1, ts2) BS_UNION(ts1, ts2) +#define TraceSetInter(ts1, ts2) BS_INTER(ts1, ts2) +#define TraceSetDiff(ts1, ts2) BS_DIFF(ts1, ts2) +#define TraceSetSuper(ts1, ts2) BS_SUPER(ts1, ts2) +#define TraceSetSub(ts1, ts2) BS_SUB(ts1, ts2) +#define TraceSetComp(ts) BS_COMP(ts) + +#define TRACE_SET_ITER(ti, trace, ts, arena) \ + BEGIN \ + for (ti = 0; ti < TraceLIMIT; ++ti) { \ + trace = ArenaTrace(arena, ti); \ + if (TraceSetIsMember(ts, trace)) { + +#define TRACE_SET_ITER_END(ti, trace, ts, arena) \ + } \ + } \ + END + + +extern void ScanStateInit(ScanState ss, TraceSet ts, Arena arena, + Rank rank, ZoneSet white); +extern void ScanStateInitSeg(ScanState ss, TraceSet ts, Arena arena, + Rank rank, ZoneSet white, Seg seg); +extern void ScanStateFinish(ScanState ss); +extern Bool ScanStateCheck(ScanState ss); +extern void ScanStateSetSummary(ScanState ss, RefSet summary); +extern RefSet ScanStateSummary(ScanState ss); +extern void ScanStateUpdateSummary(ScanState ss, Seg seg, 
Bool wasTotal); + +/* See impl.h.mpmst.ss */ +#define ScanStateZoneShift(ss) ((Shift)(ss)->ss_s._zs) +#define ScanStateWhite(ss) ((ZoneSet)(ss)->ss_s._w) +#define ScanStateUnfixedSummary(ss) ((RefSet)(ss)->ss_s._ufs) +#define ScanStateSetZoneShift(ss, shift) ((void)((ss)->ss_s._zs = (shift))) +#define ScanStateSetWhite(ss, zs) ((void)((ss)->ss_s._w = (zs))) +#define ScanStateSetUnfixedSummary(ss, rs) ((void)((ss)->ss_s._ufs = (rs))) + +extern Bool TraceIdCheck(TraceId id); +extern Bool TraceSetCheck(TraceSet ts); +extern Bool TraceCheck(Trace trace); +extern Res TraceCreate(Trace *traceReturn, Arena arena, TraceStartWhy why); +extern void TraceDestroyInit(Trace trace); +extern void TraceDestroyFinished(Trace trace); + +extern Bool TraceIsEmpty(Trace trace); +extern Res TraceAddWhite(Trace trace, Seg seg); +extern void TraceCondemnStart(Trace trace); +extern Res TraceCondemnEnd(double *mortalityReturn, Trace trace); +extern Res TraceStart(Trace trace, double mortality, double finishingTime); +extern Bool TracePoll(Work *workReturn, Bool *collectWorldReturn, + Globals globals, Bool collectWorldAllowed); + +extern Rank TraceRankForAccess(Arena arena, Seg seg); +extern void TraceSegAccess(Arena arena, Seg seg, AccessSet mode); + +extern void TraceAdvance(Trace trace); +extern Res TraceStartCollectAll(Trace *traceReturn, Arena arena, TraceStartWhy why); +extern Res TraceDescribe(Trace trace, mps_lib_FILE *stream, Count depth); + +/* traceanc.c -- Trace Ancillary */ + +extern Bool TraceStartMessageCheck(TraceStartMessage message); +extern const char *TraceStartWhyToString(TraceStartWhy why); +extern void TracePostStartMessage(Trace trace); +extern Bool TraceMessageCheck(TraceMessage message); /* trace end */ +extern void TracePostMessage(Trace trace); /* trace end */ +extern Bool TraceIdMessagesCheck(Arena arena, TraceId ti); +extern Res TraceIdMessagesCreate(Arena arena, TraceId ti); +extern void TraceIdMessagesDestroy(Arena arena, TraceId ti); + +/* Equivalent to 
MPS_SCAN_BEGIN */ + +#define TRACE_SCAN_BEGIN(ss) \ + BEGIN \ + /* Check range on zoneShift before casting to Shift. */ \ + AVER(ScanStateZoneShift(ss) < MPS_WORD_WIDTH); \ + { \ + Shift SCANzoneShift = ScanStateZoneShift(ss); \ + ZoneSet SCANwhite = ScanStateWhite(ss); \ + RefSet SCANsummary = ScanStateUnfixedSummary(ss); \ + Word SCANt; \ + mps_addr_t SCANref; \ + Res SCANres; \ + { + +/* Equivalent to MPS_FIX1 */ + +#define TRACE_FIX1(ss, ref) \ + (SCANt = (Word)1 << ((Word)(ref) >> SCANzoneShift & (MPS_WORD_WIDTH-1)), \ + SCANsummary |= SCANt, \ + (SCANwhite & SCANt) != 0) + +/* Equivalent to MPS_FIX2 */ + +/* TODO: The ref is copied to avoid breaking strict aliasing rules that could + well affect optimised scan loops. This code could be improved by + returning the fixed ref as a result and using longjmp to signal errors, + and that might well improve all scan loops too. The problem is whether + some embedded client platforms support longjmp. RB 2012-09-07 */ + +#define TRACE_FIX2(ss, refIO) \ + (SCANref = (mps_addr_t)*(refIO), \ + SCANres = _mps_fix2(&(ss)->ss_s, &SCANref), \ + *(refIO) = SCANref, \ + SCANres) + +/* Equivalent to MPS_FIX12 */ + +#define TRACE_FIX12(ss, refIO) \ + (TRACE_FIX1(ss, *(refIO)) ? 
TRACE_FIX2(ss, refIO) : ResOK) + +/* Equivalent to MPS_SCAN_END */ + +#define TRACE_SCAN_END(ss) \ + } \ + ScanStateSetUnfixedSummary(ss, SCANsummary); \ + } \ + END + +extern Res TraceScanFormat(ScanState ss, Addr base, Addr limit); +extern Res TraceScanArea(ScanState ss, Word *base, Word *limit, + mps_area_scan_t scan_area, + void *closure); +extern void TraceScanSingleRef(TraceSet ts, Rank rank, Arena arena, + Seg seg, Ref *refIO); + + +/* Arena Interface -- see */ + +DECLARE_CLASS(Inst, ArenaClass, InstClass); +DECLARE_CLASS(Arena, AbstractArena, Inst); +extern Bool ArenaClassCheck(ArenaClass klass); + +extern Bool ArenaCheck(Arena arena); +extern Res ArenaCreate(Arena *arenaReturn, ArenaClass klass, ArgList args); +extern void ArenaDestroy(Arena arena); +extern Res ArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth); +extern Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream, Count depth); +extern Bool ArenaAccess(Addr addr, AccessSet mode, MutatorContext context); +extern Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit); +extern Res ArenaFreeLandDelete(Arena arena, Addr base, Addr limit); + +extern Bool GlobalsCheck(Globals arena); +extern Res GlobalsInit(Globals arena); +extern void GlobalsFinish(Globals arena); +extern Res GlobalsCompleteCreate(Globals arenaGlobals); +extern void GlobalsPrepareToDestroy(Globals arenaGlobals); +extern Res GlobalsDescribe(Globals arena, mps_lib_FILE *stream, Count depth); +extern void GlobalsArenaMap(void (*func)(Arena arena)); +extern void GlobalsClaimAll(void); +extern void GlobalsReleaseAll(void); +extern void GlobalsReinitializeAll(void); + +#define ArenaGlobals(arena) (&(arena)->globals) +#define GlobalsArena(glob) PARENT(ArenaStruct, globals, glob) + +#define ArenaThreadRing(arena) (&(arena)->threadRing) +#define ArenaDeadRing(arena) (&(arena)->deadRing) +#define ArenaEpoch(arena) (ArenaHistory(arena)->epoch) /* .epoch.ts */ +#define ArenaTrace(arena, ti) (&(arena)->trace[ti]) +#define 
ArenaZoneShift(arena) ((arena)->zoneShift) +#define ArenaStripeSize(arena) ((Size)1 << ArenaZoneShift(arena)) +#define ArenaGrainSize(arena) ((arena)->grainSize) +#define ArenaGreyRing(arena, rank) (&(arena)->greyRing[rank]) +#define ArenaPoolRing(arena) (&ArenaGlobals(arena)->poolRing) +#define ArenaChunkTree(arena) RVALUE((arena)->chunkTree) +#define ArenaChunkRing(arena) (&(arena)->chunkRing) +#define ArenaShield(arena) (&(arena)->shieldStruct) +#define ArenaHistory(arena) (&(arena)->historyStruct) + +extern Bool ArenaGrainSizeCheck(Size size); +#define AddrArenaGrainUp(addr, arena) AddrAlignUp(addr, ArenaGrainSize(arena)) +#define AddrArenaGrainDown(addr, arena) AddrAlignDown(addr, ArenaGrainSize(arena)) +#define AddrIsArenaGrain(addr, arena) AddrIsAligned(addr, ArenaGrainSize(arena)) +#define SizeArenaGrains(size, arena) SizeAlignUp(size, ArenaGrainSize(arena)) +#define SizeIsArenaGrains(size, arena) SizeIsAligned(size, ArenaGrainSize(arena)) + +extern void ArenaEnterLock(Arena arena, Bool recursive); +extern void ArenaLeaveLock(Arena arena, Bool recursive); + +extern void ArenaEnter(Arena arena); +extern void ArenaLeave(Arena arena); +extern void (ArenaPoll)(Globals globals); + +#if defined(SHIELD) +#elif defined(SHIELD_NONE) +#define ArenaPoll(globals) UNUSED(globals) +#else +#error "No shield configuration." 
+#endif /* SHIELD */ + +extern void ArenaEnterRecursive(Arena arena); +extern void ArenaLeaveRecursive(Arena arena); + +extern Bool (ArenaStep)(Globals globals, double interval, double multiplier); +extern void ArenaClamp(Globals globals); +extern void ArenaRelease(Globals globals); +extern void ArenaPark(Globals globals); +extern void ArenaPostmortem(Globals globals); +extern Res ArenaStartCollect(Globals globals, TraceStartWhy why); +extern Res ArenaCollect(Globals globals, TraceStartWhy why); +extern Bool ArenaBusy(Arena arena); +extern Bool ArenaHasAddr(Arena arena, Addr addr); +extern Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr); +extern void ArenaChunkInsert(Arena arena, Chunk chunk); +extern void ArenaChunkRemoved(Arena arena, Chunk chunk); +extern void ArenaAccumulateTime(Arena arena, Clock start, Clock now); + +extern void ArenaSetEmergency(Arena arena, Bool emergency); +extern Bool ArenaEmergency(Arena arean); + +extern Res ControlInit(Arena arena); +extern void ControlFinish(Arena arena); +extern Res ControlAlloc(void **baseReturn, Arena arena, size_t size); +extern void ControlFree(Arena arena, void *base, size_t size); +extern Res ControlDescribe(Arena arena, mps_lib_FILE *stream, Count depth); + + +/* Peek/Poke/Read/Write -- read/write possibly through barrier + * + * These are provided so that modules in the MPS can make occasional + * access to client data, and to implement a software barrier for + * segments that are not handed out to the mutator. They protect the + * necessary colour, shield and summary invariants. + * + * Note that Peek and Poke can be called with an address that may or + * may not be in memory managed by arena, whereas Read and Write + * assert this is the case. 
+ */ + +/* Peek reads a value */ +extern Ref ArenaPeek(Arena arena, Ref *p); +/* Same, but p known to be owned by arena */ +extern Ref ArenaRead(Arena arena, Ref *p); +/* Same, but p must be in seg */ +extern Ref ArenaPeekSeg(Arena arena, Seg seg, Ref *p); +/* Poke stores a value */ +extern void ArenaPoke(Arena arena, Ref *p, Ref ref); +/* Same, but p known to be owned by arena */ +extern void ArenaWrite(Arena arena, Ref *p, Ref ref); +/* Same, but p must be in seg */ +extern void ArenaPokeSeg(Arena arena, Seg seg, Ref *p, Ref ref); + + +extern Size ArenaReserved(Arena arena); +extern Size ArenaCommitted(Arena arena); +extern Size ArenaSpareCommitted(Arena arena); +extern double ArenaSpare(Arena arena); +extern void ArenaSetSpare(Arena arena, double spare); +#define ArenaSpareCommitLimit(arena) ((Size)((double)ArenaCommitted(arena) * ArenaSpare(arena))) +#define ArenaCurrentSpare(arena) ((double)ArenaSpareCommitted(arena) / (double)ArenaCommitted(arena)) + +extern Size ArenaCommitLimit(Arena arena); +extern Res ArenaSetCommitLimit(Arena arena, Size limit); +extern double ArenaPauseTime(Arena arena); +extern void ArenaSetPauseTime(Arena arena, double pauseTime); +extern Size ArenaNoPurgeSpare(Arena arena, Size size); +extern Res ArenaNoGrow(Arena arena, LocusPref pref, Size size); + +extern Size ArenaAvail(Arena arena); +extern Size ArenaCollectable(Arena arena); + +extern Res ArenaExtend(Arena, Addr base, Size size); + +extern void ArenaCompact(Arena arena, Trace trace); + +extern Res ArenaFinalize(Arena arena, Ref obj); +extern Res ArenaDefinalize(Arena arena, Ref obj); + +extern Res ArenaAlloc(Addr *baseReturn, LocusPref pref, + Size size, Pool pool); +extern Res ArenaFreeLandAlloc(Tract *tractReturn, Arena arena, ZoneSet zones, + Bool high, Size size, Pool pool); +extern void ArenaFree(Addr base, Size size, Pool pool); + +extern Res ArenaNoExtend(Arena arena, Addr base, Size size); + + +/* Policy interface */ + +extern Res PolicyAlloc(Tract *tractReturn, Arena 
arena, LocusPref pref, + Size size, Pool pool); +extern Bool PolicyShouldCollectWorld(Arena arena, double availableTime, + Clock now, Clock clocks_per_sec); +extern Bool PolicyStartTrace(Trace *traceReturn, Bool *collectWorldReturn, + Arena arena, Bool collectWorldAllowed); +extern Bool PolicyPoll(Arena arena); +extern Bool PolicyPollAgain(Arena arena, Clock start, Bool moreWork, Work tracedWork); + + +/* Locus interface */ + +extern Bool LocusPrefCheck(LocusPref pref); +extern LocusPref LocusPrefDefault(void); +extern void LocusPrefInit(LocusPref pref); +extern void LocusPrefExpress(LocusPref pref, LocusPrefKind kind, void *p); +extern Res LocusPrefDescribe(LocusPref pref, mps_lib_FILE *stream, Count depth); + +extern void LocusInit(Arena arena); +extern void LocusFinish(Arena arena); +extern Bool LocusCheck(Arena arena); + + +/* Segment interface */ + +extern Res SegAlloc(Seg *segReturn, SegClass klass, LocusPref pref, + Size size, Pool pool, + ArgList args); +extern void SegFree(Seg seg); +extern Bool SegOfAddr(Seg *segReturn, Arena arena, Addr addr); +extern Bool SegFirst(Seg *segReturn, Arena arena); +extern Bool SegNext(Seg *segReturn, Arena arena, Seg seg); +extern Bool SegNextOfRing(Seg *segReturn, Arena arena, Pool pool, Ring next); +extern void SegSetWhite(Seg seg, TraceSet white); +extern void SegSetGrey(Seg seg, TraceSet grey); +extern void SegFlip(Seg seg, Trace trace); +extern void SegSetRankSet(Seg seg, RankSet rankSet); +extern void SegSetRankAndSummary(Seg seg, RankSet rankSet, RefSet summary); +extern Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi); +extern Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at); +extern Res SegAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context); +extern Res SegWholeAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context); +extern Res SegSingleAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context); +extern Res 
SegWhiten(Seg seg, Trace trace); +extern void SegGreyen(Seg seg, Trace trace); +extern void SegBlacken(Seg seg, TraceSet traceSet); +extern Res SegScan(Bool *totalReturn, Seg seg, ScanState ss); +extern Res SegFix(Seg seg, ScanState ss, Addr *refIO); +extern Res SegFixEmergency(Seg seg, ScanState ss, Addr *refIO); +extern void SegReclaim(Seg seg, Trace trace); +extern void SegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *v, size_t s); +extern Res SegAbsDescribe(Inst seg, mps_lib_FILE *stream, Count depth); +extern Res SegDescribe(Seg seg, mps_lib_FILE *stream, Count depth); +extern void SegSetSummary(Seg seg, RefSet summary); +extern Bool SegHasBuffer(Seg seg); +extern Bool SegBuffer(Buffer *bufferReturn, Seg seg); +extern void SegSetBuffer(Seg seg, Buffer buffer); +extern void SegUnsetBuffer(Seg seg); +extern Bool SegBufferFill(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet); +extern Addr SegBufferScanLimit(Seg seg); +extern Bool SegCheck(Seg seg); +extern Bool GCSegCheck(GCSeg gcseg); +extern Bool SegClassCheck(SegClass klass); +DECLARE_CLASS(Inst, SegClass, InstClass); +DECLARE_CLASS(Seg, Seg, Inst); +DECLARE_CLASS(Seg, GCSeg, Seg); +DECLARE_CLASS(Seg, MutatorSeg, GCSeg); +#define SegGCSeg(seg) MustBeA(GCSeg, (seg)) +extern void SegClassMixInNoSplitMerge(SegClass klass); + +extern Size SegSize(Seg seg); +extern Addr (SegBase)(Seg seg); +extern Addr (SegLimit)(Seg seg); +#define SegBase(seg) (TractBase((seg)->firstTract)) +#define SegLimit(seg) ((seg)->limit) +#define SegPool(seg) (TractPool((seg)->firstTract)) +/* .bitfield.promote: The bit field accesses need to be cast to the */ +/* right type, otherwise they'll be promoted to signed int, see */ +/* standard.ansic.6.2.1.1. 
*/ +#define SegRankSet(seg) RVALUE((RankSet)(seg)->rankSet) +#define SegPM(seg) RVALUE((AccessSet)(seg)->pm) +#define SegSM(seg) RVALUE((AccessSet)(seg)->sm) +#define SegDepth(seg) RVALUE((unsigned)(seg)->depth) +#define SegGrey(seg) RVALUE((TraceSet)(seg)->grey) +#define SegWhite(seg) RVALUE((TraceSet)(seg)->white) +#define SegNailed(seg) RVALUE((TraceSet)(seg)->nailed) +#define SegPoolRing(seg) (&(seg)->poolRing) +#define SegOfPoolRing(node) RING_ELT(Seg, poolRing, (node)) +#define SegOfGreyRing(node) (&(RING_ELT(GCSeg, greyRing, (node)) \ + ->segStruct)) + +#define SegSummary(seg) (((GCSeg)(seg))->summary) + +#define SegSetPM(seg, mode) ((void)((seg)->pm = BS_BITFIELD(Access, (mode)))) +#define SegSetSM(seg, mode) ((void)((seg)->sm = BS_BITFIELD(Access, (mode)))) +#define SegSetDepth(seg, d) ((void)((seg)->depth = BITFIELD(unsigned, (d), ShieldDepthWIDTH))) +#define SegSetNailed(seg, ts) ((void)((seg)->nailed = BS_BITFIELD(Trace, (ts)))) + + +/* Buffer Interface -- see */ + +extern Res BufferCreate(Buffer *bufferReturn, BufferClass klass, + Pool pool, Bool isMutator, ArgList args); +extern void BufferDestroy(Buffer buffer); +extern Bool BufferCheck(Buffer buffer); +extern Bool SegBufCheck(SegBuf segbuf); +extern Res BufferDescribe(Buffer buffer, mps_lib_FILE *stream, Count depth); +extern Res BufferReserve(Addr *pReturn, Buffer buffer, Size size); +/* macro equivalent for BufferReserve, keep in sync with */ +/* TODO: Perhaps this isn't really necessary now that we build the MPS with + more global optimisation and inlining. RB 2012-09-07 */ +#define BUFFER_RESERVE(pReturn, buffer, size) \ + (AddrAdd(BufferAlloc(buffer), size) > BufferAlloc(buffer) && \ + AddrAdd(BufferAlloc(buffer), size) <= (Addr)BufferAP(buffer)->limit ? 
\ + (*(pReturn) = BufferAlloc(buffer), \ + BufferAP(buffer)->alloc = AddrAdd(BufferAlloc(buffer), size), \ + ResOK) : \ + BufferFill(pReturn, buffer, size)) + +extern Res BufferFill(Addr *pReturn, Buffer buffer, Size size); + +extern Bool BufferCommit(Buffer buffer, Addr p, Size size); +/* macro equivalent for BufferCommit, keep in sync with */ +/* TODO: Perhaps this isn't really necessary now that we build the MPS with + more global optimisation and inlining. RB 2012-09-07 */ +#define BUFFER_COMMIT(buffer, p, size) \ + (BufferAP(buffer)->init = BufferAlloc(buffer), \ + BufferAP(buffer)->limit != 0 || BufferTrip(buffer, p, size)) + +extern Bool BufferTrip(Buffer buffer, Addr p, Size size); +extern void BufferFinish(Buffer buffer); +extern Bool BufferIsReset(Buffer buffer); +extern Bool BufferIsReady(Buffer buffer); +extern Bool BufferIsMutator(Buffer buffer); +extern void BufferSetAllocAddr(Buffer buffer, Addr addr); +extern void BufferAttach(Buffer buffer, + Addr base, Addr limit, Addr init, Size size); +extern void BufferDetach(Buffer buffer, Pool pool); +extern void BufferFlip(Buffer buffer); + +extern mps_ap_t (BufferAP)(Buffer buffer); +#define BufferAP(buffer) (&(buffer)->ap_s) +extern Buffer BufferOfAP(mps_ap_t ap); +#define BufferOfAP(ap) PARENT(BufferStruct, ap_s, ap) + +#define BufferArena(buffer) ((buffer)->arena) +#define BufferPool(buffer) ((buffer)->pool) + +extern Seg BufferSeg(Buffer buffer); + +extern RankSet BufferRankSet(Buffer buffer); +extern void BufferSetRankSet(Buffer buffer, RankSet rankset); + +#define BufferBase(buffer) ((buffer)->base) +#define BufferGetInit(buffer) /* see .trans.bufferinit */ \ + ((Addr)(BufferAP(buffer)->init)) +#define BufferAlloc(buffer) ((Addr)(BufferAP(buffer)->alloc)) +#define BufferLimit(buffer) ((buffer)->poolLimit) +extern Addr BufferScanLimit(Buffer buffer); + +extern void BufferReassignSeg(Buffer buffer, Seg seg); + +extern Bool BufferIsTrapped(Buffer buffer); + +extern void BufferRampBegin(Buffer buffer, 
AllocPattern pattern); +extern Res BufferRampEnd(Buffer buffer); +extern void BufferRampReset(Buffer buffer); + +extern Res BufferFramePush(AllocFrame *frameReturn, Buffer buffer); +extern Res BufferFramePop(Buffer buffer, AllocFrame frame); + +extern Bool BufferClassCheck(BufferClass klass); +DECLARE_CLASS(Inst, BufferClass, InstClass); +DECLARE_CLASS(Buffer, Buffer, Inst); +DECLARE_CLASS(Buffer, SegBuf, Buffer); +typedef Buffer RankBuf; +#define RankBufCheck BufferCheck +DECLARE_CLASS(Buffer, RankBuf, SegBuf); + +extern AllocPattern AllocPatternRamp(void); +extern AllocPattern AllocPatternRampCollectAll(void); + + +/* FindDelete -- see */ + +extern Bool FindDeleteCheck(FindDelete findDelete); + + +/* Format Interface -- see */ + +extern Bool FormatCheck(Format format); +extern Res FormatCreate(Format *formatReturn, Arena arena, ArgList args); +extern void FormatDestroy(Format format); +extern Arena FormatArena(Format format); +extern Res FormatDescribe(Format format, mps_lib_FILE *stream, Count depth); +extern mps_res_t FormatNoScan(mps_ss_t mps_ss, mps_addr_t base, mps_addr_t limit); + + +/* Reference Interface -- see */ + +extern Bool RankCheck(Rank rank); +extern Bool RankSetCheck(RankSet rankSet); + +#define RankSetIsMember(rs, r) BS_IS_MEMBER((rs), (r)) +#define RankSetSingle(r) BS_SINGLE(RankSet, (r)) +#define RankSetIsSingle(r) BS_IS_SINGLE(r) +#define RankSetUnion(rs1, rs2) BS_UNION((rs1), (rs2)) +#define RankSetDel(rs, r) BS_DEL(RankSet, (rs), (r)) + +#define AddrZone(arena, addr) \ + (((Word)(addr) >> (arena)->zoneShift) & (MPS_WORD_WIDTH - 1)) + +#define RefSetUnion(rs1, rs2) BS_UNION((rs1), (rs2)) +#define RefSetInter(rs1, rs2) BS_INTER((rs1), (rs2)) +#define RefSetDiff(rs1, rs2) BS_DIFF((rs1), (rs2)) +#define RefSetAdd(arena, rs, addr) \ + BS_ADD(RefSet, rs, AddrZone(arena, addr)) +#define RefSetIsMember(arena, rs, addr) \ + BS_IS_MEMBER(rs, AddrZone(arena, addr)) +#define RefSetSuper(rs1, rs2) BS_SUPER((rs1), (rs2)) +#define RefSetSub(rs1, rs2) 
BS_SUB((rs1), (rs2)) + + +/* Zone sets -- see */ + +#define ZoneSetUnion(zs1, zs2) BS_UNION(zs1, zs2) +#define ZoneSetInter(zs1, zs2) BS_INTER(zs1, zs2) +#define ZoneSetDiff(zs1, zs2) BS_DIFF(zs1, zs2) +#define ZoneSetAddAddr(arena, zs, addr) \ + BS_ADD(ZoneSet, zs, AddrZone(arena, addr)) +#define ZoneSetHasAddr(arena, zs, addr) \ + BS_IS_MEMBER(zs, AddrZone(arena, addr)) +#define ZoneSetIsSingle(zs) BS_IS_SINGLE(zs) +#define ZoneSetSub(zs1, zs2) BS_SUB(zs1, zs2) +#define ZoneSetSuper(zs1, zs2) BS_SUPER(zs1, zs2) +#define ZoneSetComp(zs) BS_COMP(zs) +#define ZoneSetIsMember(zs, z) BS_IS_MEMBER(zs, z) + + +extern ZoneSet ZoneSetOfRange(Arena arena, Addr base, Addr limit); +extern ZoneSet ZoneSetOfSeg(Arena arena, Seg seg); +typedef Bool (*RangeInZoneSet)(Addr *baseReturn, Addr *limitReturn, + Addr base, Addr limit, + Arena arena, ZoneSet zoneSet, Size size); +extern Bool RangeInZoneSetFirst(Addr *baseReturn, Addr *limitReturn, + Addr base, Addr limit, + Arena arena, ZoneSet zoneSet, Size size); +extern Bool RangeInZoneSetLast(Addr *baseReturn, Addr *limitReturn, + Addr base, Addr limit, + Arena arena, ZoneSet zoneSet, Size size); +extern ZoneSet ZoneSetBlacklist(Arena arena); + + +/* Shield Interface -- see */ + +extern void ShieldInit(Shield shield); +extern void ShieldFinish(Shield shield); +extern Bool ShieldCheck(Shield shield); +extern Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth); +extern void ShieldDestroyQueue(Shield shield, Arena arena); +extern void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode); +extern void (ShieldLower)(Arena arena, Seg seg, AccessSet mode); +extern void (ShieldEnter)(Arena arena); +extern void (ShieldLeave)(Arena arena); +extern void (ShieldExpose)(Arena arena, Seg seg); +extern void (ShieldCover)(Arena arena, Seg seg); +extern void (ShieldHold)(Arena arena); +extern void (ShieldRelease)(Arena arena); +extern void (ShieldFlush)(Arena arena); + +#if defined(SHIELD) +/* Nothing to do: functions declared in all 
shield configurations. */ +#elif defined(SHIELD_NONE) +#define ShieldRaise(arena, seg, mode) \ + BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END +#define ShieldLower(arena, seg, mode) \ + BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END +#define ShieldEnter(arena) BEGIN UNUSED(arena); END +#define ShieldLeave(arena) AVER(arena->busyTraces == TraceSetEMPTY) +#define ShieldExpose(arena, seg) \ + BEGIN UNUSED(arena); UNUSED(seg); END +#define ShieldCover(arena, seg) \ + BEGIN UNUSED(arena); UNUSED(seg); END +#define ShieldHold(arena) BEGIN UNUSED(arena); END +#define ShieldRelease(arena) BEGIN UNUSED(arena); END +#define ShieldFlush(arena) BEGIN UNUSED(arena); END +#else +#error "No shield configuration." +#endif /* SHIELD */ + + +/* Location Dependency -- see */ + +extern void HistoryInit(History history); +extern void HistoryFinish(History); +extern Res HistoryDescribe(History history, mps_lib_FILE *stream, Count depth); +extern Bool HistoryCheck(History history); +extern void LDReset(mps_ld_t ld, Arena arena); +extern void LDAdd(mps_ld_t ld, Arena arena, Addr addr); +extern Bool LDIsStaleAny(mps_ld_t ld, Arena arena); +extern Bool LDIsStale(mps_ld_t ld, Arena arena, Addr addr); +extern void LDAge(Arena arena, RefSet moved); +extern void LDMerge(mps_ld_t ld, Arena arena, mps_ld_t from); + + +/* Root Interface -- see */ + +extern Res RootCreateArea(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, + Word *base, Word *limit, + mps_area_scan_t scan_area, + void *closure); +extern Res RootCreateAreaTagged(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, + Word *base, Word *limit, + mps_area_scan_t scan_area, + Word mask, Word pattern); +extern Res RootCreateThread(Root *rootReturn, Arena arena, + Rank rank, Thread thread, + mps_area_scan_t scan_area, + void *closure, + Word *stackCold); +extern Res RootCreateThreadTagged(Root *rootReturn, Arena arena, + Rank rank, Thread thread, + mps_area_scan_t scan_area, + Word mask, Word pattern, + Word 
*stackCold); +extern Res RootCreateFmt(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, + mps_fmt_scan_t scan, + Addr base, Addr limit); +extern Res RootCreateFun(Root *rootReturn, Arena arena, + Rank rank, mps_root_scan_t scan, + void *p, size_t s); +extern void RootDestroy(Root root); +extern Bool RootModeCheck(RootMode mode); +extern Bool RootCheck(Root root); +extern Res RootDescribe(Root root, mps_lib_FILE *stream, Count depth); +extern Res RootsDescribe(Globals arenaGlobals, mps_lib_FILE *stream, Count depth); +extern Rank RootRank(Root root); +extern AccessSet RootPM(Root root); +extern RefSet RootSummary(Root root); +extern void RootGrey(Root root, Trace trace); +extern Res RootScan(ScanState ss, Root root); +extern Arena RootArena(Root root); +extern Bool RootOfAddr(Root *root, Arena arena, Addr addr); +extern void RootAccess(Root root, AccessSet mode); +typedef Res (*RootIterateFn)(Root root, void *p); +extern Res RootsIterate(Globals arena, RootIterateFn f, void *p); + + +/* Land Interface -- see */ + +extern Bool LandCheck(Land land); +#define LandArena(land) ((land)->arena) +#define LandAlignment(land) ((land)->alignment) +extern Size (LandSize)(Land land); +extern Res LandInit(Land land, LandClass klass, Arena arena, Align alignment, void *owner, ArgList args); +extern void LandFinish(Land land); +extern Res (LandInsert)(Range rangeReturn, Land land, Range range); +extern Res LandInsertSteal(Range rangeReturn, Land land, Range rangeIO); +extern Res (LandDelete)(Range rangeReturn, Land land, Range range); +extern Res LandDeleteSteal(Range rangeReturn, Land land, Range range); +extern Bool (LandIterate)(Land land, LandVisitor visitor, void *closure); +extern Bool (LandIterateAndDelete)(Land land, LandDeleteVisitor visitor, void *closure); +extern Bool (LandFindFirst)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete); +extern Bool (LandFindLast)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, 
FindDelete findDelete); +extern Bool (LandFindLargest)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete); +extern Res (LandFindInZones)(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high); +extern Res LandDescribe(Land land, mps_lib_FILE *stream, Count depth); +extern Bool LandFlushVisitor(Bool *deleteReturn, Land land, Range range, void *closure); +extern Bool (LandFlush)(Land dest, Land src); +extern Size LandSlowSize(Land land); +extern Bool LandClassCheck(LandClass klass); + +/* See .critical.macros. */ +#define LandSizeMacro(land) Method(Land, land, sizeMethod)(land) +#define LandInsertMacro(rangeReturn, land, range) Method(Land, land, insert)(rangeReturn, land, range) +#define LandDeleteMacro(rangeReturn, land, range) Method(Land, land, delete)(rangeReturn, land, range) +#define LandIterateMacro(land, visitor, closure) Method(Land, land, iterate)(land, visitor, closure) +#define LandIterateAndDeleteMacro(land, visitor, closure) Method(Land, land, iterateAndDelete)(land, visitor, closure) +#define LandFindFirstMacro(rangeReturn, oldRangeReturn, land, size, findDelete) Method(Land, land, findFirst)(rangeReturn, oldRangeReturn, land, size, findDelete) +#define LandFindLastMacro(rangeReturn, oldRangeReturn, land, size, findDelete) Method(Land, land, findLast)(rangeReturn, oldRangeReturn, land, size, findDelete) +#define LandFindLargestMacro(rangeReturn, oldRangeReturn, land, size, findDelete) Method(Land, land, findLargest)(rangeReturn, oldRangeReturn, land, size, findDelete) +#define LandFindInZonesMacro(foundReturn, rangeReturn, oldRangeReturn, land, size, zoneSet, high) Method(Land, land, findInZones)(foundReturn, rangeReturn, oldRangeReturn, land, size, zoneSet, high) +#define LandFlushMacro(dest, src) LandIterateAndDelete(src, LandFlushVisitor, dest) +#if !defined(AVER_AND_CHECK_ALL) +#define LandSize(land) LandSizeMacro(land) +#define LandInsert(rangeReturn, land, 
range) LandInsertMacro(rangeReturn, land, range) +#define LandDelete(rangeReturn, land, range) LandDeleteMacro(rangeReturn, land, range) +#define LandIterate(land, visitor, closure) LandIterateMacro(land, visitor, closure) +#define LandIterateAndDelete(land, visitor, closure) LandIterateAndDeleteMacro(land, visitor, closure) +#define LandFindFirst(rangeReturn, oldRangeReturn, land, size, findDelete) LandFindFirstMacro(rangeReturn, oldRangeReturn, land, size, findDelete) +#define LandFindLast(rangeReturn, oldRangeReturn, land, size, findDelete) LandFindLastMacro(rangeReturn, oldRangeReturn, land, size, findDelete) +#define LandFindLargest(rangeReturn, oldRangeReturn, land, size, findDelete) LandFindLargestMacro(rangeReturn, oldRangeReturn, land, size, findDelete) +#define LandFindInZones(foundReturn, rangeReturn, oldRangeReturn, land, size, zoneSet, high) LandFindInZonesMacro(foundReturn, rangeReturn, oldRangeReturn, land, size, zoneSet, high) +#define LandFlush(dest, src) LandFlushMacro(dest, src) +#endif /* !defined(AVER_AND_CHECK_ALL) */ + +DECLARE_CLASS(Inst, LandClass, InstClass); +DECLARE_CLASS(Land, Land, Inst); + + +/* STATISTIC -- gather statistics (in some varieties) + * + * . + */ + +#if defined(STATISTICS) + +#define STATISTIC(gather) BEGIN gather; END +#define STATISTIC_WRITE(format, arg) (format), (arg), + +#elif defined(STATISTICS_NONE) + +#define STATISTIC(gather) NOOP +#define STATISTIC_WRITE(format, arg) + +#else /* !defined(STATISTICS) && !defined(STATISTICS_NONE) */ + +#error "No statistics configured." + +#endif + +#endif /* mpm_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpmss.c b/mps/code/mpmss.c new file mode 100644 index 00000000000..4a1267ff1e4 --- /dev/null +++ b/mps/code/mpmss.c @@ -0,0 +1,249 @@ +/* mpmss.c: MPM STRESS TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. 
+ */ + +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpscmfs.h" +#include "mpscmvff.h" +#include "mpslib.h" +#include "mpslib.h" +#include "testlib.h" + +#include /* printf */ + + +#define testArenaSIZE ((((size_t)64)<<20) - 4) +#define smallArenaSIZE ((((size_t)1)<<20) - 4) +#define testSetSIZE 200 +#define testLOOPS 10 + + +/* check_allocated_size -- check the allocated size of the pool */ + +static void check_allocated_size(mps_pool_t pool, size_t allocated) +{ + size_t total_size = mps_pool_total_size(pool); + size_t free_size = mps_pool_free_size(pool); + Insist(total_size - free_size == allocated); +} + + +/* stress -- create a pool of the requested type and allocate in it */ + +static mps_res_t stress(mps_arena_t arena, mps_pool_debug_option_s *options, + size_t (*size)(size_t i, mps_align_t align), + mps_align_t align, const char *name, + mps_pool_class_t pool_class, mps_arg_s *args) +{ + mps_res_t res; + mps_pool_t pool; + size_t i, k; + int *ps[testSetSIZE]; + size_t ss[testSetSIZE]; + size_t allocated = 0; /* Total allocated memory */ + size_t debugOverhead = options ? 2 * alignUp(options->fence_size, align) : 0; + + printf("Pool class %s, alignment %u\n", name, (unsigned)align); + + res = mps_pool_create_k(&pool, arena, pool_class, args); + if (res != MPS_RES_OK) + return res; + + /* allocate a load of objects */ + for (i=0; i= sizeof(ps[i])) + *ps[i] = 1; /* Write something, so it gets swap. 
*/ + check_allocated_size(pool, allocated); + } + + mps_pool_check_fenceposts(pool); + + for (k=0; k> (i / 10)), 2) + 1, align); +} + + +/* fixedSize -- produce always the same size */ + +static size_t fixedSizeSize = 0; + +static size_t fixedSize(size_t i, mps_align_t align) +{ + testlib_unused(i); + testlib_unused(align); + return fixedSizeSize; +} + + +static mps_pool_debug_option_s bothOptions = { + /* .fence_template = */ "post", + /* .fence_size = */ 4, + /* .free_template = */ "DEAD", + /* .free_size = */ 4 +}; + +static mps_pool_debug_option_s fenceOptions = { + /* .fence_template = */ "123456789abcdef", + /* .fence_size = */ 15, + /* .free_template = */ NULL, + /* .free_size = */ 0 +}; + +/* testInArena -- test all the pool classes in the given arena */ + +static void testInArena(mps_arena_class_t arena_class, size_t arena_grain_size, + mps_arg_s *arena_args, mps_pool_debug_option_s *options) +{ + mps_arena_t arena; + + die(mps_arena_create_k(&arena, arena_class, arena_args), + "mps_arena_create"); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = rnd_align(sizeof(void *), arena_grain_size); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_SPARE, rnd_double()); + die(stress(arena, NULL, randomSizeAligned, align, "MVFF", + mps_class_mvff(), args), "stress MVFF"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = rnd_align(sizeof(void *), arena_grain_size); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_SPARE, rnd_double()); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options); + die(stress(arena, options, randomSizeAligned, align, "MVFF debug", + mps_class_mvff_debug(), 
args), "stress MVFF debug"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + fixedSizeSize = 1 + rnd() % 64; + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, fixedSizeSize); + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, 100000); + die(stress(arena, NULL, fixedSize, MPS_PF_ALIGN, "MFS", + mps_class_mfs(), args), "stress MFS"); + } MPS_ARGS_END(args); + + /* Manual allocation should not cause any garbage collections. */ + Insist(mps_collections(arena) == 0); + mps_arena_destroy(arena); +} + + +int main(int argc, char *argv[]) +{ + size_t arena_grain_size; + + testlib_init(argc, argv); + + arena_grain_size = rnd_grain(testArenaSIZE); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size); + testInArena(mps_arena_class_vm(), arena_grain_size, args, &bothOptions); + } MPS_ARGS_END(args); + + arena_grain_size = rnd_grain(smallArenaSIZE); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, smallArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size); + testInArena(mps_arena_class_vm(), arena_grain_size, args, &fenceOptions); + } MPS_ARGS_END(args); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h new file mode 100644 index 00000000000..f5ba00b8d64 --- /dev/null +++ b/mps/code/mpmst.h @@ -0,0 +1,859 @@ +/* mpmst.h: MEMORY POOL MANAGER DATA STRUCTURES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2001 Global Graphics Software. + * + * .design: This header file crosses module boundaries. The relevant + * design a module's structures should be found in that module's design + * document. + * + * .structure: Most structures have already been declared as incomplete + * types in . Most of the structures are the underlying + * aggregate types for an abstract data type. + * + * .rationale.sig: Object signatures (PoolSig, etc.) are defined here, + * along with the structures, so that any code which can see a structure + * can also check its signature before using any of its fields. See + * to check that signatures are unique. 
*/ + +#ifndef mpmst_h +#define mpmst_h + +#include "config.h" +#include "mpmtypes.h" + +#include "protocol.h" +#include "ring.h" +#include "locus.h" +#include "splay.h" +#include "meter.h" + + +/* PoolClassStruct -- pool class structure + * + * . + * + * .class: The pool class structure is defined by each pool class + * implementation in order to provide an interface between the MPM and + * the class via generic functions . Pool + * classes use the class protocol and so + * CLASS(ABCPool) returns a PoolClass pointing to a PoolClassStruct of + * methods which implement the memory management policy for pool class + * ABC. + */ + +#define PoolClassSig ((Sig)0x519C7A55) /* SIGnature pool CLASS */ + +typedef struct mps_pool_class_s { + InstClassStruct instClassStruct; + size_t size; /* size of outer structure */ + Attr attr; /* attributes */ + PoolVarargsMethod varargs; /* convert deprecated varargs into keywords */ + PoolInitMethod init; /* initialize the pool descriptor */ + PoolAllocMethod alloc; /* allocate memory from pool */ + PoolFreeMethod free; /* free memory to pool */ + PoolSegPoolGenMethod segPoolGen; /* get pool generation of segment */ + PoolBufferFillMethod bufferFill; /* out-of-line reserve */ + PoolBufferEmptyMethod bufferEmpty; /* out-of-line commit */ + PoolRampBeginMethod rampBegin;/* begin a ramp pattern */ + PoolRampEndMethod rampEnd; /* end a ramp pattern */ + PoolFramePushMethod framePush; /* push an allocation frame */ + PoolFramePopMethod framePop; /* pop an allocation frame */ + PoolAddrObjectMethod addrObject; /* return object's base pointer */ + PoolFreeWalkMethod freewalk; /* walk over free blocks */ + PoolBufferClassMethod bufferClass; /* default BufferClass of pool */ + PoolDebugMixinMethod debugMixin; /* find the debug mixin, if any */ + PoolSizeMethod totalSize; /* total memory allocated from arena */ + PoolSizeMethod freeSize; /* free memory (unused by client program) */ + Sig sig; /* design.mps.sig.field.end.outer */ +} 
PoolClassStruct; + + +/* PoolStruct -- generic structure + * + * .pool: A generic structure is created when a pool is created and + * holds the generic part of the pool's state. Each pool class defines + * a "subclass" of the pool structure (the "outer structure") which + * contains PoolStruct as a field. The outer structure holds the + * class-specific part of the pool's state. See , + * . + */ + +#define PoolSig ((Sig)0x519B0019) /* SIGnature POOL */ + +typedef struct mps_pool_s { /* generic structure */ + InstStruct instStruct; + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from arena->poolSerial */ + Arena arena; /* owning arena */ + RingStruct arenaRing; /* link in list of pools in arena */ + RingStruct bufferRing; /* allocation buffers are attached to pool */ + Serial bufferSerial; /* serial of next buffer */ + RingStruct segRing; /* segs are attached to pool */ + Align alignment; /* alignment for grains */ + Shift alignShift; /* log2(alignment) */ + Format format; /* format or NULL */ +} PoolStruct; + + +/* MFSStruct -- MFS (Manual Fixed Small) pool outer structure + * + * .mfs: See , . + * + * The MFS outer structure is declared here because it is inlined + * in the control pool structure which is inlined in the arena. Normally, + * pool outer structures are declared with the pools. + * + * The signature is placed at the end, see + * . 
*/ + +#define MFSSig ((Sig)0x5193F599) /* SIGnature MFS */ + +typedef struct MFSStruct { /* MFS outer structure */ + PoolStruct poolStruct; /* generic structure */ + Size unroundedUnitSize; /* the unit size requested */ + Size extendBy; /* arena alloc size rounded using unitSize */ + Bool extendSelf; /* whether to allocate tracts */ + Size unitSize; /* rounded for management purposes */ + struct MFSHeaderStruct *freeList; /* head of the free list */ + Size total; /* total size allocated from arena */ + Size free; /* free space in pool */ + RingStruct extentRing; /* ring of extents in pool */ + Sig sig; /* design.mps.sig.field.end.outer */ +} MFSStruct; + + +/* MessageClassStruct -- Message Class structure + * + * , , + * and . + */ + +#define MessageClassSig ((Sig)0x519359c1) /* SIGnature MeSsaGe CLass */ + +typedef struct MessageClassStruct { + Sig sig; /* design.mps.sig.field */ + const char *name; /* Human readable Class name */ + + MessageType type; /* Message Type */ + + /* generic methods */ + MessageDeleteMethod delete; /* terminates a message */ + + /* methods specific to MessageTypeFINALIZATION */ + MessageFinalizationRefMethod finalizationRef; + + /* methods specific to MessageTypeGC */ + MessageGCLiveSizeMethod gcLiveSize; + MessageGCCondemnedSizeMethod gcCondemnedSize; + MessageGCNotCondemnedSizeMethod gcNotCondemnedSize; + + /* methods specific to MessageTypeGCSTART */ + MessageGCStartWhyMethod gcStartWhy; + + Sig endSig; /* */ +} MessageClassStruct; + +#define MessageSig ((Sig)0x5193e559) /* SIG MESSaGe */ + +/* MessageStruct -- Message structure + * + * . */ + +typedef struct mps_message_s { + Sig sig; /* design.mps.sig.field */ + Arena arena; /* owning arena */ + MessageClass klass; /* Message Class Structure */ + Clock postedClock; /* mps_clock() at post time, or 0 */ + RingStruct queueRing; /* Message queue ring */ +} MessageStruct; + + +/* SegClassStruct -- segment class structure + * + * & . 
+ * + * .seg.class: The segment class structure is defined by each segment + * class implementation in order to provide a generic interface to + * segments. */ + +#define SegClassSig ((Sig)0x5195E9C7) /* SIGnature SEG CLass */ + +typedef struct SegClassStruct { + InstClassStruct instClassStruct; + size_t size; /* size of outer structure */ + SegInitMethod init; /* initialize the segment */ + SegSetSummaryMethod setSummary; /* set the segment summary */ + SegBufferMethod buffer; /* get the segment buffer */ + SegSetBufferMethod setBuffer; /* set the segment buffer */ + SegUnsetBufferMethod unsetBuffer; /* unset the segment buffer */ + SegBufferFillMethod bufferFill; /* try filling buffer from segment */ + SegBufferEmptyMethod bufferEmpty; /* empty buffer to segment */ + SegSetGreyMethod setGrey; /* change greyness of segment */ + SegFlipMethod flip; /* raise barrier for a flipped trace */ + SegSetWhiteMethod setWhite; /* change whiteness of segment */ + SegSetRankSetMethod setRankSet; /* change rank set of segment */ + SegSetRankSummaryMethod setRankSummary; /* change rank set & summary */ + SegMergeMethod merge; /* merge two adjacent segments */ + SegSplitMethod split; /* split a segment into two */ + SegAccessMethod access; /* handles read/write accesses */ + SegWhitenMethod whiten; /* whiten objects */ + SegGreyenMethod greyen; /* greyen non-white objects */ + SegBlackenMethod blacken; /* blacken grey objects without scanning */ + SegScanMethod scan; /* find references during tracing */ + SegFixMethod fix; /* referent reachable during tracing */ + SegFixMethod fixEmergency; /* as fix, no failure allowed */ + SegReclaimMethod reclaim; /* reclaim dead objects after tracing */ + SegWalkMethod walk; /* walk over a segment */ + Sig sig; /* design.mps.sig.field.end.outer */ +} SegClassStruct; + + +/* SegStruct -- segment structure + * + * .seg: Segments are the basic units of protection and tracer activity + * for allocated memory. . 
*/ + +#define SegSig ((Sig)0x5195E999) /* SIGnature SEG */ + +typedef struct SegStruct { /* segment structure */ + InstStruct instStruct; + Sig sig; /* design.mps.sig.field */ + Tract firstTract; /* first tract of segment */ + RingStruct poolRing; /* link in list of segs in pool */ + Addr limit; /* limit of segment */ + unsigned depth : ShieldDepthWIDTH; /* see */ + BOOLFIELD(queued); /* in shield queue? */ + AccessSet pm : AccessLIMIT; /* protection mode, */ + AccessSet sm : AccessLIMIT; /* shield mode, */ + TraceSet grey : TraceLIMIT; /* traces for which seg is grey */ + TraceSet white : TraceLIMIT; /* traces for which seg is white */ + TraceSet nailed : TraceLIMIT; /* traces for which seg has nailed objects */ + RankSet rankSet : RankLIMIT; /* ranks of references in this seg */ + unsigned defer : WB_DEFER_BITS; /* defer write barrier for this many scans */ +} SegStruct; + + +/* GCSegStruct -- GCable segment structure + * + * .seggc: GCSeg is a subclass of Seg with support for buffered + * allocation and GC. . */ + +#define GCSegSig ((Sig)0x5199C5E9) /* SIGnature GC SEG */ + +typedef struct GCSegStruct { /* GC segment structure */ + SegStruct segStruct; /* superclass fields must come first */ + RingStruct greyRing; /* link in list of grey segs */ + RefSet summary; /* summary of references out of seg */ + Buffer buffer; /* non-NULL if seg is buffered */ + RingStruct genRing; /* link in list of segs in gen */ + Sig sig; /* design.mps.sig.field.end.outer */ +} GCSegStruct; + + +/* LocusPrefStruct -- locus preference structure + * + * .locus-pref: arena memory users (pool class code) need a way of + * expressing preferences about the locus of the segments they + * allocate. . 
+ */ + +#define LocusPrefSig ((Sig)0x51970CB6) /* SIGnature LOCus PRef */ + +typedef struct LocusPrefStruct { /* locus placement preferences */ + Sig sig; /* design.mps.sig.field */ + Bool high; /* high or low */ + ZoneSet zones; /* preferred zones */ + ZoneSet avoid; /* zones to avoid */ +} LocusPrefStruct; + + +/* BufferClassStruct -- buffer class structure + * + * & . + * + * .buffer.class: The buffer class structure is defined by each buffer + * class implementation in order to provide a generic interface to + * buffers. */ + +#define BufferClassSig ((Sig)0x519B0FC7) /* SIGnature BUFfer CLass */ + +typedef struct BufferClassStruct { + InstClassStruct instClassStruct; + size_t size; /* size of outer structure */ + BufferVarargsMethod varargs; /* parse obsolete varargs */ + BufferInitMethod init; /* initialize the buffer */ + BufferAttachMethod attach; /* attach the buffer */ + BufferDetachMethod detach; /* detach the buffer */ + BufferSegMethod seg; /* seg of buffer */ + BufferRankSetMethod rankSet; /* rank set of buffer */ + BufferSetRankSetMethod setRankSet; /* change rank set of buffer */ + BufferReassignSegMethod reassignSeg; /* change seg of attached buffer */ + Sig sig; /* design.mps.sig.field.end.outer */ +} BufferClassStruct; + + +/* BufferStruct -- allocation buffer structure + * + * See , . + * + * The buffer contains an AP which may be exported to the client. + * AP are part of the design of buffers see . + * The allocation point is exported to the client code so that it can + * do in-line buffered allocation. 
+ */ + +#define BufferSig ((Sig)0x519B0FFE) /* SIGnature BUFFEr */ + +typedef struct BufferStruct { + InstStruct instStruct; + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from pool->bufferSerial */ + Arena arena; /* owning arena */ + Pool pool; /* owning pool */ + RingStruct poolRing; /* buffers are attached to pools */ + Bool isMutator; /* TRUE iff buffer used by mutator */ + BufferMode mode; /* Attached/Logged/Flipped/etc */ + double fillSize; /* bytes filled in this buffer */ + double emptySize; /* bytes emptied from this buffer */ + Addr base; /* base address of allocation buffer */ + Addr initAtFlip; /* limit of initialized data at flip */ + mps_ap_s ap_s; /* the allocation point */ + Addr poolLimit; /* the pool's idea of the limit */ + Align alignment; /* allocation alignment */ + unsigned rampCount; /* see */ +} BufferStruct; + + +/* SegBufStruct -- Buffer structure associated with segments + * + * .segbuf: SegBuf is a subclass of Buffer with support for attachment + * to segments. */ + +#define SegBufSig ((Sig)0x51959B0F) /* SIGnature SeG BUFfer */ + +typedef struct SegBufStruct { + BufferStruct bufferStruct; /* superclass fields must come first */ + RankSet rankSet; /* ranks of references being created */ + Seg seg; /* segment being buffered */ + Sig sig; /* design.mps.sig.field.end.outer */ +} SegBufStruct; + + +/* FormatStruct -- object format structure + * + * , . + * + * .single: In future, when more variants are added, FormatStruct should + * really be replaced by a collection of format classes. 
*/ + +#define FormatSig ((Sig)0x519F63A2) /* Signature FoRMAT */ + +typedef struct mps_fmt_s { + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from arena->formatSerial */ + Arena arena; /* owning arena */ + RingStruct arenaRing; /* formats are attached to the arena */ + Count poolCount; /* number of pools using the format */ + Align alignment; /* alignment of formatted objects */ + mps_fmt_scan_t scan; + mps_fmt_skip_t skip; + mps_fmt_fwd_t move; + mps_fmt_isfwd_t isMoved; + mps_fmt_pad_t pad; + mps_fmt_class_t klass; /* pointer indicating class */ + Size headerSize; /* size of header */ +} FormatStruct; + + +/* ScanState + * + * .ss: See . + * + * .ss: The mps_ss field of the scan state structure is exported + * through the MPS interface to optimise the critical path scan loop. + * See ["The critical path through the MPS"](../design/critical-path.txt). + * + * .ss.fix-closure: The fixClosure member allows the caller of the + * scanning protocol to pass data through to this fix function. This + * is not used in the public MPS, but is needed by the transforms + * extension. + * + * .ss.zone: For binary compatibility, the zone shift is exported as + * a word rather than a shift, so that the external mps_ss_s is a uniform + * three-word structure. See and . + * + * zs Shift zoneShift copy of arena->zoneShift. See .ss.zone + * w ZoneSet white white set, for inline fix test + * ufs RefSet unfixedSummary accumulated summary of scanned references + * + * NOTE: The mps_ss structure used to be obfuscated to preserve Harlequin's + * trade secrets in the MPS technology. These days they just seek to + * emphasize the abstraction, and could maybe be given better names and + * types. 
RB 2012-09-07 + */ + +#define ScanStateSig ((Sig)0x5195CA45) /* SIGnature SCAN State */ + +typedef struct ScanStateStruct { + Sig sig; /* design.mps.sig.field */ + struct mps_ss_s ss_s; /* .ss */ + Arena arena; /* owning arena */ + mps_fmt_scan_t formatScan; /* callback for scanning formatted objects */ + mps_area_scan_t areaScan; /* ditto via the area scanning interface */ + void *areaScanClosure; /* closure argument for areaScan */ + SegFixMethod fix; /* third stage fix function */ + void *fixClosure; /* see .ss.fix-closure */ + TraceSet traces; /* traces to scan for */ + Rank rank; /* reference rank of scanning */ + Bool wasMarked; /* */ + RefSet fixedSummary; /* accumulated summary of fixed references */ + STATISTIC_DECL(Count fixRefCount) /* refs which pass zone check */ + STATISTIC_DECL(Count segRefCount) /* refs which refer to segs */ + STATISTIC_DECL(Count whiteSegRefCount) /* refs which refer to white segs */ + STATISTIC_DECL(Count nailCount) /* segments nailed by ambig refs */ + STATISTIC_DECL(Count snapCount) /* refs snapped to forwarded objs */ + STATISTIC_DECL(Count forwardedCount) /* objects preserved by moving */ + STATISTIC_DECL(Count preservedInPlaceCount) /* objects preserved in place */ + STATISTIC_DECL(Size copiedSize) /* bytes copied */ + Size scannedSize; /* bytes scanned */ +} ScanStateStruct; + + +/* TraceStruct -- tracer state structure */ + +#define TraceSig ((Sig)0x51924ACE) /* SIGnature TRACE */ + +typedef struct TraceStruct { + Sig sig; /* design.mps.sig.field */ + TraceId ti; /* index into TraceSets */ + Arena arena; /* owning arena */ + TraceStartWhy why; /* why the trace began */ + ZoneSet white; /* zones in the white set */ + ZoneSet mayMove; /* zones containing possibly moving objs */ + TraceState state; /* current state of trace */ + Rank band; /* current band */ + Bool firstStretch; /* in first stretch of band (see accessor) */ + SegFixMethod fix; /* fix method to apply to references */ + void *fixClosure; /* see .ss.fix-closure 
*/ + RingStruct genRing; /* ring of generations condemned for trace */ + STATISTIC_DECL(Size preTraceArenaReserved) /* ArenaReserved before this trace */ + Size condemned; /* condemned bytes */ + Size notCondemned; /* collectable but not condemned */ + Size foundation; /* initial grey set size */ + Work quantumWork; /* tracing work to be done in each poll */ + STATISTIC_DECL(Count greySegCount) /* number of grey segments */ + STATISTIC_DECL(Count greySegMax) /* maximum number of grey segments */ + STATISTIC_DECL(Count rootScanCount) /* number of roots scanned */ + Count rootScanSize; /* total size of scanned roots */ + STATISTIC_DECL(Size rootCopiedSize) /* bytes copied by scanning roots */ + STATISTIC_DECL(Count segScanCount) /* number of segments scanned */ + Count segScanSize; /* total size of scanned segments */ + STATISTIC_DECL(Size segCopiedSize) /* bytes copied by scanning segments */ + STATISTIC_DECL(Count singleScanCount) /* number of single refs scanned */ + STATISTIC_DECL(Count singleScanSize) /* total size of single refs scanned */ + STATISTIC_DECL(Size singleCopiedSize) /* bytes copied by scanning single refs */ + STATISTIC_DECL(Count fixRefCount) /* refs which pass zone check */ + STATISTIC_DECL(Count segRefCount) /* refs which refer to segments */ + STATISTIC_DECL(Count whiteSegRefCount) /* refs which refer to white segs */ + STATISTIC_DECL(Count nailCount) /* segments nailed by ambiguous refs */ + STATISTIC_DECL(Count snapCount) /* refs snapped to forwarded objects */ + STATISTIC_DECL(Count readBarrierHitCount) /* read barrier faults */ + STATISTIC_DECL(Count pointlessScanCount) /* pointless segment scans */ + STATISTIC_DECL(Count forwardedCount) /* objects preserved by moving */ + Size forwardedSize; /* bytes preserved by moving */ + STATISTIC_DECL(Count preservedInPlaceCount) /* objects preserved in place */ + Size preservedInPlaceSize; /* bytes preserved in place */ + STATISTIC_DECL(Count reclaimCount) /* segments reclaimed */ + 
STATISTIC_DECL(Count reclaimSize) /* bytes reclaimed */ +} TraceStruct; + + +/* ArenaClassStruct -- generic arena class interface */ + +#define ArenaClassSig ((Sig)0x519A6C1A) /* SIGnature ARena CLAss */ + +typedef struct mps_arena_class_s { + InstClassStruct instClassStruct; + size_t size; /* size of outer structure */ + ArenaVarargsMethod varargs; + ArenaInitMethod init; + ArenaCreateMethod create; + ArenaDestroyMethod destroy; + ArenaPurgeSpareMethod purgeSpare; + ArenaExtendMethod extend; + ArenaGrowMethod grow; + ArenaFreeMethod free; + ArenaChunkInitMethod chunkInit; + ArenaChunkFinishMethod chunkFinish; + ArenaCompactMethod compact; + ArenaPagesMarkAllocatedMethod pagesMarkAllocated; + ArenaChunkPageMappedMethod chunkPageMapped; + Sig sig; /* design.mps.sig.field.end.outer */ +} ArenaClassStruct; + + +/* GlobalsStruct -- the global state associated with an arena + * + * .space: The arena structure holds the entire state of the MPS, and as + * such contains a lot of fields which are considered "global". These + * fields belong to different modules. The module which owns each group + * of fields is commented. 
*/ + +#define GlobalsSig ((Sig)0x519970BA) /* SIGnature GLOBAls */ + +typedef struct GlobalsStruct { + Sig sig; /* design.mps.sig.field */ + + /* general fields */ + RingStruct globalRing; /* node in global ring of arenas */ + Lock lock; /* arena's lock */ + + /* polling fields */ + double pollThreshold; /* */ + Bool insidePoll; + Bool clamped; /* prevent background activity */ + double fillMutatorSize; /* total bytes filled, mutator buffers */ + double emptyMutatorSize; /* total bytes emptied, mutator buffers */ + double allocMutatorSize; /* fill-empty, only asymptotically accurate */ + double fillInternalSize; /* total bytes filled, internal buffers */ + double emptyInternalSize; /* total bytes emptied, internal buffers */ + + /* version field */ + const char *mpsVersionString; /* MPSVersion() */ + + /* buffer fields */ + Bool bufferLogging; /* */ + + /* pool fields */ + RingStruct poolRing; /* ring of pools in arena */ + Serial poolSerial; /* serial of next created pool */ + Count systemPools; /* count of pools remaining at ArenaDestroy */ + + /* root fields */ + RingStruct rootRing; /* ring of roots attached to arena */ + Serial rootSerial; /* serial of next root */ + + /* locus */ + Chain defaultChain; /* default chain for GC pool */ +} GlobalsStruct; + + +/* LandClassStruct -- land class structure + * + * . 
+ */ + +#define LandClassSig ((Sig)0x5197A4DC) /* SIGnature LAND Class */ + +typedef struct LandClassStruct { + InstClassStruct instClassStruct; + size_t size; /* size of outer structure */ + LandSizeMethod sizeMethod; /* total size of ranges in land */ + LandInitMethod init; /* initialize the land */ + LandInsertMethod insert; /* insert a range into the land */ + LandInsertMethod insertSteal; /* insert a range, possibly stealing memory */ + LandDeleteMethod delete; /* delete a range from the land */ + LandDeleteMethod deleteSteal; /* delete a range, possibly stealing memory */ + LandIterateMethod iterate; /* iterate over ranges in the land */ + LandIterateAndDeleteMethod iterateAndDelete; /* iterate and maybe delete */ + LandFindMethod findFirst; /* find first range of given size */ + LandFindMethod findLast; /* find last range of given size */ + LandFindMethod findLargest; /* find largest range */ + LandFindInZonesMethod findInZones; /* find first range of given size in zone set */ + Sig sig; /* design.mps.sig.field.end.outer */ +} LandClassStruct; + + +/* LandStruct -- generic land structure + * + * , + */ + +#define LandSig ((Sig)0x5197A4D9) /* SIGnature LAND */ + +typedef struct LandStruct { + InstStruct instStruct; + Sig sig; /* design.mps.sig.field */ + Arena arena; /* owning arena */ + Align alignment; /* alignment of addresses */ + Bool inLand; /* prevent reentrance */ +} LandStruct; + + +/* CBSStruct -- coalescing block structure + * + * CBS is a Land implementation that maintains a collection of + * disjoint ranges in a splay tree. + * + * See . + */ + +#define CBSSig ((Sig)0x519CB599) /* SIGnature CBS */ + +typedef struct CBSStruct { + LandStruct landStruct; /* superclass fields come first */ + SplayTreeStruct splayTreeStruct; + STATISTIC_DECL(Count treeSize) + Pool blockPool; /* pool that manages blocks */ + Size blockStructSize; /* size of block structure */ + Bool ownPool; /* did we create blockPool? 
*/ + Size size; /* total size of ranges in CBS */ + /* meters for sizes of search structures at each op */ + METER_DECL(treeSearch) + Sig sig; /* design.mps.sig.field.end.outer */ +} CBSStruct; + + +/* FailoverStruct -- fail over from one land to another + * + * Failover is a Land implementation that combines two other Lands, + * using primary until it fails, and then using secondary. + * + * See . + */ + +#define FailoverSig ((Sig)0x519FA170) /* SIGnature FAILOver */ + +typedef struct FailoverStruct { + LandStruct landStruct; /* superclass fields come first */ + Land primary; /* use this land normally */ + Land secondary; /* but use this one if primary fails */ + Sig sig; /* design.mps.sig.field.end.outer */ +} FailoverStruct; + + +/* FreelistStruct -- address-ordered freelist + * + * Freelist is a subclass of Land that maintains a collection of + * disjoint ranges in an address-ordered freelist. + * + * See . + */ + +#define FreelistSig ((Sig)0x519F6331) /* SIGnature FREEL */ + +typedef union FreelistBlockUnion *FreelistBlock; + +typedef struct FreelistStruct { + LandStruct landStruct; /* superclass fields come first */ + FreelistBlock list; /* first block in list or NULL if empty */ + Count listSize; /* number of blocks in list */ + Size size; /* total size of ranges in list */ + Sig sig; /* design.mps.sig.field.end.outer */ +} FreelistStruct; + + +/* SortStruct -- extra memory required by sorting + * + * See QuickSort in mpm.c. This exists so that the caller can make + * the choice about where to allocate the memory, since the MPS has to + * operate in tight stack constraints -- see . + */ + +typedef struct SortStruct { + struct { + Index left, right; + } stack[MPS_WORD_WIDTH]; +} SortStruct; + + +/* ShieldStruct -- per-arena part of the shield + * + * , . 
+ */ + +#define ShieldSig ((Sig)0x519581E1) /* SIGnature SHEILd */ + +typedef struct ShieldStruct { + Sig sig; /* design.mps.sig.field */ + BOOLFIELD(inside); /* */ + BOOLFIELD(suspended); /* mutator suspended? */ + BOOLFIELD(queuePending); /* queue insertion pending? */ + Seg *queue; /* queue of unsynced segs */ + Count length; /* number of elements in shield queue */ + Index next; /* next free element in shield queue */ + Index limit; /* high water mark for cache usage */ + Count depth; /* sum of depths of all segs */ + Count unsynced; /* number of unsynced segments */ + Count holds; /* number of holds */ + SortStruct sortStruct; /* workspace for queue sort */ +} ShieldStruct; + + +/* History -- location dependency history + * + * . + */ + +#define HistorySig ((Sig)0x51981520) /* SIGnature HISTOry */ + +typedef struct HistoryStruct { + Sig sig; /* design.mps.sig.field */ + Epoch epoch; /* */ + RefSet prehistory; /* */ + RefSet history[LDHistoryLENGTH]; /* */ +} HistoryStruct; + + +/* MVFFStruct -- MVFF (Manual Variable First Fit) pool outer structure + * + * The signature is placed at the end, see + * + * + * The MVFF pool outer structure is declared here because it is the + * control pool structure which is inlined in the arena. Normally, + * pool outer structures are declared with the pools. 
+ */ + +#define MVFFSig ((Sig)0x5193FFF9) /* SIGnature MVFF */ + +typedef struct MVFFStruct { /* MVFF pool outer structure */ + PoolStruct poolStruct; /* generic structure */ + LocusPrefStruct locusPrefStruct; /* the preferences for allocation */ + Size extendBy; /* size to extend pool by */ + Size avgSize; /* client estimate of allocation size */ + double spare; /* spare space fraction, see MVFFReduce */ + MFSStruct cbsBlockPoolStruct; /* stores blocks for CBSs */ + CBSStruct totalCBSStruct; /* all memory allocated from the arena */ + CBSStruct freeCBSStruct; /* free memory (primary) */ + FreelistStruct flStruct; /* free memory (secondary, for emergencies) */ + FailoverStruct foStruct; /* free memory (fail-over mechanism) */ + Bool firstFit; /* as opposed to last fit */ + Bool slotHigh; /* prefers high part of large block */ + Sig sig; /* design.mps.sig.field.end.outer */ +} MVFFStruct; + + +/* ArenaStruct -- generic arena + * + * See . + */ + +#define ArenaSig ((Sig)0x519A6E4A) /* SIGnature ARENA */ + +typedef struct mps_arena_s { + InstStruct instStruct; + + GlobalsStruct globals; /* must be first, see */ + Serial serial; + + Bool poolReady; /* */ + MVFFStruct controlPoolStruct; /* */ + + Size reserved; /* total reserved address space */ + Size committed; /* total committed memory */ + Size commitLimit; /* client-configurable commit limit */ + + Size spareCommitted; /* amount of memory in hysteresis fund */ + double spare; /* maximum spareCommitted/committed */ + double pauseTime; /* maximum pause time, in seconds */ + + Shift zoneShift; /* see also */ + Size grainSize; /* */ + + Tract lastTract; /* most recently allocated tract */ + Addr lastTractBase; /* base address of lastTract */ + + Chunk primary; /* the primary chunk */ + RingStruct chunkRing; /* all the chunks, in a ring for iteration */ + Tree chunkTree; /* all the chunks, in a tree for fast lookup */ + Serial chunkSerial; /* next chunk number */ + + Bool hasFreeLand; /* Is freeLand available? 
*/ + MFSStruct freeCBSBlockPoolStruct; + CBSStruct freeLandStruct; + ZoneSet freeZones; /* zones not yet allocated */ + Bool zoned; /* use zoned allocation? */ + + /* locus fields */ + GenDescStruct topGen; /* generation descriptor for dynamic gen */ + Serial genSerial; /* serial of next generation */ + + /* format fields */ + RingStruct formatRing; /* ring of formats attached to arena */ + Serial formatSerial; /* serial of next format */ + + /* message fields , */ + RingStruct messageRing; /* ring of pending messages */ + BT enabledMessageTypes; /* map of which types are enabled */ + Count droppedMessages; /* */ + + /* finalization fields , */ + Bool isFinalPool; /* indicator for finalPool */ + Pool finalPool; /* either NULL or an MRG pool */ + + /* thread fields */ + RingStruct threadRing; /* ring of attached threads */ + RingStruct deadRing; /* ring of dead threads */ + Serial threadSerial; /* serial of next thread */ + + ShieldStruct shieldStruct; + + /* trace fields */ + TraceSet busyTraces; /* set of running traces */ + TraceSet flippedTraces; /* set of running and flipped traces */ + TraceStruct trace[TraceLIMIT]; /* trace structures. See + */ + + /* trace ancillary fields */ + TraceStartMessage tsMessage[TraceLIMIT]; /* */ + TraceMessage tMessage[TraceLIMIT]; /* */ + + /* policy fields */ + double tracedWork; + double tracedTime; + Clock lastWorldCollect; + + RingStruct greyRing[RankLIMIT]; /* ring of grey segments at each rank */ + RingStruct chainRing; /* ring of chains */ + + struct HistoryStruct historyStruct; + + Bool emergency; /* garbage collect in emergency mode? */ + + /* Stack scanning -- see */ + void *stackWarm; /* NULL or stack pointer warmer than + mutator state. */ + + Sig sig; /* design.mps.sig.field.end.outer */ +} ArenaStruct; + + +typedef struct AllocPatternStruct { + char dummy; +} AllocPatternStruct; + + +#endif /* mpmst_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpmtypes.h b/mps/code/mpmtypes.h new file mode 100644 index 00000000000..c219e908e06 --- /dev/null +++ b/mps/code/mpmtypes.h @@ -0,0 +1,475 @@ +/* mpmtypes.h: MEMORY POOL MANAGER TYPES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2001 Global Graphics Software. + * + * .design: + * + * .rationale: Types and type constants are almost all defined + * in this header, in advance of any declarations of prototypes + * or structures. This avoids difficulties in defining recursive + * data structures. 
+ */ + +#ifndef mpmtypes_h +#define mpmtypes_h + +#include "config.h" /* this must come first: it defines target options */ +#include "misc.h" /* miscellaneous non-specific bits and bobs */ +#include "mpslib.h" +#include "mpstd.h" /* for MPS_T_ULONGEST */ + +#include +#include + + +/* TYPES */ + +typedef unsigned long Sig; /* design.mps.sig */ +typedef int Res; /* */ + +typedef void (*Fun)(void); /* */ +typedef MPS_T_WORD Word; /* */ +typedef unsigned char Byte; /* */ +typedef struct AddrStruct *Addr; /* */ +typedef const struct AddrStruct *ReadonlyAddr; /* */ +typedef Word Size; /* */ +typedef Word Count; /* */ +typedef Word Index; /* */ +typedef Word Align; /* */ +typedef Word Work; /* */ +typedef unsigned Shift; /* */ +typedef unsigned Serial; /* */ +typedef Addr Ref; /* */ +typedef void *Pointer; /* */ +typedef Word Clock; /* */ +typedef MPS_T_ULONGEST ULongest; /* */ + +typedef mps_arg_s ArgStruct; +typedef mps_arg_s *Arg; +typedef mps_arg_s *ArgList; +typedef mps_key_t Key; + +typedef Word RefSet; /* */ +typedef Word ZoneSet; /* */ +typedef unsigned Rank; /* */ +typedef unsigned RankSet; /* */ +typedef unsigned RootMode; /* */ +typedef Size Epoch; /* */ +typedef unsigned TraceId; /* */ +typedef unsigned TraceSet; /* */ +typedef unsigned TraceState; /* */ +typedef unsigned TraceStartWhy; /* */ +typedef unsigned AccessSet; /* */ +typedef unsigned Attr; /* */ +typedef unsigned RootVar; /* */ + +typedef Word *BT; /* */ +typedef struct BootBlockStruct *BootBlock; /* */ +typedef struct BufferStruct *Buffer; /* */ +typedef struct SegBufStruct *SegBuf; /* */ +typedef struct BufferClassStruct *BufferClass; /* */ +typedef unsigned BufferMode; /* */ +typedef struct mps_fmt_s *Format; /* */ +typedef struct LockStruct *Lock; /* * */ +typedef struct mps_pool_s *Pool; /* */ +typedef Pool AbstractPool; +typedef struct mps_pool_class_s *PoolClass; /* */ +typedef struct TraceStruct *Trace; /* */ +typedef struct ScanStateStruct *ScanState; /* */ +typedef struct mps_chain_s 
*Chain; /* */ +typedef struct TractStruct *Tract; /* */ +typedef struct ChunkStruct *Chunk; /* */ +typedef struct ChunkCacheEntryStruct *ChunkCacheEntry; /* */ +typedef union PageUnion *Page; /* */ +typedef struct SegStruct *Seg; /* */ +typedef struct GCSegStruct *GCSeg; /* */ +typedef struct SegClassStruct *SegClass; /* */ +typedef struct LocusPrefStruct *LocusPref; /* , */ +typedef unsigned LocusPrefKind; /* , */ +typedef struct mps_arena_class_s *ArenaClass; /* */ +typedef struct mps_arena_s *Arena; /* */ +typedef Arena AbstractArena; +typedef struct GlobalsStruct *Globals; /* */ +typedef struct VMStruct *VM; /* * */ +typedef struct RootStruct *Root; /* */ +typedef struct mps_thr_s *Thread; /* * */ +typedef struct MutatorContextStruct *MutatorContext; /* */ +typedef struct PoolDebugMixinStruct *PoolDebugMixin; +typedef struct AllocPatternStruct *AllocPattern; +typedef struct AllocFrameStruct *AllocFrame; /* */ +typedef struct StackContextStruct *StackContext; +typedef struct RangeStruct *Range; /* */ +typedef struct RangeTreeStruct *RangeTree; +typedef struct LandStruct *Land; /* */ +typedef struct LandClassStruct *LandClass; /* */ +typedef unsigned FindDelete; /* */ +typedef struct ShieldStruct *Shield; /* */ +typedef struct HistoryStruct *History; /* */ +typedef struct PoolGenStruct *PoolGen; /* */ + + +/* Arena*Method -- see */ + +typedef void (*ArenaVarargsMethod)(ArgStruct args[], va_list varargs); +typedef Res (*ArenaCreateMethod)(Arena *arenaReturn, ArgList args); +typedef void (*ArenaDestroyMethod)(Arena arena); +typedef Res (*ArenaInitMethod)(Arena arena, Size grainSize, ArgList args); +typedef Size (*ArenaPurgeSpareMethod)(Arena arena, Size size); +typedef Res (*ArenaExtendMethod)(Arena arena, Addr base, Size size); +typedef Res (*ArenaGrowMethod)(Arena arena, LocusPref pref, Size size); +typedef void (*ArenaFreeMethod)(Addr base, Size size, Pool pool); +typedef Res (*ArenaChunkInitMethod)(Chunk chunk, BootBlock boot); +typedef void 
(*ArenaChunkFinishMethod)(Chunk chunk); +typedef void (*ArenaCompactMethod)(Arena arena, Trace trace); +typedef Res (*ArenaPagesMarkAllocatedMethod)(Arena arena, Chunk chunk, + Index baseIndex, Count pages, + Pool pool); +typedef Bool (*ArenaChunkPageMappedMethod)(Chunk chunk, Index index); + + +/* These are not generally exposed and public, but are part of a commercial + extension to the MPS. */ +typedef void (*ArenaVMExtendedCallback)(Arena arena, Addr base, Size size); +typedef void (*ArenaVMContractedCallback)(Arena arena, Addr base, Size size); + + +/* TraceFixMethod */ + +typedef Res (*TraceFixMethod)(ScanState ss, Ref *refIO); + + +/* Heap Walker */ + +/* This type is used by the PoolClass method Walk */ +typedef void (*FormattedObjectsVisitor)(Addr obj, Format fmt, Pool pool, + void *v, size_t s); + +/* This type is used by the PoolClass method Walk */ +typedef void (*FreeBlockVisitor)(Addr base, Addr limit, Pool pool, void *p); + + +/* Seg*Method -- see */ + +typedef Res (*SegInitMethod)(Seg seg, Pool pool, Addr base, Size size, + ArgList args); +typedef void (*SegSetGreyMethod)(Seg seg, TraceSet grey); +typedef void (*SegFlipMethod)(Seg seg, Trace trace); +typedef void (*SegSetWhiteMethod)(Seg seg, TraceSet white); +typedef void (*SegSetRankSetMethod)(Seg seg, RankSet rankSet); +typedef void (*SegSetRankSummaryMethod)(Seg seg, RankSet rankSet, + RefSet summary); +typedef void (*SegSetSummaryMethod)(Seg seg, RefSet summary); +typedef Bool (*SegBufferMethod)(Buffer *bufferReturn, Seg seg); +typedef void (*SegSetBufferMethod)(Seg seg, Buffer buffer); +typedef void (*SegUnsetBufferMethod)(Seg seg); +typedef Bool (*SegBufferFillMethod)(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet); +typedef void (*SegBufferEmptyMethod)(Seg seg, Buffer buffer); +typedef Res (*SegMergeMethod)(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit); +typedef Res (*SegSplitMethod)(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit); +typedef Res 
(*SegAccessMethod)(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context); +typedef Res (*SegWhitenMethod)(Seg seg, Trace trace); +typedef void (*SegGreyenMethod)(Seg seg, Trace trace); +typedef void (*SegBlackenMethod)(Seg seg, TraceSet traceSet); +typedef Res (*SegScanMethod)(Bool *totalReturn, Seg seg, ScanState ss); +typedef Res (*SegFixMethod)(Seg seg, ScanState ss, Ref *refIO); +typedef void (*SegReclaimMethod)(Seg seg, Trace trace); +typedef void (*SegWalkMethod)(Seg seg, Format format, FormattedObjectsVisitor f, + void *v, size_t s); + + +/* Buffer*Method -- see */ + +typedef void (*BufferVarargsMethod)(ArgStruct args[], va_list varargs); +typedef Res (*BufferInitMethod)(Buffer buffer, Pool pool, Bool isMutator, ArgList args); +typedef void (*BufferAttachMethod)(Buffer buffer, Addr base, Addr limit, + Addr init, Size size); +typedef void (*BufferDetachMethod)(Buffer buffer); +typedef Seg (*BufferSegMethod)(Buffer buffer); +typedef RankSet (*BufferRankSetMethod)(Buffer buffer); +typedef void (*BufferSetRankSetMethod)(Buffer buffer, RankSet rankSet); +typedef void (*BufferReassignSegMethod)(Buffer buffer, Seg seg); + + +/* Pool*Method -- see */ + +/* Order of types corresponds to PoolClassStruct in */ + +typedef void (*PoolVarargsMethod)(ArgStruct args[], va_list varargs); +typedef Res (*PoolInitMethod)(Pool pool, Arena arena, PoolClass klass, ArgList args); +typedef Res (*PoolAllocMethod)(Addr *pReturn, Pool pool, Size size); +typedef void (*PoolFreeMethod)(Pool pool, Addr old, Size size); +typedef PoolGen (*PoolSegPoolGenMethod)(Pool pool, Seg seg); +typedef Res (*PoolBufferFillMethod)(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size); +typedef void (*PoolBufferEmptyMethod)(Pool pool, Buffer buffer); +typedef void (*PoolRampBeginMethod)(Pool pool, Buffer buf, Bool collectAll); +typedef void (*PoolRampEndMethod)(Pool pool, Buffer buf); +typedef Res (*PoolFramePushMethod)(AllocFrame *frameReturn, + Pool pool, 
Buffer buf); +typedef Res (*PoolFramePopMethod)(Pool pool, Buffer buf, + AllocFrame frame); +typedef Res (*PoolAddrObjectMethod)(Addr *pReturn, Pool pool, Addr addr); +typedef void (*PoolFreeWalkMethod)(Pool pool, FreeBlockVisitor f, void *p); +typedef BufferClass (*PoolBufferClassMethod)(void); +typedef PoolDebugMixin (*PoolDebugMixinMethod)(Pool pool); +typedef Size (*PoolSizeMethod)(Pool pool); + + +/* Messages + * + * + */ + +typedef unsigned MessageType; +typedef struct mps_message_s *Message; +typedef struct MessageClassStruct *MessageClass; + +/* Message*Method -- */ + +typedef void (*MessageDeleteMethod)(Message message); +typedef void (*MessageFinalizationRefMethod) + (Ref *refReturn, Arena arena, Message message); +typedef Size (*MessageGCLiveSizeMethod)(Message message); +typedef Size (*MessageGCCondemnedSizeMethod)(Message message); +typedef Size (*MessageGCNotCondemnedSizeMethod)(Message message); +typedef const char * (*MessageGCStartWhyMethod)(Message message); + +/* Message Types -- and elsewhere */ + +typedef struct TraceStartMessageStruct *TraceStartMessage; +typedef struct TraceMessageStruct *TraceMessage; /* trace end */ + + +/* Land*Method -- see */ + +typedef Res (*LandInitMethod)(Land land, Arena arena, Align alignment, ArgList args); +typedef Size (*LandSizeMethod)(Land land); +typedef Res (*LandInsertMethod)(Range rangeReturn, Land land, Range range); +typedef Res (*LandDeleteMethod)(Range rangeReturn, Land land, Range range); +typedef Bool (*LandVisitor)(Land land, Range range, void *closure); +typedef Bool (*LandDeleteVisitor)(Bool *deleteReturn, Land land, Range range, void *closure); +typedef Bool (*LandIterateMethod)(Land land, LandVisitor visitor, void *closure); +typedef Bool (*LandIterateAndDeleteMethod)(Land land, LandDeleteVisitor visitor, void *closure); +typedef Bool (*LandFindMethod)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete); +typedef Res (*LandFindInZonesMethod)(Bool *foundReturn, 
Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high); + + +/* CONSTANTS */ + + +/* design.mps.sig */ +#define SigInvalid ((Sig)0x51915BAD) /* SIGnature IS BAD */ + +#define SizeMAX ((Size)-1) +#define AccessSetEMPTY ((AccessSet)0) /* */ +#define AccessREAD ((AccessSet)(1<<0)) +#define AccessWRITE ((AccessSet)(1<<1)) +#define AccessLIMIT (2) +#define RefSetEMPTY BS_EMPTY(RefSet) +#define RefSetUNIV BS_UNIV(RefSet) +#define ZoneSetEMPTY BS_EMPTY(ZoneSet) +#define ZoneSetUNIV BS_UNIV(ZoneSet) +#define ZoneShiftUNSET ((Shift)-1) +#define TraceSetEMPTY BS_EMPTY(TraceSet) +#define TraceSetUNIV ((TraceSet)((1u << TraceLIMIT) - 1)) +#define RankSetEMPTY BS_EMPTY(RankSet) +#define RankSetUNIV ((RankSet)((1u << RankLIMIT) - 1)) +#define AttrGC ((Attr)(1<<0)) +#define AttrMOVINGGC ((Attr)(1<<1)) +#define AttrMASK (AttrGC | AttrMOVINGGC) + + +/* Locus preferences */ +enum { + LocusPrefHIGH = 1, + LocusPrefLOW, + LocusPrefZONESET, + LocusPrefLIMIT +}; + + +/* Buffer modes */ +#define BufferModeATTACHED ((BufferMode)(1<<0)) +#define BufferModeFLIPPED ((BufferMode)(1<<1)) +#define BufferModeLOGGED ((BufferMode)(1<<2)) +#define BufferModeTRANSITION ((BufferMode)(1<<3)) + + +/* Rank constants -- see */ +/* These definitions must match . */ +/* This is checked by . */ + +#define RANK_LIST(X) X(AMBIG) X(EXACT) X(FINAL) X(WEAK) + +enum { +#define X(RANK) Rank ## RANK, + RANK_LIST(X) +#undef X + RankLIMIT, + RankMIN = 0 +}; + + +/* Root Modes -- not implemented */ +/* .rm: Synchronize with . */ +/* This comment exists as a placeholder for when root modes are */ +/* implemented. 
*/ + +#define RootModeCONSTANT ((RootMode)1<<0) +#define RootModePROTECTABLE ((RootMode)1<<1) +#define RootModePROTECTABLE_INNER ((RootMode)1<<2) + + +/* Root Variants -- see + * + * .rootvar: Synchronize with + */ + +enum { + RootFUN, + RootAREA, + RootAREA_TAGGED, + RootTHREAD, + RootTHREAD_TAGGED, + RootFMT, + RootLIMIT +}; + + +/* .result-codes: Result Codes -- see */ + +_mps_ENUM_DEF(_mps_RES_ENUM, Res) + + +/* TraceStates -- see */ + +enum { + TraceINIT = 1, + TraceUNFLIPPED, + TraceFLIPPED, + TraceRECLAIM, + TraceFINISHED +}; + + +/* TraceStart reasons: the trigger that caused a trace to start. */ +/* Make these specific trigger names, not broad categories; */ +/* and if a new trigger is added, add a new reason. */ +/* TODO: A better way for MPS extensions to extend the list of reasons + instead of the catch-all TraceStartWhyEXTENSION. */ + +#define TRACE_START_WHY_LIST(X) \ + X(CHAIN_GEN0CAP, "gen 0 capacity", \ + "Generation 0 of a chain has reached capacity: start a minor " \ + "collection.") \ + X(DYNAMICCRITERION, "dynamic criterion", \ + "Need to start full collection now, or there won't be enough " \ + "memory (ArenaAvail) to complete it.") \ + X(OPPORTUNISM, "opportunism", \ + "Opportunism: client predicts plenty of idle time, so start full " \ + "collection.") \ + X(CLIENTFULL_INCREMENTAL, "full incremental", \ + "Client requests: start incremental full collection now.") \ + X(CLIENTFULL_BLOCK, "full", \ + "Client requests: immediate full collection.") \ + X(WALK, "walk", "Walking all live objects.") \ + X(EXTENSION, "extension", \ + "Extension: an MPS extension started the trace.") + +enum { +#define X(WHY, SHORT, LONG) TraceStartWhy ## WHY, + TRACE_START_WHY_LIST(X) +#undef X + TraceStartWhyLIMIT +}; + + +/* MessageTypes -- see */ +/* .message.types: Keep in sync with */ + +enum { + MessageTypeFINALIZATION, /* MPS_MESSAGE_TYPE_FINALIZATION */ + MessageTypeGC, /* MPS_MESSAGE_TYPE_GC = trace end */ + MessageTypeGCSTART, /* MPS_MESSAGE_TYPE_GC_START 
*/ + MessageTypeLIMIT /* not a message type, the limit of the enum. */ +}; + + +/* FindDelete operations -- see */ + +enum { + FindDeleteNONE = 1, /* don't delete after finding */ + FindDeleteLOW, /* delete size bytes from low end of block */ + FindDeleteHIGH, /* delete size bytes from high end of block */ + FindDeleteENTIRE, /* delete entire range */ + FindDeleteLIMIT /* not a FindDelete operation; the limit of the enum. */ +}; + + +/* Types for WriteF formats */ +/* These should be used with calls to WriteF. */ +/* These must be unpromotable types. */ + +typedef Addr WriteFA; +typedef Pointer WriteFP; +typedef const char *WriteFS; +typedef Word WriteFW; +typedef ULongest WriteFU; +typedef ULongest WriteFB; +typedef void (*WriteFF)(void); +typedef int WriteFC; /* Promoted */ +typedef double WriteFD; + + +/* STATISTIC_DECL -- declare a field to accumulate statistics in + * + * The argument is a field declaration (a struct-declaration minus the + * semicolon) for a single field (no commas). + */ + +#if defined(STATISTICS) +#define STATISTIC_DECL(field) field; +#elif defined(STATISTICS_NONE) +#define STATISTIC_DECL(field) +#else +#error "No statistics configured." +#endif + + +#endif /* mpmtypes_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mps.c b/mps/code/mps.c new file mode 100644 index 00000000000..e7872b3c14a --- /dev/null +++ b/mps/code/mps.c @@ -0,0 +1,294 @@ +/* mps.c: MEMORY POOL SYSTEM ALL-IN-ONE TRANSLATION UNIT + * + * $Id$ + * Copyright (C) 2012-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: This file can be compiled to create the complete MPS library in + * a single compilation, allowing the compiler to apply global optimizations + * and inlining effectively. On most modern compilers this is also faster + * than compiling each file separately. + * + * .purpose.universal: This file also allows simple building of a macOS + * "universal" (multiple architecture) binary when the set of source files + * differs by architecture. It may work for other platforms in a similar + * manner. + * + * .rule.simple: This file should never be more than a simple list of + * includes of other source code, with ifdefs for platform configuration, + * which closely mirror those in the makefiles. + */ + + +/* Platform interface + * + * This must be included first as it defines symbols which affect system + * headers, such as _POSIX_C_SOURCE _REENTRANT etc. 
+ */ + +#include "mpstd.h" + + +/* MPM Core */ + +#include "mpsi.c" +#include "mpm.c" +#include "arenavm.c" +#include "arenacl.c" +#include "arena.c" +#include "global.c" +#include "locus.c" +#include "tract.c" +#include "walk.c" +#include "protocol.c" +#include "pool.c" +#include "poolabs.c" +#include "trace.c" +#include "traceanc.c" +#include "scan.c" +#include "root.c" +#include "seg.c" +#include "format.c" +#include "buffer.c" +#include "ref.c" +#include "bt.c" +#include "ring.c" +#include "shield.c" +#include "ld.c" +#include "event.c" +#include "sac.c" +#include "message.c" +#include "poolmrg.c" +#include "poolmfs.c" +#include "dbgpool.c" +#include "dbgpooli.c" +#include "boot.c" +#include "meter.c" +#include "tree.c" +#include "rangetree.c" +#include "splay.c" +#include "cbs.c" +#include "ss.c" +#include "version.c" +#include "table.c" +#include "arg.c" +#include "abq.c" +#include "range.c" +#include "freelist.c" +#include "sa.c" +#include "nailboard.c" +#include "land.c" +#include "failover.c" +#include "vm.c" +#include "policy.c" +#include "trans.c" + +/* Additional pool classes */ + +#include "poolamc.c" +#include "poolams.c" +#include "poolawl.c" +#include "poollo.c" +#include "poolsnc.c" +#include "poolmv2.c" +#include "poolmvff.c" + +/* ANSI Plinth */ + +#if defined(PLINTH) /* see CONFIG_PLINTH_NONE in config.h */ +#include "mpsliban.c" +#include "mpsioan.c" +#endif + +/* Generic ("ANSI") platform */ + +#if defined(PLATFORM_ANSI) + +#include "lockan.c" /* generic locks */ +#include "than.c" /* generic threads manager */ +#include "vman.c" /* malloc-based pseudo memory mapping */ +#include "protan.c" /* generic memory protection */ +#include "prmcan.c" /* generic operating system mutator context */ +#include "prmcanan.c" /* generic architecture mutator context */ +#include "span.c" /* generic stack probe */ + +/* macOS on ARM64 built with Clang */ + +#elif defined(MPS_PF_XCA6LL) + +#include "lockix.c" /* Posix locks */ +#include "thxc.c" /* macOS Mach 
threading */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protxc.c" /* macOS Mach exception handling */ +#include "prmcanan.c" /* generic architecture mutator context */ +#include "prmcxc.c" /* macOS mutator context */ +#include "prmcxca6.c" /* ARM64 for macOS mutator context */ +#include "span.c" /* generic stack probe */ + +/* macOS on IA-32 built with Clang or GCC */ + +#elif defined(MPS_PF_XCI3LL) || defined(MPS_PF_XCI3GC) + +#include "lockix.c" /* Posix locks */ +#include "thxc.c" /* macOS Mach threading */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protxc.c" /* macOS Mach exception handling */ +#include "prmci3.c" /* IA-32 mutator context */ +#include "prmcxc.c" /* macOS mutator context */ +#include "prmcxci3.c" /* IA-32 for macOS mutator context */ +#include "span.c" /* generic stack probe */ + +/* macOS on x86-64 build with Clang or GCC */ + +#elif defined(MPS_PF_XCI6LL) || defined(MPS_PF_XCI6GC) + +#include "lockix.c" /* Posix locks */ +#include "thxc.c" /* macOS Mach threading */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protxc.c" /* macOS Mach exception handling */ +#include "prmci6.c" /* x86-64 mutator context */ +#include "prmcxc.c" /* macOS mutator context */ +#include "prmcxci6.c" /* x86-64 for macOS mutator context */ +#include "span.c" /* generic stack probe */ + +/* FreeBSD on IA-32 built with GCC or Clang */ + +#elif defined(MPS_PF_FRI3GC) || defined(MPS_PF_FRI3LL) + +#include "lockix.c" /* Posix locks */ +#include "thix.c" /* Posix threading */ +#include "pthrdext.c" /* Posix thread extensions */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protsgix.c" /* Posix signal handling */ +#include "prmcanan.c" /* generic architecture mutator context */ +#include "prmcix.c" /* Posix mutator context */ +#include "prmcfri3.c" /* 
IA-32 for FreeBSD mutator context */ +#include "span.c" /* generic stack probe */ + +/* FreeBSD on x86-64 built with GCC or Clang */ + +#elif defined(MPS_PF_FRI6GC) || defined(MPS_PF_FRI6LL) + +#include "lockix.c" /* Posix locks */ +#include "thix.c" /* Posix threading */ +#include "pthrdext.c" /* Posix thread extensions */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protsgix.c" /* Posix signal handling */ +#include "prmcanan.c" /* generic architecture mutator context */ +#include "prmcix.c" /* Posix mutator context */ +#include "prmcfri6.c" /* x86-64 for FreeBSD mutator context */ +#include "span.c" /* generic stack probe */ + +/* Linux on ARM64 with GCC or Clang */ + +#elif defined(MPS_PF_LIA6GC) || defined(MPS_PF_LIA6LL) + +#include "lockix.c" /* Posix locks */ +#include "thix.c" /* Posix threading */ +#include "pthrdext.c" /* Posix thread extensions */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protsgix.c" /* Posix signal handling */ +#include "prmcanan.c" /* generic architecture mutator context */ +#include "prmcix.c" /* Posix mutator context */ +#include "prmclia6.c" /* x86-64 for Linux mutator context */ +#include "span.c" /* generic stack probe */ + +/* Linux on IA-32 with GCC */ + +#elif defined(MPS_PF_LII3GC) + +#include "lockix.c" /* Posix locks */ +#include "thix.c" /* Posix threading */ +#include "pthrdext.c" /* Posix thread extensions */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protsgix.c" /* Posix signal handling */ +#include "prmci3.c" /* IA-32 mutator context */ +#include "prmcix.c" /* Posix mutator context */ +#include "prmclii3.c" /* IA-32 for Linux mutator context */ +#include "span.c" /* generic stack probe */ + +/* Linux on x86-64 with GCC or Clang */ + +#elif defined(MPS_PF_LII6GC) || defined(MPS_PF_LII6LL) + +#include "lockix.c" /* Posix locks */ +#include "thix.c" /* 
Posix threading */ +#include "pthrdext.c" /* Posix thread extensions */ +#include "vmix.c" /* Posix virtual memory */ +#include "protix.c" /* Posix protection */ +#include "protsgix.c" /* Posix signal handling */ +#include "prmci6.c" /* x86-64 mutator context */ +#include "prmcix.c" /* Posix mutator context */ +#include "prmclii6.c" /* x86-64 for Linux mutator context */ +#include "span.c" /* generic stack probe */ + +/* Windows on IA-32 with Microsoft Visual Studio or Pelles C */ + +#elif defined(MPS_PF_W3I3MV) || defined(MPS_PF_W3I3PC) + +#include "lockw3.c" /* Windows locks */ +#include "thw3.c" /* Windows threading */ +#include "vmw3.c" /* Windows virtual memory */ +#include "protw3.c" /* Windows protection */ +#include "prmci3.c" /* IA-32 mutator context */ +#include "prmcw3.c" /* Windows mutator context */ +#include "prmcw3i3.c" /* Windows on IA-32 mutator context */ +#include "spw3i3.c" /* Windows on IA-32 stack probe */ +#include "mpsiw3.c" /* Windows interface layer extras */ + +/* Windows on x86-64 with Microsoft Visual Studio or Pelles C */ + +#elif defined(MPS_PF_W3I6MV) || defined(MPS_PF_W3I6PC) + +#include "lockw3.c" /* Windows locks */ +#include "thw3.c" /* Windows threading */ +#include "vmw3.c" /* Windows virtual memory */ +#include "protw3.c" /* Windows protection */ +#include "prmci6.c" /* x86-64 mutator context */ +#include "prmcw3.c" /* Windows mutator context */ +#include "prmcw3i6.c" /* Windows on x86-64 mutator context */ +#include "spw3i6.c" /* Windows on x86-64 stack probe */ +#include "mpsiw3.c" /* Windows interface layer extras */ + +#else + +#error "Unknown platform -- can't determine platform specific parts." + +#endif + + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2012-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mps.h b/mps/code/mps.h new file mode 100644 index 00000000000..700c41462d5 --- /dev/null +++ b/mps/code/mps.h @@ -0,0 +1,893 @@ +/* mps.h: RAVENBROOK MEMORY POOL SYSTEM C INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * THIS HEADER IS NOT DOCUMENTATION. + * Please refer to the [MPS Manual](../manual/). + * + * But if you are a human reading this, please note: + * + * .naming: The MPS interface only uses identifiers beginning `mps_`, + * `MPS_` or `_mps_` and may use any identifiers with these prefixes in + * future. + * + * .naming.internal: Any identifier beginning with an underscore is for + * internal use within the interface and may change or be withdrawn without + * warning. 
+ * + * .readership: compilers, MPS developers. + * + * .sources: [The design of the MPS Interface to C](../design/interface-c). + */ + +#ifndef mps_h +#define mps_h + +#include +#include +#include + + +/* Platform Dependencies + * + * We went for over ten years without any platform ifdefs in this header. + * Then Microsoft made unsigned long shorter than a pointer on Win64. Ugh. + */ + +#if defined(_MSC_VER) && defined(_WIN32) && defined(_WIN64) && defined(_M_X64) +typedef unsigned __int64 mps_word_t; +#else +typedef unsigned long mps_word_t; +#endif + + +/* Abstract Types */ + +typedef struct mps_arena_s *mps_arena_t; /* arena */ +typedef struct mps_arena_class_s *mps_arena_class_t; /* arena class */ +typedef struct mps_pool_s *mps_pool_t; /* pool */ +typedef struct mps_chain_s *mps_chain_t; /* chain */ +typedef struct mps_fmt_s *mps_fmt_t; /* object format */ +typedef struct mps_root_s *mps_root_t; /* root */ +typedef struct mps_pool_class_s *mps_pool_class_t; /* pool class */ +typedef mps_pool_class_t mps_class_t; /* deprecated alias */ +typedef struct mps_thr_s *mps_thr_t; /* thread registration */ +typedef struct mps_ap_s *mps_ap_t; /* allocation point */ +typedef struct mps_ld_s *mps_ld_t; /* location dependency */ +typedef struct mps_ss_s *mps_ss_t; /* scan state */ +typedef struct mps_message_s + *mps_message_t; /* message */ +typedef struct mps_alloc_pattern_s + *mps_alloc_pattern_t; /* allocation patterns */ +typedef struct mps_frame_s + *mps_frame_t; /* allocation frames */ +typedef const struct mps_key_s *mps_key_t; /* argument key */ + +/* Concrete Types */ + +typedef int mps_bool_t; /* boolean (int) */ +typedef int mps_res_t; /* result code (int) */ +typedef void *mps_addr_t; /* managed address (void *) */ +typedef size_t mps_align_t; /* alignment (size_t) */ +typedef unsigned mps_rm_t; /* root mode (unsigned) */ +typedef unsigned mps_rank_t; /* ranks (unsigned) */ +typedef unsigned mps_message_type_t; /* message type (unsigned) */ +typedef 
mps_word_t mps_clock_t; /* processor time */ +typedef mps_word_t mps_label_t; /* telemetry label */ + +/* Result Codes */ + +#define _mps_RES_ENUM(R, X) \ + R(X, OK, "success (always zero)") \ + R(X, FAIL, "unspecified failure") \ + R(X, RESOURCE, "unable to obtain resources") \ + R(X, MEMORY, "unable to obtain memory") \ + R(X, LIMIT, "limitation reached") \ + R(X, UNIMPL, "unimplemented facility") \ + R(X, IO, "system I/O error") \ + R(X, COMMIT_LIMIT, "arena commit limit exceeded") \ + R(X, PARAM, "illegal user parameter value") + +#define _mps_ENUM_DEF_ROW(prefix, ident, doc) prefix##ident, +#define _mps_ENUM_DEF(REL, prefix) \ + enum { \ + REL(_mps_ENUM_DEF_ROW, prefix) \ + _mps_##prefix##LIMIT \ + }; +_mps_ENUM_DEF(_mps_RES_ENUM, MPS_RES_) + +/* Format and Root Method Types */ +/* see */ +/* see */ + +typedef struct mps_scan_tag_s *mps_scan_tag_t; +typedef struct mps_scan_tag_s { + mps_word_t mask; + mps_word_t pattern; +} mps_scan_tag_s; + +typedef mps_res_t (*mps_root_scan_t)(mps_ss_t, void *, size_t); +typedef mps_res_t (*mps_area_scan_t)(mps_ss_t, void *, void *, void *); +typedef mps_res_t (*mps_fmt_scan_t)(mps_ss_t, mps_addr_t, mps_addr_t); +typedef mps_res_t (*mps_reg_scan_t)(mps_ss_t, mps_thr_t, + void *, size_t); +typedef mps_addr_t (*mps_fmt_skip_t)(mps_addr_t); +typedef void (*mps_fmt_copy_t)(mps_addr_t, mps_addr_t); +typedef void (*mps_fmt_fwd_t)(mps_addr_t, mps_addr_t); +typedef mps_addr_t (*mps_fmt_isfwd_t)(mps_addr_t); +typedef void (*mps_fmt_pad_t)(mps_addr_t, size_t); +typedef mps_addr_t (*mps_fmt_class_t)(mps_addr_t); + +/* Callbacks indicating that the arena has extended or contracted. + * These are used to register chunks with RtlInstallFunctionTableCallback + * + * so that the client can unwind the stack through functions in the arena. 
+ */ +typedef void (*mps_arena_extended_t)(mps_arena_t, void *, size_t); +typedef void (*mps_arena_contracted_t)(mps_arena_t, void *, size_t); + +/* Keyword argument lists */ + +typedef void (*mps_fun_t)(void); + +typedef struct mps_arg_s { + mps_key_t key; + union { + mps_bool_t b; + char c; + const char *string; + int i; + unsigned u; + long l; + unsigned long ul; + float f; + double d; + size_t size; + mps_fun_t fun; + mps_addr_t addr; + mps_fmt_t format; + mps_chain_t chain; + struct mps_pool_debug_option_s *pool_debug_options; + mps_addr_t (*addr_method)(mps_addr_t); + mps_align_t align; + mps_word_t count; + void *p; + mps_rank_t rank; + mps_fmt_scan_t fmt_scan; + mps_fmt_skip_t fmt_skip; + mps_fmt_fwd_t fmt_fwd; + mps_fmt_isfwd_t fmt_isfwd; + mps_fmt_pad_t fmt_pad; + mps_fmt_class_t fmt_class; + mps_pool_t pool; + } val; +} mps_arg_s; + +extern const struct mps_key_s _mps_key_ARGS_END; +#define MPS_KEY_ARGS_END (&_mps_key_ARGS_END) +extern mps_arg_s mps_args_none[]; + +extern const struct mps_key_s _mps_key_ARENA_GRAIN_SIZE; +#define MPS_KEY_ARENA_GRAIN_SIZE (&_mps_key_ARENA_GRAIN_SIZE) +#define MPS_KEY_ARENA_GRAIN_SIZE_FIELD size +extern const struct mps_key_s _mps_key_ARENA_SIZE; +#define MPS_KEY_ARENA_SIZE (&_mps_key_ARENA_SIZE) +#define MPS_KEY_ARENA_SIZE_FIELD size +extern const struct mps_key_s _mps_key_ARENA_ZONED; +#define MPS_KEY_ARENA_ZONED (&_mps_key_ARENA_ZONED) +#define MPS_KEY_ARENA_ZONED_FIELD b +extern const struct mps_key_s _mps_key_arena_extended; +#define MPS_KEY_ARENA_EXTENDED (&_mps_key_arena_extended) +#define MPS_KEY_ARENA_EXTENDED_FIELD fun +extern const struct mps_key_s _mps_key_arena_contracted; +#define MPS_KEY_ARENA_CONTRACTED (&_mps_key_arena_contracted) +#define MPS_KEY_ARENA_CONTRACTED_FIELD fun +extern const struct mps_key_s _mps_key_FORMAT; +#define MPS_KEY_FORMAT (&_mps_key_FORMAT) +#define MPS_KEY_FORMAT_FIELD format +extern const struct mps_key_s _mps_key_CHAIN; +#define MPS_KEY_CHAIN (&_mps_key_CHAIN) +#define 
MPS_KEY_CHAIN_FIELD chain +extern const struct mps_key_s _mps_key_GEN; +#define MPS_KEY_GEN (&_mps_key_GEN) +#define MPS_KEY_GEN_FIELD u +extern const struct mps_key_s _mps_key_RANK; +#define MPS_KEY_RANK (&_mps_key_RANK) +#define MPS_KEY_RANK_FIELD rank +extern const struct mps_key_s _mps_key_COMMIT_LIMIT; +#define MPS_KEY_COMMIT_LIMIT (&_mps_key_COMMIT_LIMIT) +#define MPS_KEY_COMMIT_LIMIT_FIELD size +extern const struct mps_key_s _mps_key_SPARE_COMMIT_LIMIT; +#define MPS_KEY_SPARE_COMMIT_LIMIT (&_mps_key_SPARE_COMMIT_LIMIT) +#define MPS_KEY_SPARE_COMMIT_LIMIT_FIELD size +extern const struct mps_key_s _mps_key_PAUSE_TIME; +#define MPS_KEY_PAUSE_TIME (&_mps_key_PAUSE_TIME) +#define MPS_KEY_PAUSE_TIME_FIELD d + +extern const struct mps_key_s _mps_key_EXTEND_BY; +#define MPS_KEY_EXTEND_BY (&_mps_key_EXTEND_BY) +#define MPS_KEY_EXTEND_BY_FIELD size +extern const struct mps_key_s _mps_key_LARGE_SIZE; +#define MPS_KEY_LARGE_SIZE (&_mps_key_LARGE_SIZE) +#define MPS_KEY_LARGE_SIZE_FIELD size +extern const struct mps_key_s _mps_key_MIN_SIZE; +#define MPS_KEY_MIN_SIZE (&_mps_key_MIN_SIZE) +#define MPS_KEY_MIN_SIZE_FIELD size +extern const struct mps_key_s _mps_key_MEAN_SIZE; +#define MPS_KEY_MEAN_SIZE (&_mps_key_MEAN_SIZE) +#define MPS_KEY_MEAN_SIZE_FIELD size +extern const struct mps_key_s _mps_key_MAX_SIZE; +#define MPS_KEY_MAX_SIZE (&_mps_key_MAX_SIZE) +#define MPS_KEY_MAX_SIZE_FIELD size +extern const struct mps_key_s _mps_key_ALIGN; +#define MPS_KEY_ALIGN (&_mps_key_ALIGN) +#define MPS_KEY_ALIGN_FIELD align +extern const struct mps_key_s _mps_key_SPARE; +#define MPS_KEY_SPARE (&_mps_key_SPARE) +#define MPS_KEY_SPARE_FIELD d +extern const struct mps_key_s _mps_key_INTERIOR; +#define MPS_KEY_INTERIOR (&_mps_key_INTERIOR) +#define MPS_KEY_INTERIOR_FIELD b + +extern const struct mps_key_s _mps_key_VMW3_TOP_DOWN; +#define MPS_KEY_VMW3_TOP_DOWN (&_mps_key_VMW3_TOP_DOWN) +#define MPS_KEY_VMW3_TOP_DOWN_FIELD b + +extern const struct mps_key_s _mps_key_FMT_ALIGN; +#define 
MPS_KEY_FMT_ALIGN (&_mps_key_FMT_ALIGN) +#define MPS_KEY_FMT_ALIGN_FIELD align +extern const struct mps_key_s _mps_key_FMT_HEADER_SIZE; +#define MPS_KEY_FMT_HEADER_SIZE (&_mps_key_FMT_HEADER_SIZE) +#define MPS_KEY_FMT_HEADER_SIZE_FIELD size +extern const struct mps_key_s _mps_key_FMT_SCAN; +#define MPS_KEY_FMT_SCAN (&_mps_key_FMT_SCAN) +#define MPS_KEY_FMT_SCAN_FIELD fmt_scan +extern const struct mps_key_s _mps_key_FMT_SKIP; +#define MPS_KEY_FMT_SKIP (&_mps_key_FMT_SKIP) +#define MPS_KEY_FMT_SKIP_FIELD fmt_skip +extern const struct mps_key_s _mps_key_FMT_FWD; +#define MPS_KEY_FMT_FWD (&_mps_key_FMT_FWD) +#define MPS_KEY_FMT_FWD_FIELD fmt_fwd +extern const struct mps_key_s _mps_key_FMT_ISFWD; +#define MPS_KEY_FMT_ISFWD (&_mps_key_FMT_ISFWD) +#define MPS_KEY_FMT_ISFWD_FIELD fmt_isfwd +extern const struct mps_key_s _mps_key_FMT_PAD; +#define MPS_KEY_FMT_PAD (&_mps_key_FMT_PAD) +#define MPS_KEY_FMT_PAD_FIELD fmt_pad +extern const struct mps_key_s _mps_key_FMT_CLASS; +#define MPS_KEY_FMT_CLASS (&_mps_key_FMT_CLASS) +#define MPS_KEY_FMT_CLASS_FIELD fmt_class +extern const struct mps_key_s _mps_key_ap_hash_arrays; +#define MPS_KEY_AP_HASH_ARRAYS (&_mps_key_ap_hash_arrays) +#define MPS_KEY_AP_HASH_ARRAYS_FIELD b + +/* Maximum length of a keyword argument list. 
*/ +#define MPS_ARGS_MAX 32 + +extern void _mps_args_set_key(mps_arg_s args[MPS_ARGS_MAX], unsigned i, + mps_key_t key); + +#define MPS_ARGS_BEGIN(_var) \ + MPS_BEGIN \ + mps_arg_s _var[MPS_ARGS_MAX]; \ + unsigned _var##_i = 0; \ + _mps_args_set_key(_var, _var##_i, MPS_KEY_ARGS_END); \ + MPS_BEGIN + +#define MPS_ARGS_ADD_FIELD(_var, _key, _field, _val) \ + MPS_BEGIN \ + _mps_args_set_key(_var, _var##_i, _key); \ + _var[_var##_i].val._field = (_val); \ + ++_var##_i; \ + _mps_args_set_key(_var, _var##_i, MPS_KEY_ARGS_END); \ + MPS_END + +#define MPS_ARGS_ADD(_var, _key, _val) \ + MPS_ARGS_ADD_FIELD(_var, _key, _key##_FIELD, _val) + +#define MPS_ARGS_DONE(_var) \ + MPS_BEGIN \ + _mps_args_set_key(_var, _var##_i, MPS_KEY_ARGS_END); \ + _var##_i = MPS_ARGS_MAX; \ + MPS_END + +#define MPS_ARGS_END(_var) \ + MPS_END; \ + MPS_END + + +/* Keep in sync with + * */ +/* Not meant to be used by clients, they should use the macros below. */ +enum { + _mps_MESSAGE_TYPE_FINALIZATION, + _mps_MESSAGE_TYPE_GC, + _mps_MESSAGE_TYPE_GC_START +}; + +/* Message Types + * This is what clients should use. */ +#define mps_message_type_finalization() _mps_MESSAGE_TYPE_FINALIZATION +#define mps_message_type_gc() _mps_MESSAGE_TYPE_GC +#define mps_message_type_gc_start() _mps_MESSAGE_TYPE_GC_START + + +/* Reference Ranks + * + * See protocol.mps.reference. */ + +extern mps_rank_t mps_rank_ambig(void); +extern mps_rank_t mps_rank_exact(void); +extern mps_rank_t mps_rank_weak(void); + + +/* Root Modes */ +/* .rm: Keep in sync with */ + +#define MPS_RM_CONST (((mps_rm_t)1<<0)) +#define MPS_RM_PROT (((mps_rm_t)1<<1)) +#define MPS_RM_PROT_INNER (((mps_rm_t)1<<1)) + + +/* Allocation Point */ + +typedef struct mps_ap_s { /* allocation point descriptor */ + mps_addr_t init; /* limit of initialized memory */ + mps_addr_t alloc; /* limit of allocated memory */ + mps_addr_t limit; /* limit of available memory */ +} mps_ap_s; + + +/* Segregated-fit Allocation Caches */ +/* .sac: Keep in sync with . 
*/ + +typedef struct _mps_sac_s *mps_sac_t; + +#define MPS_SAC_CLASS_LIMIT ((size_t)8) + +typedef struct _mps_sac_freelist_block_s { + size_t _size; + size_t _count; + size_t _count_max; + mps_addr_t _blocks; +} _mps_sac_freelist_block_s; + +typedef struct _mps_sac_s { + size_t _middle; + mps_bool_t _trapped; + _mps_sac_freelist_block_s _freelists[2 * MPS_SAC_CLASS_LIMIT]; +} _mps_sac_s; + +/* .sacc: Keep in sync with . */ +typedef struct mps_sac_class_s { + size_t mps_block_size; + size_t mps_cached_count; + unsigned mps_frequency; +} mps_sac_class_s; + +#define mps_sac_classes_s mps_sac_class_s + + +/* Location Dependency */ +/* .ld: Keep in sync with . */ + +typedef struct mps_ld_s { /* location dependency descriptor */ + mps_word_t _epoch, _rs; +} mps_ld_s; + + +/* Scan State */ +/* .ss: See also . */ + +typedef struct mps_ss_s { + mps_word_t _zs, _w, _ufs; +} mps_ss_s; + + +/* Format Variants */ + +typedef struct mps_fmt_A_s { + mps_align_t align; + mps_fmt_scan_t scan; + mps_fmt_skip_t skip; + mps_fmt_copy_t copy; + mps_fmt_fwd_t fwd; + mps_fmt_isfwd_t isfwd; + mps_fmt_pad_t pad; +} mps_fmt_A_s; +typedef struct mps_fmt_A_s *mps_fmt_A_t; +/* type-name mps_fmt_A_t is deprecated: use mps_fmt_A_s* instead */ + +typedef struct mps_fmt_B_s { + mps_align_t align; + mps_fmt_scan_t scan; + mps_fmt_skip_t skip; + mps_fmt_copy_t copy; + mps_fmt_fwd_t fwd; + mps_fmt_isfwd_t isfwd; + mps_fmt_pad_t pad; + mps_fmt_class_t mps_class; +} mps_fmt_B_s; +typedef struct mps_fmt_B_s *mps_fmt_B_t; +/* type-name mps_fmt_B_t is deprecated: use mps_fmt_B_s* instead */ + + +typedef struct mps_fmt_auto_header_s { + mps_align_t align; + mps_fmt_scan_t scan; + mps_fmt_skip_t skip; + mps_fmt_fwd_t fwd; + mps_fmt_isfwd_t isfwd; + mps_fmt_pad_t pad; + size_t mps_headerSize; +} mps_fmt_auto_header_s; + + +typedef struct mps_fmt_fixed_s { + mps_align_t align; + mps_fmt_scan_t scan; + mps_fmt_fwd_t fwd; + mps_fmt_isfwd_t isfwd; + mps_fmt_pad_t pad; +} mps_fmt_fixed_s; + + +/* Internal 
Definitions */ + +#define MPS_BEGIN do { +#define MPS_END } while(0) +/* MPS_END might cause compiler warnings about constant conditionals. + * This could be avoided with some loss of efficiency by replacing 0 + * with a variable always guaranteed to be 0. In Visual C, the + * warning can be turned off using: + * #pragma warning(disable: 4127) + */ + + +/* arenas */ + +extern void mps_arena_clamp(mps_arena_t); +extern void mps_arena_release(mps_arena_t); +extern void mps_arena_park(mps_arena_t); +extern void mps_arena_postmortem(mps_arena_t); +extern mps_res_t mps_arena_start_collect(mps_arena_t); +extern mps_res_t mps_arena_collect(mps_arena_t); +extern mps_bool_t mps_arena_step(mps_arena_t, double, double); + +extern mps_res_t mps_arena_create(mps_arena_t *, mps_arena_class_t, ...); +extern mps_res_t mps_arena_create_v(mps_arena_t *, mps_arena_class_t, va_list); +extern mps_res_t mps_arena_create_k(mps_arena_t *, mps_arena_class_t, + mps_arg_s []); +extern void mps_arena_destroy(mps_arena_t); + +extern size_t mps_arena_reserved(mps_arena_t); +extern size_t mps_arena_committed(mps_arena_t); +extern size_t mps_arena_spare_committed(mps_arena_t); + +extern size_t mps_arena_commit_limit(mps_arena_t); +extern mps_res_t mps_arena_commit_limit_set(mps_arena_t, size_t); +extern double mps_arena_spare(mps_arena_t); +extern void mps_arena_spare_set(mps_arena_t, double); +extern void mps_arena_spare_commit_limit_set(mps_arena_t, size_t); +extern size_t mps_arena_spare_commit_limit(mps_arena_t); + +extern double mps_arena_pause_time(mps_arena_t); +extern void mps_arena_pause_time_set(mps_arena_t, double); + +extern mps_bool_t mps_arena_busy(mps_arena_t); +extern mps_bool_t mps_arena_has_addr(mps_arena_t, mps_addr_t); +extern mps_bool_t mps_addr_pool(mps_pool_t *, mps_arena_t, mps_addr_t); +extern mps_bool_t mps_addr_fmt(mps_fmt_t *, mps_arena_t, mps_addr_t); + +/* Client memory arenas */ +extern mps_res_t mps_arena_extend(mps_arena_t, mps_addr_t, size_t); +#if 0 +/* There's 
no implementation for this function. */ +extern mps_res_t mps_arena_retract(mps_arena_t, mps_addr_t, size_t); +#endif + + +/* Object Formats */ + +extern mps_res_t mps_fmt_create_k(mps_fmt_t *, mps_arena_t, mps_arg_s []); +extern mps_res_t mps_fmt_create_A(mps_fmt_t *, mps_arena_t, + mps_fmt_A_s *); +extern mps_res_t mps_fmt_create_B(mps_fmt_t *, mps_arena_t, + mps_fmt_B_s *); +extern mps_res_t mps_fmt_create_auto_header(mps_fmt_t *, mps_arena_t, + mps_fmt_auto_header_s *); +extern mps_res_t mps_fmt_create_fixed(mps_fmt_t *, mps_arena_t, + mps_fmt_fixed_s *); +extern void mps_fmt_destroy(mps_fmt_t); + + +/* Pools */ + +extern mps_res_t mps_pool_create(mps_pool_t *, mps_arena_t, + mps_pool_class_t, ...); +extern mps_res_t mps_pool_create_v(mps_pool_t *, mps_arena_t, + mps_pool_class_t, va_list); +extern mps_res_t mps_pool_create_k(mps_pool_t *, mps_arena_t, + mps_pool_class_t, mps_arg_s []); +extern void mps_pool_destroy(mps_pool_t); +extern size_t mps_pool_total_size(mps_pool_t); +extern size_t mps_pool_free_size(mps_pool_t); +extern mps_res_t mps_pool_walk(mps_pool_t, mps_area_scan_t, void *); + + +/* Chains */ + +/* .gen-param: This structure must match . 
*/ +typedef struct mps_gen_param_s { + size_t mps_capacity; + double mps_mortality; +} mps_gen_param_s; + +extern mps_res_t mps_chain_create(mps_chain_t *, mps_arena_t, + size_t, mps_gen_param_s *); +extern void mps_chain_destroy(mps_chain_t); + + +/* Manual Allocation */ + +extern mps_res_t mps_alloc(mps_addr_t *, mps_pool_t, size_t); +extern void mps_free(mps_pool_t, mps_addr_t, size_t); + + +/* Allocation Points */ + +extern mps_res_t mps_ap_create(mps_ap_t *, mps_pool_t, ...); +extern mps_res_t mps_ap_create_v(mps_ap_t *, mps_pool_t, va_list); +extern mps_res_t mps_ap_create_k(mps_ap_t *, mps_pool_t, mps_arg_s []); +extern void mps_ap_destroy(mps_ap_t); + +extern mps_res_t (mps_reserve)(mps_addr_t *, mps_ap_t, size_t); +extern mps_bool_t (mps_commit)(mps_ap_t, mps_addr_t, size_t); + +extern mps_res_t mps_ap_fill(mps_addr_t *, mps_ap_t, size_t); + +extern mps_res_t (mps_ap_frame_push)(mps_frame_t *, mps_ap_t); +extern mps_res_t (mps_ap_frame_pop)(mps_ap_t, mps_frame_t); + +extern mps_bool_t mps_ap_trip(mps_ap_t, mps_addr_t, size_t); + +extern mps_alloc_pattern_t mps_alloc_pattern_ramp(void); +extern mps_alloc_pattern_t mps_alloc_pattern_ramp_collect_all(void); +extern mps_res_t mps_ap_alloc_pattern_begin(mps_ap_t, mps_alloc_pattern_t); +extern mps_res_t mps_ap_alloc_pattern_end(mps_ap_t, mps_alloc_pattern_t); +extern mps_res_t mps_ap_alloc_pattern_reset(mps_ap_t); + + +/* Segregated-fit Allocation Caches */ + +extern mps_res_t mps_sac_create(mps_sac_t *, mps_pool_t, size_t, + mps_sac_classes_s *); +extern void mps_sac_destroy(mps_sac_t); +extern mps_res_t mps_sac_alloc(mps_addr_t *, mps_sac_t, size_t, mps_bool_t); +extern void mps_sac_free(mps_sac_t, mps_addr_t, size_t); +extern void mps_sac_flush(mps_sac_t); + +/* Direct access to mps_sac_fill and mps_sac_empty is not supported. 
*/ +extern mps_res_t mps_sac_fill(mps_addr_t *, mps_sac_t, size_t, mps_bool_t); +extern void mps_sac_empty(mps_sac_t, mps_addr_t, size_t); + +#define MPS_SAC_ALLOC_FAST(res_o, p_o, sac, size, unused) \ + MPS_BEGIN \ + size_t _mps_i, _mps_s; \ + \ + _mps_s = (size); \ + if (_mps_s > (sac)->_middle) { \ + _mps_i = 0; \ + while (_mps_s > (sac)->_freelists[_mps_i]._size) \ + _mps_i += 2; \ + } else { \ + _mps_i = 1; \ + while (_mps_s <= (sac)->_freelists[_mps_i]._size) \ + _mps_i += 2; \ + } \ + if ((sac)->_freelists[_mps_i]._count != 0) { \ + (p_o) = (sac)->_freelists[_mps_i]._blocks; \ + (sac)->_freelists[_mps_i]._blocks = *(mps_addr_t *)(p_o); \ + --(sac)->_freelists[_mps_i]._count; \ + (res_o) = MPS_RES_OK; \ + } else \ + (res_o) = mps_sac_fill(&(p_o), sac, _mps_s, unused); \ + MPS_END + +#define MPS_SAC_FREE_FAST(sac, p, size) \ + MPS_BEGIN \ + size_t _mps_i, _mps_s; \ + \ + _mps_s = (size); \ + if (_mps_s > (sac)->_middle) { \ + _mps_i = 0; \ + while (_mps_s > (sac)->_freelists[_mps_i]._size) \ + _mps_i += 2; \ + } else { \ + _mps_i = 1; \ + while (_mps_s <= (sac)->_freelists[_mps_i]._size) \ + _mps_i += 2; \ + } \ + if ((sac)->_freelists[_mps_i]._count \ + < (sac)->_freelists[_mps_i]._count_max) { \ + *(mps_addr_t *)(p) = (sac)->_freelists[_mps_i]._blocks; \ + (sac)->_freelists[_mps_i]._blocks = (p); \ + ++(sac)->_freelists[_mps_i]._count; \ + } else \ + mps_sac_empty(sac, p, _mps_s); \ + MPS_END + +/* backward compatibility */ +#define MPS_SAC_ALLOC(res_o, p_o, sac, size, unused) \ + MPS_SAC_ALLOC_FAST(res_o, p_o, sac, size, unused) +#define MPS_SAC_FREE(sac, p, size) MPS_SAC_FREE_FAST(sac, p, size) + + +/* Reserve Macros */ +/* .reserve: Keep in sync with . */ + +#define mps_reserve(_p_o, _mps_ap, _size) \ + ((char *)(_mps_ap)->alloc + (_size) > (char *)(_mps_ap)->alloc && \ + (char *)(_mps_ap)->alloc + (_size) <= (char *)(_mps_ap)->limit ? 
\ + ((_mps_ap)->alloc = \ + (mps_addr_t)((char *)(_mps_ap)->alloc + (_size)), \ + *(_p_o) = (_mps_ap)->init, \ + MPS_RES_OK) : \ + mps_ap_fill(_p_o, _mps_ap, _size)) + + +#define MPS_RESERVE_BLOCK(_res_v, _p_v, _mps_ap, _size) \ + MPS_BEGIN \ + char *_alloc = (char *)(_mps_ap)->alloc; \ + char *_next = _alloc + (_size); \ + if(_next > _alloc && _next <= (char *)(_mps_ap)->limit) { \ + (_mps_ap)->alloc = (mps_addr_t)_next; \ + (_p_v) = (_mps_ap)->init; \ + (_res_v) = MPS_RES_OK; \ + } else \ + (_res_v) = mps_ap_fill(&(_p_v), _mps_ap, _size); \ + MPS_END + + +/* Commit Macros */ +/* .commit: Keep in sync with . */ + +#define mps_commit(_mps_ap, _p, _size) \ + ((_mps_ap)->init = (_mps_ap)->alloc, \ + (_mps_ap)->limit != 0 || mps_ap_trip(_mps_ap, _p, _size)) + + +/* Root Creation and Destruction */ + +extern mps_res_t mps_root_create(mps_root_t *, mps_arena_t, mps_rank_t, + mps_rm_t, mps_root_scan_t, + void *, size_t); +extern mps_res_t mps_root_create_table(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, + mps_addr_t *, size_t); +extern mps_res_t mps_root_create_table_masked(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, + mps_addr_t *, size_t, + mps_word_t); +extern mps_res_t mps_root_create_area(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, + void *, void *, + mps_area_scan_t, void *); +extern mps_res_t mps_root_create_area_tagged(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, + void *, void *, + mps_area_scan_t, + mps_word_t, mps_word_t); +extern mps_res_t mps_root_create_fmt(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, + mps_fmt_scan_t, mps_addr_t, + mps_addr_t); +extern mps_res_t mps_root_create_reg(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, mps_thr_t, + mps_reg_scan_t, void *, size_t); +extern mps_res_t mps_root_create_thread(mps_root_t *, mps_arena_t, + mps_thr_t, void *); +extern mps_res_t mps_root_create_thread_scanned(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, mps_thr_t, + mps_area_scan_t, + void *, + void *); +extern 
mps_res_t mps_root_create_thread_tagged(mps_root_t *, mps_arena_t, + mps_rank_t, mps_rm_t, mps_thr_t, + mps_area_scan_t, + mps_word_t, mps_word_t, + void *); +extern void mps_root_destroy(mps_root_t); + +extern mps_res_t mps_stack_scan_ambig(mps_ss_t, mps_thr_t, + void *, size_t); + + +/* Thread Registration */ + +extern mps_res_t mps_thread_reg(mps_thr_t *, mps_arena_t); +extern void mps_thread_dereg(mps_thr_t); + + +/* Location Dependency */ + +extern void mps_ld_reset(mps_ld_t, mps_arena_t); +extern void mps_ld_add(mps_ld_t, mps_arena_t, mps_addr_t); +extern void mps_ld_merge(mps_ld_t, mps_arena_t, mps_ld_t); +extern mps_bool_t mps_ld_isstale(mps_ld_t, mps_arena_t, mps_addr_t); +extern mps_bool_t mps_ld_isstale_any(mps_ld_t, mps_arena_t); + +extern mps_word_t mps_collections(mps_arena_t); + + +/* Messages */ + +extern void mps_message_type_enable(mps_arena_t, mps_message_type_t); +extern void mps_message_type_disable(mps_arena_t, mps_message_type_t); +extern mps_bool_t mps_message_poll(mps_arena_t); +extern mps_bool_t mps_message_queue_type(mps_message_type_t *, mps_arena_t); +extern mps_bool_t mps_message_get(mps_message_t *, + mps_arena_t, mps_message_type_t); +extern void mps_message_discard(mps_arena_t, mps_message_t); + +/* Message Methods */ + +/* -- All Message Types */ +extern mps_message_type_t mps_message_type(mps_arena_t, mps_message_t); +extern mps_clock_t mps_message_clock(mps_arena_t, mps_message_t); + +/* -- mps_message_type_finalization */ +extern void mps_message_finalization_ref(mps_addr_t *, + mps_arena_t, mps_message_t); + +/* -- mps_message_type_gc */ +extern size_t mps_message_gc_live_size(mps_arena_t, mps_message_t); +extern size_t mps_message_gc_condemned_size(mps_arena_t, mps_message_t); +extern size_t mps_message_gc_not_condemned_size(mps_arena_t, + mps_message_t); + +/* -- mps_message_type_gc_start */ +extern const char *mps_message_gc_start_why(mps_arena_t, mps_message_t); + + +/* Finalization */ + +extern mps_res_t 
mps_finalize(mps_arena_t, mps_addr_t *); +extern mps_res_t mps_definalize(mps_arena_t, mps_addr_t *); + + +/* Telemetry */ + +extern void mps_telemetry_set(mps_word_t); +extern void mps_telemetry_reset(mps_word_t); +extern mps_word_t mps_telemetry_get(void); +extern mps_label_t mps_telemetry_intern(const char *); +extern void mps_telemetry_label(mps_addr_t, mps_label_t); +extern void mps_telemetry_flush(void); + + +/* Heap Walking */ + +typedef void (*mps_formatted_objects_stepper_t)(mps_addr_t, mps_fmt_t, + mps_pool_t, + void *, size_t); +extern void mps_arena_formatted_objects_walk(mps_arena_t, + mps_formatted_objects_stepper_t, + void *, size_t); + + +/* Root Walking */ + +typedef void (*mps_roots_stepper_t)(mps_addr_t *, + mps_root_t, + void *, size_t); +extern void mps_arena_roots_walk(mps_arena_t, + mps_roots_stepper_t, + void *, size_t); + + +/* Allocation debug options */ + + +typedef struct mps_pool_debug_option_s { + const void *fence_template; + size_t fence_size; + const void *free_template; + size_t free_size; +} mps_pool_debug_option_s; + +extern const struct mps_key_s _mps_key_POOL_DEBUG_OPTIONS; +#define MPS_KEY_POOL_DEBUG_OPTIONS (&_mps_key_POOL_DEBUG_OPTIONS) +#define MPS_KEY_POOL_DEBUG_OPTIONS_FIELD pool_debug_options + +extern void mps_pool_check_fenceposts(mps_pool_t); +extern void mps_pool_check_free_space(mps_pool_t); + + +/* Scanner Support */ + +extern mps_res_t mps_scan_area(mps_ss_t, void *, void *, void *); +extern mps_res_t mps_scan_area_masked(mps_ss_t, void *, void *, void *); +extern mps_res_t mps_scan_area_tagged(mps_ss_t, void *, void *, void *); +extern mps_res_t mps_scan_area_tagged_or_zero(mps_ss_t, void *, void *, void *); + +#define MPS_SCAN_BEGIN(ss) \ + MPS_BEGIN \ + mps_ss_t _ss = (ss); \ + mps_word_t _mps_zs = (_ss)->_zs; \ + mps_word_t _mps_w = (_ss)->_w; \ + mps_word_t _mps_ufs = (_ss)->_ufs; \ + mps_word_t _mps_wt; \ + { + +#define MPS_FIX1(ss, ref) \ + (_mps_wt = (mps_word_t)1 << ((mps_word_t)(ref) >> _mps_zs \ + & 
(sizeof(mps_word_t) * CHAR_BIT - 1)), \ + _mps_ufs |= _mps_wt, \ + (_mps_w & _mps_wt) != 0) + +extern mps_res_t _mps_fix2(mps_ss_t, mps_addr_t *); +#define MPS_FIX2(ss, ref_io) _mps_fix2(ss, ref_io) + +#define MPS_FIX12(ss, ref_io) \ + (MPS_FIX1(ss, *(ref_io)) ? \ + MPS_FIX2(ss, ref_io) : MPS_RES_OK) + +/* MPS_FIX is deprecated */ +#define MPS_FIX(ss, ref_io) MPS_FIX12(ss, ref_io) + +#define MPS_FIX_CALL(ss, call) \ + MPS_BEGIN \ + (call); _mps_ufs |= (ss)->_ufs; \ + MPS_END + +#define MPS_SCAN_END(ss) \ + } \ + (ss)->_ufs = _mps_ufs; \ + MPS_END + +/* Misc interface */ +extern mps_res_t mps_addr_object(mps_addr_t *p_o, mps_arena_t arena, mps_addr_t addr); + +/* Transforms interface. */ + +typedef struct mps_transform_s *mps_transform_t; +extern mps_res_t mps_transform_create(mps_transform_t *, mps_arena_t); +extern mps_res_t mps_transform_add_oldnew(mps_transform_t, mps_addr_t *, mps_addr_t *, size_t); +extern mps_res_t mps_transform_apply(mps_bool_t *, mps_transform_t); +extern void mps_transform_destroy(mps_transform_t); + + +#endif /* mps_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mps.xcodeproj/project.pbxproj b/mps/code/mps.xcodeproj/project.pbxproj new file mode 100644 index 00000000000..e478fd3f269 --- /dev/null +++ b/mps/code/mps.xcodeproj/project.pbxproj @@ -0,0 +1,6796 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXAggregateTarget section */ + 2215A9A9192A47BB00E9E2CE /* testci */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9AD192A47BB00E9E2CE /* Build configuration list for PBXAggregateTarget "testci" */; + buildPhases = ( + 2215A9AC192A47BB00E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9AA192A47BB00E9E2CE /* PBXTargetDependency */, + ); + name = testci; + productName = testrun; + }; + 2215A9B1192A47C500E9E2CE /* testansi */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9B5192A47C500E9E2CE /* Build configuration list for PBXAggregateTarget "testansi" */; + buildPhases = ( + 2215A9B4192A47C500E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9B2192A47C500E9E2CE /* PBXTargetDependency */, + ); + name = testansi; + productName = testrun; + }; + 2215A9B9192A47CE00E9E2CE /* testall */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9BD192A47CE00E9E2CE /* Build configuration list for PBXAggregateTarget "testall" */; + buildPhases = ( + 2215A9BC192A47CE00E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9BA192A47CE00E9E2CE /* 
PBXTargetDependency */, + ); + name = testall; + productName = testrun; + }; + 2215A9C1192A47D500E9E2CE /* testpollnone */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9C5192A47D500E9E2CE /* Build configuration list for PBXAggregateTarget "testpollnone" */; + buildPhases = ( + 2215A9C4192A47D500E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9C2192A47D500E9E2CE /* PBXTargetDependency */, + ); + name = testpollnone; + productName = testrun; + }; + 22CDE8EF16E9E97D00366D0A /* testrun */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 22CDE8F016E9E97E00366D0A /* Build configuration list for PBXAggregateTarget "testrun" */; + buildPhases = ( + 22CDE8F416E9E9D400366D0A /* ShellScript */, + ); + dependencies = ( + 22CDE92E16E9EB9300366D0A /* PBXTargetDependency */, + ); + name = testrun; + productName = testrun; + }; + 3104AFF1156D37A0000A585A /* all */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 3104AFF2156D37A0000A585A /* Build configuration list for PBXAggregateTarget "all" */; + buildPhases = ( + ); + dependencies = ( + 319F7A192A30D2F000E5B418 /* PBXTargetDependency */, + 3104AFF6156D37BC000A585A /* PBXTargetDependency */, + 3114A644156E94FB001E0AA3 /* PBXTargetDependency */, + 22FACEF1188809B5000FDBC1 /* PBXTargetDependency */, + 3104AFF8156D37BE000A585A /* PBXTargetDependency */, + 3104B004156D37CD000A585A /* PBXTargetDependency */, + 22FA177916E8DB0C0098B23F /* PBXTargetDependency */, + 3104B01D156D398B000A585A /* PBXTargetDependency */, + 3104B02D156D39DF000A585A /* PBXTargetDependency */, + 3104AFFA156D37C1000A585A /* PBXTargetDependency */, + 3114A600156E940A001E0AA3 /* PBXTargetDependency */, + 3104AFFC156D37C3000A585A /* PBXTargetDependency */, + 31D60022156D3CF200337B26 /* PBXTargetDependency */, + 2291A5C0175CAB5F001D4920 /* PBXTargetDependency */, + 3114A677156E961C001E0AA3 /* PBXTargetDependency */, + 3114A612156E943B001E0AA3 /* PBXTargetDependency */, + 22B2BC3D18B643B300C33E63 /* PBXTargetDependency 
*/, + 3114A5CC156E932C001E0AA3 /* PBXTargetDependency */, + 3114A5EA156E93C4001E0AA3 /* PBXTargetDependency */, + 22EA3F4820D2B23F0065F5B6 /* PBXTargetDependency */, + 224CC79D175E187C002FF81B /* PBXTargetDependency */, + 22B2BC3F18B643B700C33E63 /* PBXTargetDependency */, + 3114A65B156E95B4001E0AA3 /* PBXTargetDependency */, + 2231BB6D18CA986B002D6322 /* PBXTargetDependency */, + 31D60034156D3D5A00337B26 /* PBXTargetDependency */, + 2286E4C918F4389E004111E2 /* PBXTargetDependency */, + 2231BB6F18CA986D002D6322 /* PBXTargetDependency */, + 3114A5A0156E915A001E0AA3 /* PBXTargetDependency */, + 3114A6A7156E9739001E0AA3 /* PBXTargetDependency */, + 3104AFFE156D37C6000A585A /* PBXTargetDependency */, + 3104B000156D37C8000A585A /* PBXTargetDependency */, + 3114A68D156E9686001E0AA3 /* PBXTargetDependency */, + 22C2ACB218BE4056006B3677 /* PBXTargetDependency */, + 31D6004F156D3EF700337B26 /* PBXTargetDependency */, + 3114A5B6156E92DC001E0AA3 /* PBXTargetDependency */, + 3104B002156D37CB000A585A /* PBXTargetDependency */, + 22B2BC3918B643AD00C33E63 /* PBXTargetDependency */, + 22B2BC3B18B643B000C33E63 /* PBXTargetDependency */, + 3104B04A156D3AE4000A585A /* PBXTargetDependency */, + 229E228819EAB10D00E21417 /* PBXTargetDependency */, + 31D6009D156D404B00337B26 /* PBXTargetDependency */, + 314CB6EB1C6D272A0073CA42 /* PBXTargetDependency */, + 3114A62E156E94AA001E0AA3 /* PBXTargetDependency */, + 3114A6B9156E9763001E0AA3 /* PBXTargetDependency */, + 31D60063156D3F5C00337B26 /* PBXTargetDependency */, + 31D60087156D3FE600337B26 /* PBXTargetDependency */, + 220FD3F419533E8F00967A35 /* PBXTargetDependency */, + 3114A6D5156E9839001E0AA3 /* PBXTargetDependency */, + 2265D72220E54020003019E8 /* PBXTargetDependency */, + 2D07B9791636FCBD00DB751B /* PBXTargetDependency */, + 2275798916C5422900B662B0 /* PBXTargetDependency */, + ); + name = all; + productName = all; + }; +/* End PBXAggregateTarget section */ + +/* Begin PBXBuildFile section */ + 220FD3DD195339C000967A35 /* fmtdy.c in 
Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 220FD3DE195339C000967A35 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 220FD3DF195339C000967A35 /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 220FD3E1195339C000967A35 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 220FD3E3195339C000967A35 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 220FD3F119533E7200967A35 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 220FD3F219533E7900967A35 /* ztfm.c in Sources */ = {isa = PBXBuildFile; fileRef = 220FD3F019533C3200967A35 /* ztfm.c */; }; + 2215A9C9192A495F00E9E2CE /* pooln.c in Sources */ = {isa = PBXBuildFile; fileRef = 22FACEDE18880933000FDBC1 /* pooln.c */; }; + 2231BB5118CA97D8002D6322 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 2231BB5318CA97D8002D6322 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 2231BB5F18CA97DC002D6322 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 2231BB6118CA97DC002D6322 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 2231BB6A18CA984F002D6322 /* locusss.c in Sources */ = {isa = PBXBuildFile; fileRef = 2231BB6918CA983C002D6322 /* locusss.c */; }; + 2231BB6B18CA9861002D6322 /* locbwcss.c in Sources */ = {isa = PBXBuildFile; fileRef = 2231BB6818CA9834002D6322 /* locbwcss.c */; }; + 223E795D19EAB00B00DC26A6 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 223E795F19EAB00B00DC26A6 /* libmps.a in Frameworks */ = {isa = 
PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 223E796719EAB05C00DC26A6 /* sncss.c in Sources */ = {isa = PBXBuildFile; fileRef = 223E796619EAB04100DC26A6 /* sncss.c */; }; + 224CC791175E1821002FF81B /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 224CC793175E1821002FF81B /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 224CC79F175E321C002FF81B /* mv2test.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A686156E9674001E0AA3 /* mv2test.c */; }; + 224CC7A0175E322C002FF81B /* fotest.c in Sources */ = {isa = PBXBuildFile; fileRef = 224CC79E175E3202002FF81B /* fotest.c */; }; + 22561A9818F4265D00372C66 /* testthrix.c in Sources */ = {isa = PBXBuildFile; fileRef = 22561A9718F4263300372C66 /* testthrix.c */; }; + 22561A9918F4266600372C66 /* testthrix.c in Sources */ = {isa = PBXBuildFile; fileRef = 22561A9718F4263300372C66 /* testthrix.c */; }; + 22561A9A18F426BB00372C66 /* testthrix.c in Sources */ = {isa = PBXBuildFile; fileRef = 22561A9718F4263300372C66 /* testthrix.c */; }; + 22561A9B18F426F300372C66 /* testthrix.c in Sources */ = {isa = PBXBuildFile; fileRef = 22561A9718F4263300372C66 /* testthrix.c */; }; + 2265D71720E53F9C003019E8 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 2265D72020E54010003019E8 /* eventpy.c in Sources */ = {isa = PBXBuildFile; fileRef = 2265D71F20E5400F003019E8 /* eventpy.c */; }; + 2291A5B1175CAB2F001D4920 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 2291A5B2175CAB2F001D4920 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 2291A5B3175CAB2F001D4920 /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 2291A5B4175CAB2F001D4920 /* fmtno.c in Sources */ = {isa = PBXBuildFile; 
fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 2291A5B5175CAB2F001D4920 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 2291A5B7175CAB2F001D4920 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 2291A5BE175CAB4E001D4920 /* awlutth.c in Sources */ = {isa = PBXBuildFile; fileRef = 2291A5A9175CAA9B001D4920 /* awlutth.c */; }; + 2291A5ED175CB5E2001D4920 /* landtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 2291A5E9175CB4EC001D4920 /* landtest.c */; }; + 22B2BC2E18B6434F00C33E63 /* mps.c in Sources */ = {isa = PBXBuildFile; fileRef = 31A47BA3156C1E130039B1C2 /* mps.c */; }; + 22B2BC3718B6437C00C33E63 /* scheme-advanced.c in Sources */ = {isa = PBXBuildFile; fileRef = 22B2BC2B18B6434000C33E63 /* scheme-advanced.c */; }; + 22C2ACA718BE400A006B3677 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 22C2ACA918BE400A006B3677 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 22C2ACB018BE4049006B3677 /* nailboardtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 22C2ACA018BE3FEC006B3677 /* nailboardtest.c */; }; + 22EA3F3D20D2B0D90065F5B6 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 22EA3F3F20D2B0D90065F5B6 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 22EA3F4620D2B0FD0065F5B6 /* forktest.c in Sources */ = {isa = PBXBuildFile; fileRef = 22EA3F3720D2B0730065F5B6 /* forktest.c */; }; + 22F846B518F437B900982BA7 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 22F846B718F437B900982BA7 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 22F846BE18F437D700982BA7 /* lockut.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 22F846AF18F4379C00982BA7 /* lockut.c */; }; + 22F846BF18F437E000982BA7 /* testthrix.c in Sources */ = {isa = PBXBuildFile; fileRef = 22561A9718F4263300372C66 /* testthrix.c */; }; + 22FA176916E8D6FC0098B23F /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 22FA176A16E8D6FC0098B23F /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 22FA176B16E8D6FC0098B23F /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 22FA176C16E8D6FC0098B23F /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 22FA176D16E8D6FC0098B23F /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 22FA176F16E8D6FC0098B23F /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 22FA177716E8D7A80098B23F /* amcssth.c in Sources */ = {isa = PBXBuildFile; fileRef = 22FA177616E8D7A80098B23F /* amcssth.c */; }; + 22FACEE518880983000FDBC1 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 22FACEE718880983000FDBC1 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 22FACEEE188809A3000FDBC1 /* fmtscheme.c in Sources */ = {isa = PBXBuildFile; fileRef = 22FACED6188807FF000FDBC1 /* fmtscheme.c */; }; + 22FACEEF188809A7000FDBC1 /* airtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 22FACED1188807FF000FDBC1 /* airtest.c */; }; + 2D07B97A1636FCCE00DB751B /* eventsql.c in Sources */ = {isa = PBXBuildFile; fileRef = 2D07B96C1636FC7200DB751B /* eventsql.c */; }; + 2D07B97C163705E400DB751B /* libsqlite3.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 2D07B97B163705E400DB751B /* libsqlite3.dylib */; }; + 2D53F2E716515A63009A1829 /* libmps.a in Frameworks */ = {isa = 
PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 2D604BA516514C4F003AAF46 /* eventtxt.c in Sources */ = {isa = PBXBuildFile; fileRef = 2D604BA416514C4F003AAF46 /* eventtxt.c */; }; + 3104AFBF156D3591000A585A /* apss.c in Sources */ = {isa = PBXBuildFile; fileRef = 3104AFBE156D3591000A585A /* apss.c */; }; + 3104AFC2156D35B2000A585A /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3104AFC3156D35C3000A585A /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3104AFD4156D35F7000A585A /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3104AFD5156D35FB000A585A /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3104AFD8156D3607000A585A /* sacss.c in Sources */ = {isa = PBXBuildFile; fileRef = 3104AFD6156D3602000A585A /* sacss.c */; }; + 3104AFE9156D3690000A585A /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3104AFEA156D3697000A585A /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3104AFEC156D36A5000A585A /* amcsshe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3104AFEB156D36A5000A585A /* amcsshe.c */; }; + 3104AFED156D374A000A585A /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 3104AFEE156D374D000A585A /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3104AFEF156D3753000A585A /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3104AFF0156D3756000A585A /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3104B016156D390B000A585A /* amsss.c in Sources */ = {isa = PBXBuildFile; fileRef = 
3104B015156D390B000A585A /* amsss.c */; }; + 3104B017156D3915000A585A /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3104B018156D3953000A585A /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3104B019156D3960000A585A /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3104B01A156D396E000A585A /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3104B01B156D3973000A585A /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3104B02E156D39E2000A585A /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3104B031156D39FD000A585A /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 3104B032156D3A00000A585A /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3104B033156D3A05000A585A /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3104B034156D3A2C000A585A /* amssshe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3104B02F156D39F2000A585A /* amssshe.c */; }; + 3104B035156D3A39000A585A /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3104B036156D3A49000A585A /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3104B04E156D3AFE000A585A /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3104B04F156D3B09000A585A /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3104B050156D3B09000A585A /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c 
*/; }; + 3104B051156D3B09000A585A /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 31108A3E1C6B90E900E728EA /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31108A411C6B90E900E728EA /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31108A481C6B911B00E728EA /* tagtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 31108A391C6B90D600E728EA /* tagtest.c */; }; + 3114A59B156E914B001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A59C156E914F001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A5A2156E9168001E0AA3 /* locv.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A5A1156E9168001E0AA3 /* locv.c */; }; + 3114A5B1156E92C8001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A5B2156E92CB001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A5B8156E92F1001E0AA3 /* qs.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A5B7156E92F0001E0AA3 /* qs.c */; }; + 3114A5C7156E9322001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A5C8156E9322001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A5CE156E9369001E0AA3 /* finalcv.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A5CD156E9369001E0AA3 /* finalcv.c */; }; + 3114A5CF156E9381001E0AA3 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3114A5D0156E9381001E0AA3 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3114A5D1156E9381001E0AA3 /* 
fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3114A5E0156E93AE001E0AA3 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3114A5E1156E93AE001E0AA3 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3114A5E2156E93AE001E0AA3 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3114A5E3156E93AE001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A5E4156E93AE001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A5E6156E93B9001E0AA3 /* finaltest.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A5E5156E93B9001E0AA3 /* finaltest.c */; }; + 3114A5F9156E93F3001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A5FA156E93F3001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A5FC156E93FC001E0AA3 /* arenacv.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A5FB156E93FC001E0AA3 /* arenacv.c */; }; + 3114A615156E944E001E0AA3 /* bttest.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A613156E944A001E0AA3 /* bttest.c */; }; + 3114A616156E9455001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A617156E946B001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A626156E948C001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A627156E9490001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A62A156E949E001E0AA3 /* teletest.c in Sources 
*/ = {isa = PBXBuildFile; fileRef = 3114A628156E949A001E0AA3 /* teletest.c */; }; + 3114A63E156E94EA001E0AA3 /* abqtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A63D156E94EA001E0AA3 /* abqtest.c */; }; + 3114A63F156E94F0001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A640156E94F0001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A66E156E95F2001E0AA3 /* btcv.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A66C156E95EB001E0AA3 /* btcv.c */; }; + 3114A66F156E95F2001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A670156E95F2001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A672156E95F6001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A673156E95F6001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A688156E967C001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A6A1156E9729001E0AA3 /* messtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A69F156E9725001E0AA3 /* messtest.c */; }; + 3114A6A2156E972D001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A6A3156E972D001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A6BC156E976C001E0AA3 /* walkt0.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A6BA156E9768001E0AA3 /* walkt0.c */; }; + 3114A6BD156E9771001E0AA3 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3114A6BE156E9771001E0AA3 /* libmps.a in Frameworks */ = {isa = 
PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A6BF156E97B8001E0AA3 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3114A6C0156E97B8001E0AA3 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3114A6C1156E97B8001E0AA3 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3114A6D1156E9829001E0AA3 /* eventcnv.c in Sources */ = {isa = PBXBuildFile; fileRef = 3114A6D0156E9829001E0AA3 /* eventcnv.c */; }; + 3114A6D7156E9923001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3114A6DD156E9A0F001E0AA3 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 311A44F81C8B1EBD00852E2B /* testthrix.c in Sources */ = {isa = PBXBuildFile; fileRef = 22561A9718F4263300372C66 /* testthrix.c */; }; + 311A44F91C8B1EC200852E2B /* testthrix.c in Sources */ = {isa = PBXBuildFile; fileRef = 22561A9718F4263300372C66 /* testthrix.c */; }; + 3124CAC3156BE40100753214 /* awlut.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC2156BE40100753214 /* awlut.c */; }; + 3124CAC4156BE40D00753214 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3124CAC5156BE41700753214 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3124CAC8156BE48D00753214 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3124CAC9156BE48D00753214 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3124CACD156BE4C200753214 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3124CADF156BE65900753214 /* mpsicv.c in Sources */ = {isa = PBXBuildFile; fileRef = 
3124CADE156BE65900753214 /* mpsicv.c */; }; + 3124CAE0156BE66B00753214 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3124CAE1156BE67000753214 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3124CAE2156BE68E00753214 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3124CAE3156BE69B00753214 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3124CAE5156BE6D500753214 /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 3124CAE6156BE6F700753214 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3124CAF6156BE81100753214 /* amcss.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAF5156BE81100753214 /* amcss.c */; }; + 3124CAF7156BE82000753214 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 3124CAF8156BE82000753214 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 3124CAF9156BE82000753214 /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 3124CAFA156BE82000753214 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 3124CAFB156BE82000753214 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 3124CAFC156BE82900753214 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 3150AE53156ABA2500A6E22A /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 318DA8D31892B27E0089718C /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; 
}; + 319F7A082A30D08500E5B418 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 319F7A0A2A30D08500E5B418 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 319F7A0B2A30D08500E5B418 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 319F7A0C2A30D08500E5B418 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 319F7A0E2A30D08500E5B418 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 319F7A172A30D11400E5B418 /* addrobj.c in Sources */ = {isa = PBXBuildFile; fileRef = 319F7A152A30D11400E5B418 /* addrobj.c */; }; + 31A47BA4156C1E130039B1C2 /* mps.c in Sources */ = {isa = PBXBuildFile; fileRef = 31A47BA3156C1E130039B1C2 /* mps.c */; }; + 31D60007156D3C6200337B26 /* segsmss.c in Sources */ = {isa = PBXBuildFile; fileRef = 31D60006156D3C5F00337B26 /* segsmss.c */; }; + 31D60008156D3C7400337B26 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31D60018156D3CC300337B26 /* awluthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 31D60017156D3CC300337B26 /* awluthe.c */; }; + 31D60019156D3CCC00337B26 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31D6001A156D3CDC00337B26 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 31D6001B156D3CDC00337B26 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 31D6001C156D3CDC00337B26 /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 31D6001D156D3CDC00337B26 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 31D6001E156D3CDF00337B26 /* 
testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31D60035156D3DF300337B26 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31D60038156D3E3000337B26 /* lockcov.c in Sources */ = {isa = PBXBuildFile; fileRef = 31D60036156D3E0200337B26 /* lockcov.c */; }; + 31D60039156D3E3E00337B26 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31D60048156D3ECF00337B26 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31D60049156D3ED200337B26 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31D6004B156D3EE600337B26 /* poolncv.c in Sources */ = {isa = PBXBuildFile; fileRef = 31D6004A156D3EE600337B26 /* poolncv.c */; }; + 31D6005F156D3F4A00337B26 /* zcoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 31D6005E156D3F4A00337B26 /* zcoll.c */; }; + 31D60060156D3F5000337B26 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31D60069156D3F7200337B26 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 31D6006A156D3F7200337B26 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 31D6006B156D3F7200337B26 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 31D6006C156D3F7200337B26 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31D6007D156D3FCF00337B26 /* zmess.c in Sources */ = {isa = PBXBuildFile; fileRef = 31D6007B156D3FCC00337B26 /* zmess.c */; }; + 31D6007E156D3FD700337B26 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 31D6007F156D3FD700337B26 /* fmtdytst.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 31D60080156D3FD700337B26 /* fmthe.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAE4156BE6D500753214 /* fmthe.c */; }; + 31D60081156D3FD700337B26 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 31D60082156D3FD700337B26 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31D60083156D3FDB00337B26 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31D6009A156D404000337B26 /* steptest.c in Sources */ = {isa = PBXBuildFile; fileRef = 31D60098156D403C00337B26 /* steptest.c */; }; + 31D6009B156D404400337B26 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; + 31D6009E156D406400337B26 /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 31D6009F156D406400337B26 /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 31D600A0156D406400337B26 /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; + 31D600A1156D406400337B26 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31EEAC75156AB58E00714D05 /* mpmss.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC74156AB58E00714D05 /* mpmss.c */; }; + 31EEAC9F156AB73400714D05 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; + 31FCAE161769244F008C034C /* mps.c in Sources */ = {isa = PBXBuildFile; fileRef = 31A47BA3156C1E130039B1C2 /* mps.c */; }; + 31FCAE19176924D4008C034C /* scheme.c in Sources */ = {isa = PBXBuildFile; fileRef = 31FCAE18176924D4008C034C /* scheme.c */; }; + 6313D46918A400B200EB03EF /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 
31EEAC9E156AB73400714D05 /* testlib.c */; }; + 6313D47318A4028E00EB03EF /* djbench.c in Sources */ = {isa = PBXBuildFile; fileRef = 318DA8CE1892B1210089718C /* djbench.c */; }; + 6313D47418A4029200EB03EF /* gcbench.c in Sources */ = {isa = PBXBuildFile; fileRef = 6313D46618A3FDC900EB03EF /* gcbench.c */; }; + 6313D47518A40C6300EB03EF /* fmtdytst.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC7156BE48D00753214 /* fmtdytst.c */; }; + 6313D47618A40C7B00EB03EF /* fmtdy.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CAC6156BE48D00753214 /* fmtdy.c */; }; + 6313D47718A40D0400EB03EF /* fmtno.c in Sources */ = {isa = PBXBuildFile; fileRef = 3124CACC156BE4C200753214 /* fmtno.c */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 220FD3DA195339C000967A35 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 220FD3F319533E8F00967A35 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 220FD3D8195339C000967A35; + remoteInfo = ztfm; + }; + 2215A9AB192A47BB00E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 2215A9B3192A47C500E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 2215A9BB192A47CE00E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 
3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 2215A9C3192A47D500E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 2231BB4E18CA97D8002D6322 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 2231BB5C18CA97DC002D6322 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 2231BB6C18CA986B002D6322 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 2231BB4C18CA97D8002D6322; + remoteInfo = locbwcss; + }; + 2231BB6E18CA986D002D6322 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 2231BB5A18CA97DC002D6322; + remoteInfo = locusss; + }; + 223E795A19EAB00B00DC26A6 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 224CC78E175E1821002FF81B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 224CC79C175E187C002FF81B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + 
remoteGlobalIDString = 224CC78C175E1821002FF81B; + remoteInfo = mvfftest; + }; + 2265D71320E53F9C003019E8 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 2265D72120E54020003019E8 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 2265D71120E53F9C003019E8; + remoteInfo = mpseventpy; + }; + 2275798816C5422900B662B0 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 2D604B9B16514B1A003AAF46; + remoteInfo = mpseventtxt; + }; + 2286E4C818F4389E004111E2 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 22F846B018F437B900982BA7; + remoteInfo = lockut; + }; + 2291A5AE175CAB2F001D4920 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 2291A5BF175CAB5F001D4920 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 2291A5AC175CAB2F001D4920; + remoteInfo = awlutth; + }; + 229E228719EAB10D00E21417 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 223E795819EAB00B00DC26A6; + remoteInfo = sncss; + }; + 22B2BC3818B643AD00C33E63 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project 
object */; + proxyType = 1; + remoteGlobalIDString = 31FCAE0917692403008C034C; + remoteInfo = scheme; + }; + 22B2BC3A18B643B000C33E63 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 22B2BC2C18B6434F00C33E63; + remoteInfo = "scheme-advanced"; + }; + 22B2BC3C18B643B300C33E63 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 318DA8C31892B0F30089718C; + remoteInfo = djbench; + }; + 22B2BC3E18B643B700C33E63 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 6313D46718A400B200EB03EF; + remoteInfo = gcbench; + }; + 22C2ACA418BE400A006B3677 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 22C2ACB118BE4056006B3677 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 22C2ACA218BE400A006B3677; + remoteInfo = nailboardtest; + }; + 22CDE92D16E9EB9300366D0A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 22EA3F3A20D2B0D90065F5B6 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 22EA3F4720D2B23F0065F5B6 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 
31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 22EA3F3820D2B0D90065F5B6; + remoteInfo = forktest; + }; + 22F846B218F437B900982BA7 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 22FA176616E8D6FC0098B23F /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 22FA177816E8DB0C0098B23F /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 22FA176416E8D6FC0098B23F; + remoteInfo = amcssth; + }; + 22FACEE218880983000FDBC1 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 22FACEF0188809B5000FDBC1 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 22FACEE018880983000FDBC1; + remoteInfo = airtest; + }; + 2D07B9781636FCBD00DB751B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 2D07B9701636FC9900DB751B; + remoteInfo = mpseventsql; + }; + 3104AFC0156D35AE000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3104AFD2156D35F2000A585A /* PBXContainerItemProxy */ = { + isa = 
PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3104AFE7156D368D000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3104AFF5156D37BC000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3104AFF7156D37BE000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3124CAEA156BE7F300753214; + remoteInfo = amcss; + }; + 3104AFF9156D37C1000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFB2156D357B000A585A; + remoteInfo = apss; + }; + 3104AFFB156D37C3000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3124CAB7156BE3EC00753214; + remoteInfo = awlut; + }; + 3104AFFD156D37C6000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEAC64156AB52600714D05; + remoteInfo = mpmss; + }; + 3104AFFF156D37C8000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3124CAD3156BE64A00753214; + remoteInfo = mpsicv; + }; + 3104B001156D37CB000A585A /* PBXContainerItemProxy */ = 
{ + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFC7156D35E2000A585A; + remoteInfo = sacss; + }; + 3104B003156D37CD000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFDC156D3681000A585A; + remoteInfo = amcsshe; + }; + 3104B013156D38FA000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3104B01C156D398B000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104B008156D38F3000A585A; + remoteInfo = amsss; + }; + 3104B02C156D39DF000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104B021156D39D4000A585A; + remoteInfo = amssshe; + }; + 3104B037156D3A56000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3104B047156D3ADE000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3104B049156D3AE4000A585A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104B03C156D3AD7000A585A; + remoteInfo = segsmss; + }; + 31108A3C1C6B90E900E728EA /* 
PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A59D156E9156001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A59F156E915A001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A58F156E913C001E0AA3; + remoteInfo = locv; + }; + 3114A5B3156E92D8001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A5B5156E92DC001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A5A6156E92C0001E0AA3; + remoteInfo = qs; + }; + 3114A5C9156E9328001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A5CB156E932C001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A5BC156E9315001E0AA3; + remoteInfo = finalcv; + }; + 3114A5E7156E93BF001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 
3114A5E9156E93C4001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A5D5156E93A0001E0AA3; + remoteInfo = finaltest; + }; + 3114A5FD156E9406001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A5FF156E940A001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A5EE156E93E7001E0AA3; + remoteInfo = arenacv; + }; + 3114A60F156E9438001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A611156E943B001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A604156E9430001E0AA3; + remoteInfo = bttest; + }; + 3114A62B156E94A6001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A62D156E94AA001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A61B156E9485001E0AA3; + remoteInfo = teletest; + }; + 3114A641156E94F8001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + 
remoteInfo = mps; + }; + 3114A643156E94FB001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A632156E94DB001E0AA3; + remoteInfo = abqtest; + }; + 3114A658156E95B1001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A65A156E95B4001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A64B156E9596001E0AA3; + remoteInfo = landtest; + }; + 3114A674156E9619001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A676156E961C001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A661156E95D9001E0AA3; + remoteInfo = btcv; + }; + 3114A68A156E9682001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A68C156E9686001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A67B156E9668001E0AA3; + remoteInfo = mv2test; + }; + 3114A6A4156E9735001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 
31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A6A6156E9739001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A694156E971B001E0AA3; + remoteInfo = messtest; + }; + 3114A6B6156E975E001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A6B8156E9763001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A6AB156E9759001E0AA3; + remoteInfo = walkt0; + }; + 3114A6D2156E9834001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 3114A6D4156E9839001E0AA3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3114A6C5156E9815001E0AA3; + remoteInfo = mpseventcnv; + }; + 314CB6EA1C6D272A0073CA42 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31108A3A1C6B90E900E728EA; + remoteInfo = tagtest; + }; + 319F7A062A30D08500E5B418 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 319F7A182A30D2F000E5B418 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + 
remoteGlobalIDString = 319F7A042A30D08500E5B418; + remoteInfo = addrobj; + }; + 31A47BA9156C210D0039B1C2 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31A47BAB156C21120039B1C2 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31A47BAD156C21170039B1C2 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31A47BAF156C211B0039B1C2 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31D6001F156D3CEC00337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31D60021156D3CF200337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31D6000C156D3CB200337B26; + remoteInfo = awluthe; + }; + 31D60031156D3D5300337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31D60033156D3D5A00337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + 
proxyType = 1; + remoteGlobalIDString = 31D60026156D3D3E00337B26; + remoteInfo = lockcov; + }; + 31D6004C156D3EF000337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31D6004E156D3EF700337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31D6003D156D3EC700337B26; + remoteInfo = poolncv; + }; + 31D60062156D3F5C00337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31D60053156D3F3500337B26; + remoteInfo = zcoll; + }; + 31D60064156D3F5F00337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31D60084156D3FE100337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31D60086156D3FE600337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31D60070156D3FBC00337B26; + remoteInfo = zmess; + }; + 31D60096156D403500337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 31EEABFA156AAF9D00714D05; + remoteInfo = mps; + }; + 31D6009C156D404B00337B26 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project 
object */; + proxyType = 1; + remoteGlobalIDString = 31D6008B156D402900337B26; + remoteInfo = steptest; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXCopyFilesBuildPhase section */ + 220FD3E4195339C000967A35 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 2231BB5418CA97D8002D6322 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 2231BB6218CA97DC002D6322 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 223E796019EAB00B00DC26A6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 224CC794175E1821002FF81B /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 2265D71820E53F9C003019E8 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 2291A5B8175CAB2F001D4920 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 22B2BC3118B6434F00C33E63 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + 
files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 22C2ACAA18BE400A006B3677 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 22EA3F4020D2B0D90065F5B6 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 22F846B818F437B900982BA7 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 22FA177016E8D6FC0098B23F /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 22FACEE818880983000FDBC1 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 2D07B96F1636FC9900DB751B /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 2D604B9A16514B1A003AAF46 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3104AFB1156D357B000A585A /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3104AFC6156D35E2000A585A /* CopyFiles */ = { + isa = 
PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3104AFDB156D3681000A585A /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3104B007156D38F3000A585A /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3104B020156D39D4000A585A /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3104B03B156D3AD7000A585A /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31108A421C6B90E900E728EA /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A58E156E913C001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A5A5156E92C0001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A5BB156E9315001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + 
runOnlyForDeploymentPostprocessing = 1; + }; + 3114A5D4156E93A0001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A5ED156E93E7001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A603156E9430001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A61A156E9485001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A631156E94DB001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A64A156E9596001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A660156E95D9001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A67A156E9668001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A693156E971B001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + 
buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A6AA156E9759001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3114A6C4156E9815001E0AA3 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3124CAB6156BE3EC00753214 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3124CAD2156BE64A00753214 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 3124CAE9156BE7F300753214 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 318DA8C81892B0F30089718C /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 319F7A0F2A30D08500E5B418 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31D6000B156D3CB200337B26 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + 
runOnlyForDeploymentPostprocessing = 1; + }; + 31D60025156D3D3E00337B26 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31D6003C156D3EC700337B26 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31D60052156D3F3500337B26 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31D6006F156D3FBC00337B26 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31D6008A156D402900337B26 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31EEAC63156AB52600714D05 /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 31FCAE0817692403008C034C /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; + 6313D46D18A400B200EB03EF /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 
2213454C1DB0386600E14202 /* prmc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prmc.h; sourceTree = ""; }; + 2213454D1DB038D400E14202 /* prmcxc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = prmcxc.c; sourceTree = ""; }; + 220FD3E9195339C000967A35 /* ztfm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = ztfm; sourceTree = BUILT_PRODUCTS_DIR; }; + 220FD3EA195339E500967A35 /* mpsitr.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mpsitr.c; sourceTree = ""; }; + 220FD3EB195339F000967A35 /* trans.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = trans.c; sourceTree = ""; }; + 220FD3ED19533A8700967A35 /* mpscvm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpscvm.h; sourceTree = ""; }; + 220FD3EE19533A8700967A35 /* trans.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trans.h; sourceTree = ""; }; + 220FD3F019533C3200967A35 /* ztfm.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ztfm.c; sourceTree = ""; }; + 2231BB5918CA97D8002D6322 /* locbwcss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = locbwcss; sourceTree = BUILT_PRODUCTS_DIR; }; + 2231BB6718CA97DC002D6322 /* locusss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = locusss; sourceTree = BUILT_PRODUCTS_DIR; }; + 2231BB6818CA9834002D6322 /* locbwcss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = locbwcss.c; sourceTree = ""; }; + 2231BB6918CA983C002D6322 /* locusss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = locusss.c; sourceTree = ""; }; + 
223475CB194CA09500C69128 /* vm.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = vm.c; sourceTree = ""; }; + 223475CC194CA09500C69128 /* vm.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vm.h; sourceTree = ""; }; + 2239BB4C20EE2E34007AC917 /* rangetree.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rangetree.c; sourceTree = ""; }; + 2239BB4D20EE2E4D007AC917 /* rangetree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rangetree.h; sourceTree = ""; }; + 223E796519EAB00B00DC26A6 /* sncss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = sncss; sourceTree = BUILT_PRODUCTS_DIR; }; + 223E796619EAB04100DC26A6 /* sncss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sncss.c; sourceTree = ""; }; + 224CC799175E1821002FF81B /* fotest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fotest; sourceTree = BUILT_PRODUCTS_DIR; }; + 224CC79E175E3202002FF81B /* fotest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fotest.c; sourceTree = ""; }; + 22561A9618F4263300372C66 /* testthr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = testthr.h; sourceTree = ""; }; + 22561A9718F4263300372C66 /* testthrix.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = testthrix.c; sourceTree = ""; }; + 2265D71D20E53F9C003019E8 /* mpseventpy */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mpseventpy; sourceTree = BUILT_PRODUCTS_DIR; }; + 2265D71F20E5400F003019E8 /* eventpy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = eventpy.c; sourceTree = SOURCE_ROOT; }; + 
2291A5A8175CAA51001D4920 /* poolmv2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = poolmv2.h; sourceTree = ""; }; + 2291A5A9175CAA9B001D4920 /* awlutth.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = awlutth.c; sourceTree = ""; }; + 2291A5BD175CAB2F001D4920 /* awlutth */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = awlutth; sourceTree = BUILT_PRODUCTS_DIR; }; + 2291A5E9175CB4EC001D4920 /* landtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = landtest.c; sourceTree = ""; }; + 2291A5EA175CB503001D4920 /* abq.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = abq.h; sourceTree = ""; }; + 2291A5EB175CB53E001D4920 /* range.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = range.c; sourceTree = ""; }; + 2291A5EC175CB53E001D4920 /* range.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = range.h; sourceTree = ""; }; + 2291A5EE175CB768001D4920 /* freelist.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = freelist.c; sourceTree = ""; }; + 2291A5EF175CB768001D4920 /* freelist.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = freelist.h; sourceTree = ""; }; + 2291A5F0175CB7A4001D4920 /* testlib.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = testlib.h; sourceTree = ""; }; + 22B2BC2B18B6434000C33E63 /* scheme-advanced.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "scheme-advanced.c"; path = "../example/scheme/scheme-advanced.c"; sourceTree = ""; }; + 22B2BC3618B6434F00C33E63 /* scheme-advanced */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; 
includeInIndex = 0; path = "scheme-advanced"; sourceTree = BUILT_PRODUCTS_DIR; }; + 22C2ACA018BE3FEC006B3677 /* nailboardtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nailboardtest.c; sourceTree = ""; }; + 22C2ACAF18BE400A006B3677 /* nailboardtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = nailboardtest; sourceTree = BUILT_PRODUCTS_DIR; }; + 22C5C99A18EC6AEC004C63D4 /* failover.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = failover.c; sourceTree = ""; }; + 22C5C99B18EC6AEC004C63D4 /* failover.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = failover.h; sourceTree = ""; }; + 22C5C99C18EC6AEC004C63D4 /* land.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = land.c; sourceTree = ""; }; + 22DD93E118ED815F00240DD2 /* failover.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = failover.txt; path = ../design/failover.txt; sourceTree = ""; }; + 22DD93E218ED815F00240DD2 /* land.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = land.txt; path = ../design/land.txt; sourceTree = ""; }; + 22E30E821886FF1400D98EA9 /* nailboard.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = nailboard.c; sourceTree = ""; }; + 22E30E831886FF1400D98EA9 /* nailboard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nailboard.h; sourceTree = ""; }; + 22EA3F3720D2B0730065F5B6 /* forktest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = forktest.c; sourceTree = ""; }; + 22EA3F4520D2B0D90065F5B6 /* forktest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = forktest; sourceTree = BUILT_PRODUCTS_DIR; }; + 22F846AF18F4379C00982BA7 /* lockut.c */ 
= {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lockut.c; sourceTree = ""; }; + 22F846BD18F437B900982BA7 /* lockut */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = lockut; sourceTree = BUILT_PRODUCTS_DIR; }; + 22FA177516E8D6FC0098B23F /* amcssth */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = amcssth; sourceTree = BUILT_PRODUCTS_DIR; }; + 22FA177616E8D7A80098B23F /* amcssth.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amcssth.c; sourceTree = ""; }; + 22FACED1188807FF000FDBC1 /* airtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = airtest.c; sourceTree = ""; }; + 22FACED2188807FF000FDBC1 /* fmtdy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fmtdy.h; sourceTree = ""; }; + 22FACED3188807FF000FDBC1 /* fmtdytst.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fmtdytst.h; sourceTree = ""; }; + 22FACED4188807FF000FDBC1 /* fmthe.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fmthe.h; sourceTree = ""; }; + 22FACED5188807FF000FDBC1 /* fmtno.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fmtno.h; sourceTree = ""; }; + 22FACED6188807FF000FDBC1 /* fmtscheme.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fmtscheme.c; sourceTree = ""; }; + 22FACED7188807FF000FDBC1 /* fmtscheme.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fmtscheme.h; sourceTree = ""; }; + 22FACEDA1888088A000FDBC1 /* ss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ss.c; sourceTree = ""; }; + 22FACEDB188808D5000FDBC1 /* mpscmfs.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpscmfs.h; sourceTree = ""; }; + 22FACEDC18880933000FDBC1 /* poolmfs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = poolmfs.h; sourceTree = ""; }; + 22FACEDD18880933000FDBC1 /* poolmrg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = poolmrg.h; sourceTree = ""; }; + 22FACEDE18880933000FDBC1 /* pooln.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pooln.c; sourceTree = ""; }; + 22FACEDF18880933000FDBC1 /* pooln.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pooln.h; sourceTree = ""; }; + 22FACEED18880983000FDBC1 /* airtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = airtest; sourceTree = BUILT_PRODUCTS_DIR; }; + 2D07B96C1636FC7200DB751B /* eventsql.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = eventsql.c; sourceTree = ""; }; + 2D07B9711636FC9900DB751B /* mpseventsql */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mpseventsql; sourceTree = BUILT_PRODUCTS_DIR; }; + 2D07B97B163705E400DB751B /* libsqlite3.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libsqlite3.dylib; path = usr/lib/libsqlite3.dylib; sourceTree = SDKROOT; }; + 2D604B9C16514B1A003AAF46 /* mpseventtxt */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mpseventtxt; sourceTree = BUILT_PRODUCTS_DIR; }; + 2D604BA416514C4F003AAF46 /* eventtxt.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = eventtxt.c; sourceTree = ""; }; + 3104AFB3156D357B000A585A /* apss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = apss; 
sourceTree = BUILT_PRODUCTS_DIR; }; + 3104AFBE156D3591000A585A /* apss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = apss.c; sourceTree = ""; }; + 3104AFC8156D35E2000A585A /* sacss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = sacss; sourceTree = BUILT_PRODUCTS_DIR; }; + 3104AFD6156D3602000A585A /* sacss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sacss.c; sourceTree = ""; }; + 3104AFDD156D3681000A585A /* amcsshe */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = amcsshe; sourceTree = BUILT_PRODUCTS_DIR; }; + 3104AFEB156D36A5000A585A /* amcsshe.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amcsshe.c; sourceTree = ""; }; + 3104B009156D38F3000A585A /* amsss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = amsss; sourceTree = BUILT_PRODUCTS_DIR; }; + 3104B015156D390B000A585A /* amsss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amsss.c; sourceTree = ""; }; + 3104B022156D39D4000A585A /* amssshe */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = amssshe; sourceTree = BUILT_PRODUCTS_DIR; }; + 3104B02F156D39F2000A585A /* amssshe.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amssshe.c; sourceTree = ""; }; + 3104B03D156D3AD7000A585A /* segsmss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = segsmss; sourceTree = BUILT_PRODUCTS_DIR; }; + 3107DC4E173B03D100F705C8 /* arg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = arg.h; sourceTree = ""; }; + 310F5D7118B6675F007EFCBC /* tree.c */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.c.c; path = tree.c; sourceTree = ""; }; + 310F5D7218B6675F007EFCBC /* tree.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = tree.h; sourceTree = ""; }; + 31108A391C6B90D600E728EA /* tagtest.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = tagtest.c; sourceTree = ""; }; + 31108A471C6B90E900E728EA /* tagtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = tagtest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3112ED3A18ABC57F00CC531A /* sa.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sa.h; sourceTree = ""; }; + 3112ED3B18ABC75200CC531A /* sa.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = sa.c; sourceTree = ""; }; + 3114A590156E913C001E0AA3 /* locv */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = locv; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A5A1156E9168001E0AA3 /* locv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = locv.c; sourceTree = ""; }; + 3114A5A7156E92C0001E0AA3 /* qs */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = qs; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A5B7156E92F0001E0AA3 /* qs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = qs.c; sourceTree = ""; }; + 3114A5BD156E9315001E0AA3 /* finalcv */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = finalcv; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A5CD156E9369001E0AA3 /* finalcv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = finalcv.c; sourceTree = ""; }; + 3114A5D6156E93A0001E0AA3 /* finaltest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = finaltest; sourceTree = 
BUILT_PRODUCTS_DIR; }; + 3114A5E5156E93B9001E0AA3 /* finaltest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = finaltest.c; sourceTree = ""; }; + 3114A5EF156E93E7001E0AA3 /* arenacv */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = arenacv; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A5FB156E93FC001E0AA3 /* arenacv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = arenacv.c; sourceTree = ""; }; + 3114A605156E9430001E0AA3 /* bttest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = bttest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A613156E944A001E0AA3 /* bttest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = bttest.c; sourceTree = ""; }; + 3114A61C156E9485001E0AA3 /* teletest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = teletest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A628156E949A001E0AA3 /* teletest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = teletest.c; sourceTree = ""; }; + 3114A633156E94DB001E0AA3 /* abqtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = abqtest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A63D156E94EA001E0AA3 /* abqtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = abqtest.c; sourceTree = ""; }; + 3114A645156E9525001E0AA3 /* abq.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = abq.c; sourceTree = ""; }; + 3114A64C156E9596001E0AA3 /* landtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = landtest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A662156E95D9001E0AA3 /* btcv */ = {isa = 
PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = btcv; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A66C156E95EB001E0AA3 /* btcv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = btcv.c; sourceTree = ""; }; + 3114A67C156E9668001E0AA3 /* mv2test */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mv2test; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A686156E9674001E0AA3 /* mv2test.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mv2test.c; sourceTree = ""; }; + 3114A695156E971B001E0AA3 /* messtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = messtest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A69F156E9725001E0AA3 /* messtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = messtest.c; sourceTree = ""; }; + 3114A6AC156E9759001E0AA3 /* walkt0 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = walkt0; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A6BA156E9768001E0AA3 /* walkt0.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = walkt0.c; sourceTree = ""; }; + 3114A6C6156E9815001E0AA3 /* mpseventcnv */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mpseventcnv; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A6D0156E9829001E0AA3 /* eventcnv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = eventcnv.c; sourceTree = ""; }; + 31160D921899540D0071EB17 /* abq.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = abq.txt; path = ../design/abq.txt; sourceTree = ""; }; + 31160D931899540D0071EB17 /* alloc-frame.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "alloc-frame.txt"; path = 
"../design/alloc-frame.txt"; sourceTree = ""; }; + 31160D941899540D0071EB17 /* arena.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = arena.txt; path = ../design/arena.txt; sourceTree = ""; }; + 31160D951899540D0071EB17 /* arenavm.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = arenavm.txt; path = ../design/arenavm.txt; sourceTree = ""; }; + 31160D961899540D0071EB17 /* bt.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = bt.txt; path = ../design/bt.txt; sourceTree = ""; }; + 31160D971899540D0071EB17 /* buffer.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = buffer.txt; path = ../design/buffer.txt; sourceTree = ""; }; + 31160D981899540D0071EB17 /* cbs.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = cbs.txt; path = ../design/cbs.txt; sourceTree = ""; }; + 31160D991899540D0071EB17 /* check.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = check.txt; path = ../design/check.txt; sourceTree = ""; }; + 31160D9A1899540D0071EB17 /* pool.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = pool.txt; path = ../design/pool.txt; sourceTree = ""; }; + 31160D9B1899540D0071EB17 /* collection.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = collection.txt; path = ../design/collection.txt; sourceTree = ""; }; + 31160D9C1899540D0071EB17 /* config.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = config.txt; path = ../design/config.txt; sourceTree = ""; }; + 31160D9D1899540D0071EB17 /* critical-path.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "critical-path.txt"; path = "../design/critical-path.txt"; sourceTree = ""; }; + 31160D9E1899540D0071EB17 /* diag.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = diag.txt; path = ../design/diag.txt; sourceTree = ""; }; + 31160D9F1899540D0071EB17 /* finalize.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = finalize.txt; path = 
../design/finalize.txt; sourceTree = ""; }; + 31160DA01899540D0071EB17 /* fix.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = fix.txt; path = ../design/fix.txt; sourceTree = ""; }; + 31160DA11899540D0071EB17 /* freelist.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = freelist.txt; path = ../design/freelist.txt; sourceTree = ""; }; + 31160DA21899540D0071EB17 /* guide.hex.trans.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = guide.hex.trans.txt; path = ../design/guide.hex.trans.txt; sourceTree = ""; }; + 31160DA31899540D0071EB17 /* guide.impl.c.format.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = guide.impl.c.format.txt; path = ../design/guide.impl.c.format.txt; sourceTree = ""; }; + 31160DA41899540D0071EB17 /* index.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = index.txt; path = ../design/index.txt; sourceTree = ""; }; + 31160DA51899540D0071EB17 /* interface-c.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "interface-c.txt"; path = "../design/interface-c.txt"; sourceTree = ""; }; + 31160DA61899540D0071EB17 /* io.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = io.txt; path = ../design/io.txt; sourceTree = ""; }; + 31160DA71899540D0071EB17 /* keyword-arguments.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "keyword-arguments.txt"; path = "../design/keyword-arguments.txt"; sourceTree = ""; }; + 31160DA81899540D0071EB17 /* lib.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = lib.txt; path = ../design/lib.txt; sourceTree = ""; }; + 31160DA91899540D0071EB17 /* lock.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = lock.txt; path = ../design/lock.txt; sourceTree = ""; }; + 31160DAA1899540D0071EB17 /* locus.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = locus.txt; path = ../design/locus.txt; sourceTree = ""; }; + 31160DAB1899540D0071EB17 /* message-gc.txt */ = {isa 
= PBXFileReference; lastKnownFileType = text; name = "message-gc.txt"; path = "../design/message-gc.txt"; sourceTree = ""; }; + 31160DAC1899540D0071EB17 /* message.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = message.txt; path = ../design/message.txt; sourceTree = ""; }; + 31160DAD1899540D0071EB17 /* object-debug.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "object-debug.txt"; path = "../design/object-debug.txt"; sourceTree = ""; }; + 31160DAF1899540D0071EB17 /* poolamc.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poolamc.txt; path = ../design/poolamc.txt; sourceTree = ""; }; + 31160DB01899540D0071EB17 /* poolams.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poolams.txt; path = ../design/poolams.txt; sourceTree = ""; }; + 31160DB11899540D0071EB17 /* poolawl.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poolawl.txt; path = ../design/poolawl.txt; sourceTree = ""; }; + 31160DB21899540D0071EB17 /* poollo.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poollo.txt; path = ../design/poollo.txt; sourceTree = ""; }; + 31160DB31899540D0071EB17 /* poolmfs.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poolmfs.txt; path = ../design/poolmfs.txt; sourceTree = ""; }; + 31160DB41899540D0071EB17 /* poolmrg.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poolmrg.txt; path = ../design/poolmrg.txt; sourceTree = ""; }; + 31160DB61899540D0071EB17 /* poolmvff.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poolmvff.txt; path = ../design/poolmvff.txt; sourceTree = ""; }; + 31160DB71899540D0071EB17 /* poolmvt.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = poolmvt.txt; path = ../design/poolmvt.txt; sourceTree = ""; }; + 31160DB81899540D0071EB17 /* prot.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = prot.txt; path = ../design/prot.txt; sourceTree = ""; }; + 
31160DBB1899540D0071EB17 /* protocol.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = protocol.txt; path = ../design/protocol.txt; sourceTree = ""; }; + 31160DBD1899540D0071EB17 /* pthreadext.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = pthreadext.txt; path = ../design/pthreadext.txt; sourceTree = ""; }; + 31160DBE1899540D0071EB17 /* range.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = range.txt; path = ../design/range.txt; sourceTree = ""; }; + 31160DC01899540D0071EB17 /* ring.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = ring.txt; path = ../design/ring.txt; sourceTree = ""; }; + 31160DC11899540D0071EB17 /* root.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = root.txt; path = ../design/root.txt; sourceTree = ""; }; + 31160DC21899540D0071EB17 /* scan.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = scan.txt; path = ../design/scan.txt; sourceTree = ""; }; + 31160DC31899540D0071EB17 /* seg.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = seg.txt; path = ../design/seg.txt; sourceTree = ""; }; + 31160DC41899540D0071EB17 /* shield.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = shield.txt; path = ../design/shield.txt; sourceTree = ""; }; + 31160DC51899540D0071EB17 /* sig.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = sig.txt; path = ../design/sig.txt; sourceTree = ""; }; + 31160DC61899540D0071EB17 /* splay.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = splay.txt; path = ../design/splay.txt; sourceTree = ""; }; + 31160DC81899540D0071EB17 /* strategy.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = strategy.txt; path = ../design/strategy.txt; sourceTree = ""; }; + 31160DC91899540D0071EB17 /* telemetry.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = telemetry.txt; path = ../design/telemetry.txt; sourceTree = ""; }; + 31160DCA1899540D0071EB17 /* 
tests.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = tests.txt; path = ../design/tests.txt; sourceTree = ""; }; + 31160DCB1899540D0071EB17 /* thread-manager.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "thread-manager.txt"; path = "../design/thread-manager.txt"; sourceTree = ""; }; + 31160DCC1899540D0071EB17 /* thread-safety.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "thread-safety.txt"; path = "../design/thread-safety.txt"; sourceTree = ""; }; + 31160DCD1899540D0071EB17 /* trace.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = trace.txt; path = ../design/trace.txt; sourceTree = ""; }; + 31160DCE1899540D0071EB17 /* type.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = type.txt; path = ../design/type.txt; sourceTree = ""; }; + 31160DCF1899540D0071EB17 /* version-library.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "version-library.txt"; path = "../design/version-library.txt"; sourceTree = ""; }; + 31160DD11899540D0071EB17 /* vm.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = vm.txt; path = ../design/vm.txt; sourceTree = ""; }; + 31160DD51899540D0071EB17 /* writef.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = writef.txt; path = ../design/writef.txt; sourceTree = ""; }; + 31172ABA17750F9D009488E5 /* thxc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = thxc.c; sourceTree = ""; }; + 31172ABB177512F6009488E5 /* prmcxci3.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = prmcxci3.c; sourceTree = ""; }; + 31172ABC1775131C009488E5 /* prmcxci6.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = prmcxci6.c; sourceTree = ""; }; + 31172ABE1775164F009488E5 /* prmcxc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = prmcxc.h; sourceTree = ""; }; + 31172AC017752253009488E5 /* protxc.c */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.c.c; path = protxc.c; sourceTree = ""; }; + 311F2F5017398AD500C15B6A /* boot.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = boot.h; sourceTree = ""; }; + 311F2F5117398AE900C15B6A /* bt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bt.h; sourceTree = ""; }; + 311F2F5217398AE900C15B6A /* cbs.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cbs.h; sourceTree = ""; }; + 311F2F5417398AE900C15B6A /* check.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = check.h; sourceTree = ""; }; + 311F2F5517398AE900C15B6A /* clock.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = clock.h; sourceTree = ""; }; + 311F2F5617398AE900C15B6A /* config.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = config.h; sourceTree = ""; }; + 311F2F5717398AE900C15B6A /* dbgpool.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dbgpool.h; sourceTree = ""; }; + 311F2F5817398AE900C15B6A /* event.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = event.h; sourceTree = ""; }; + 311F2F5917398AE900C15B6A /* eventcom.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eventcom.h; sourceTree = ""; }; + 311F2F5A17398AE900C15B6A /* eventdef.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eventdef.h; sourceTree = ""; }; + 311F2F5E17398B0E00C15B6A /* lock.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lock.h; sourceTree = ""; }; + 311F2F5F17398B0E00C15B6A /* meter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = meter.h; sourceTree = ""; }; + 311F2F6017398B0E00C15B6A /* misc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = misc.h; sourceTree = ""; }; + 311F2F6117398B0E00C15B6A /* mpm.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; 
path = mpm.h; sourceTree = ""; }; + 311F2F6217398B1A00C15B6A /* mpmst.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpmst.h; sourceTree = ""; }; + 311F2F6317398B1A00C15B6A /* mpmtypes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpmtypes.h; sourceTree = ""; }; + 311F2F6417398B1A00C15B6A /* mps.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; lineEnding = 0; path = mps.h; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.objcpp; }; + 311F2F6517398B3B00C15B6A /* mpsacl.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpsacl.h; sourceTree = ""; }; + 311F2F6617398B3B00C15B6A /* mpsavm.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpsavm.h; sourceTree = ""; }; + 311F2F6717398B3B00C15B6A /* mpsio.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpsio.h; sourceTree = ""; }; + 311F2F6817398B3B00C15B6A /* mpslib.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpslib.h; sourceTree = ""; }; + 311F2F6917398B3B00C15B6A /* mpstd.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpstd.h; sourceTree = ""; }; + 311F2F6B17398B4C00C15B6A /* mpswin.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpswin.h; sourceTree = ""; }; + 311F2F6D17398B6300C15B6A /* prmci3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = prmci3.h; sourceTree = ""; }; + 311F2F6E17398B6300C15B6A /* prmci6.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = prmci6.h; sourceTree = ""; }; + 311F2F7117398B7100C15B6A /* protocol.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = protocol.h; sourceTree = ""; }; + 311F2F7317398B7100C15B6A /* ring.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ring.h; sourceTree = ""; }; + 311F2F7417398B7100C15B6A /* sac.h */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sac.h; sourceTree = ""; }; + 311F2F7617398B8E00C15B6A /* splay.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = splay.h; sourceTree = ""; }; + 311F2F7717398B8E00C15B6A /* ss.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ss.h; sourceTree = ""; }; + 311F2F7817398B8E00C15B6A /* th.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = th.h; sourceTree = ""; }; + 311F2F7A17398B8E00C15B6A /* tract.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = tract.h; sourceTree = ""; }; + 311F2F7B17398E7600C15B6A /* poolmvff.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = poolmvff.h; sourceTree = ""; }; + 3124CAB8156BE3EC00753214 /* awlut */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = awlut; sourceTree = BUILT_PRODUCTS_DIR; }; + 3124CAC2156BE40100753214 /* awlut.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = awlut.c; sourceTree = ""; }; + 3124CAC6156BE48D00753214 /* fmtdy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fmtdy.c; sourceTree = ""; }; + 3124CAC7156BE48D00753214 /* fmtdytst.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fmtdytst.c; sourceTree = ""; }; + 3124CACA156BE4A300753214 /* poollo.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poollo.c; sourceTree = ""; }; + 3124CACC156BE4C200753214 /* fmtno.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fmtno.c; sourceTree = ""; }; + 3124CACE156BE4CF00753214 /* poolawl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poolawl.c; sourceTree = ""; }; + 3124CAD4156BE64A00753214 /* mpsicv */ = {isa = PBXFileReference; 
explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mpsicv; sourceTree = BUILT_PRODUCTS_DIR; }; + 3124CADE156BE65900753214 /* mpsicv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mpsicv.c; sourceTree = ""; }; + 3124CAE4156BE6D500753214 /* fmthe.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fmthe.c; sourceTree = ""; }; + 3124CAEB156BE7F300753214 /* amcss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = amcss; sourceTree = BUILT_PRODUCTS_DIR; }; + 3124CAF5156BE81100753214 /* amcss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amcss.c; sourceTree = ""; }; + 314562191C72ABFA00D7A514 /* scan.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = scan.c; sourceTree = ""; }; + 315B7AFC17834FDB00B097C4 /* prmci3.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = prmci3.c; sourceTree = ""; }; + 315B7AFD17834FDB00B097C4 /* prmci6.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = prmci6.c; sourceTree = ""; }; + 317B3C2A1731830100F9A469 /* arg.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = arg.c; sourceTree = ""; }; + 318DA8CD1892B0F30089718C /* djbench */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = djbench; sourceTree = BUILT_PRODUCTS_DIR; }; + 318DA8CE1892B1210089718C /* djbench.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = djbench.c; sourceTree = ""; }; + 31942A671C8EC3FC001AAF32 /* locus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = locus.h; sourceTree = ""; }; + 31942A6A1C8EC445001AAF32 /* an.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = an.txt; path = ../design/an.txt; 
sourceTree = ""; }; + 31942A6E1C8EC445001AAF32 /* bootstrap.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = bootstrap.txt; path = ../design/bootstrap.txt; sourceTree = ""; }; + 31942A741C8EC445001AAF32 /* clock.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = clock.txt; path = ../design/clock.txt; sourceTree = ""; }; + 31942A791C8EC445001AAF32 /* exec-env.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "exec-env.txt"; path = "../design/exec-env.txt"; sourceTree = ""; }; + 31942A801C8EC445001AAF32 /* guide.impl.c.naming.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = guide.impl.c.naming.txt; path = ../design/guide.impl.c.naming.txt; sourceTree = ""; }; + 31942A811C8EC445001AAF32 /* guide.review.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = guide.review.txt; path = ../design/guide.review.txt; sourceTree = ""; }; + 31942A8C1C8EC446001AAF32 /* nailboard-1.svg */ = {isa = PBXFileReference; lastKnownFileType = text.xml; name = "nailboard-1.svg"; path = "../design/nailboard-1.svg"; sourceTree = ""; }; + 31942A8D1C8EC446001AAF32 /* nailboard-2.svg */ = {isa = PBXFileReference; lastKnownFileType = text.xml; name = "nailboard-2.svg"; path = "../design/nailboard-2.svg"; sourceTree = ""; }; + 31942A8E1C8EC446001AAF32 /* nailboard-3.svg */ = {isa = PBXFileReference; lastKnownFileType = text.xml; name = "nailboard-3.svg"; path = "../design/nailboard-3.svg"; sourceTree = ""; }; + 31942A8F1C8EC446001AAF32 /* nailboard.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = nailboard.txt; path = ../design/nailboard.txt; sourceTree = ""; }; + 31942A9B1C8EC446001AAF32 /* prmc.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = prmc.txt; path = ../design/prmc.txt; sourceTree = ""; }; + 31942AA91C8EC446001AAF32 /* sp.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = sp.txt; path = ../design/sp.txt; sourceTree = ""; }; + 31942AAB1C8EC446001AAF32 /* 
stack-scan.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = "stack-scan.txt"; path = "../design/stack-scan.txt"; sourceTree = ""; }; + 31942AB01C8EC446001AAF32 /* testthr.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = testthr.txt; path = ../design/testthr.txt; sourceTree = ""; }; + 319F7A142A30D08500E5B418 /* addrobj */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = addrobj; sourceTree = BUILT_PRODUCTS_DIR; }; + 319F7A152A30D11400E5B418 /* addrobj.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = addrobj.c; sourceTree = ""; }; + 31A47BA3156C1E130039B1C2 /* mps.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mps.c; sourceTree = ""; }; + 31C83ADD1786281C0031A0DB /* protxc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = protxc.h; sourceTree = ""; }; + 31CD33BB173A9F1500524741 /* mpscams.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpscams.h; sourceTree = ""; }; + 31CD33BC173A9F1500524741 /* poolams.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = poolams.c; sourceTree = ""; }; + 31CD33BD173A9F1500524741 /* poolams.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = poolams.h; sourceTree = ""; }; + 31D4D5FD1745058100BE84B5 /* poolmv2.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = poolmv2.c; sourceTree = ""; }; + 31D60006156D3C5F00337B26 /* segsmss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = segsmss.c; sourceTree = ""; }; + 31D6000D156D3CB200337B26 /* awluthe */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = awluthe; sourceTree = BUILT_PRODUCTS_DIR; }; + 31D60017156D3CC300337B26 /* awluthe.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType 
= sourcecode.c.c; path = awluthe.c; sourceTree = ""; }; + 31D60027156D3D3E00337B26 /* lockcov */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = lockcov; sourceTree = BUILT_PRODUCTS_DIR; }; + 31D60036156D3E0200337B26 /* lockcov.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lockcov.c; sourceTree = ""; }; + 31D6003E156D3EC700337B26 /* poolncv */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = poolncv; sourceTree = BUILT_PRODUCTS_DIR; }; + 31D6004A156D3EE600337B26 /* poolncv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poolncv.c; sourceTree = ""; }; + 31D60054156D3F3500337B26 /* zcoll */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = zcoll; sourceTree = BUILT_PRODUCTS_DIR; }; + 31D6005E156D3F4A00337B26 /* zcoll.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = zcoll.c; sourceTree = ""; }; + 31D60071156D3FBC00337B26 /* zmess */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = zmess; sourceTree = BUILT_PRODUCTS_DIR; }; + 31D6007B156D3FCC00337B26 /* zmess.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = zmess.c; sourceTree = ""; }; + 31D6008C156D402900337B26 /* steptest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = steptest; sourceTree = BUILT_PRODUCTS_DIR; }; + 31D60098156D403C00337B26 /* steptest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = steptest.c; sourceTree = ""; }; + 31EEABF5156AAF7C00714D05 /* mpsi.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mpsi.c; sourceTree = ""; }; + 31EEABFB156AAF9D00714D05 /* libmps.a */ = 
{isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libmps.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 31EEAC01156AB21B00714D05 /* mpm.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mpm.c; sourceTree = ""; }; + 31EEAC03156AB23A00714D05 /* arenavm.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = arenavm.c; sourceTree = ""; }; + 31EEAC05156AB27B00714D05 /* arena.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = arena.c; sourceTree = ""; }; + 31EEAC06156AB27B00714D05 /* arenacl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = arenacl.c; sourceTree = ""; }; + 31EEAC07156AB27B00714D05 /* global.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = global.c; sourceTree = ""; }; + 31EEAC08156AB27B00714D05 /* locus.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = locus.c; sourceTree = ""; }; + 31EEAC09156AB27B00714D05 /* pool.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pool.c; sourceTree = ""; }; + 31EEAC0A156AB27B00714D05 /* poolabs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poolabs.c; sourceTree = ""; }; + 31EEAC0B156AB27B00714D05 /* protocol.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = protocol.c; sourceTree = ""; }; + 31EEAC0D156AB27B00714D05 /* tract.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = tract.c; sourceTree = ""; }; + 31EEAC0E156AB27B00714D05 /* walk.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = walk.c; sourceTree = ""; }; + 31EEAC19156AB2B200714D05 /* buffer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.c; path = buffer.c; sourceTree = ""; }; + 31EEAC1A156AB2B200714D05 /* format.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = format.c; sourceTree = ""; }; + 31EEAC1B156AB2B200714D05 /* ref.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ref.c; sourceTree = ""; }; + 31EEAC1C156AB2B200714D05 /* root.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = root.c; sourceTree = ""; }; + 31EEAC1D156AB2B200714D05 /* seg.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = seg.c; sourceTree = ""; }; + 31EEAC1E156AB2B200714D05 /* trace.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = trace.c; sourceTree = ""; }; + 31EEAC1F156AB2B200714D05 /* traceanc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = traceanc.c; sourceTree = ""; }; + 31EEAC27156AB2F200714D05 /* bt.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = bt.c; sourceTree = ""; }; + 31EEAC28156AB2F200714D05 /* dbgpool.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dbgpool.c; sourceTree = ""; }; + 31EEAC29156AB2F200714D05 /* dbgpooli.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dbgpooli.c; sourceTree = ""; }; + 31EEAC2A156AB2F200714D05 /* event.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event.c; sourceTree = ""; }; + 31EEAC2B156AB2F200714D05 /* ld.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ld.c; sourceTree = ""; }; + 31EEAC2C156AB2F200714D05 /* message.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = message.c; sourceTree = ""; }; + 31EEAC2D156AB2F200714D05 /* poolmfs.c */ = {isa 
= PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poolmfs.c; sourceTree = ""; }; + 31EEAC2E156AB2F200714D05 /* poolmrg.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poolmrg.c; sourceTree = ""; }; + 31EEAC30156AB2F200714D05 /* ring.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ring.c; sourceTree = ""; }; + 31EEAC31156AB2F200714D05 /* sac.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = sac.c; sourceTree = ""; }; + 31EEAC32156AB2F200714D05 /* shield.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = shield.c; sourceTree = ""; }; + 31EEAC3F156AB32500714D05 /* boot.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = boot.c; sourceTree = ""; }; + 31EEAC40156AB32500714D05 /* cbs.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cbs.c; sourceTree = ""; }; + 31EEAC42156AB32500714D05 /* meter.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = meter.c; sourceTree = ""; }; + 31EEAC43156AB32500714D05 /* splay.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = splay.c; sourceTree = ""; }; + 31EEAC44156AB32500714D05 /* version.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = version.c; sourceTree = ""; }; + 31EEAC4C156AB3B000714D05 /* lockix.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lockix.c; sourceTree = ""; }; + 31EEAC4F156AB3E300714D05 /* protix.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = protix.c; sourceTree = ""; }; + 31EEAC53156AB3E300714D05 /* vmix.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = vmix.c; sourceTree 
= ""; }; + 31EEAC5B156AB41900714D05 /* poolamc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poolamc.c; sourceTree = ""; }; + 31EEAC5D156AB43F00714D05 /* poolsnc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = poolsnc.c; sourceTree = ""; }; + 31EEAC5F156AB44D00714D05 /* poolmvff.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = poolmvff.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 31EEAC65156AB52600714D05 /* mpmss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mpmss; sourceTree = BUILT_PRODUCTS_DIR; }; + 31EEAC70156AB56000714D05 /* mpsioan.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mpsioan.c; sourceTree = ""; }; + 31EEAC71156AB56000714D05 /* mpsliban.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mpsliban.c; sourceTree = ""; }; + 31EEAC74156AB58E00714D05 /* mpmss.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mpmss.c; sourceTree = SOURCE_ROOT; }; + 31EEAC9E156AB73400714D05 /* testlib.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = testlib.c; sourceTree = ""; }; + 31EEACA7156AB79800714D05 /* span.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = span.c; sourceTree = ""; }; + 31F6CCA91739B0CF00C48748 /* mpscamc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpscamc.h; sourceTree = ""; }; + 31F6CCAA1739B0CF00C48748 /* mpscawl.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpscawl.h; sourceTree = ""; }; + 31F6CCAB1739B0CF00C48748 /* mpsclo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpsclo.h; sourceTree = ""; }; + 
31F6CCAC1739B0CF00C48748 /* mpscmvff.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpscmvff.h; sourceTree = ""; }; + 31F6CCAD1739B0CF00C48748 /* mpscsnc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mpscsnc.h; sourceTree = ""; }; + 31FCAE0A17692403008C034C /* scheme */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = scheme; sourceTree = BUILT_PRODUCTS_DIR; }; + 31FCAE18176924D4008C034C /* scheme.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = scheme.c; path = ../example/scheme/scheme.c; sourceTree = ""; }; + 6313D46618A3FDC900EB03EF /* gcbench.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = gcbench.c; sourceTree = ""; }; + 6313D47218A400B200EB03EF /* gcbench */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = gcbench; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 220FD3E2195339C000967A35 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 220FD3E3195339C000967A35 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2231BB5218CA97D8002D6322 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 2231BB5318CA97D8002D6322 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2231BB6018CA97DC002D6322 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 2231BB6118CA97DC002D6322 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 223E795E19EAB00B00DC26A6 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 223E795F19EAB00B00DC26A6 /* libmps.a in Frameworks */, + ); + 
runOnlyForDeploymentPostprocessing = 0; + }; + 224CC792175E1821002FF81B /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 224CC793175E1821002FF81B /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2265D71620E53F9C003019E8 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 2265D71720E53F9C003019E8 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2291A5B6175CAB2F001D4920 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 2291A5B7175CAB2F001D4920 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22B2BC3018B6434F00C33E63 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22C2ACA818BE400A006B3677 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 22C2ACA918BE400A006B3677 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22EA3F3E20D2B0D90065F5B6 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 22EA3F3F20D2B0D90065F5B6 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22F846B618F437B900982BA7 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 22F846B718F437B900982BA7 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22FA176E16E8D6FC0098B23F /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 22FA176F16E8D6FC0098B23F /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22FACEE618880983000FDBC1 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 
22FACEE718880983000FDBC1 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2D07B96E1636FC9900DB751B /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 2D07B97C163705E400DB751B /* libsqlite3.dylib in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2D604B9916514B1A003AAF46 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 2D53F2E716515A63009A1829 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104AFB0156D357B000A585A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104AFC2156D35B2000A585A /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104AFC5156D35E2000A585A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104AFD4156D35F7000A585A /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104AFDA156D3681000A585A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104AFE9156D3690000A585A /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104B006156D38F3000A585A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104B018156D3953000A585A /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104B01F156D39D4000A585A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104B02E156D39E2000A585A /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104B03A156D3AD7000A585A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D60008156D3C7400337B26 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 
31108A401C6B90E900E728EA /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31108A411C6B90E900E728EA /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A58D156E913C001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A59B156E914B001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A5A4156E92C0001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5B1156E92C8001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A5BA156E9315001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5C8156E9322001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A5D3156E93A0001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5E4156E93AE001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A5EC156E93E7001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5FA156E93F3001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A602156E9430001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A617156E946B001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A619156E9485001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A626156E948C001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A630156E94DB001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 
3114A640156E94F0001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A649156E9596001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A673156E95F6001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A65F156E95D9001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A670156E95F2001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A679156E9668001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A6DD156E9A0F001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A692156E971B001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A6A3156E972D001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A6A9156E9759001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A6BE156E9771001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A6C3156E9815001E0AA3 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A6D7156E9923001E0AA3 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3124CAB5156BE3EC00753214 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3124CAC4156BE40D00753214 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3124CAD1156BE64A00753214 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3124CAE1156BE67000753214 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 
3124CAE8156BE7F300753214 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3124CAFC156BE82900753214 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 318DA8C71892B0F30089718C /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 319F7A0D2A30D08500E5B418 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 319F7A0E2A30D08500E5B418 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D6000A156D3CB200337B26 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D60019156D3CCC00337B26 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D60024156D3D3E00337B26 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D60035156D3DF300337B26 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D6003B156D3EC700337B26 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D60049156D3ED200337B26 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D60051156D3F3500337B26 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D60060156D3F5000337B26 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D6006E156D3FBC00337B26 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D60083156D3FDB00337B26 /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D60089156D402900337B26 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D6009B156D404400337B26 /* libmps.a in Frameworks */, + ); 
+ runOnlyForDeploymentPostprocessing = 0; + }; + 31EEABF8156AAF9D00714D05 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31EEAC62156AB52600714D05 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 3150AE53156ABA2500A6E22A /* libmps.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31FCAE0717692403008C034C /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 6313D46C18A400B200EB03EF /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 2265D71E20E53FEF003019E8 /* mapeventpy */ = { + isa = PBXGroup; + children = ( + 2265D71F20E5400F003019E8 /* eventpy.c */, + ); + path = mapeventpy; + sourceTree = ""; + }; + 2D07B96A1636FC4C00DB751B /* mpseventsql */ = { + isa = PBXGroup; + children = ( + 2D07B96C1636FC7200DB751B /* eventsql.c */, + ); + name = mpseventsql; + sourceTree = ""; + }; + 2D604B971651433C003AAF46 /* mpseventtxt */ = { + isa = PBXGroup; + children = ( + 2D604BA416514C4F003AAF46 /* eventtxt.c */, + ); + name = mpseventtxt; + sourceTree = ""; + }; + 3114A6D6156E9846001E0AA3 /* Tools */ = { + isa = PBXGroup; + children = ( + 2265D71E20E53FEF003019E8 /* mapeventpy */, + 2D604B971651433C003AAF46 /* mpseventtxt */, + 2D07B96A1636FC4C00DB751B /* mpseventsql */, + 3114A6D8156E9942001E0AA3 /* mpseventcnv */, + ); + name = Tools; + sourceTree = ""; + }; + 3114A6D8156E9942001E0AA3 /* mpseventcnv */ = { + isa = PBXGroup; + children = ( + 3114A6D0156E9829001E0AA3 /* eventcnv.c */, + ); + name = mpseventcnv; + sourceTree = ""; + }; + 31160D90189953D50071EB17 /* Design */ = { + isa = PBXGroup; + children = ( + 
31160D921899540D0071EB17 /* abq.txt */, + 31160D931899540D0071EB17 /* alloc-frame.txt */, + 31942A6A1C8EC445001AAF32 /* an.txt */, + 31160D941899540D0071EB17 /* arena.txt */, + 31160D951899540D0071EB17 /* arenavm.txt */, + 31942A6E1C8EC445001AAF32 /* bootstrap.txt */, + 31160D961899540D0071EB17 /* bt.txt */, + 31160D971899540D0071EB17 /* buffer.txt */, + 31160D981899540D0071EB17 /* cbs.txt */, + 31160D991899540D0071EB17 /* check.txt */, + 31942A741C8EC445001AAF32 /* clock.txt */, + 31160D9B1899540D0071EB17 /* collection.txt */, + 31160D9C1899540D0071EB17 /* config.txt */, + 31160D9D1899540D0071EB17 /* critical-path.txt */, + 31160D9E1899540D0071EB17 /* diag.txt */, + 31942A791C8EC445001AAF32 /* exec-env.txt */, + 22DD93E118ED815F00240DD2 /* failover.txt */, + 31160D9F1899540D0071EB17 /* finalize.txt */, + 31160DA01899540D0071EB17 /* fix.txt */, + 31160DA11899540D0071EB17 /* freelist.txt */, + 31160DA21899540D0071EB17 /* guide.hex.trans.txt */, + 31160DA31899540D0071EB17 /* guide.impl.c.format.txt */, + 31942A801C8EC445001AAF32 /* guide.impl.c.naming.txt */, + 31942A811C8EC445001AAF32 /* guide.review.txt */, + 31160DA41899540D0071EB17 /* index.txt */, + 31160DA51899540D0071EB17 /* interface-c.txt */, + 31160DA61899540D0071EB17 /* io.txt */, + 31160DA71899540D0071EB17 /* keyword-arguments.txt */, + 22DD93E218ED815F00240DD2 /* land.txt */, + 31160DA81899540D0071EB17 /* lib.txt */, + 31160DA91899540D0071EB17 /* lock.txt */, + 31160DAA1899540D0071EB17 /* locus.txt */, + 31160DAB1899540D0071EB17 /* message-gc.txt */, + 31160DAC1899540D0071EB17 /* message.txt */, + 31942A8C1C8EC446001AAF32 /* nailboard-1.svg */, + 31942A8D1C8EC446001AAF32 /* nailboard-2.svg */, + 31942A8E1C8EC446001AAF32 /* nailboard-3.svg */, + 31942A8F1C8EC446001AAF32 /* nailboard.txt */, + 31160DAD1899540D0071EB17 /* object-debug.txt */, + 31160D9A1899540D0071EB17 /* pool.txt */, + 31160DAF1899540D0071EB17 /* poolamc.txt */, + 31160DB01899540D0071EB17 /* poolams.txt */, + 31160DB11899540D0071EB17 /* 
poolawl.txt */, + 31160DB21899540D0071EB17 /* poollo.txt */, + 31160DB31899540D0071EB17 /* poolmfs.txt */, + 31160DB41899540D0071EB17 /* poolmrg.txt */, + 31160DB61899540D0071EB17 /* poolmvff.txt */, + 31160DB71899540D0071EB17 /* poolmvt.txt */, + 31942A9B1C8EC446001AAF32 /* prmc.txt */, + 31160DB81899540D0071EB17 /* prot.txt */, + 31160DBB1899540D0071EB17 /* protocol.txt */, + 31160DBD1899540D0071EB17 /* pthreadext.txt */, + 31160DBE1899540D0071EB17 /* range.txt */, + 31160DC01899540D0071EB17 /* ring.txt */, + 31160DC11899540D0071EB17 /* root.txt */, + 31160DC21899540D0071EB17 /* scan.txt */, + 31160DC31899540D0071EB17 /* seg.txt */, + 31160DC41899540D0071EB17 /* shield.txt */, + 31160DC51899540D0071EB17 /* sig.txt */, + 31942AA91C8EC446001AAF32 /* sp.txt */, + 31160DC61899540D0071EB17 /* splay.txt */, + 31942AAB1C8EC446001AAF32 /* stack-scan.txt */, + 31160DC81899540D0071EB17 /* strategy.txt */, + 31160DC91899540D0071EB17 /* telemetry.txt */, + 31160DCA1899540D0071EB17 /* tests.txt */, + 31942AB01C8EC446001AAF32 /* testthr.txt */, + 31160DCB1899540D0071EB17 /* thread-manager.txt */, + 31160DCC1899540D0071EB17 /* thread-safety.txt */, + 31160DCD1899540D0071EB17 /* trace.txt */, + 31160DCE1899540D0071EB17 /* type.txt */, + 31160DCF1899540D0071EB17 /* version-library.txt */, + 31160DD11899540D0071EB17 /* vm.txt */, + 31160DD51899540D0071EB17 /* writef.txt */, + ); + name = Design; + sourceTree = ""; + }; + 3124CAB3156BE1B700753214 /* Tests */ = { + isa = PBXGroup; + children = ( + 319F7A152A30D11400E5B418 /* addrobj.c */, + 3114A63D156E94EA001E0AA3 /* abqtest.c */, + 22FACED1188807FF000FDBC1 /* airtest.c */, + 3124CAF5156BE81100753214 /* amcss.c */, + 3104AFEB156D36A5000A585A /* amcsshe.c */, + 22FA177616E8D7A80098B23F /* amcssth.c */, + 3104B015156D390B000A585A /* amsss.c */, + 3104B02F156D39F2000A585A /* amssshe.c */, + 3104AFBE156D3591000A585A /* apss.c */, + 3114A5FB156E93FC001E0AA3 /* arenacv.c */, + 3124CAC2156BE40100753214 /* awlut.c */, + 
31D60017156D3CC300337B26 /* awluthe.c */, + 2291A5A9175CAA9B001D4920 /* awlutth.c */, + 3114A66C156E95EB001E0AA3 /* btcv.c */, + 3114A613156E944A001E0AA3 /* bttest.c */, + 3114A5CD156E9369001E0AA3 /* finalcv.c */, + 3114A5E5156E93B9001E0AA3 /* finaltest.c */, + 3124CAC6156BE48D00753214 /* fmtdy.c */, + 22FACED2188807FF000FDBC1 /* fmtdy.h */, + 3124CAC7156BE48D00753214 /* fmtdytst.c */, + 22FACED3188807FF000FDBC1 /* fmtdytst.h */, + 3124CAE4156BE6D500753214 /* fmthe.c */, + 22FACED4188807FF000FDBC1 /* fmthe.h */, + 3124CACC156BE4C200753214 /* fmtno.c */, + 22FACED5188807FF000FDBC1 /* fmtno.h */, + 22FACED6188807FF000FDBC1 /* fmtscheme.c */, + 22FACED7188807FF000FDBC1 /* fmtscheme.h */, + 22EA3F3720D2B0730065F5B6 /* forktest.c */, + 224CC79E175E3202002FF81B /* fotest.c */, + 2291A5E9175CB4EC001D4920 /* landtest.c */, + 2231BB6818CA9834002D6322 /* locbwcss.c */, + 31D60036156D3E0200337B26 /* lockcov.c */, + 22F846AF18F4379C00982BA7 /* lockut.c */, + 2231BB6918CA983C002D6322 /* locusss.c */, + 3114A5A1156E9168001E0AA3 /* locv.c */, + 3114A69F156E9725001E0AA3 /* messtest.c */, + 31EEAC74156AB58E00714D05 /* mpmss.c */, + 3124CADE156BE65900753214 /* mpsicv.c */, + 3114A686156E9674001E0AA3 /* mv2test.c */, + 22C2ACA018BE3FEC006B3677 /* nailboardtest.c */, + 31D6004A156D3EE600337B26 /* poolncv.c */, + 3114A5B7156E92F0001E0AA3 /* qs.c */, + 3104AFD6156D3602000A585A /* sacss.c */, + 31D60006156D3C5F00337B26 /* segsmss.c */, + 223E796619EAB04100DC26A6 /* sncss.c */, + 31D60098156D403C00337B26 /* steptest.c */, + 31108A391C6B90D600E728EA /* tagtest.c */, + 3114A628156E949A001E0AA3 /* teletest.c */, + 31EEAC9E156AB73400714D05 /* testlib.c */, + 2291A5F0175CB7A4001D4920 /* testlib.h */, + 22561A9618F4263300372C66 /* testthr.h */, + 22561A9718F4263300372C66 /* testthrix.c */, + 3114A6BA156E9768001E0AA3 /* walkt0.c */, + 31D6005E156D3F4A00337B26 /* zcoll.c */, + 31D6007B156D3FCC00337B26 /* zmess.c */, + 220FD3F019533C3200967A35 /* ztfm.c */, + ); + name = Tests; + sourceTree = ""; 
+ }; + 318DA8C21892B0B20089718C /* Benchmarks */ = { + isa = PBXGroup; + children = ( + 318DA8CE1892B1210089718C /* djbench.c */, + 6313D46618A3FDC900EB03EF /* gcbench.c */, + ); + name = Benchmarks; + sourceTree = ""; + }; + 31A47BA8156C1E930039B1C2 /* MPS */ = { + isa = PBXGroup; + children = ( + 31A47BA3156C1E130039B1C2 /* mps.c */, + 31EEABF4156AAF6500714D05 /* MPM Core */, + 31EEAC5A156AB40800714D05 /* Extra pools */, + 31EEAC4B156AB39C00714D05 /* Platform */, + 31EEAC6F156AB54300714D05 /* ANSI Plinth */, + ); + name = MPS; + sourceTree = ""; + }; + 31EEABD8156AAE9E00714D05 = { + isa = PBXGroup; + children = ( + 2D07B97B163705E400DB751B /* libsqlite3.dylib */, + 3114A6D6156E9846001E0AA3 /* Tools */, + 31A47BA8156C1E930039B1C2 /* MPS */, + 31160D90189953D50071EB17 /* Design */, + 3124CAB3156BE1B700753214 /* Tests */, + 318DA8C21892B0B20089718C /* Benchmarks */, + 31FCAE171769247F008C034C /* Scheme */, + 31EEABEF156AAF5C00714D05 /* Products */, + ); + sourceTree = ""; + }; + 31EEABEF156AAF5C00714D05 /* Products */ = { + isa = PBXGroup; + children = ( + 31EEABFB156AAF9D00714D05 /* libmps.a */, + 31EEAC65156AB52600714D05 /* mpmss */, + 3124CAB8156BE3EC00753214 /* awlut */, + 3124CAD4156BE64A00753214 /* mpsicv */, + 3124CAEB156BE7F300753214 /* amcss */, + 3104AFB3156D357B000A585A /* apss */, + 3104AFC8156D35E2000A585A /* sacss */, + 3104AFDD156D3681000A585A /* amcsshe */, + 3104B009156D38F3000A585A /* amsss */, + 3104B022156D39D4000A585A /* amssshe */, + 3104B03D156D3AD7000A585A /* segsmss */, + 31D6000D156D3CB200337B26 /* awluthe */, + 31D60027156D3D3E00337B26 /* lockcov */, + 31D6003E156D3EC700337B26 /* poolncv */, + 31D60054156D3F3500337B26 /* zcoll */, + 31D60071156D3FBC00337B26 /* zmess */, + 31D6008C156D402900337B26 /* steptest */, + 3114A590156E913C001E0AA3 /* locv */, + 3114A5A7156E92C0001E0AA3 /* qs */, + 3114A5BD156E9315001E0AA3 /* finalcv */, + 3114A5D6156E93A0001E0AA3 /* finaltest */, + 3114A5EF156E93E7001E0AA3 /* arenacv */, + 3114A605156E9430001E0AA3 
/* bttest */, + 3114A61C156E9485001E0AA3 /* teletest */, + 3114A633156E94DB001E0AA3 /* abqtest */, + 3114A64C156E9596001E0AA3 /* landtest */, + 3114A662156E95D9001E0AA3 /* btcv */, + 3114A67C156E9668001E0AA3 /* mv2test */, + 3114A695156E971B001E0AA3 /* messtest */, + 3114A6AC156E9759001E0AA3 /* walkt0 */, + 3114A6C6156E9815001E0AA3 /* mpseventcnv */, + 2D07B9711636FC9900DB751B /* mpseventsql */, + 2D604B9C16514B1A003AAF46 /* mpseventtxt */, + 22FA177516E8D6FC0098B23F /* amcssth */, + 2291A5BD175CAB2F001D4920 /* awlutth */, + 224CC799175E1821002FF81B /* fotest */, + 31FCAE0A17692403008C034C /* scheme */, + 318DA8CD1892B0F30089718C /* djbench */, + 6313D47218A400B200EB03EF /* gcbench */, + 22B2BC3618B6434F00C33E63 /* scheme-advanced */, + 2231BB5918CA97D8002D6322 /* locbwcss */, + 2231BB6718CA97DC002D6322 /* locusss */, + 22FACEED18880983000FDBC1 /* airtest */, + 22C2ACAF18BE400A006B3677 /* nailboardtest */, + 22F846BD18F437B900982BA7 /* lockut */, + 31108A471C6B90E900E728EA /* tagtest */, + 223E796519EAB00B00DC26A6 /* sncss */, + 22EA3F4520D2B0D90065F5B6 /* forktest */, + 2265D71D20E53F9C003019E8 /* mpseventpy */, + 220FD3E9195339C000967A35 /* ztfm */, + 319F7A142A30D08500E5B418 /* addrobj */, + ); + name = Products; + sourceTree = ""; + }; + 31EEABF4156AAF6500714D05 /* MPM Core */ = { + isa = PBXGroup; + children = ( + 3114A645156E9525001E0AA3 /* abq.c */, + 2291A5EA175CB503001D4920 /* abq.h */, + 31EEAC05156AB27B00714D05 /* arena.c */, + 31EEAC06156AB27B00714D05 /* arenacl.c */, + 31EEAC03156AB23A00714D05 /* arenavm.c */, + 317B3C2A1731830100F9A469 /* arg.c */, + 3107DC4E173B03D100F705C8 /* arg.h */, + 31EEAC3F156AB32500714D05 /* boot.c */, + 311F2F5017398AD500C15B6A /* boot.h */, + 31EEAC27156AB2F200714D05 /* bt.c */, + 311F2F5117398AE900C15B6A /* bt.h */, + 31EEAC19156AB2B200714D05 /* buffer.c */, + 31EEAC40156AB32500714D05 /* cbs.c */, + 311F2F5217398AE900C15B6A /* cbs.h */, + 311F2F5417398AE900C15B6A /* check.h */, + 311F2F5517398AE900C15B6A /* clock.h */, + 
311F2F5617398AE900C15B6A /* config.h */, + 31EEAC28156AB2F200714D05 /* dbgpool.c */, + 311F2F5717398AE900C15B6A /* dbgpool.h */, + 31EEAC29156AB2F200714D05 /* dbgpooli.c */, + 31EEAC2A156AB2F200714D05 /* event.c */, + 311F2F5817398AE900C15B6A /* event.h */, + 311F2F5917398AE900C15B6A /* eventcom.h */, + 311F2F5A17398AE900C15B6A /* eventdef.h */, + 22C5C99A18EC6AEC004C63D4 /* failover.c */, + 22C5C99B18EC6AEC004C63D4 /* failover.h */, + 31EEAC1A156AB2B200714D05 /* format.c */, + 2291A5EE175CB768001D4920 /* freelist.c */, + 2291A5EF175CB768001D4920 /* freelist.h */, + 31EEAC07156AB27B00714D05 /* global.c */, + 22C5C99C18EC6AEC004C63D4 /* land.c */, + 31EEAC2B156AB2F200714D05 /* ld.c */, + 311F2F5E17398B0E00C15B6A /* lock.h */, + 31EEAC08156AB27B00714D05 /* locus.c */, + 31942A671C8EC3FC001AAF32 /* locus.h */, + 31EEAC2C156AB2F200714D05 /* message.c */, + 31EEAC42156AB32500714D05 /* meter.c */, + 311F2F5F17398B0E00C15B6A /* meter.h */, + 311F2F6017398B0E00C15B6A /* misc.h */, + 31EEAC01156AB21B00714D05 /* mpm.c */, + 311F2F6117398B0E00C15B6A /* mpm.h */, + 311F2F6217398B1A00C15B6A /* mpmst.h */, + 311F2F6317398B1A00C15B6A /* mpmtypes.h */, + 311F2F6417398B1A00C15B6A /* mps.h */, + 311F2F6517398B3B00C15B6A /* mpsacl.h */, + 311F2F6617398B3B00C15B6A /* mpsavm.h */, + 22FACEDB188808D5000FDBC1 /* mpscmfs.h */, + 220FD3ED19533A8700967A35 /* mpscvm.h */, + 31EEABF5156AAF7C00714D05 /* mpsi.c */, + 220FD3EA195339E500967A35 /* mpsitr.c */, + 311F2F6717398B3B00C15B6A /* mpsio.h */, + 311F2F6817398B3B00C15B6A /* mpslib.h */, + 311F2F6917398B3B00C15B6A /* mpstd.h */, + 311F2F6B17398B4C00C15B6A /* mpswin.h */, + 22E30E821886FF1400D98EA9 /* nailboard.c */, + 22E30E831886FF1400D98EA9 /* nailboard.h */, + 31EEAC09156AB27B00714D05 /* pool.c */, + 31EEAC0A156AB27B00714D05 /* poolabs.c */, + 31EEAC2D156AB2F200714D05 /* poolmfs.c */, + 22FACEDC18880933000FDBC1 /* poolmfs.h */, + 31EEAC2E156AB2F200714D05 /* poolmrg.c */, + 22FACEDD18880933000FDBC1 /* poolmrg.h */, + 
31EEAC5F156AB44D00714D05 /* poolmvff.c */, + 311F2F7B17398E7600C15B6A /* poolmvff.h */, + 22FACEDE18880933000FDBC1 /* pooln.c */, + 22FACEDF18880933000FDBC1 /* pooln.h */, + 2213454C1DB0386600E14202 /* prmc.h */, + 31EEAC0B156AB27B00714D05 /* protocol.c */, + 311F2F7117398B7100C15B6A /* protocol.h */, + 2291A5EB175CB53E001D4920 /* range.c */, + 2291A5EC175CB53E001D4920 /* range.h */, + 2239BB4C20EE2E34007AC917 /* rangetree.c */, + 2239BB4D20EE2E4D007AC917 /* rangetree.h */, + 31EEAC1B156AB2B200714D05 /* ref.c */, + 31EEAC30156AB2F200714D05 /* ring.c */, + 311F2F7317398B7100C15B6A /* ring.h */, + 31EEAC1C156AB2B200714D05 /* root.c */, + 3112ED3B18ABC75200CC531A /* sa.c */, + 3112ED3A18ABC57F00CC531A /* sa.h */, + 31EEAC31156AB2F200714D05 /* sac.c */, + 311F2F7417398B7100C15B6A /* sac.h */, + 314562191C72ABFA00D7A514 /* scan.c */, + 31EEAC1D156AB2B200714D05 /* seg.c */, + 31EEAC32156AB2F200714D05 /* shield.c */, + 31EEAC43156AB32500714D05 /* splay.c */, + 311F2F7617398B8E00C15B6A /* splay.h */, + 22FACEDA1888088A000FDBC1 /* ss.c */, + 311F2F7717398B8E00C15B6A /* ss.h */, + 311F2F7817398B8E00C15B6A /* th.h */, + 31EEAC1E156AB2B200714D05 /* trace.c */, + 31EEAC1F156AB2B200714D05 /* traceanc.c */, + 31EEAC0D156AB27B00714D05 /* tract.c */, + 311F2F7A17398B8E00C15B6A /* tract.h */, + 220FD3EB195339F000967A35 /* trans.c */, + 220FD3EE19533A8700967A35 /* trans.h */, + 310F5D7118B6675F007EFCBC /* tree.c */, + 310F5D7218B6675F007EFCBC /* tree.h */, + 31EEAC44156AB32500714D05 /* version.c */, + 223475CB194CA09500C69128 /* vm.c */, + 223475CC194CA09500C69128 /* vm.h */, + 31EEAC0E156AB27B00714D05 /* walk.c */, + ); + name = "MPM Core"; + sourceTree = ""; + }; + 31EEAC4B156AB39C00714D05 /* Platform */ = { + isa = PBXGroup; + children = ( + 31EEAC4C156AB3B000714D05 /* lockix.c */, + 315B7AFC17834FDB00B097C4 /* prmci3.c */, + 311F2F6D17398B6300C15B6A /* prmci3.h */, + 315B7AFD17834FDB00B097C4 /* prmci6.c */, + 311F2F6E17398B6300C15B6A /* prmci6.h */, + 2213454D1DB038D400E14202 /* 
prmcxc.c */, + 31172ABB177512F6009488E5 /* prmcxci3.c */, + 31172ABC1775131C009488E5 /* prmcxci6.c */, + 31172ABE1775164F009488E5 /* prmcxc.h */, + 31EEAC4F156AB3E300714D05 /* protix.c */, + 31C83ADD1786281C0031A0DB /* protxc.h */, + 31172AC017752253009488E5 /* protxc.c */, + 31EEACA7156AB79800714D05 /* span.c */, + 31172ABA17750F9D009488E5 /* thxc.c */, + 31EEAC53156AB3E300714D05 /* vmix.c */, + ); + name = Platform; + sourceTree = ""; + }; + 31EEAC5A156AB40800714D05 /* Extra pools */ = { + isa = PBXGroup; + children = ( + 31F6CCA91739B0CF00C48748 /* mpscamc.h */, + 31CD33BB173A9F1500524741 /* mpscams.h */, + 31F6CCAA1739B0CF00C48748 /* mpscawl.h */, + 31F6CCAB1739B0CF00C48748 /* mpsclo.h */, + 31F6CCAC1739B0CF00C48748 /* mpscmvff.h */, + 31F6CCAD1739B0CF00C48748 /* mpscsnc.h */, + 31EEAC5B156AB41900714D05 /* poolamc.c */, + 31CD33BC173A9F1500524741 /* poolams.c */, + 31CD33BD173A9F1500524741 /* poolams.h */, + 3124CACE156BE4CF00753214 /* poolawl.c */, + 3124CACA156BE4A300753214 /* poollo.c */, + 31D4D5FD1745058100BE84B5 /* poolmv2.c */, + 2291A5A8175CAA51001D4920 /* poolmv2.h */, + 31EEAC5D156AB43F00714D05 /* poolsnc.c */, + ); + name = "Extra pools"; + sourceTree = ""; + }; + 31EEAC6F156AB54300714D05 /* ANSI Plinth */ = { + isa = PBXGroup; + children = ( + 31EEAC70156AB56000714D05 /* mpsioan.c */, + 31EEAC71156AB56000714D05 /* mpsliban.c */, + ); + name = "ANSI Plinth"; + sourceTree = ""; + }; + 31FCAE171769247F008C034C /* Scheme */ = { + isa = PBXGroup; + children = ( + 22B2BC2B18B6434000C33E63 /* scheme-advanced.c */, + 31FCAE18176924D4008C034C /* scheme.c */, + ); + name = Scheme; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + 31EEABF9156AAF9D00714D05 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + 220FD3D8195339C000967A35 /* ztfm */ = 
{ + isa = PBXNativeTarget; + buildConfigurationList = 220FD3E5195339C000967A35 /* Build configuration list for PBXNativeTarget "ztfm" */; + buildPhases = ( + 220FD3DB195339C000967A35 /* Sources */, + 220FD3E2195339C000967A35 /* Frameworks */, + 220FD3E4195339C000967A35 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 220FD3D9195339C000967A35 /* PBXTargetDependency */, + ); + name = ztfm; + productName = zmess; + productReference = 220FD3E9195339C000967A35 /* ztfm */; + productType = "com.apple.product-type.tool"; + }; + 2231BB4C18CA97D8002D6322 /* locbwcss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 2231BB5518CA97D8002D6322 /* Build configuration list for PBXNativeTarget "locbwcss" */; + buildPhases = ( + 2231BB4F18CA97D8002D6322 /* Sources */, + 2231BB5218CA97D8002D6322 /* Frameworks */, + 2231BB5418CA97D8002D6322 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 2231BB4D18CA97D8002D6322 /* PBXTargetDependency */, + ); + name = locbwcss; + productName = lockcov; + productReference = 2231BB5918CA97D8002D6322 /* locbwcss */; + productType = "com.apple.product-type.tool"; + }; + 2231BB5A18CA97DC002D6322 /* locusss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 2231BB6318CA97DC002D6322 /* Build configuration list for PBXNativeTarget "locusss" */; + buildPhases = ( + 2231BB5D18CA97DC002D6322 /* Sources */, + 2231BB6018CA97DC002D6322 /* Frameworks */, + 2231BB6218CA97DC002D6322 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 2231BB5B18CA97DC002D6322 /* PBXTargetDependency */, + ); + name = locusss; + productName = lockcov; + productReference = 2231BB6718CA97DC002D6322 /* locusss */; + productType = "com.apple.product-type.tool"; + }; + 223E795819EAB00B00DC26A6 /* sncss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 223E796119EAB00B00DC26A6 /* Build configuration list for PBXNativeTarget "sncss" */; + buildPhases = ( + 223E795B19EAB00B00DC26A6 /* Sources */, + 223E795E19EAB00B00DC26A6 
/* Frameworks */, + 223E796019EAB00B00DC26A6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 223E795919EAB00B00DC26A6 /* PBXTargetDependency */, + ); + name = sncss; + productName = apss; + productReference = 223E796519EAB00B00DC26A6 /* sncss */; + productType = "com.apple.product-type.tool"; + }; + 224CC78C175E1821002FF81B /* fotest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 224CC795175E1821002FF81B /* Build configuration list for PBXNativeTarget "fotest" */; + buildPhases = ( + 224CC78F175E1821002FF81B /* Sources */, + 224CC792175E1821002FF81B /* Frameworks */, + 224CC794175E1821002FF81B /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 224CC78D175E1821002FF81B /* PBXTargetDependency */, + ); + name = fotest; + productName = mv2test; + productReference = 224CC799175E1821002FF81B /* fotest */; + productType = "com.apple.product-type.tool"; + }; + 2265D71120E53F9C003019E8 /* mpseventpy */ = { + isa = PBXNativeTarget; + buildConfigurationList = 2265D71920E53F9C003019E8 /* Build configuration list for PBXNativeTarget "mpseventpy" */; + buildPhases = ( + 2265D71420E53F9C003019E8 /* Sources */, + 2265D71620E53F9C003019E8 /* Frameworks */, + 2265D71820E53F9C003019E8 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 2265D71220E53F9C003019E8 /* PBXTargetDependency */, + ); + name = mpseventpy; + productName = mpseventcnv; + productReference = 2265D71D20E53F9C003019E8 /* mpseventpy */; + productType = "com.apple.product-type.tool"; + }; + 2291A5AC175CAB2F001D4920 /* awlutth */ = { + isa = PBXNativeTarget; + buildConfigurationList = 2291A5B9175CAB2F001D4920 /* Build configuration list for PBXNativeTarget "awlutth" */; + buildPhases = ( + 2291A5AF175CAB2F001D4920 /* Sources */, + 2291A5B6175CAB2F001D4920 /* Frameworks */, + 2291A5B8175CAB2F001D4920 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 2291A5AD175CAB2F001D4920 /* PBXTargetDependency */, + ); + name = awlutth; + productName = awluthe; + 
productReference = 2291A5BD175CAB2F001D4920 /* awlutth */; + productType = "com.apple.product-type.tool"; + }; + 22B2BC2C18B6434F00C33E63 /* scheme-advanced */ = { + isa = PBXNativeTarget; + buildConfigurationList = 22B2BC3218B6434F00C33E63 /* Build configuration list for PBXNativeTarget "scheme-advanced" */; + buildPhases = ( + 22B2BC2D18B6434F00C33E63 /* Sources */, + 22B2BC3018B6434F00C33E63 /* Frameworks */, + 22B2BC3118B6434F00C33E63 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "scheme-advanced"; + productName = scheme; + productReference = 22B2BC3618B6434F00C33E63 /* scheme-advanced */; + productType = "com.apple.product-type.tool"; + }; + 22C2ACA218BE400A006B3677 /* nailboardtest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 22C2ACAB18BE400A006B3677 /* Build configuration list for PBXNativeTarget "nailboardtest" */; + buildPhases = ( + 22C2ACA518BE400A006B3677 /* Sources */, + 22C2ACA818BE400A006B3677 /* Frameworks */, + 22C2ACAA18BE400A006B3677 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 22C2ACA318BE400A006B3677 /* PBXTargetDependency */, + ); + name = nailboardtest; + productName = mv2test; + productReference = 22C2ACAF18BE400A006B3677 /* nailboardtest */; + productType = "com.apple.product-type.tool"; + }; + 22EA3F3820D2B0D90065F5B6 /* forktest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 22EA3F4120D2B0D90065F5B6 /* Build configuration list for PBXNativeTarget "forktest" */; + buildPhases = ( + 22EA3F3B20D2B0D90065F5B6 /* Sources */, + 22EA3F3E20D2B0D90065F5B6 /* Frameworks */, + 22EA3F4020D2B0D90065F5B6 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 22EA3F3920D2B0D90065F5B6 /* PBXTargetDependency */, + ); + name = forktest; + productName = mv2test; + productReference = 22EA3F4520D2B0D90065F5B6 /* forktest */; + productType = "com.apple.product-type.tool"; + }; + 22F846B018F437B900982BA7 /* lockut */ = { + isa = PBXNativeTarget; + buildConfigurationList = 
22F846B918F437B900982BA7 /* Build configuration list for PBXNativeTarget "lockut" */; + buildPhases = ( + 22F846B318F437B900982BA7 /* Sources */, + 22F846B618F437B900982BA7 /* Frameworks */, + 22F846B818F437B900982BA7 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 22F846B118F437B900982BA7 /* PBXTargetDependency */, + ); + name = lockut; + productName = lockcov; + productReference = 22F846BD18F437B900982BA7 /* lockut */; + productType = "com.apple.product-type.tool"; + }; + 22FA176416E8D6FC0098B23F /* amcssth */ = { + isa = PBXNativeTarget; + buildConfigurationList = 22FA177116E8D6FC0098B23F /* Build configuration list for PBXNativeTarget "amcssth" */; + buildPhases = ( + 22FA176716E8D6FC0098B23F /* Sources */, + 22FA176E16E8D6FC0098B23F /* Frameworks */, + 22FA177016E8D6FC0098B23F /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 22FA176516E8D6FC0098B23F /* PBXTargetDependency */, + ); + name = amcssth; + productName = amcssth; + productReference = 22FA177516E8D6FC0098B23F /* amcssth */; + productType = "com.apple.product-type.tool"; + }; + 22FACEE018880983000FDBC1 /* airtest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 22FACEE918880983000FDBC1 /* Build configuration list for PBXNativeTarget "airtest" */; + buildPhases = ( + 22FACEE318880983000FDBC1 /* Sources */, + 22FACEE618880983000FDBC1 /* Frameworks */, + 22FACEE818880983000FDBC1 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 22FACEE118880983000FDBC1 /* PBXTargetDependency */, + ); + name = airtest; + productName = airtest; + productReference = 22FACEED18880983000FDBC1 /* airtest */; + productType = "com.apple.product-type.tool"; + }; + 2D07B9701636FC9900DB751B /* mpseventsql */ = { + isa = PBXNativeTarget; + buildConfigurationList = 2D07B9741636FC9900DB751B /* Build configuration list for PBXNativeTarget "mpseventsql" */; + buildPhases = ( + 2D07B96D1636FC9900DB751B /* Sources */, + 2D07B96E1636FC9900DB751B /* Frameworks */, + 
2D07B96F1636FC9900DB751B /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = mpseventsql; + productName = mpseventsql; + productReference = 2D07B9711636FC9900DB751B /* mpseventsql */; + productType = "com.apple.product-type.tool"; + }; + 2D604B9B16514B1A003AAF46 /* mpseventtxt */ = { + isa = PBXNativeTarget; + buildConfigurationList = 2D604BA216514B59003AAF46 /* Build configuration list for PBXNativeTarget "mpseventtxt" */; + buildPhases = ( + 2D604B9816514B1A003AAF46 /* Sources */, + 2D604B9916514B1A003AAF46 /* Frameworks */, + 2D604B9A16514B1A003AAF46 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = mpseventtxt; + productName = mpseventtxt; + productReference = 2D604B9C16514B1A003AAF46 /* mpseventtxt */; + productType = "com.apple.product-type.tool"; + }; + 3104AFB2156D357B000A585A /* apss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3104AFBC156D357B000A585A /* Build configuration list for PBXNativeTarget "apss" */; + buildPhases = ( + 3104AFAF156D357B000A585A /* Sources */, + 3104AFB0156D357B000A585A /* Frameworks */, + 3104AFB1156D357B000A585A /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3104AFC1156D35AE000A585A /* PBXTargetDependency */, + ); + name = apss; + productName = apss; + productReference = 3104AFB3156D357B000A585A /* apss */; + productType = "com.apple.product-type.tool"; + }; + 3104AFC7156D35E2000A585A /* sacss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3104AFCF156D35E2000A585A /* Build configuration list for PBXNativeTarget "sacss" */; + buildPhases = ( + 3104AFC4156D35E2000A585A /* Sources */, + 3104AFC5156D35E2000A585A /* Frameworks */, + 3104AFC6156D35E2000A585A /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3104AFD3156D35F2000A585A /* PBXTargetDependency */, + ); + name = sacss; + productName = sacss; + productReference = 3104AFC8156D35E2000A585A /* sacss */; + productType = "com.apple.product-type.tool"; + }; + 
3104AFDC156D3681000A585A /* amcsshe */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3104AFE4156D3682000A585A /* Build configuration list for PBXNativeTarget "amcsshe" */; + buildPhases = ( + 3104AFD9156D3681000A585A /* Sources */, + 3104AFDA156D3681000A585A /* Frameworks */, + 3104AFDB156D3681000A585A /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3104AFE8156D368D000A585A /* PBXTargetDependency */, + ); + name = amcsshe; + productName = amcsshe; + productReference = 3104AFDD156D3681000A585A /* amcsshe */; + productType = "com.apple.product-type.tool"; + }; + 3104B008156D38F3000A585A /* amsss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3104B010156D38F3000A585A /* Build configuration list for PBXNativeTarget "amsss" */; + buildPhases = ( + 3104B005156D38F3000A585A /* Sources */, + 3104B006156D38F3000A585A /* Frameworks */, + 3104B007156D38F3000A585A /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3104B014156D38FA000A585A /* PBXTargetDependency */, + ); + name = amsss; + productName = amsss; + productReference = 3104B009156D38F3000A585A /* amsss */; + productType = "com.apple.product-type.tool"; + }; + 3104B021156D39D4000A585A /* amssshe */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3104B029156D39D4000A585A /* Build configuration list for PBXNativeTarget "amssshe" */; + buildPhases = ( + 3104B01E156D39D4000A585A /* Sources */, + 3104B01F156D39D4000A585A /* Frameworks */, + 3104B020156D39D4000A585A /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3104B038156D3A56000A585A /* PBXTargetDependency */, + ); + name = amssshe; + productName = amssshe; + productReference = 3104B022156D39D4000A585A /* amssshe */; + productType = "com.apple.product-type.tool"; + }; + 3104B03C156D3AD7000A585A /* segsmss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3104B044156D3AD8000A585A /* Build configuration list for PBXNativeTarget "segsmss" */; + buildPhases = ( + 3104B039156D3AD7000A585A 
/* Sources */, + 3104B03A156D3AD7000A585A /* Frameworks */, + 3104B03B156D3AD7000A585A /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3104B048156D3ADE000A585A /* PBXTargetDependency */, + ); + name = segsmss; + productName = segsmss; + productReference = 3104B03D156D3AD7000A585A /* segsmss */; + productType = "com.apple.product-type.tool"; + }; + 31108A3A1C6B90E900E728EA /* tagtest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31108A431C6B90E900E728EA /* Build configuration list for PBXNativeTarget "tagtest" */; + buildPhases = ( + 31108A3D1C6B90E900E728EA /* Sources */, + 31108A401C6B90E900E728EA /* Frameworks */, + 31108A421C6B90E900E728EA /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31108A3B1C6B90E900E728EA /* PBXTargetDependency */, + ); + name = tagtest; + productName = teletest; + productReference = 31108A471C6B90E900E728EA /* tagtest */; + productType = "com.apple.product-type.tool"; + }; + 3114A58F156E913C001E0AA3 /* locv */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A599156E913C001E0AA3 /* Build configuration list for PBXNativeTarget "locv" */; + buildPhases = ( + 3114A58C156E913C001E0AA3 /* Sources */, + 3114A58D156E913C001E0AA3 /* Frameworks */, + 3114A58E156E913C001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A59E156E9156001E0AA3 /* PBXTargetDependency */, + ); + name = locv; + productName = locv; + productReference = 3114A590156E913C001E0AA3 /* locv */; + productType = "com.apple.product-type.tool"; + }; + 3114A5A6156E92C0001E0AA3 /* qs */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A5AE156E92C0001E0AA3 /* Build configuration list for PBXNativeTarget "qs" */; + buildPhases = ( + 3114A5A3156E92C0001E0AA3 /* Sources */, + 3114A5A4156E92C0001E0AA3 /* Frameworks */, + 3114A5A5156E92C0001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A5B4156E92D8001E0AA3 /* PBXTargetDependency */, + ); + name = qs; + productName = qs; 
+ productReference = 3114A5A7156E92C0001E0AA3 /* qs */; + productType = "com.apple.product-type.tool"; + }; + 3114A5BC156E9315001E0AA3 /* finalcv */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A5C4156E9315001E0AA3 /* Build configuration list for PBXNativeTarget "finalcv" */; + buildPhases = ( + 3114A5B9156E9315001E0AA3 /* Sources */, + 3114A5BA156E9315001E0AA3 /* Frameworks */, + 3114A5BB156E9315001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A5CA156E9328001E0AA3 /* PBXTargetDependency */, + ); + name = finalcv; + productName = finalcv; + productReference = 3114A5BD156E9315001E0AA3 /* finalcv */; + productType = "com.apple.product-type.tool"; + }; + 3114A5D5156E93A0001E0AA3 /* finaltest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A5DD156E93A0001E0AA3 /* Build configuration list for PBXNativeTarget "finaltest" */; + buildPhases = ( + 3114A5D2156E93A0001E0AA3 /* Sources */, + 3114A5D3156E93A0001E0AA3 /* Frameworks */, + 3114A5D4156E93A0001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A5E8156E93BF001E0AA3 /* PBXTargetDependency */, + ); + name = finaltest; + productName = finaltest; + productReference = 3114A5D6156E93A0001E0AA3 /* finaltest */; + productType = "com.apple.product-type.tool"; + }; + 3114A5EE156E93E7001E0AA3 /* arenacv */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A5F6156E93E7001E0AA3 /* Build configuration list for PBXNativeTarget "arenacv" */; + buildPhases = ( + 3114A5EB156E93E7001E0AA3 /* Sources */, + 3114A5EC156E93E7001E0AA3 /* Frameworks */, + 3114A5ED156E93E7001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A5FE156E9406001E0AA3 /* PBXTargetDependency */, + ); + name = arenacv; + productName = arenacv; + productReference = 3114A5EF156E93E7001E0AA3 /* arenacv */; + productType = "com.apple.product-type.tool"; + }; + 3114A604156E9430001E0AA3 /* bttest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 
3114A60C156E9430001E0AA3 /* Build configuration list for PBXNativeTarget "bttest" */; + buildPhases = ( + 3114A601156E9430001E0AA3 /* Sources */, + 3114A602156E9430001E0AA3 /* Frameworks */, + 3114A603156E9430001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A610156E9438001E0AA3 /* PBXTargetDependency */, + ); + name = bttest; + productName = bttest; + productReference = 3114A605156E9430001E0AA3 /* bttest */; + productType = "com.apple.product-type.tool"; + }; + 3114A61B156E9485001E0AA3 /* teletest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A623156E9485001E0AA3 /* Build configuration list for PBXNativeTarget "teletest" */; + buildPhases = ( + 3114A618156E9485001E0AA3 /* Sources */, + 3114A619156E9485001E0AA3 /* Frameworks */, + 3114A61A156E9485001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A62C156E94A6001E0AA3 /* PBXTargetDependency */, + ); + name = teletest; + productName = teletest; + productReference = 3114A61C156E9485001E0AA3 /* teletest */; + productType = "com.apple.product-type.tool"; + }; + 3114A632156E94DB001E0AA3 /* abqtest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A63A156E94DB001E0AA3 /* Build configuration list for PBXNativeTarget "abqtest" */; + buildPhases = ( + 3114A62F156E94DB001E0AA3 /* Sources */, + 3114A630156E94DB001E0AA3 /* Frameworks */, + 3114A631156E94DB001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A642156E94F8001E0AA3 /* PBXTargetDependency */, + ); + name = abqtest; + productName = abqtest; + productReference = 3114A633156E94DB001E0AA3 /* abqtest */; + productType = "com.apple.product-type.tool"; + }; + 3114A64B156E9596001E0AA3 /* landtest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A653156E9596001E0AA3 /* Build configuration list for PBXNativeTarget "landtest" */; + buildPhases = ( + 3114A648156E9596001E0AA3 /* Sources */, + 3114A649156E9596001E0AA3 /* Frameworks */, + 
3114A64A156E9596001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A659156E95B1001E0AA3 /* PBXTargetDependency */, + ); + name = landtest; + productName = landtest; + productReference = 3114A64C156E9596001E0AA3 /* landtest */; + productType = "com.apple.product-type.tool"; + }; + 3114A661156E95D9001E0AA3 /* btcv */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A669156E95D9001E0AA3 /* Build configuration list for PBXNativeTarget "btcv" */; + buildPhases = ( + 3114A65E156E95D9001E0AA3 /* Sources */, + 3114A65F156E95D9001E0AA3 /* Frameworks */, + 3114A660156E95D9001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A675156E9619001E0AA3 /* PBXTargetDependency */, + ); + name = btcv; + productName = btcv; + productReference = 3114A662156E95D9001E0AA3 /* btcv */; + productType = "com.apple.product-type.tool"; + }; + 3114A67B156E9668001E0AA3 /* mv2test */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A683156E9669001E0AA3 /* Build configuration list for PBXNativeTarget "mv2test" */; + buildPhases = ( + 3114A678156E9668001E0AA3 /* Sources */, + 3114A679156E9668001E0AA3 /* Frameworks */, + 3114A67A156E9668001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A68B156E9682001E0AA3 /* PBXTargetDependency */, + ); + name = mv2test; + productName = mv2test; + productReference = 3114A67C156E9668001E0AA3 /* mv2test */; + productType = "com.apple.product-type.tool"; + }; + 3114A694156E971B001E0AA3 /* messtest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A69C156E971B001E0AA3 /* Build configuration list for PBXNativeTarget "messtest" */; + buildPhases = ( + 3114A691156E971B001E0AA3 /* Sources */, + 3114A692156E971B001E0AA3 /* Frameworks */, + 3114A693156E971B001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A6A5156E9735001E0AA3 /* PBXTargetDependency */, + ); + name = messtest; + productName = messtest; + productReference = 
3114A695156E971B001E0AA3 /* messtest */; + productType = "com.apple.product-type.tool"; + }; + 3114A6AB156E9759001E0AA3 /* walkt0 */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A6B3156E9759001E0AA3 /* Build configuration list for PBXNativeTarget "walkt0" */; + buildPhases = ( + 3114A6A8156E9759001E0AA3 /* Sources */, + 3114A6A9156E9759001E0AA3 /* Frameworks */, + 3114A6AA156E9759001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A6B7156E975E001E0AA3 /* PBXTargetDependency */, + ); + name = walkt0; + productName = walkt0; + productReference = 3114A6AC156E9759001E0AA3 /* walkt0 */; + productType = "com.apple.product-type.tool"; + }; + 3114A6C5156E9815001E0AA3 /* mpseventcnv */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3114A6CD156E9815001E0AA3 /* Build configuration list for PBXNativeTarget "mpseventcnv" */; + buildPhases = ( + 3114A6C2156E9815001E0AA3 /* Sources */, + 3114A6C3156E9815001E0AA3 /* Frameworks */, + 3114A6C4156E9815001E0AA3 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 3114A6D3156E9834001E0AA3 /* PBXTargetDependency */, + ); + name = mpseventcnv; + productName = mpseventcnv; + productReference = 3114A6C6156E9815001E0AA3 /* mpseventcnv */; + productType = "com.apple.product-type.tool"; + }; + 3124CAB7156BE3EC00753214 /* awlut */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3124CABF156BE3EC00753214 /* Build configuration list for PBXNativeTarget "awlut" */; + buildPhases = ( + 3124CAB4156BE3EC00753214 /* Sources */, + 3124CAB5156BE3EC00753214 /* Frameworks */, + 3124CAB6156BE3EC00753214 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31A47BAC156C21120039B1C2 /* PBXTargetDependency */, + ); + name = awlut; + productName = awlut; + productReference = 3124CAB8156BE3EC00753214 /* awlut */; + productType = "com.apple.product-type.tool"; + }; + 3124CAD3156BE64A00753214 /* mpsicv */ = { + isa = PBXNativeTarget; + buildConfigurationList = 
3124CADB156BE64A00753214 /* Build configuration list for PBXNativeTarget "mpsicv" */; + buildPhases = ( + 3124CAD0156BE64A00753214 /* Sources */, + 3124CAD1156BE64A00753214 /* Frameworks */, + 3124CAD2156BE64A00753214 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31A47BAE156C21170039B1C2 /* PBXTargetDependency */, + ); + name = mpsicv; + productName = mpsicv; + productReference = 3124CAD4156BE64A00753214 /* mpsicv */; + productType = "com.apple.product-type.tool"; + }; + 3124CAEA156BE7F300753214 /* amcss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3124CAF2156BE7F300753214 /* Build configuration list for PBXNativeTarget "amcss" */; + buildPhases = ( + 3124CAE7156BE7F300753214 /* Sources */, + 3124CAE8156BE7F300753214 /* Frameworks */, + 3124CAE9156BE7F300753214 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31A47BB0156C211B0039B1C2 /* PBXTargetDependency */, + ); + name = amcss; + productName = amcss; + productReference = 3124CAEB156BE7F300753214 /* amcss */; + productType = "com.apple.product-type.tool"; + }; + 318DA8C31892B0F30089718C /* djbench */ = { + isa = PBXNativeTarget; + buildConfigurationList = 318DA8C91892B0F30089718C /* Build configuration list for PBXNativeTarget "djbench" */; + buildPhases = ( + 318DA8C41892B0F30089718C /* Sources */, + 318DA8C71892B0F30089718C /* Frameworks */, + 318DA8C81892B0F30089718C /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = djbench; + productName = scheme; + productReference = 318DA8CD1892B0F30089718C /* djbench */; + productType = "com.apple.product-type.tool"; + }; + 319F7A042A30D08500E5B418 /* addrobj */ = { + isa = PBXNativeTarget; + buildConfigurationList = 319F7A102A30D08500E5B418 /* Build configuration list for PBXNativeTarget "addrobj" */; + buildPhases = ( + 319F7A072A30D08500E5B418 /* Sources */, + 319F7A0D2A30D08500E5B418 /* Frameworks */, + 319F7A0F2A30D08500E5B418 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 
319F7A052A30D08500E5B418 /* PBXTargetDependency */, + ); + name = addrobj; + productName = finalcv; + productReference = 319F7A142A30D08500E5B418 /* addrobj */; + productType = "com.apple.product-type.tool"; + }; + 31D6000C156D3CB200337B26 /* awluthe */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31D60014156D3CB200337B26 /* Build configuration list for PBXNativeTarget "awluthe" */; + buildPhases = ( + 31D60009156D3CB200337B26 /* Sources */, + 31D6000A156D3CB200337B26 /* Frameworks */, + 31D6000B156D3CB200337B26 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31D60020156D3CEC00337B26 /* PBXTargetDependency */, + ); + name = awluthe; + productName = awluthe; + productReference = 31D6000D156D3CB200337B26 /* awluthe */; + productType = "com.apple.product-type.tool"; + }; + 31D60026156D3D3E00337B26 /* lockcov */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31D6002E156D3D3F00337B26 /* Build configuration list for PBXNativeTarget "lockcov" */; + buildPhases = ( + 31D60023156D3D3E00337B26 /* Sources */, + 31D60024156D3D3E00337B26 /* Frameworks */, + 31D60025156D3D3E00337B26 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31D60032156D3D5300337B26 /* PBXTargetDependency */, + ); + name = lockcov; + productName = lockcov; + productReference = 31D60027156D3D3E00337B26 /* lockcov */; + productType = "com.apple.product-type.tool"; + }; + 31D6003D156D3EC700337B26 /* poolncv */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31D60045156D3EC700337B26 /* Build configuration list for PBXNativeTarget "poolncv" */; + buildPhases = ( + 31D6003A156D3EC700337B26 /* Sources */, + 31D6003B156D3EC700337B26 /* Frameworks */, + 31D6003C156D3EC700337B26 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31D6004D156D3EF000337B26 /* PBXTargetDependency */, + ); + name = poolncv; + productName = poolncv; + productReference = 31D6003E156D3EC700337B26 /* poolncv */; + productType = "com.apple.product-type.tool"; + }; + 
31D60053156D3F3500337B26 /* zcoll */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31D6005B156D3F3500337B26 /* Build configuration list for PBXNativeTarget "zcoll" */; + buildPhases = ( + 31D60050156D3F3500337B26 /* Sources */, + 31D60051156D3F3500337B26 /* Frameworks */, + 31D60052156D3F3500337B26 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31D60065156D3F5F00337B26 /* PBXTargetDependency */, + ); + name = zcoll; + productName = zcoll; + productReference = 31D60054156D3F3500337B26 /* zcoll */; + productType = "com.apple.product-type.tool"; + }; + 31D60070156D3FBC00337B26 /* zmess */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31D60078156D3FBC00337B26 /* Build configuration list for PBXNativeTarget "zmess" */; + buildPhases = ( + 31D6006D156D3FBC00337B26 /* Sources */, + 31D6006E156D3FBC00337B26 /* Frameworks */, + 31D6006F156D3FBC00337B26 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31D60085156D3FE100337B26 /* PBXTargetDependency */, + ); + name = zmess; + productName = zmess; + productReference = 31D60071156D3FBC00337B26 /* zmess */; + productType = "com.apple.product-type.tool"; + }; + 31D6008B156D402900337B26 /* steptest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31D60093156D402900337B26 /* Build configuration list for PBXNativeTarget "steptest" */; + buildPhases = ( + 31D60088156D402900337B26 /* Sources */, + 31D60089156D402900337B26 /* Frameworks */, + 31D6008A156D402900337B26 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31D60097156D403500337B26 /* PBXTargetDependency */, + ); + name = steptest; + productName = steptest; + productReference = 31D6008C156D402900337B26 /* steptest */; + productType = "com.apple.product-type.tool"; + }; + 31EEABFA156AAF9D00714D05 /* mps */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31EEABFC156AAF9D00714D05 /* Build configuration list for PBXNativeTarget "mps" */; + buildPhases = ( + 31EEABF7156AAF9D00714D05 /* Sources 
*/, + 31EEABF8156AAF9D00714D05 /* Frameworks */, + 31EEABF9156AAF9D00714D05 /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = mps; + productName = mps; + productReference = 31EEABFB156AAF9D00714D05 /* libmps.a */; + productType = "com.apple.product-type.library.static"; + }; + 31EEAC64156AB52600714D05 /* mpmss */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31EEAC6C156AB52600714D05 /* Build configuration list for PBXNativeTarget "mpmss" */; + buildPhases = ( + 31EEAC61156AB52600714D05 /* Sources */, + 31EEAC62156AB52600714D05 /* Frameworks */, + 31EEAC63156AB52600714D05 /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + 31A47BAA156C210D0039B1C2 /* PBXTargetDependency */, + ); + name = mpmss; + productName = mpmss; + productReference = 31EEAC65156AB52600714D05 /* mpmss */; + productType = "com.apple.product-type.tool"; + }; + 31FCAE0917692403008C034C /* scheme */ = { + isa = PBXNativeTarget; + buildConfigurationList = 31FCAE1317692403008C034C /* Build configuration list for PBXNativeTarget "scheme" */; + buildPhases = ( + 31FCAE0617692403008C034C /* Sources */, + 31FCAE0717692403008C034C /* Frameworks */, + 31FCAE0817692403008C034C /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = scheme; + productName = scheme; + productReference = 31FCAE0A17692403008C034C /* scheme */; + productType = "com.apple.product-type.tool"; + }; + 6313D46718A400B200EB03EF /* gcbench */ = { + isa = PBXNativeTarget; + buildConfigurationList = 6313D46E18A400B200EB03EF /* Build configuration list for PBXNativeTarget "gcbench" */; + buildPhases = ( + 6313D46818A400B200EB03EF /* Sources */, + 6313D46C18A400B200EB03EF /* Frameworks */, + 6313D46D18A400B200EB03EF /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = gcbench; + productName = scheme; + productReference = 6313D47218A400B200EB03EF /* gcbench */; + productType = "com.apple.product-type.tool"; + }; +/* End PBXNativeTarget section */ + +/* 
Begin PBXProject section */ + 31EEABDA156AAE9E00714D05 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 1000; + }; + buildConfigurationList = 31EEABDD156AAE9E00714D05 /* Build configuration list for PBXProject "mps" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + English, + en, + ); + mainGroup = 31EEABD8156AAE9E00714D05; + productRefGroup = 31EEABEF156AAF5C00714D05 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 3104AFF1156D37A0000A585A /* all */, + 2215A9B9192A47CE00E9E2CE /* testall */, + 2215A9B1192A47C500E9E2CE /* testansi */, + 2215A9A9192A47BB00E9E2CE /* testci */, + 2215A9C1192A47D500E9E2CE /* testpollnone */, + 22CDE8EF16E9E97D00366D0A /* testrun */, + 31EEABFA156AAF9D00714D05 /* mps */, + 3114A632156E94DB001E0AA3 /* abqtest */, + 22FACEE018880983000FDBC1 /* airtest */, + 3124CAEA156BE7F300753214 /* amcss */, + 3104AFDC156D3681000A585A /* amcsshe */, + 22FA176416E8D6FC0098B23F /* amcssth */, + 3104B008156D38F3000A585A /* amsss */, + 3104B021156D39D4000A585A /* amssshe */, + 3104AFB2156D357B000A585A /* apss */, + 3114A5EE156E93E7001E0AA3 /* arenacv */, + 3124CAB7156BE3EC00753214 /* awlut */, + 31D6000C156D3CB200337B26 /* awluthe */, + 2291A5AC175CAB2F001D4920 /* awlutth */, + 3114A661156E95D9001E0AA3 /* btcv */, + 3114A604156E9430001E0AA3 /* bttest */, + 318DA8C31892B0F30089718C /* djbench */, + 3114A5BC156E9315001E0AA3 /* finalcv */, + 3114A5D5156E93A0001E0AA3 /* finaltest */, + 22EA3F3820D2B0D90065F5B6 /* forktest */, + 224CC78C175E1821002FF81B /* fotest */, + 6313D46718A400B200EB03EF /* gcbench */, + 3114A64B156E9596001E0AA3 /* landtest */, + 2231BB4C18CA97D8002D6322 /* locbwcss */, + 31D60026156D3D3E00337B26 /* lockcov */, + 2231BB5A18CA97DC002D6322 /* locusss */, + 22F846B018F437B900982BA7 /* lockut */, + 3114A58F156E913C001E0AA3 /* locv */, + 3114A694156E971B001E0AA3 /* messtest */, + 31EEAC64156AB52600714D05 /* 
mpmss */, + 3124CAD3156BE64A00753214 /* mpsicv */, + 3114A67B156E9668001E0AA3 /* mv2test */, + 22C2ACA218BE400A006B3677 /* nailboardtest */, + 31D6003D156D3EC700337B26 /* poolncv */, + 3114A5A6156E92C0001E0AA3 /* qs */, + 3104AFC7156D35E2000A585A /* sacss */, + 3104B03C156D3AD7000A585A /* segsmss */, + 223E795819EAB00B00DC26A6 /* sncss */, + 31D6008B156D402900337B26 /* steptest */, + 3114A61B156E9485001E0AA3 /* teletest */, + 3114A6AB156E9759001E0AA3 /* walkt0 */, + 31D60053156D3F3500337B26 /* zcoll */, + 31D60070156D3FBC00337B26 /* zmess */, + 220FD3D8195339C000967A35 /* ztfm */, + 3114A6C5156E9815001E0AA3 /* mpseventcnv */, + 2265D71120E53F9C003019E8 /* mpseventpy */, + 2D07B9701636FC9900DB751B /* mpseventsql */, + 2D604B9B16514B1A003AAF46 /* mpseventtxt */, + 31FCAE0917692403008C034C /* scheme */, + 22B2BC2C18B6434F00C33E63 /* scheme-advanced */, + 31108A3A1C6B90E900E728EA /* tagtest */, + 319F7A042A30D08500E5B418 /* addrobj */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXShellScriptBuildPhase section */ + 2215A9AC192A47BB00E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh -s \"$TARGET_NAME\" \"$TARGET_BUILD_DIR\"\n"; + showEnvVarsInLog = 0; + }; + 2215A9B4192A47C500E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh -s \"$TARGET_NAME\" \"$TARGET_BUILD_DIR\"\n"; + showEnvVarsInLog = 0; + }; + 2215A9BC192A47CE00E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = 
"../tool/testrun.sh -s \"$TARGET_NAME\" \"$TARGET_BUILD_DIR\"\n"; + showEnvVarsInLog = 0; + }; + 2215A9C4192A47D500E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh -s \"$TARGET_NAME\" \"$TARGET_BUILD_DIR\"\n"; + showEnvVarsInLog = 0; + }; + 22CDE8F416E9E9D400366D0A /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh -s \"$TARGET_NAME\" \"$TARGET_BUILD_DIR\"\n"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 220FD3DB195339C000967A35 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 220FD3F219533E7900967A35 /* ztfm.c in Sources */, + 220FD3DD195339C000967A35 /* fmtdy.c in Sources */, + 220FD3DE195339C000967A35 /* fmtdytst.c in Sources */, + 220FD3DF195339C000967A35 /* fmthe.c in Sources */, + 220FD3F119533E7200967A35 /* fmtno.c in Sources */, + 220FD3E1195339C000967A35 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2231BB4F18CA97D8002D6322 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2231BB6B18CA9861002D6322 /* locbwcss.c in Sources */, + 2231BB5118CA97D8002D6322 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2231BB5D18CA97DC002D6322 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2231BB6A18CA984F002D6322 /* locusss.c in Sources */, + 2231BB5F18CA97DC002D6322 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 223E795B19EAB00B00DC26A6 /* Sources */ = { + isa = 
PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 223E796719EAB05C00DC26A6 /* sncss.c in Sources */, + 223E795D19EAB00B00DC26A6 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 224CC78F175E1821002FF81B /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 224CC7A0175E322C002FF81B /* fotest.c in Sources */, + 224CC791175E1821002FF81B /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2265D71420E53F9C003019E8 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2265D72020E54010003019E8 /* eventpy.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2291A5AF175CAB2F001D4920 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2291A5BE175CAB4E001D4920 /* awlutth.c in Sources */, + 2291A5B1175CAB2F001D4920 /* fmtdy.c in Sources */, + 2291A5B2175CAB2F001D4920 /* fmtdytst.c in Sources */, + 2291A5B3175CAB2F001D4920 /* fmthe.c in Sources */, + 2291A5B4175CAB2F001D4920 /* fmtno.c in Sources */, + 2291A5B5175CAB2F001D4920 /* testlib.c in Sources */, + 22561A9918F4266600372C66 /* testthrix.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22B2BC2D18B6434F00C33E63 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 22B2BC2E18B6434F00C33E63 /* mps.c in Sources */, + 22B2BC3718B6437C00C33E63 /* scheme-advanced.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22C2ACA518BE400A006B3677 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 22C2ACB018BE4049006B3677 /* nailboardtest.c in Sources */, + 22C2ACA718BE400A006B3677 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22EA3F3B20D2B0D90065F5B6 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 
22EA3F4620D2B0FD0065F5B6 /* forktest.c in Sources */, + 22EA3F3D20D2B0D90065F5B6 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22F846B318F437B900982BA7 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 22F846BE18F437D700982BA7 /* lockut.c in Sources */, + 22F846B518F437B900982BA7 /* testlib.c in Sources */, + 22F846BF18F437E000982BA7 /* testthrix.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22FA176716E8D6FC0098B23F /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 22FA177716E8D7A80098B23F /* amcssth.c in Sources */, + 22FA176916E8D6FC0098B23F /* fmtdy.c in Sources */, + 22FA176A16E8D6FC0098B23F /* fmtdytst.c in Sources */, + 22FA176B16E8D6FC0098B23F /* fmthe.c in Sources */, + 22FA176C16E8D6FC0098B23F /* fmtno.c in Sources */, + 22FA176D16E8D6FC0098B23F /* testlib.c in Sources */, + 22561A9818F4265D00372C66 /* testthrix.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 22FACEE318880983000FDBC1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 22FACEEF188809A7000FDBC1 /* airtest.c in Sources */, + 22FACEEE188809A3000FDBC1 /* fmtscheme.c in Sources */, + 22FACEE518880983000FDBC1 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2D07B96D1636FC9900DB751B /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2D07B97A1636FCCE00DB751B /* eventsql.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 2D604B9816514B1A003AAF46 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2D604BA516514C4F003AAF46 /* eventtxt.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104AFAF156D357B000A585A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104AFBF156D3591000A585A /* apss.c 
in Sources */, + 3104AFC3156D35C3000A585A /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104AFC4156D35E2000A585A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104AFD5156D35FB000A585A /* testlib.c in Sources */, + 3104AFD8156D3607000A585A /* sacss.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104AFD9156D3681000A585A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104AFEA156D3697000A585A /* testlib.c in Sources */, + 3104AFEC156D36A5000A585A /* amcsshe.c in Sources */, + 3104AFED156D374A000A585A /* fmthe.c in Sources */, + 3104AFEE156D374D000A585A /* fmtno.c in Sources */, + 3104AFEF156D3753000A585A /* fmtdy.c in Sources */, + 3104AFF0156D3756000A585A /* fmtdytst.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104B005156D38F3000A585A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104B016156D390B000A585A /* amsss.c in Sources */, + 3104B017156D3915000A585A /* testlib.c in Sources */, + 3104B019156D3960000A585A /* fmtdy.c in Sources */, + 3104B01A156D396E000A585A /* fmtdytst.c in Sources */, + 3104B01B156D3973000A585A /* fmtno.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104B01E156D39D4000A585A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104B031156D39FD000A585A /* fmthe.c in Sources */, + 3104B032156D3A00000A585A /* fmtdytst.c in Sources */, + 3104B033156D3A05000A585A /* testlib.c in Sources */, + 3104B034156D3A2C000A585A /* amssshe.c in Sources */, + 3104B035156D3A39000A585A /* fmtdy.c in Sources */, + 3104B036156D3A49000A585A /* fmtno.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3104B039156D3AD7000A585A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3104B04E156D3AFE000A585A /* testlib.c in 
Sources */, + 3104B04F156D3B09000A585A /* fmtdy.c in Sources */, + 3104B050156D3B09000A585A /* fmtdytst.c in Sources */, + 3104B051156D3B09000A585A /* fmtno.c in Sources */, + 31D60007156D3C6200337B26 /* segsmss.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31108A3D1C6B90E900E728EA /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31108A3E1C6B90E900E728EA /* testlib.c in Sources */, + 31108A481C6B911B00E728EA /* tagtest.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A58C156E913C001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A59C156E914F001E0AA3 /* testlib.c in Sources */, + 3114A5A2156E9168001E0AA3 /* locv.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A5A3156E92C0001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5B2156E92CB001E0AA3 /* testlib.c in Sources */, + 3114A5B8156E92F1001E0AA3 /* qs.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A5B9156E9315001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5C7156E9322001E0AA3 /* testlib.c in Sources */, + 3114A5CE156E9369001E0AA3 /* finalcv.c in Sources */, + 3114A5CF156E9381001E0AA3 /* fmtdy.c in Sources */, + 3114A5D0156E9381001E0AA3 /* fmtdytst.c in Sources */, + 3114A5D1156E9381001E0AA3 /* fmtno.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A5D2156E93A0001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5E0156E93AE001E0AA3 /* fmtdy.c in Sources */, + 3114A5E1156E93AE001E0AA3 /* fmtdytst.c in Sources */, + 3114A5E2156E93AE001E0AA3 /* fmtno.c in Sources */, + 3114A5E3156E93AE001E0AA3 /* testlib.c in Sources */, + 3114A5E6156E93B9001E0AA3 /* finaltest.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; 
+ }; + 3114A5EB156E93E7001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A5F9156E93F3001E0AA3 /* testlib.c in Sources */, + 3114A5FC156E93FC001E0AA3 /* arenacv.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A601156E9430001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A615156E944E001E0AA3 /* bttest.c in Sources */, + 3114A616156E9455001E0AA3 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A618156E9485001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A627156E9490001E0AA3 /* testlib.c in Sources */, + 3114A62A156E949E001E0AA3 /* teletest.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A62F156E94DB001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A63E156E94EA001E0AA3 /* abqtest.c in Sources */, + 3114A63F156E94F0001E0AA3 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A648156E9596001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2291A5ED175CB5E2001D4920 /* landtest.c in Sources */, + 3114A672156E95F6001E0AA3 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A65E156E95D9001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A66E156E95F2001E0AA3 /* btcv.c in Sources */, + 3114A66F156E95F2001E0AA3 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A678156E9668001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 224CC79F175E321C002FF81B /* mv2test.c in Sources */, + 3114A688156E967C001E0AA3 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A691156E971B001E0AA3 /* 
Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A6A1156E9729001E0AA3 /* messtest.c in Sources */, + 3114A6A2156E972D001E0AA3 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A6A8156E9759001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A6BC156E976C001E0AA3 /* walkt0.c in Sources */, + 3114A6BD156E9771001E0AA3 /* testlib.c in Sources */, + 3114A6BF156E97B8001E0AA3 /* fmtdy.c in Sources */, + 3114A6C0156E97B8001E0AA3 /* fmtdytst.c in Sources */, + 3114A6C1156E97B8001E0AA3 /* fmtno.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3114A6C2156E9815001E0AA3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3114A6D1156E9829001E0AA3 /* eventcnv.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3124CAB4156BE3EC00753214 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3124CAC3156BE40100753214 /* awlut.c in Sources */, + 3124CAC5156BE41700753214 /* testlib.c in Sources */, + 3124CAC8156BE48D00753214 /* fmtdy.c in Sources */, + 3124CAC9156BE48D00753214 /* fmtdytst.c in Sources */, + 3124CACD156BE4C200753214 /* fmtno.c in Sources */, + 311A44F81C8B1EBD00852E2B /* testthrix.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3124CAD0156BE64A00753214 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3124CADF156BE65900753214 /* mpsicv.c in Sources */, + 3124CAE0156BE66B00753214 /* testlib.c in Sources */, + 3124CAE2156BE68E00753214 /* fmtdy.c in Sources */, + 3124CAE3156BE69B00753214 /* fmtno.c in Sources */, + 3124CAE5156BE6D500753214 /* fmthe.c in Sources */, + 3124CAE6156BE6F700753214 /* fmtdytst.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 3124CAE7156BE7F300753214 /* Sources */ = { + isa = PBXSourcesBuildPhase; + 
buildActionMask = 2147483647; + files = ( + 3124CAF6156BE81100753214 /* amcss.c in Sources */, + 3124CAF7156BE82000753214 /* fmtdy.c in Sources */, + 3124CAF8156BE82000753214 /* fmtdytst.c in Sources */, + 3124CAF9156BE82000753214 /* fmthe.c in Sources */, + 3124CAFA156BE82000753214 /* fmtno.c in Sources */, + 3124CAFB156BE82000753214 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 318DA8C41892B0F30089718C /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 318DA8D31892B27E0089718C /* testlib.c in Sources */, + 6313D47318A4028E00EB03EF /* djbench.c in Sources */, + 22561A9A18F426BB00372C66 /* testthrix.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 319F7A072A30D08500E5B418 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 319F7A172A30D11400E5B418 /* addrobj.c in Sources */, + 319F7A082A30D08500E5B418 /* testlib.c in Sources */, + 319F7A0A2A30D08500E5B418 /* fmtdy.c in Sources */, + 319F7A0B2A30D08500E5B418 /* fmtdytst.c in Sources */, + 319F7A0C2A30D08500E5B418 /* fmtno.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D60009156D3CB200337B26 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 311A44F91C8B1EC200852E2B /* testthrix.c in Sources */, + 31D60018156D3CC300337B26 /* awluthe.c in Sources */, + 31D6001A156D3CDC00337B26 /* fmtdy.c in Sources */, + 31D6001B156D3CDC00337B26 /* fmtdytst.c in Sources */, + 31D6001C156D3CDC00337B26 /* fmthe.c in Sources */, + 31D6001D156D3CDC00337B26 /* fmtno.c in Sources */, + 31D6001E156D3CDF00337B26 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D60023156D3D3E00337B26 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D60038156D3E3000337B26 /* lockcov.c in Sources */, + 31D60039156D3E3E00337B26 /* testlib.c in Sources */, + ); + 
runOnlyForDeploymentPostprocessing = 0; + }; + 31D6003A156D3EC700337B26 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 2215A9C9192A495F00E9E2CE /* pooln.c in Sources */, + 31D6004B156D3EE600337B26 /* poolncv.c in Sources */, + 31D60048156D3ECF00337B26 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D60050156D3F3500337B26 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D6005F156D3F4A00337B26 /* zcoll.c in Sources */, + 31D60069156D3F7200337B26 /* fmtdy.c in Sources */, + 31D6006A156D3F7200337B26 /* fmtdytst.c in Sources */, + 31D6006B156D3F7200337B26 /* fmtno.c in Sources */, + 31D6006C156D3F7200337B26 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D6006D156D3FBC00337B26 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D6007D156D3FCF00337B26 /* zmess.c in Sources */, + 31D6007E156D3FD700337B26 /* fmtdy.c in Sources */, + 31D6007F156D3FD700337B26 /* fmtdytst.c in Sources */, + 31D60080156D3FD700337B26 /* fmthe.c in Sources */, + 31D60081156D3FD700337B26 /* fmtno.c in Sources */, + 31D60082156D3FD700337B26 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31D60088156D402900337B26 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31D6009A156D404000337B26 /* steptest.c in Sources */, + 31D6009E156D406400337B26 /* fmtdy.c in Sources */, + 31D6009F156D406400337B26 /* fmtdytst.c in Sources */, + 31D600A0156D406400337B26 /* fmtno.c in Sources */, + 31D600A1156D406400337B26 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31EEABF7156AAF9D00714D05 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31A47BA4156C1E130039B1C2 /* mps.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 
31EEAC61156AB52600714D05 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31EEAC75156AB58E00714D05 /* mpmss.c in Sources */, + 31EEAC9F156AB73400714D05 /* testlib.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 31FCAE0617692403008C034C /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 31FCAE161769244F008C034C /* mps.c in Sources */, + 31FCAE19176924D4008C034C /* scheme.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 6313D46818A400B200EB03EF /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6313D47718A40D0400EB03EF /* fmtno.c in Sources */, + 6313D46918A400B200EB03EF /* testlib.c in Sources */, + 6313D47418A4029200EB03EF /* gcbench.c in Sources */, + 6313D47518A40C6300EB03EF /* fmtdytst.c in Sources */, + 6313D47618A40C7B00EB03EF /* fmtdy.c in Sources */, + 22561A9B18F426F300372C66 /* testthrix.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 220FD3D9195339C000967A35 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 220FD3DA195339C000967A35 /* PBXContainerItemProxy */; + }; + 220FD3F419533E8F00967A35 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 220FD3D8195339C000967A35 /* ztfm */; + targetProxy = 220FD3F319533E8F00967A35 /* PBXContainerItemProxy */; + }; + 2215A9AA192A47BB00E9E2CE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9AB192A47BB00E9E2CE /* PBXContainerItemProxy */; + }; + 2215A9B2192A47C500E9E2CE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9B3192A47C500E9E2CE /* PBXContainerItemProxy */; + }; + 2215A9BA192A47CE00E9E2CE /* 
PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9BB192A47CE00E9E2CE /* PBXContainerItemProxy */; + }; + 2215A9C2192A47D500E9E2CE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9C3192A47D500E9E2CE /* PBXContainerItemProxy */; + }; + 2231BB4D18CA97D8002D6322 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 2231BB4E18CA97D8002D6322 /* PBXContainerItemProxy */; + }; + 2231BB5B18CA97DC002D6322 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 2231BB5C18CA97DC002D6322 /* PBXContainerItemProxy */; + }; + 2231BB6D18CA986B002D6322 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 2231BB4C18CA97D8002D6322 /* locbwcss */; + targetProxy = 2231BB6C18CA986B002D6322 /* PBXContainerItemProxy */; + }; + 2231BB6F18CA986D002D6322 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 2231BB5A18CA97DC002D6322 /* locusss */; + targetProxy = 2231BB6E18CA986D002D6322 /* PBXContainerItemProxy */; + }; + 223E795919EAB00B00DC26A6 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 223E795A19EAB00B00DC26A6 /* PBXContainerItemProxy */; + }; + 224CC78D175E1821002FF81B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 224CC78E175E1821002FF81B /* PBXContainerItemProxy */; + }; + 224CC79D175E187C002FF81B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 224CC78C175E1821002FF81B /* fotest */; + targetProxy = 224CC79C175E187C002FF81B /* PBXContainerItemProxy */; + }; + 2265D71220E53F9C003019E8 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + 
targetProxy = 2265D71320E53F9C003019E8 /* PBXContainerItemProxy */; + }; + 2265D72220E54020003019E8 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 2265D71120E53F9C003019E8 /* mpseventpy */; + targetProxy = 2265D72120E54020003019E8 /* PBXContainerItemProxy */; + }; + 2275798916C5422900B662B0 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 2D604B9B16514B1A003AAF46 /* mpseventtxt */; + targetProxy = 2275798816C5422900B662B0 /* PBXContainerItemProxy */; + }; + 2286E4C918F4389E004111E2 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 22F846B018F437B900982BA7 /* lockut */; + targetProxy = 2286E4C818F4389E004111E2 /* PBXContainerItemProxy */; + }; + 2291A5AD175CAB2F001D4920 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 2291A5AE175CAB2F001D4920 /* PBXContainerItemProxy */; + }; + 2291A5C0175CAB5F001D4920 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 2291A5AC175CAB2F001D4920 /* awlutth */; + targetProxy = 2291A5BF175CAB5F001D4920 /* PBXContainerItemProxy */; + }; + 229E228819EAB10D00E21417 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 223E795819EAB00B00DC26A6 /* sncss */; + targetProxy = 229E228719EAB10D00E21417 /* PBXContainerItemProxy */; + }; + 22B2BC3918B643AD00C33E63 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31FCAE0917692403008C034C /* scheme */; + targetProxy = 22B2BC3818B643AD00C33E63 /* PBXContainerItemProxy */; + }; + 22B2BC3B18B643B000C33E63 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 22B2BC2C18B6434F00C33E63 /* scheme-advanced */; + targetProxy = 22B2BC3A18B643B000C33E63 /* PBXContainerItemProxy */; + }; + 22B2BC3D18B643B300C33E63 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 318DA8C31892B0F30089718C /* djbench */; + targetProxy = 22B2BC3C18B643B300C33E63 /* PBXContainerItemProxy */; + }; + 
22B2BC3F18B643B700C33E63 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 6313D46718A400B200EB03EF /* gcbench */; + targetProxy = 22B2BC3E18B643B700C33E63 /* PBXContainerItemProxy */; + }; + 22C2ACA318BE400A006B3677 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 22C2ACA418BE400A006B3677 /* PBXContainerItemProxy */; + }; + 22C2ACB218BE4056006B3677 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 22C2ACA218BE400A006B3677 /* nailboardtest */; + targetProxy = 22C2ACB118BE4056006B3677 /* PBXContainerItemProxy */; + }; + 22CDE92E16E9EB9300366D0A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 22CDE92D16E9EB9300366D0A /* PBXContainerItemProxy */; + }; + 22EA3F3920D2B0D90065F5B6 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 22EA3F3A20D2B0D90065F5B6 /* PBXContainerItemProxy */; + }; + 22EA3F4820D2B23F0065F5B6 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 22EA3F3820D2B0D90065F5B6 /* forktest */; + targetProxy = 22EA3F4720D2B23F0065F5B6 /* PBXContainerItemProxy */; + }; + 22F846B118F437B900982BA7 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 22F846B218F437B900982BA7 /* PBXContainerItemProxy */; + }; + 22FA176516E8D6FC0098B23F /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 22FA176616E8D6FC0098B23F /* PBXContainerItemProxy */; + }; + 22FA177916E8DB0C0098B23F /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 22FA176416E8D6FC0098B23F /* amcssth */; + targetProxy = 22FA177816E8DB0C0098B23F /* PBXContainerItemProxy */; + }; + 22FACEE118880983000FDBC1 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 
31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 22FACEE218880983000FDBC1 /* PBXContainerItemProxy */; + }; + 22FACEF1188809B5000FDBC1 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 22FACEE018880983000FDBC1 /* airtest */; + targetProxy = 22FACEF0188809B5000FDBC1 /* PBXContainerItemProxy */; + }; + 2D07B9791636FCBD00DB751B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 2D07B9701636FC9900DB751B /* mpseventsql */; + targetProxy = 2D07B9781636FCBD00DB751B /* PBXContainerItemProxy */; + }; + 3104AFC1156D35AE000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3104AFC0156D35AE000A585A /* PBXContainerItemProxy */; + }; + 3104AFD3156D35F2000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3104AFD2156D35F2000A585A /* PBXContainerItemProxy */; + }; + 3104AFE8156D368D000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3104AFE7156D368D000A585A /* PBXContainerItemProxy */; + }; + 3104AFF6156D37BC000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3104AFF5156D37BC000A585A /* PBXContainerItemProxy */; + }; + 3104AFF8156D37BE000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3124CAEA156BE7F300753214 /* amcss */; + targetProxy = 3104AFF7156D37BE000A585A /* PBXContainerItemProxy */; + }; + 3104AFFA156D37C1000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFB2156D357B000A585A /* apss */; + targetProxy = 3104AFF9156D37C1000A585A /* PBXContainerItemProxy */; + }; + 3104AFFC156D37C3000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3124CAB7156BE3EC00753214 /* awlut */; + targetProxy = 3104AFFB156D37C3000A585A /* PBXContainerItemProxy */; + }; 
+ 3104AFFE156D37C6000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEAC64156AB52600714D05 /* mpmss */; + targetProxy = 3104AFFD156D37C6000A585A /* PBXContainerItemProxy */; + }; + 3104B000156D37C8000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3124CAD3156BE64A00753214 /* mpsicv */; + targetProxy = 3104AFFF156D37C8000A585A /* PBXContainerItemProxy */; + }; + 3104B002156D37CB000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFC7156D35E2000A585A /* sacss */; + targetProxy = 3104B001156D37CB000A585A /* PBXContainerItemProxy */; + }; + 3104B004156D37CD000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFDC156D3681000A585A /* amcsshe */; + targetProxy = 3104B003156D37CD000A585A /* PBXContainerItemProxy */; + }; + 3104B014156D38FA000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3104B013156D38FA000A585A /* PBXContainerItemProxy */; + }; + 3104B01D156D398B000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104B008156D38F3000A585A /* amsss */; + targetProxy = 3104B01C156D398B000A585A /* PBXContainerItemProxy */; + }; + 3104B02D156D39DF000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104B021156D39D4000A585A /* amssshe */; + targetProxy = 3104B02C156D39DF000A585A /* PBXContainerItemProxy */; + }; + 3104B038156D3A56000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3104B037156D3A56000A585A /* PBXContainerItemProxy */; + }; + 3104B048156D3ADE000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3104B047156D3ADE000A585A /* PBXContainerItemProxy */; + }; + 3104B04A156D3AE4000A585A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 
3104B03C156D3AD7000A585A /* segsmss */; + targetProxy = 3104B049156D3AE4000A585A /* PBXContainerItemProxy */; + }; + 31108A3B1C6B90E900E728EA /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31108A3C1C6B90E900E728EA /* PBXContainerItemProxy */; + }; + 3114A59E156E9156001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A59D156E9156001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A5A0156E915A001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A58F156E913C001E0AA3 /* locv */; + targetProxy = 3114A59F156E915A001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A5B4156E92D8001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A5B3156E92D8001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A5B6156E92DC001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A5A6156E92C0001E0AA3 /* qs */; + targetProxy = 3114A5B5156E92DC001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A5CA156E9328001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A5C9156E9328001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A5CC156E932C001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A5BC156E9315001E0AA3 /* finalcv */; + targetProxy = 3114A5CB156E932C001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A5E8156E93BF001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A5E7156E93BF001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A5EA156E93C4001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A5D5156E93A0001E0AA3 /* finaltest */; + targetProxy = 3114A5E9156E93C4001E0AA3 /* PBXContainerItemProxy */; + }; + 
3114A5FE156E9406001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A5FD156E9406001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A600156E940A001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A5EE156E93E7001E0AA3 /* arenacv */; + targetProxy = 3114A5FF156E940A001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A610156E9438001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A60F156E9438001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A612156E943B001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A604156E9430001E0AA3 /* bttest */; + targetProxy = 3114A611156E943B001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A62C156E94A6001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A62B156E94A6001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A62E156E94AA001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A61B156E9485001E0AA3 /* teletest */; + targetProxy = 3114A62D156E94AA001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A642156E94F8001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A641156E94F8001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A644156E94FB001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A632156E94DB001E0AA3 /* abqtest */; + targetProxy = 3114A643156E94FB001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A659156E95B1001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A658156E95B1001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A65B156E95B4001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 
3114A64B156E9596001E0AA3 /* landtest */; + targetProxy = 3114A65A156E95B4001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A675156E9619001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A674156E9619001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A677156E961C001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A661156E95D9001E0AA3 /* btcv */; + targetProxy = 3114A676156E961C001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A68B156E9682001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A68A156E9682001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A68D156E9686001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A67B156E9668001E0AA3 /* mv2test */; + targetProxy = 3114A68C156E9686001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A6A5156E9735001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A6A4156E9735001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A6A7156E9739001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A694156E971B001E0AA3 /* messtest */; + targetProxy = 3114A6A6156E9739001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A6B7156E975E001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A6B6156E975E001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A6B9156E9763001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A6AB156E9759001E0AA3 /* walkt0 */; + targetProxy = 3114A6B8156E9763001E0AA3 /* PBXContainerItemProxy */; + }; + 3114A6D3156E9834001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 3114A6D2156E9834001E0AA3 /* PBXContainerItemProxy */; + 
}; + 3114A6D5156E9839001E0AA3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3114A6C5156E9815001E0AA3 /* mpseventcnv */; + targetProxy = 3114A6D4156E9839001E0AA3 /* PBXContainerItemProxy */; + }; + 314CB6EB1C6D272A0073CA42 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31108A3A1C6B90E900E728EA /* tagtest */; + targetProxy = 314CB6EA1C6D272A0073CA42 /* PBXContainerItemProxy */; + }; + 319F7A052A30D08500E5B418 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 319F7A062A30D08500E5B418 /* PBXContainerItemProxy */; + }; + 319F7A192A30D2F000E5B418 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 319F7A042A30D08500E5B418 /* addrobj */; + targetProxy = 319F7A182A30D2F000E5B418 /* PBXContainerItemProxy */; + }; + 31A47BAA156C210D0039B1C2 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31A47BA9156C210D0039B1C2 /* PBXContainerItemProxy */; + }; + 31A47BAC156C21120039B1C2 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31A47BAB156C21120039B1C2 /* PBXContainerItemProxy */; + }; + 31A47BAE156C21170039B1C2 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31A47BAD156C21170039B1C2 /* PBXContainerItemProxy */; + }; + 31A47BB0156C211B0039B1C2 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31A47BAF156C211B0039B1C2 /* PBXContainerItemProxy */; + }; + 31D60020156D3CEC00337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31D6001F156D3CEC00337B26 /* PBXContainerItemProxy */; + }; + 31D60022156D3CF200337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 
31D6000C156D3CB200337B26 /* awluthe */; + targetProxy = 31D60021156D3CF200337B26 /* PBXContainerItemProxy */; + }; + 31D60032156D3D5300337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31D60031156D3D5300337B26 /* PBXContainerItemProxy */; + }; + 31D60034156D3D5A00337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31D60026156D3D3E00337B26 /* lockcov */; + targetProxy = 31D60033156D3D5A00337B26 /* PBXContainerItemProxy */; + }; + 31D6004D156D3EF000337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31D6004C156D3EF000337B26 /* PBXContainerItemProxy */; + }; + 31D6004F156D3EF700337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31D6003D156D3EC700337B26 /* poolncv */; + targetProxy = 31D6004E156D3EF700337B26 /* PBXContainerItemProxy */; + }; + 31D60063156D3F5C00337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31D60053156D3F3500337B26 /* zcoll */; + targetProxy = 31D60062156D3F5C00337B26 /* PBXContainerItemProxy */; + }; + 31D60065156D3F5F00337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31D60064156D3F5F00337B26 /* PBXContainerItemProxy */; + }; + 31D60085156D3FE100337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31D60084156D3FE100337B26 /* PBXContainerItemProxy */; + }; + 31D60087156D3FE600337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31D60070156D3FBC00337B26 /* zmess */; + targetProxy = 31D60086156D3FE600337B26 /* PBXContainerItemProxy */; + }; + 31D60097156D403500337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31EEABFA156AAF9D00714D05 /* mps */; + targetProxy = 31D60096156D403500337B26 /* PBXContainerItemProxy */; + }; 
+ 31D6009D156D404B00337B26 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 31D6008B156D402900337B26 /* steptest */; + targetProxy = 31D6009C156D404B00337B26 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 220FD3E6195339C000967A35 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 220FD3E7195339C000967A35 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 220FD3E8195339C000967A35 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 2215A9AE192A47BB00E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2215A9AF192A47BB00E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2215A9B0192A47BB00E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 2215A9B6192A47C500E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2215A9B7192A47C500E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2215A9B8192A47C500E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 2215A9BE192A47CE00E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2215A9BF192A47CE00E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; 
+ 2215A9C0192A47CE00E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 2215A9C6192A47D500E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2215A9C7192A47D500E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2215A9C8192A47D500E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 2231BB5618CA97D8002D6322 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2231BB5718CA97D8002D6322 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2231BB5818CA97D8002D6322 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 2231BB6418CA97DC002D6322 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2231BB6518CA97DC002D6322 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2231BB6618CA97DC002D6322 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 223E796219EAB00B00DC26A6 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 223E796319EAB00B00DC26A6 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 223E796419EAB00B00DC26A6 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = 
RASH; + }; + 224CC796175E1821002FF81B /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 224CC797175E1821002FF81B /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2265D71A20E53F9C003019E8 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2265D71B20E53F9C003019E8 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2265D71C20E53F9C003019E8 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 2291A5BA175CAB2F001D4920 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2291A5BB175CAB2F001D4920 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 22B2BC3318B6434F00C33E63 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 22B2BC3418B6434F00C33E63 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 22B2BC3518B6434F00C33E63 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 22C2ACA118BE3FEC006B3677 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 22C2ACAC18BE400A006B3677 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 22C2ACAD18BE400A006B3677 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; 
+ }; + name = Release; + }; + 22C2ACAE18BE400A006B3677 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 22CDE8F116E9E97E00366D0A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 22CDE8F216E9E97E00366D0A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 22EA3F4220D2B0D90065F5B6 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 22EA3F4320D2B0D90065F5B6 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 22EA3F4420D2B0D90065F5B6 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 22F846BA18F437B900982BA7 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 22F846BB18F437B900982BA7 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 22F846BC18F437B900982BA7 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 22FA177216E8D6FC0098B23F /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 22FA177316E8D6FC0098B23F /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 22FACEEA18880983000FDBC1 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 22FACEEB18880983000FDBC1 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = 
"$(TARGET_NAME)"; + }; + name = Release; + }; + 2D07B9751636FC9900DB751B /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_C_LANGUAGE_STANDARD = c99; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2D07B9761636FC9900DB751B /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_C_LANGUAGE_STANDARD = c99; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 2D604B9F16514B1A003AAF46 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 2D604BA016514B1A003AAF46 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3104AFBA156D357B000A585A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3104AFBB156D357B000A585A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3104AFD0156D35E2000A585A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3104AFD1156D35E2000A585A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3104AFE5156D3682000A585A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3104AFE6156D3682000A585A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3104AFF3156D37A0000A585A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3104AFF4156D37A0000A585A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 
3104B011156D38F3000A585A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3104B012156D38F3000A585A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3104B02A156D39D4000A585A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3104B02B156D39D4000A585A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3104B045156D3AD8000A585A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3104B046156D3AD8000A585A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31108A441C6B90E900E728EA /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31108A451C6B90E900E728EA /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31108A461C6B90E900E728EA /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 3114A597156E913C001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A598156E913C001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A5AF156E92C0001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A5B0156E92C0001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; 
+ name = Release; + }; + 3114A5C5156E9315001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A5C6156E9315001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A5DE156E93A0001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A5DF156E93A0001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A5F7156E93E7001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A5F8156E93E7001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A60D156E9430001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A60E156E9430001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A624156E9485001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A625156E9485001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A63B156E94DB001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A63C156E94DB001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A654156E9596001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + 
PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A655156E9596001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A66A156E95D9001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A66B156E95D9001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A684156E9669001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A685156E9669001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A69D156E971B001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A69E156E971B001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A6B4156E9759001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A6B5156E9759001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3114A6CE156E9815001E0AA3 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3114A6CF156E9815001E0AA3 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3124CAC0156BE3EC00753214 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3124CAC1156BE3EC00753214 /* Release */ = { + isa = 
XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3124CADC156BE64A00753214 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3124CADD156BE64A00753214 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 3124CAF3156BE7F300753214 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 3124CAF4156BE7F300753214 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 318DA8CA1892B0F30089718C /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 318DA8CB1892B0F30089718C /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 318DA8D41892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = c89; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = s; + GCC_PREPROCESSOR_DEFINITIONS = CONFIG_VAR_RASH; + GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES; + 
GCC_TREAT_INCOMPATIBLE_POINTER_TYPE_WARNINGS_AS_ERRORS = YES; + GCC_TREAT_WARNINGS_AS_ERRORS = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = NO; + GCC_WARN_ABOUT_MISSING_NEWLINE = YES; + GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES; + GCC_WARN_PEDANTIC = YES; + GCC_WARN_SHADOW = YES; + GCC_WARN_SIGN_COMPARE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNKNOWN_PRAGMAS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_LABEL = YES; + GCC_WARN_UNUSED_PARAMETER = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + SDKROOT = macosx; + SYMROOT = xc; + WARNING_CFLAGS = ( + "-pedantic", + "-Waggregate-return", + "-Wall", + "-Wcast-qual", + "-Wconversion", + "-Wduplicate-enum", + "-Wextra", + "-Winline", + "-Wmissing-prototypes", + "-Wmissing-variable-declarations", + "-Wnested-externs", + "-Wpointer-arith", + "-Wshadow", + "-Wstrict-aliasing=2", + "-Wstrict-prototypes", + "-Wunreachable-code", + "-Wwrite-strings", + ); + }; + name = RASH; + }; + 318DA8D51892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8D61892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + EXECUTABLE_PREFIX = lib; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8D71892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8D81892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8D91892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8DA1892C0D00089718C /* RASH */ = 
{ + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8DB1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8DC1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8DD1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8DE1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8DF1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E01892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E11892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E21892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E31892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E41892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E71892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E81892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8E91892C0D00089718C /* RASH */ = { + isa = 
XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8EA1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8EB1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8EC1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8ED1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8EE1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8EF1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F01892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F11892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F21892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F31892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F41892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F51892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F61892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + 
buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F71892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F81892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8F91892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8FA1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_C_LANGUAGE_STANDARD = c99; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8FB1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8FC1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8FD1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 318DA8FE1892C0D00089718C /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 319F7A112A30D08500E5B418 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 319F7A122A30D08500E5B418 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 319F7A132A30D08500E5B418 /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; + 31D60015156D3CB200337B26 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31D60016156D3CB200337B26 /* Release */ = { + isa = 
XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31D6002F156D3D3F00337B26 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31D60030156D3D3F00337B26 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31D60046156D3EC700337B26 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31D60047156D3EC700337B26 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31D6005C156D3F3500337B26 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31D6005D156D3F3500337B26 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31D60079156D3FBC00337B26 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31D6007A156D3FBC00337B26 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31D60094156D402900337B26 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31D60095156D402900337B26 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31EEABDF156AAE9E00714D05 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES; 
+ CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = c89; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = CONFIG_VAR_COOL; + GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES; + GCC_TREAT_INCOMPATIBLE_POINTER_TYPE_WARNINGS_AS_ERRORS = YES; + GCC_TREAT_WARNINGS_AS_ERRORS = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = NO; + GCC_WARN_ABOUT_MISSING_NEWLINE = YES; + GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES; + GCC_WARN_PEDANTIC = YES; + GCC_WARN_SHADOW = YES; + GCC_WARN_SIGN_COMPARE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNKNOWN_PRAGMAS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_LABEL = YES; + GCC_WARN_UNUSED_PARAMETER = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + SDKROOT = macosx; + SYMROOT = xc; + WARNING_CFLAGS = ( + "-pedantic", + "-Waggregate-return", + "-Wall", + "-Wcast-qual", + "-Wconversion", + "-Wduplicate-enum", + "-Wextra", + "-Winline", + "-Wmissing-prototypes", + "-Wmissing-variable-declarations", + "-Wnested-externs", + "-Wpointer-arith", + "-Wshadow", + "-Wstrict-aliasing=2", + "-Wstrict-prototypes", + "-Wunreachable-code", + "-Wwrite-strings", + ); + }; + name = Debug; + }; + 31EEABE0156AAE9E00714D05 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING 
= YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = c89; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = s; + GCC_PREPROCESSOR_DEFINITIONS = CONFIG_VAR_HOT; + GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES; + GCC_TREAT_INCOMPATIBLE_POINTER_TYPE_WARNINGS_AS_ERRORS = YES; + GCC_TREAT_WARNINGS_AS_ERRORS = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = NO; + GCC_WARN_ABOUT_MISSING_NEWLINE = YES; + GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES; + GCC_WARN_PEDANTIC = YES; + GCC_WARN_SHADOW = YES; + GCC_WARN_SIGN_COMPARE = YES; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNKNOWN_PRAGMAS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_LABEL = YES; + GCC_WARN_UNUSED_PARAMETER = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + SDKROOT = macosx; + SYMROOT = xc; + WARNING_CFLAGS = ( + "-pedantic", + "-Waggregate-return", + "-Wall", + "-Wcast-qual", + "-Wconversion", + "-Wduplicate-enum", + "-Wextra", + "-Winline", + "-Wmissing-prototypes", + "-Wmissing-variable-declarations", + "-Wnested-externs", + "-Wpointer-arith", + "-Wshadow", + "-Wstrict-aliasing=2", + "-Wstrict-prototypes", + "-Wunreachable-code", + "-Wwrite-strings", + ); + }; + name = Release; + }; + 31EEABFD156AAF9D00714D05 /* Debug */ = { + isa = 
XCBuildConfiguration; + buildSettings = { + EXECUTABLE_PREFIX = lib; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31EEABFE156AAF9D00714D05 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + EXECUTABLE_PREFIX = lib; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31EEAC6D156AB52600714D05 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31EEAC6E156AB52600714D05 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 31FCAE1017692403008C034C /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 31FCAE1117692403008C034C /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 6313D46F18A400B200EB03EF /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 6313D47018A400B200EB03EF /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 6313D47118A400B200EB03EF /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = RASH; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 220FD3E5195339C000967A35 /* Build configuration list for PBXNativeTarget "ztfm" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 220FD3E6195339C000967A35 /* Debug */, + 220FD3E7195339C000967A35 /* Release */, + 220FD3E8195339C000967A35 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2215A9AD192A47BB00E9E2CE /* Build configuration list for PBXAggregateTarget "testci" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 
2215A9AE192A47BB00E9E2CE /* Debug */, + 2215A9AF192A47BB00E9E2CE /* Release */, + 2215A9B0192A47BB00E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2215A9B5192A47C500E9E2CE /* Build configuration list for PBXAggregateTarget "testansi" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2215A9B6192A47C500E9E2CE /* Debug */, + 2215A9B7192A47C500E9E2CE /* Release */, + 2215A9B8192A47C500E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2215A9BD192A47CE00E9E2CE /* Build configuration list for PBXAggregateTarget "testall" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2215A9BE192A47CE00E9E2CE /* Debug */, + 2215A9BF192A47CE00E9E2CE /* Release */, + 2215A9C0192A47CE00E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2215A9C5192A47D500E9E2CE /* Build configuration list for PBXAggregateTarget "testpollnone" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2215A9C6192A47D500E9E2CE /* Debug */, + 2215A9C7192A47D500E9E2CE /* Release */, + 2215A9C8192A47D500E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2231BB5518CA97D8002D6322 /* Build configuration list for PBXNativeTarget "locbwcss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2231BB5618CA97D8002D6322 /* Debug */, + 2231BB5718CA97D8002D6322 /* Release */, + 2231BB5818CA97D8002D6322 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2231BB6318CA97DC002D6322 /* Build configuration list for PBXNativeTarget "locusss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2231BB6418CA97DC002D6322 /* Debug */, + 2231BB6518CA97DC002D6322 /* Release */, + 2231BB6618CA97DC002D6322 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 
223E796119EAB00B00DC26A6 /* Build configuration list for PBXNativeTarget "sncss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 223E796219EAB00B00DC26A6 /* Debug */, + 223E796319EAB00B00DC26A6 /* Release */, + 223E796419EAB00B00DC26A6 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 224CC795175E1821002FF81B /* Build configuration list for PBXNativeTarget "fotest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 224CC796175E1821002FF81B /* Debug */, + 224CC797175E1821002FF81B /* Release */, + 318DA8E91892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2265D71920E53F9C003019E8 /* Build configuration list for PBXNativeTarget "mpseventpy" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2265D71A20E53F9C003019E8 /* Debug */, + 2265D71B20E53F9C003019E8 /* Release */, + 2265D71C20E53F9C003019E8 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2291A5B9175CAB2F001D4920 /* Build configuration list for PBXNativeTarget "awlutth" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2291A5BA175CAB2F001D4920 /* Debug */, + 2291A5BB175CAB2F001D4920 /* Release */, + 318DA8E11892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 22B2BC3218B6434F00C33E63 /* Build configuration list for PBXNativeTarget "scheme-advanced" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 22B2BC3318B6434F00C33E63 /* Debug */, + 22B2BC3418B6434F00C33E63 /* Release */, + 22B2BC3518B6434F00C33E63 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 22C2ACAB18BE400A006B3677 /* Build configuration list for PBXNativeTarget "nailboardtest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 22C2ACAC18BE400A006B3677 /* Debug */, + 22C2ACAD18BE400A006B3677 /* 
Release */, + 22C2ACAE18BE400A006B3677 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 22CDE8F016E9E97E00366D0A /* Build configuration list for PBXAggregateTarget "testrun" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 22CDE8F116E9E97E00366D0A /* Debug */, + 22CDE8F216E9E97E00366D0A /* Release */, + 318DA8FC1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 22EA3F4120D2B0D90065F5B6 /* Build configuration list for PBXNativeTarget "forktest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 22EA3F4220D2B0D90065F5B6 /* Debug */, + 22EA3F4320D2B0D90065F5B6 /* Release */, + 22EA3F4420D2B0D90065F5B6 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 22F846B918F437B900982BA7 /* Build configuration list for PBXNativeTarget "lockut" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 22F846BA18F437B900982BA7 /* Debug */, + 22F846BB18F437B900982BA7 /* Release */, + 22F846BC18F437B900982BA7 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 22FA177116E8D6FC0098B23F /* Build configuration list for PBXNativeTarget "amcssth" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 22FA177216E8D6FC0098B23F /* Debug */, + 22FA177316E8D6FC0098B23F /* Release */, + 318DA8DA1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 22FACEE918880983000FDBC1 /* Build configuration list for PBXNativeTarget "airtest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 22FACEEA18880983000FDBC1 /* Debug */, + 22FACEEB18880983000FDBC1 /* Release */, + 22C2ACA118BE3FEC006B3677 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2D07B9741636FC9900DB751B /* Build configuration list for PBXNativeTarget "mpseventsql" */ = { 
+ isa = XCConfigurationList; + buildConfigurations = ( + 2D07B9751636FC9900DB751B /* Debug */, + 2D07B9761636FC9900DB751B /* Release */, + 318DA8FA1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2D604BA216514B59003AAF46 /* Build configuration list for PBXNativeTarget "mpseventtxt" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2D604B9F16514B1A003AAF46 /* Debug */, + 2D604BA016514B1A003AAF46 /* Release */, + 318DA8FB1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3104AFBC156D357B000A585A /* Build configuration list for PBXNativeTarget "apss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3104AFBA156D357B000A585A /* Debug */, + 3104AFBB156D357B000A585A /* Release */, + 318DA8DD1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3104AFCF156D35E2000A585A /* Build configuration list for PBXNativeTarget "sacss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3104AFD0156D35E2000A585A /* Debug */, + 3104AFD1156D35E2000A585A /* Release */, + 318DA8F21892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3104AFE4156D3682000A585A /* Build configuration list for PBXNativeTarget "amcsshe" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3104AFE5156D3682000A585A /* Debug */, + 3104AFE6156D3682000A585A /* Release */, + 318DA8D91892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3104AFF2156D37A0000A585A /* Build configuration list for PBXAggregateTarget "all" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3104AFF3156D37A0000A585A /* Debug */, + 3104AFF4156D37A0000A585A /* Release */, + 318DA8D51892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + 
defaultConfigurationName = Release; + }; + 3104B010156D38F3000A585A /* Build configuration list for PBXNativeTarget "amsss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3104B011156D38F3000A585A /* Debug */, + 3104B012156D38F3000A585A /* Release */, + 318DA8DB1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3104B029156D39D4000A585A /* Build configuration list for PBXNativeTarget "amssshe" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3104B02A156D39D4000A585A /* Debug */, + 3104B02B156D39D4000A585A /* Release */, + 318DA8DC1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3104B044156D3AD8000A585A /* Build configuration list for PBXNativeTarget "segsmss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3104B045156D3AD8000A585A /* Debug */, + 3104B046156D3AD8000A585A /* Release */, + 318DA8F31892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31108A431C6B90E900E728EA /* Build configuration list for PBXNativeTarget "tagtest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31108A441C6B90E900E728EA /* Debug */, + 31108A451C6B90E900E728EA /* Release */, + 31108A461C6B90E900E728EA /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A599156E913C001E0AA3 /* Build configuration list for PBXNativeTarget "locv" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A597156E913C001E0AA3 /* Debug */, + 3114A598156E913C001E0AA3 /* Release */, + 318DA8EB1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A5AE156E92C0001E0AA3 /* Build configuration list for PBXNativeTarget "qs" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A5AF156E92C0001E0AA3 /* Debug */, + 
3114A5B0156E92C0001E0AA3 /* Release */, + 318DA8F11892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A5C4156E9315001E0AA3 /* Build configuration list for PBXNativeTarget "finalcv" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A5C5156E9315001E0AA3 /* Debug */, + 3114A5C6156E9315001E0AA3 /* Release */, + 318DA8E71892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A5DD156E93A0001E0AA3 /* Build configuration list for PBXNativeTarget "finaltest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A5DE156E93A0001E0AA3 /* Debug */, + 3114A5DF156E93A0001E0AA3 /* Release */, + 318DA8E81892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A5F6156E93E7001E0AA3 /* Build configuration list for PBXNativeTarget "arenacv" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A5F7156E93E7001E0AA3 /* Debug */, + 3114A5F8156E93E7001E0AA3 /* Release */, + 318DA8DE1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A60C156E9430001E0AA3 /* Build configuration list for PBXNativeTarget "bttest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A60D156E9430001E0AA3 /* Debug */, + 3114A60E156E9430001E0AA3 /* Release */, + 318DA8E31892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A623156E9485001E0AA3 /* Build configuration list for PBXNativeTarget "teletest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A624156E9485001E0AA3 /* Debug */, + 3114A625156E9485001E0AA3 /* Release */, + 318DA8F51892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A63A156E94DB001E0AA3 /* Build configuration list for 
PBXNativeTarget "abqtest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A63B156E94DB001E0AA3 /* Debug */, + 3114A63C156E94DB001E0AA3 /* Release */, + 318DA8D71892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A653156E9596001E0AA3 /* Build configuration list for PBXNativeTarget "landtest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A654156E9596001E0AA3 /* Debug */, + 3114A655156E9596001E0AA3 /* Release */, + 318DA8E41892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A669156E95D9001E0AA3 /* Build configuration list for PBXNativeTarget "btcv" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A66A156E95D9001E0AA3 /* Debug */, + 3114A66B156E95D9001E0AA3 /* Release */, + 318DA8E21892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A683156E9669001E0AA3 /* Build configuration list for PBXNativeTarget "mv2test" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A684156E9669001E0AA3 /* Debug */, + 3114A685156E9669001E0AA3 /* Release */, + 318DA8EF1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A69C156E971B001E0AA3 /* Build configuration list for PBXNativeTarget "messtest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A69D156E971B001E0AA3 /* Debug */, + 3114A69E156E971B001E0AA3 /* Release */, + 318DA8EC1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A6B3156E9759001E0AA3 /* Build configuration list for PBXNativeTarget "walkt0" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A6B4156E9759001E0AA3 /* Debug */, + 3114A6B5156E9759001E0AA3 /* Release */, + 318DA8F61892C0D00089718C /* RASH */, + ); + 
defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3114A6CD156E9815001E0AA3 /* Build configuration list for PBXNativeTarget "mpseventcnv" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3114A6CE156E9815001E0AA3 /* Debug */, + 3114A6CF156E9815001E0AA3 /* Release */, + 318DA8F91892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3124CABF156BE3EC00753214 /* Build configuration list for PBXNativeTarget "awlut" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3124CAC0156BE3EC00753214 /* Debug */, + 3124CAC1156BE3EC00753214 /* Release */, + 318DA8DF1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3124CADB156BE64A00753214 /* Build configuration list for PBXNativeTarget "mpsicv" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3124CADC156BE64A00753214 /* Debug */, + 3124CADD156BE64A00753214 /* Release */, + 318DA8EE1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3124CAF2156BE7F300753214 /* Build configuration list for PBXNativeTarget "amcss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3124CAF3156BE7F300753214 /* Debug */, + 3124CAF4156BE7F300753214 /* Release */, + 318DA8D81892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 318DA8C91892B0F30089718C /* Build configuration list for PBXNativeTarget "djbench" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 318DA8CA1892B0F30089718C /* Debug */, + 318DA8CB1892B0F30089718C /* Release */, + 318DA8FE1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 319F7A102A30D08500E5B418 /* Build configuration list for PBXNativeTarget "addrobj" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 
319F7A112A30D08500E5B418 /* Debug */, + 319F7A122A30D08500E5B418 /* Release */, + 319F7A132A30D08500E5B418 /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31D60014156D3CB200337B26 /* Build configuration list for PBXNativeTarget "awluthe" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31D60015156D3CB200337B26 /* Debug */, + 31D60016156D3CB200337B26 /* Release */, + 318DA8E01892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31D6002E156D3D3F00337B26 /* Build configuration list for PBXNativeTarget "lockcov" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31D6002F156D3D3F00337B26 /* Debug */, + 31D60030156D3D3F00337B26 /* Release */, + 318DA8EA1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31D60045156D3EC700337B26 /* Build configuration list for PBXNativeTarget "poolncv" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31D60046156D3EC700337B26 /* Debug */, + 31D60047156D3EC700337B26 /* Release */, + 318DA8F01892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31D6005B156D3F3500337B26 /* Build configuration list for PBXNativeTarget "zcoll" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31D6005C156D3F3500337B26 /* Debug */, + 31D6005D156D3F3500337B26 /* Release */, + 318DA8F71892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31D60078156D3FBC00337B26 /* Build configuration list for PBXNativeTarget "zmess" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31D60079156D3FBC00337B26 /* Debug */, + 31D6007A156D3FBC00337B26 /* Release */, + 318DA8F81892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31D60093156D402900337B26 /* 
Build configuration list for PBXNativeTarget "steptest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31D60094156D402900337B26 /* Debug */, + 31D60095156D402900337B26 /* Release */, + 318DA8F41892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31EEABDD156AAE9E00714D05 /* Build configuration list for PBXProject "mps" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31EEABDF156AAE9E00714D05 /* Debug */, + 31EEABE0156AAE9E00714D05 /* Release */, + 318DA8D41892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31EEABFC156AAF9D00714D05 /* Build configuration list for PBXNativeTarget "mps" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31EEABFD156AAF9D00714D05 /* Debug */, + 31EEABFE156AAF9D00714D05 /* Release */, + 318DA8D61892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31EEAC6C156AB52600714D05 /* Build configuration list for PBXNativeTarget "mpmss" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31EEAC6D156AB52600714D05 /* Debug */, + 31EEAC6E156AB52600714D05 /* Release */, + 318DA8ED1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 31FCAE1317692403008C034C /* Build configuration list for PBXNativeTarget "scheme" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 31FCAE1017692403008C034C /* Debug */, + 31FCAE1117692403008C034C /* Release */, + 318DA8FD1892C0D00089718C /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 6313D46E18A400B200EB03EF /* Build configuration list for PBXNativeTarget "gcbench" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6313D46F18A400B200EB03EF /* Debug */, + 6313D47018A400B200EB03EF /* Release */, + 6313D47118A400B200EB03EF /* RASH */, + ); + 
defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 31EEABDA156AAE9E00714D05 /* Project object */; +} diff --git a/mps/code/mps.xcodeproj/xcshareddata/xcschemes/addrobj.xcscheme b/mps/code/mps.xcodeproj/xcshareddata/xcschemes/addrobj.xcscheme new file mode 100644 index 00000000000..fc6990f251f --- /dev/null +++ b/mps/code/mps.xcodeproj/xcshareddata/xcschemes/addrobj.xcscheme @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/mps/code/mps.xcodeproj/xcshareddata/xcschemes/finaltest.xcscheme b/mps/code/mps.xcodeproj/xcshareddata/xcschemes/finaltest.xcscheme new file mode 100644 index 00000000000..67ef98c4301 --- /dev/null +++ b/mps/code/mps.xcodeproj/xcshareddata/xcschemes/finaltest.xcscheme @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/mps/code/mpsacl.h b/mps/code/mpsacl.h new file mode 100644 index 00000000000..b9d6ea4bec7 --- /dev/null +++ b/mps/code/mpsacl.h @@ -0,0 +1,50 @@ +/* mpsacl.h: MEMORY POOL SYSTEM ARENA CLASS "CL" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpsacl_h +#define mpsacl_h + +#include "mps.h" + +/* Client arena base address argument */ +extern const struct mps_key_s _mps_key_ARENA_CL_BASE; +#define MPS_KEY_ARENA_CL_BASE (&_mps_key_ARENA_CL_BASE) +#define MPS_KEY_ARENA_CL_BASE_FIELD addr + +extern mps_arena_class_t mps_arena_class_cl(void); + + +#endif /* mpsacl_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpsavm.h b/mps/code/mpsavm.h new file mode 100644 index 00000000000..075665d121b --- /dev/null +++ b/mps/code/mpsavm.h @@ -0,0 +1,50 @@ +/* mpsavm.h: MEMORY POOL SYSTEM ARENA CLASS "VM" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpsavm_h +#define mpsavm_h + +#include "mps.h" + + +extern mps_arena_class_t mps_arena_class_vm(void); + + +/* The vm arena class supports extensions to the arena protocol: */ +extern mps_res_t mps_arena_vm_growth(mps_arena_t, size_t, size_t); + + +#endif /* mpsavm_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpscamc.h b/mps/code/mpscamc.h new file mode 100644 index 00000000000..0aaf842e88d --- /dev/null +++ b/mps/code/mpscamc.h @@ -0,0 +1,49 @@ +/* mpscamc.h: MEMORY POOL SYSTEM CLASS "AMC" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpscamc_h +#define mpscamc_h + +#include "mps.h" + +extern mps_pool_class_t mps_class_amc(void); +extern mps_pool_class_t mps_class_amcz(void); + +typedef void (*mps_amc_apply_stepper_t)(mps_addr_t, void *, size_t); +extern void mps_amc_apply(mps_pool_t, mps_amc_apply_stepper_t, + void *, size_t); + +#endif /* mpscamc_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpscams.h b/mps/code/mpscams.h new file mode 100644 index 00000000000..314d3f3d2b3 --- /dev/null +++ b/mps/code/mpscams.h @@ -0,0 +1,50 @@ +/* mpscams.h: MEMORY POOL SYSTEM CLASS "AMS" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + */ + +#ifndef mpscams_h +#define mpscams_h + +#include "mps.h" + +extern const struct mps_key_s _mps_key_AMS_SUPPORT_AMBIGUOUS; +#define MPS_KEY_AMS_SUPPORT_AMBIGUOUS (&_mps_key_AMS_SUPPORT_AMBIGUOUS) +#define MPS_KEY_AMS_SUPPORT_AMBIGUOUS_FIELD b + +extern mps_pool_class_t mps_class_ams(void); +extern mps_pool_class_t mps_class_ams_debug(void); + +#endif /* mpscams_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpscawl.h b/mps/code/mpscawl.h new file mode 100644 index 00000000000..a1a3a2b3a96 --- /dev/null +++ b/mps/code/mpscawl.h @@ -0,0 +1,50 @@ +/* mpscaawl.h: MEMORY POOL SYSTEM CLASS "AWL" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#ifndef mpscawl_h +#define mpscawl_h + +#include "mps.h" + +extern const struct mps_key_s _mps_key_AWL_FIND_DEPENDENT; +#define MPS_KEY_AWL_FIND_DEPENDENT (&_mps_key_AWL_FIND_DEPENDENT) +#define MPS_KEY_AWL_FIND_DEPENDENT_FIELD addr_method + +extern mps_pool_class_t mps_class_awl(void); + +typedef mps_addr_t (*mps_awl_find_dependent_t)(mps_addr_t addr); + +#endif /* mpscawl_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/mpsclo.h b/mps/code/mpsclo.h new file mode 100644 index 00000000000..5884cb8e0c2 --- /dev/null +++ b/mps/code/mpsclo.h @@ -0,0 +1,45 @@ +/* mpsclo.h: MEMORY POOL SYSTEM CLASS "LO" + * + * $Id$ + * + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpsclo_h +#define mpsclo_h + +#include "mps.h" + +extern mps_pool_class_t mps_class_lo(void); + +#endif /* mpsclo_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/mpscmfs.h b/mps/code/mpscmfs.h new file mode 100644 index 00000000000..096d5aec85b --- /dev/null +++ b/mps/code/mpscmfs.h @@ -0,0 +1,48 @@ +/* mpscamfs.h: MEMORY POOL SYSTEM CLASS "MFS" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpscmfs_h +#define mpscmfs_h + +#include "mps.h" + +extern const struct mps_key_s _mps_key_MFS_UNIT_SIZE; +#define MPS_KEY_MFS_UNIT_SIZE (&_mps_key_MFS_UNIT_SIZE) +#define MPS_KEY_MFS_UNIT_SIZE_FIELD size + +extern mps_pool_class_t mps_class_mfs(void); + +#endif /* mpscmfs_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpscmv2.h b/mps/code/mpscmv2.h new file mode 100644 index 00000000000..80e5602b304 --- /dev/null +++ b/mps/code/mpscmv2.h @@ -0,0 +1,46 @@ +/* mpscmv2.h: MEMORY POOL SYSTEM CLASS "MVT" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * The MVT pool class used to be known as "MV2" in some places: this + * header provides backwards compatibility for prograns that included + * it under its old name. + */ + +#ifndef mpscmv2_h +#define mpscmv2_h + +#include "mpscmvt.h" + +#endif /* mpscmv2_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpscmvff.h b/mps/code/mpscmvff.h new file mode 100644 index 00000000000..455b0d7effa --- /dev/null +++ b/mps/code/mpscmvff.h @@ -0,0 +1,58 @@ +/* mpscmvff.h: MEMORY POOL SYSTEM CLASS "MVFF" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpscmvff_h +#define mpscmvff_h + +#include "mps.h" + +extern const struct mps_key_s _mps_key_MVFF_SLOT_HIGH; +#define MPS_KEY_MVFF_SLOT_HIGH (&_mps_key_MVFF_SLOT_HIGH) +#define MPS_KEY_MVFF_SLOT_HIGH_FIELD b +extern const struct mps_key_s _mps_key_MVFF_ARENA_HIGH; +#define MPS_KEY_MVFF_ARENA_HIGH (&_mps_key_MVFF_ARENA_HIGH) +#define MPS_KEY_MVFF_ARENA_HIGH_FIELD b +extern const struct mps_key_s _mps_key_MVFF_FIRST_FIT; +#define MPS_KEY_MVFF_FIRST_FIT (&_mps_key_MVFF_FIRST_FIT) +#define MPS_KEY_MVFF_FIRST_FIT_FIELD b + +#define mps_mvff_free_size mps_pool_free_size +#define mps_mvff_size mps_pool_total_size + +extern mps_pool_class_t mps_class_mvff(void); +extern mps_pool_class_t mps_class_mvff_debug(void); + +#endif /* mpscmvff_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpscmvt.h b/mps/code/mpscmvt.h new file mode 100644 index 00000000000..30a3f9f1fe1 --- /dev/null +++ b/mps/code/mpscmvt.h @@ -0,0 +1,54 @@ +/* mpscmvt.h: MEMORY POOL SYSTEM CLASS "MVT" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpscmvt_h +#define mpscmvt_h + +#include "mps.h" + +extern const struct mps_key_s _mps_key_MVT_RESERVE_DEPTH; +#define MPS_KEY_MVT_RESERVE_DEPTH (&_mps_key_MVT_RESERVE_DEPTH) +#define MPS_KEY_MVT_RESERVE_DEPTH_FIELD count +extern const struct mps_key_s _mps_key_MVT_FRAG_LIMIT; +#define MPS_KEY_MVT_FRAG_LIMIT (&_mps_key_MVT_FRAG_LIMIT) +#define MPS_KEY_MVT_FRAG_LIMIT_FIELD d + +extern mps_pool_class_t mps_class_mvt(void); + +#define mps_mvt_free_size mps_pool_free_size +#define mps_mvt_size mps_pool_total_size + +#endif /* mpscmvt_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpscsnc.h b/mps/code/mpscsnc.h new file mode 100644 index 00000000000..3a3ff568ad2 --- /dev/null +++ b/mps/code/mpscsnc.h @@ -0,0 +1,44 @@ +/* mpscsnc.h: MEMORY POOL SYSTEM CLASS "SNC" + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef mpscsnc_h +#define mpscsnc_h + +#include "mps.h" + +extern mps_pool_class_t mps_class_snc(void); + +#endif /* mpscsnc_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpsi.c b/mps/code/mpsi.c new file mode 100644 index 00000000000..bf83e4181ce --- /dev/null +++ b/mps/code/mpsi.c @@ -0,0 +1,2221 @@ +/* mpsi.c: MEMORY POOL SYSTEM C INTERFACE LAYER + * + * $Id$ + * Copyright (c) 2001-2023 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * .purpose: This code bridges between the MPS interface to C, + * , and the internal MPM interfaces, as defined by + * . .purpose.check: It performs checking of the C client's + * usage of the MPS Interface. .purpose.thread: It excludes multiple + * threads from the MPM by locking the Arena (see ). 
+ * + * .design: + * + * + * NOTES + * + * .note.break-out: Take care not to return when "inside" the Arena + * (between ArenaEnter and ArenaLeave) as this will leave the Arena in + * an unsuitable state for re-entry. + * + * .note.avert: Use AVERT only when "inside" the Arena (between + * ArenaEnter and ArenaLeave), as it's not thread-safe in all + * varieties. Use AVER(TESTT) otherwise. See + * . + * + * + * TRANSGRESSIONS (rule.impl.trans) + * + * .check.protocol: (rule.impl.req) More could be done in this code to + * check that protocols are obeyed by the client. It probably doesn't + * meet checking requirements. + * + * .poll: (rule.universal.complete) Various allocation methods call + * ArenaPoll to allow the MPM to "steal" CPU time and get on with + * background tasks such as incremental GC. + * + * .root-mode: (rule.universal.complete) The root "mode", which + * specifies things like the protectability of roots, is ignored at + * present. This is because the MPM doesn't ever try to protect them. + * In future, it will. + * + * .naming: (rule.impl.guide) The exported identifiers do not follow the + * normal MPS naming conventions. . + */ + +#include "mpm.h" +#include "mps.h" +#include "sac.h" +#include "trans.h" + +#include + + +SRCID(mpsi, "$Id$"); + + +/* mpsi_check -- check consistency of interface mappings + * + * .check.purpose: The mpsi_check function attempts to check whether + * the definitions in match the equivalent definition in + * the MPM. It is checking the assumptions made in the other functions + * in this implementation. + * + * .check.empty: Note that mpsi_check compiles away to almost nothing. + * + * .check.enum.cast: enum comparisons have to be cast to avoid a warning + * from the SunPro C compiler. See builder.sc.warn.enum. */ + +ATTRIBUTE_UNUSED +static Bool mpsi_check(void) +{ + CHECKL(COMPATTYPE(mps_res_t, Res)); + + /* Check that external and internal message types match. */ + /* See and */ + /* . */ + /* Also see .check.enum.cast. 
*/ + CHECKL(COMPATTYPE(mps_message_type_t, MessageType)); + CHECKL((int)MessageTypeFINALIZATION + == (int)_mps_MESSAGE_TYPE_FINALIZATION); + CHECKL((int)MessageTypeGC + == (int)_mps_MESSAGE_TYPE_GC); + CHECKL((int)MessageTypeGCSTART + == (int)_mps_MESSAGE_TYPE_GC_START); + + /* The external idea of a word width and the internal one */ + /* had better match. . */ + CHECKL(sizeof(mps_word_t) == sizeof(void *)); + CHECKL(COMPATTYPE(mps_word_t, Word)); + + /* The external idea of an address and the internal one */ + /* had better match. */ + CHECKL(COMPATTYPE(mps_addr_t, Addr)); + + /* The external idea of size and the internal one had */ + /* better match. */ + /* and . */ + CHECKL(COMPATTYPE(size_t, Size)); + + /* Clock values are passed from external to internal and back */ + /* out to external. */ + CHECKL(COMPATTYPE(mps_clock_t, Clock)); + + return TRUE; +} + + +/* Ranks + * + * Here a rank returning function is defined for all client visible + * ranks. + * + * .rank.final.not: RankFINAL does not have a corresponding function as + * it is only used internally. 
*/ + +mps_rank_t mps_rank_ambig(void) +{ + return RankAMBIG; +} + +mps_rank_t mps_rank_exact(void) +{ + return RankEXACT; +} + +mps_rank_t mps_rank_weak(void) +{ + return RankWEAK; +} + + +mps_res_t mps_arena_extend(mps_arena_t arena, + mps_addr_t base, size_t size) +{ + Res res; + + ArenaEnter(arena); + AVER(size > 0); + res = ArenaExtend(arena, (Addr)base, (Size)size); + ArenaLeave(arena); + + return (mps_res_t)res; +} + +size_t mps_arena_reserved(mps_arena_t arena) +{ + Size size; + + ArenaEnter(arena); + size = ArenaReserved(arena); + ArenaLeave(arena); + + return (size_t)size; +} + +size_t mps_arena_committed(mps_arena_t arena) +{ + Size size; + + ArenaEnter(arena); + size = ArenaCommitted(arena); + ArenaLeave(arena); + + return (size_t)size; +} + +size_t mps_arena_spare_committed(mps_arena_t arena) +{ + Size size; + + ArenaEnter(arena); + size = ArenaSpareCommitted(arena); + ArenaLeave(arena); + + return (size_t)size; +} + +size_t mps_arena_commit_limit(mps_arena_t arena) +{ + Size size; + + ArenaEnter(arena); + size = ArenaCommitLimit(arena); + ArenaLeave(arena); + + return size; +} + +mps_res_t mps_arena_commit_limit_set(mps_arena_t arena, size_t limit) +{ + Res res; + + ArenaEnter(arena); + res = ArenaSetCommitLimit(arena, limit); + ArenaLeave(arena); + + return (mps_res_t)res; +} + +void mps_arena_spare_set(mps_arena_t arena, double spare) +{ + ArenaEnter(arena); + ArenaSetSpare(arena, spare); + ArenaLeave(arena); +} + +double mps_arena_spare(mps_arena_t arena) +{ + double spare; + + ArenaEnter(arena); + spare = ArenaSpare(arena); + ArenaLeave(arena); + + return spare; +} + +void mps_arena_spare_commit_limit_set(mps_arena_t arena, size_t limit) +{ + double spare; + /* Can't check limit, as all possible values are allowed. 
*/ + ArenaEnter(arena); + spare = (double)limit / (double)ArenaCommitted(arena); + if (spare > 1.0) + spare = 1.0; + ArenaSetSpare(arena, spare); + ArenaLeave(arena); +} + +size_t mps_arena_spare_commit_limit(mps_arena_t arena) +{ + size_t limit; + + ArenaEnter(arena); + limit = ArenaSpareCommitLimit(arena); + ArenaLeave(arena); + + return limit; +} + +double mps_arena_pause_time(mps_arena_t arena) +{ + double pause_time; + + ArenaEnter(arena); + pause_time = ArenaPauseTime(arena); + ArenaLeave(arena); + + return pause_time; +} + +void mps_arena_pause_time_set(mps_arena_t arena, double pause_time) +{ + ArenaEnter(arena); + ArenaSetPauseTime(arena, pause_time); + ArenaLeave(arena); +} + + +void mps_arena_clamp(mps_arena_t arena) +{ + ArenaEnter(arena); + ArenaClamp(ArenaGlobals(arena)); + ArenaLeave(arena); +} + + +void mps_arena_release(mps_arena_t arena) +{ + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + ArenaRelease(ArenaGlobals(arena)); + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); +} + + +void mps_arena_park(mps_arena_t arena) +{ + ArenaEnter(arena); + ArenaPark(ArenaGlobals(arena)); + ArenaLeave(arena); +} + + +void mps_arena_postmortem(mps_arena_t arena) +{ + /* Don't call ArenaEnter -- one of the purposes of this function is + * to release the arena lock if it's held */ + AVER(TESTT(Arena, arena)); + ArenaPostmortem(ArenaGlobals(arena)); +} + + +mps_res_t mps_arena_start_collect(mps_arena_t arena) +{ + Res res; + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + res = ArenaStartCollect(ArenaGlobals(arena), + TraceStartWhyCLIENTFULL_INCREMENTAL); + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + return (mps_res_t)res; +} + +mps_res_t mps_arena_collect(mps_arena_t arena) +{ + Res res; + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + res = ArenaCollect(ArenaGlobals(arena), TraceStartWhyCLIENTFULL_BLOCK); + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + return (mps_res_t)res; +} + +mps_bool_t mps_arena_step(mps_arena_t arena, + double 
interval, + double multiplier) +{ + Bool b; + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + b = ArenaStep(ArenaGlobals(arena), interval, multiplier); + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + return b; +} + + +/* mps_arena_create -- create an arena object */ + +mps_res_t mps_arena_create(mps_arena_t *mps_arena_o, + mps_arena_class_t mps_arena_class, ...) +{ + mps_res_t res; + va_list varargs; + va_start(varargs, mps_arena_class); + res = mps_arena_create_v(mps_arena_o, mps_arena_class, varargs); + va_end(varargs); + return (mps_res_t)res; +} + + +/* mps_arena_create_v -- create an arena object */ + +mps_res_t mps_arena_create_v(mps_arena_t *mps_arena_o, + mps_arena_class_t arena_class, + va_list varargs) +{ + mps_arg_s args[MPS_ARGS_MAX]; + AVER(TESTT(ArenaClass, arena_class)); + arena_class->varargs(args, varargs); + return mps_arena_create_k(mps_arena_o, arena_class, args); +} + + +/* mps_arena_create_k -- create an arena object */ + +mps_res_t mps_arena_create_k(mps_arena_t *mps_arena_o, + mps_arena_class_t arena_class, + mps_arg_s mps_args[]) +{ + Arena arena; + Res res; + + /* This is the first real call that the client will have to make, */ + /* so check static consistency here. */ + AVER(mpsi_check()); + + AVER(mps_arena_o != NULL); + + res = ArenaCreate(&arena, arena_class, mps_args); + if (res != ResOK) + return (mps_res_t)res; + + ArenaLeave(arena); + *mps_arena_o = (mps_arena_t)arena; + return MPS_RES_OK; +} + + +/* mps_arena_destroy -- destroy an arena object */ + +void mps_arena_destroy(mps_arena_t arena) +{ + ArenaEnter(arena); + ArenaDestroy(arena); +} + + +/* mps_arena_busy -- is the arena part way through an operation? */ + +mps_bool_t mps_arena_busy(mps_arena_t arena) +{ + /* Don't call ArenaEnter -- the purpose of this function is to + * determine if the arena lock is held */ + AVER(TESTT(Arena, arena)); + return ArenaBusy(arena); +} + + +/* mps_arena_has_addr -- is this address managed by this arena? 
*/ + +mps_bool_t mps_arena_has_addr(mps_arena_t arena, mps_addr_t p) +{ + Bool b; + + /* One of the few functions that can be called + during the call to an MPS function. IE this function + can be called when walking the heap. */ + ArenaEnterRecursive(arena); + AVERT(Arena, arena); + b = ArenaHasAddr(arena, (Addr)p); + ArenaLeaveRecursive(arena); + return b; +} + + +/* mps_addr_pool -- return the pool containing the given address + * + * Wrapper for PoolOfAddr. Note: may return an MPS-internal pool. + */ + +mps_bool_t mps_addr_pool(mps_pool_t *mps_pool_o, + mps_arena_t arena, + mps_addr_t p) +{ + Bool b; + Pool pool; + + AVER(mps_pool_o != NULL); + /* mps_arena -- will be checked by ArenaEnterRecursive */ + /* p -- cannot be checked */ + + /* One of the few functions that can be called + during the call to an MPS function. IE this function + can be called when walking the heap. */ + ArenaEnterRecursive(arena); + b = PoolOfAddr(&pool, arena, (Addr)p); + ArenaLeaveRecursive(arena); + + if(b) + *mps_pool_o = (mps_pool_t)pool; + + return b; +} + + +/* mps_addr_object -- find base pointer of a managed object */ + +mps_res_t mps_addr_object(mps_addr_t *p_o, mps_arena_t arena, mps_addr_t addr) +{ + Res res; + Addr p; + + AVER(p_o != NULL); + + /* This function cannot be called while walking the heap, unlike + * mps_arena_has_addr(). This is because it is designed to be called + * with an active mutator, so takes the arena lock. This is in order + * that it sees a consistent view of MPS structures and the heap, + * and can peek behind the barrier. + */ + ArenaEnter(arena); + AVERT(Arena, arena); + res = ArenaAddrObject(&p, arena, (Addr)addr); + ArenaLeave(arena); + /* We require the object to be ambiguously referenced (hence pinned) + * so that p doesn't become invalid before it is written to *p_o. + * (We can't simply put this write before the ArenaLeave(), because + * p_o could point to MPS-managed memory that is behind a barrier.) 
+ */ + if (res == ResOK) + *p_o = (mps_addr_t)p; + + return res; +} + + +/* mps_addr_fmt -- what format might this address have? + * + * .per-pool: There's no reason why all objects in a pool should have + * the same format. But currently, MPS internals support at most one + * format per pool. + * + * If the address is in a pool and has a format, returns TRUE and + * updates *mps_fmt_o to be that format. Otherwise, returns FALSE + * and does not update *mps_fmt_o. + * + * Note: may return an MPS-internal format. + */ +mps_bool_t mps_addr_fmt(mps_fmt_t *mps_fmt_o, + mps_arena_t arena, + mps_addr_t p) +{ + Bool b; + Pool pool; + Format format = 0; + + AVER(mps_fmt_o != NULL); + /* mps_arena -- will be checked by ArenaEnterRecursive */ + /* p -- cannot be checked */ + + /* One of the few functions that can be called + during the call to an MPS function. IE this function + can be called when walking the heap. */ + ArenaEnterRecursive(arena); + /* .per-pool */ + b = PoolOfAddr(&pool, arena, (Addr)p); + if(b) + b = PoolFormat(&format, pool); + ArenaLeaveRecursive(arena); + + if(b) + *mps_fmt_o = (mps_fmt_t)format; + + return b; +} + + +/* mps_fmt_create_k -- create an object format using keyword arguments */ + +mps_res_t mps_fmt_create_k(mps_fmt_t *mps_fmt_o, + mps_arena_t arena, + mps_arg_s args[]) +{ + Format format; + Res res; + + ArenaEnter(arena); + + AVER(mps_fmt_o != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + + res = FormatCreate(&format, arena, args); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_fmt_o = (mps_fmt_t)format; + return MPS_RES_OK; +} + + +/* mps_fmt_create_A -- create an object format of variant A + * + * .fmt.create.A.purpose: This function converts an object format spec + * of variant "A" into an MPM Format object. See + * for justification of the way that + * the format structure is declared as "mps_fmt_A". 
*/ + +mps_res_t mps_fmt_create_A(mps_fmt_t *mps_fmt_o, + mps_arena_t arena, + mps_fmt_A_s *mps_fmt_A) +{ + Format format; + Res res; + + ArenaEnter(arena); + + AVER(mps_fmt_o != NULL); + AVERT(Arena, arena); + AVER(mps_fmt_A != NULL); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FMT_ALIGN, mps_fmt_A->align); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, mps_fmt_A->scan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SKIP, mps_fmt_A->skip); + MPS_ARGS_ADD(args, MPS_KEY_FMT_FWD, mps_fmt_A->fwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_ISFWD, mps_fmt_A->isfwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, mps_fmt_A->pad); + res = FormatCreate(&format, arena, args); + } MPS_ARGS_END(args); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_fmt_o = (mps_fmt_t)format; + return MPS_RES_OK; +} + + +/* mps_fmt_create_B -- create an object format of variant B */ + +mps_res_t mps_fmt_create_B(mps_fmt_t *mps_fmt_o, + mps_arena_t arena, + mps_fmt_B_s *mps_fmt_B) +{ + Format format; + Res res; + + ArenaEnter(arena); + + AVER(mps_fmt_o != NULL); + AVERT(Arena, arena); + AVER(mps_fmt_B != NULL); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FMT_ALIGN, mps_fmt_B->align); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, mps_fmt_B->scan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SKIP, mps_fmt_B->skip); + MPS_ARGS_ADD(args, MPS_KEY_FMT_FWD, mps_fmt_B->fwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_ISFWD, mps_fmt_B->isfwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, mps_fmt_B->pad); + MPS_ARGS_ADD(args, MPS_KEY_FMT_CLASS, mps_fmt_B->mps_class); + res = FormatCreate(&format, arena, args); + } MPS_ARGS_END(args); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_fmt_o = (mps_fmt_t)format; + return MPS_RES_OK; +} + + +/* mps_fmt_create_auto_header -- create a format of variant auto_header */ + +mps_res_t mps_fmt_create_auto_header(mps_fmt_t *mps_fmt_o, + mps_arena_t arena, + mps_fmt_auto_header_s *mps_fmt) +{ + Format format; + Res res; + + ArenaEnter(arena); + + AVER(mps_fmt_o 
!= NULL); + AVERT(Arena, arena); + AVER(mps_fmt != NULL); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FMT_ALIGN, mps_fmt->align); + MPS_ARGS_ADD(args, MPS_KEY_FMT_HEADER_SIZE, mps_fmt->mps_headerSize); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, mps_fmt->scan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SKIP, mps_fmt->skip); + MPS_ARGS_ADD(args, MPS_KEY_FMT_FWD, mps_fmt->fwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_ISFWD, mps_fmt->isfwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, mps_fmt->pad); + res = FormatCreate(&format, arena, args); + } MPS_ARGS_END(args); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_fmt_o = (mps_fmt_t)format; + return MPS_RES_OK; +} + + +/* mps_fmt_create_fixed -- create an object format of variant fixed */ + +mps_res_t mps_fmt_create_fixed(mps_fmt_t *mps_fmt_o, + mps_arena_t arena, + mps_fmt_fixed_s *mps_fmt_fixed) +{ + Format format; + Res res; + + ArenaEnter(arena); + + AVER(mps_fmt_o != NULL); + AVERT(Arena, arena); + AVER(mps_fmt_fixed != NULL); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FMT_ALIGN, mps_fmt_fixed->align); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, mps_fmt_fixed->scan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_FWD, mps_fmt_fixed->fwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_ISFWD, mps_fmt_fixed->isfwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, mps_fmt_fixed->pad); + res = FormatCreate(&format, arena, args); + } MPS_ARGS_END(args); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_fmt_o = (mps_fmt_t)format; + return MPS_RES_OK; +} + + +/* mps_fmt_destroy -- destroy a format object */ + +void mps_fmt_destroy(mps_fmt_t format) +{ + Arena arena; + + AVER(TESTT(Format, format)); + arena = FormatArena(format); + + ArenaEnter(arena); + + FormatDestroy(format); + + ArenaLeave(arena); +} + + +mps_res_t mps_pool_create(mps_pool_t *mps_pool_o, mps_arena_t arena, + mps_pool_class_t mps_class, ...) 
+{ + mps_res_t res; + va_list varargs; + va_start(varargs, mps_class); + res = mps_pool_create_v(mps_pool_o, arena, mps_class, varargs); + va_end(varargs); + return res; +} + +mps_res_t mps_pool_create_v(mps_pool_t *mps_pool_o, mps_arena_t arena, + mps_pool_class_t pool_class, va_list varargs) +{ + mps_arg_s args[MPS_ARGS_MAX]; + AVER(TESTT(PoolClass, pool_class)); + pool_class->varargs(args, varargs); + return mps_pool_create_k(mps_pool_o, arena, pool_class, args); +} + +mps_res_t mps_pool_create_k(mps_pool_t *mps_pool_o, mps_arena_t arena, + mps_pool_class_t pool_class, mps_arg_s args[]) +{ + Pool pool; + Res res; + + ArenaEnter(arena); + + AVER(mps_pool_o != NULL); + AVERT(Arena, arena); + AVERT(PoolClass, pool_class); + AVERT(ArgList, args); + + res = PoolCreate(&pool, arena, pool_class, args); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_pool_o = (mps_pool_t)pool; + return MPS_RES_OK; +} + +void mps_pool_destroy(mps_pool_t pool) +{ + Arena arena; + + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + + PoolDestroy(pool); + + ArenaLeave(arena); +} + +size_t mps_pool_total_size(mps_pool_t pool) +{ + Arena arena; + Size size; + + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + + size = PoolTotalSize(pool); + + ArenaLeave(arena); + + return (size_t)size; +} + +size_t mps_pool_free_size(mps_pool_t pool) +{ + Arena arena; + Size size; + + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + + size = PoolFreeSize(pool); + + ArenaLeave(arena); + + return (size_t)size; +} + + +mps_res_t mps_alloc(mps_addr_t *p_o, mps_pool_t pool, size_t size) +{ + Arena arena; + Addr p; + Res res; + + AVER_CRITICAL(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + + ArenaPoll(ArenaGlobals(arena)); /* .poll */ + + AVER_CRITICAL(p_o != NULL); + AVERT_CRITICAL(Pool, pool); + AVER_CRITICAL(size > 0); + /* Note: class may allow 
unaligned size, see */ + /* . */ + + res = PoolAlloc(&p, pool, size); + + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *p_o = (mps_addr_t)p; + return MPS_RES_OK; +} + + +void mps_free(mps_pool_t pool, mps_addr_t p, size_t size) +{ + Arena arena; + + AVER_CRITICAL(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + + AVERT_CRITICAL(Pool, pool); + AVER_CRITICAL(size > 0); + /* Note: class may allow unaligned size, see */ + /* . */ + + PoolFree(pool, (Addr)p, size); + ArenaLeave(arena); +} + + +/* mps_ap_create -- create an allocation point */ + +mps_res_t mps_ap_create(mps_ap_t *mps_ap_o, mps_pool_t pool, ...) +{ + mps_res_t res; + va_list varargs; + va_start(varargs, pool); + res = mps_ap_create_v(mps_ap_o, pool, varargs); + va_end(varargs); + return res; +} + + +/* mps_ap_create_v -- create an allocation point, with varargs */ + +mps_res_t mps_ap_create_v(mps_ap_t *mps_ap_o, mps_pool_t pool, + va_list varargs) +{ + Arena arena; + BufferClass bufclass; + mps_arg_s args[MPS_ARGS_MAX]; + + AVER(mps_ap_o != NULL); + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + AVERT(Pool, pool); + bufclass = PoolDefaultBufferClass(pool); + bufclass->varargs(args, varargs); + ArenaLeave(arena); + + return mps_ap_create_k(mps_ap_o, pool, args); +} + +/* mps_ap_create_k -- create an allocation point, with keyword args */ + +mps_res_t mps_ap_create_k(mps_ap_t *mps_ap_o, + mps_pool_t pool, + mps_arg_s args[]) { + Arena arena; + Buffer buf; + BufferClass bufclass; + Res res; + + AVER(mps_ap_o != NULL); + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + + AVERT(Pool, pool); + + bufclass = PoolDefaultBufferClass(pool); + res = BufferCreate(&buf, bufclass, pool, TRUE, args); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + + *mps_ap_o = BufferAP(buf); + return MPS_RES_OK; +} + +void mps_ap_destroy(mps_ap_t mps_ap) +{ + Buffer buf = 
BufferOfAP(mps_ap); + Arena arena; + + AVER(mps_ap != NULL); + AVER(TESTT(Buffer, buf)); + arena = BufferArena(buf); + + ArenaEnter(arena); + + BufferDestroy(buf); + + ArenaLeave(arena); +} + + +/* mps_reserve -- allocate store in preparation for initialization + * + * .reserve.call: mps_reserve does not call BufferReserve, but instead + * uses the in-line macro from . This is so that it calls + * mps_ap_fill and thence ArenaPoll (.poll). The consistency checks are + * those which can be done outside the MPM. See also .commit.call. */ + +mps_res_t (mps_reserve)(mps_addr_t *p_o, mps_ap_t mps_ap, size_t size) +{ + mps_res_t res; + + AVER(p_o != NULL); + AVER(mps_ap != NULL); + AVER(TESTT(Buffer, BufferOfAP(mps_ap))); + AVER(mps_ap->init == mps_ap->alloc); + AVER(size > 0); + + MPS_RESERVE_BLOCK(res, *p_o, mps_ap, size); + + return res; +} + + +/* mps_commit -- commit initialized object, finishing allocation + * + * .commit.call: mps_commit does not call BufferCommit, but instead uses + * the in-line commit macro from . This is so that it calls + * mps_ap_trip and thence ArenaPoll in future (.poll). The consistency + * checks here are the ones which can be done outside the MPM. See also + * .reserve.call. */ + +mps_bool_t (mps_commit)(mps_ap_t mps_ap, mps_addr_t p, size_t size) +{ + AVER(mps_ap != NULL); + AVER(TESTT(Buffer, BufferOfAP(mps_ap))); + AVER(p != NULL); + AVER(size > 0); + AVER(p == mps_ap->init); + AVER(PointerAdd(mps_ap->init, size) == mps_ap->alloc); + + return mps_commit(mps_ap, p, size); +} + + +/* Allocation frame support + * + * These are candidates for being inlineable as macros. + * These functions are easier to maintain, so we'll avoid + * macros for now. */ + + +/* mps_ap_frame_push -- push a new allocation frame + * + * . 
*/ + +mps_res_t (mps_ap_frame_push)(mps_frame_t *frame_o, mps_ap_t mps_ap) +{ + AVER(frame_o != NULL); + AVER(mps_ap != NULL); + + /* Fail if between reserve & commit */ + if ((char *)mps_ap->alloc != (char *)mps_ap->init) { + return MPS_RES_FAIL; + } + + if (mps_ap->init < mps_ap->limit) { + /* Valid state for a lightweight push */ + *frame_o = (mps_frame_t)mps_ap->init; + return MPS_RES_OK; + } else { + /* Need a heavyweight push */ + Buffer buf = BufferOfAP(mps_ap); + Arena arena; + AllocFrame frame; + Res res; + + AVER(TESTT(Buffer, buf)); + arena = BufferArena(buf); + + ArenaEnter(arena); + AVERT(Buffer, buf); + + res = BufferFramePush(&frame, buf); + + if (res == ResOK) { + *frame_o = (mps_frame_t)frame; + } + ArenaLeave(arena); + return (mps_res_t)res; + } +} + +/* mps_ap_frame_pop -- push a new allocation frame + * + * . */ + +mps_res_t (mps_ap_frame_pop)(mps_ap_t mps_ap, mps_frame_t frame) +{ + Buffer buf; + Pool pool; + + AVER(mps_ap != NULL); + /* Can't check frame because it's an arbitrary value */ + + /* Fail if between reserve & commit */ + if ((char *)mps_ap->alloc != (char *)mps_ap->init) { + return MPS_RES_FAIL; + } + + buf = BufferOfAP(mps_ap); + AVER(TESTT(Buffer, buf)); + pool = buf->pool; + AVER(TESTT(Pool, pool)); + + /* It's not thread-safe to read BufferBase here in an automatically + * managed pool (see job003947), so test AttrGC first. */ + if (!PoolHasAttr(pool, AttrGC) + && BufferBase(buf) <= (Addr)frame + && (mps_addr_t)frame < mps_ap->init) + { + /* Lightweight pop to earlier address in same buffer in a manually + * managed pool. 
*/ + mps_ap->init = mps_ap->alloc = (mps_addr_t)frame; + return MPS_RES_OK; + + } else { + /* Need a heavyweight pop */ + Arena arena; + Res res; + + arena = BufferArena(buf); + + ArenaEnter(arena); + AVERT(Buffer, buf); + + res = BufferFramePop(buf, (AllocFrame)frame); + + ArenaLeave(arena); + return (mps_res_t)res; + } +} + + +/* mps_ap_fill -- called by mps_reserve when an AP hasn't enough arena + * + * .ap.fill.internal: mps_ap_fill is normally invoked by the + * mps_reserve macro, but may be "called" directly by the client code + * if necessary. See */ + +mps_res_t mps_ap_fill(mps_addr_t *p_o, mps_ap_t mps_ap, size_t size) +{ + Buffer buf = BufferOfAP(mps_ap); + Arena arena; + Addr p; + Res res; + + AVER(mps_ap != NULL); + AVER(TESTT(Buffer, buf)); + arena = BufferArena(buf); + + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + + ArenaPoll(ArenaGlobals(arena)); /* .poll */ + + AVER(p_o != NULL); + AVERT(Buffer, buf); + AVER(size > 0); + AVER(SizeIsAligned(size, BufferPool(buf)->alignment)); /* */ + + res = BufferFill(&p, buf, size); + + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *p_o = (mps_addr_t)p; + return MPS_RES_OK; +} + + +/* mps_ap_trip -- called by mps_commit when an AP is tripped + * + * .ap.trip.internal: mps_ap_trip is normally invoked by the + * mps_commit macro, but may be "called" directly by the client code + * if necessary. 
See */ + +mps_bool_t mps_ap_trip(mps_ap_t mps_ap, mps_addr_t p, size_t size) +{ + Buffer buf = BufferOfAP(mps_ap); + Arena arena; + Bool b; + + AVER(mps_ap != NULL); + AVER(TESTT(Buffer, buf)); + arena = BufferArena(buf); + + ArenaEnter(arena); + + AVERT(Buffer, buf); + AVER(size > 0); + AVER(SizeIsAligned(size, BufferPool(buf)->alignment)); + + b = BufferTrip(buf, (Addr)p, size); + + ArenaLeave(arena); + + return b; +} + + +/* mps_sac_create -- create an SAC object */ + +mps_res_t mps_sac_create(mps_sac_t *mps_sac_o, mps_pool_t pool, + size_t classes_count, mps_sac_classes_s *classes) +{ + Arena arena; + SAC sac; + Res res; + + AVER(mps_sac_o != NULL); + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + + ArenaEnter(arena); + + res = SACCreate(&sac, pool, (Count)classes_count, classes); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_sac_o = ExternalSACOfSAC(sac); + return (mps_res_t)res; +} + + +/* mps_sac_destroy -- destroy an SAC object */ + +void mps_sac_destroy(mps_sac_t mps_sac) +{ + SAC sac = SACOfExternalSAC(mps_sac); + Arena arena; + + AVER(TESTT(SAC, sac)); + arena = SACArena(sac); + + ArenaEnter(arena); + + SACDestroy(sac); + + ArenaLeave(arena); +} + + +/* mps_sac_flush -- flush an SAC, releasing all memory held in it */ + +void mps_sac_flush(mps_sac_t mps_sac) +{ + SAC sac = SACOfExternalSAC(mps_sac); + Arena arena; + + AVER(TESTT(SAC, sac)); + arena = SACArena(sac); + + ArenaEnter(arena); + + SACFlush(sac); + + ArenaLeave(arena); +} + + +/* mps_sac_fill -- alloc an object, and perhaps fill the cache */ + +mps_res_t mps_sac_fill(mps_addr_t *p_o, mps_sac_t mps_sac, size_t size, + mps_bool_t unused) +{ + SAC sac = SACOfExternalSAC(mps_sac); + Arena arena; + Addr p = NULL; /* suppress "may be used uninitialized" */ + Res res; + + AVER(p_o != NULL); + AVER(TESTT(SAC, sac)); + arena = SACArena(sac); + UNUSED(unused); + + ArenaEnter(arena); + + res = SACFill(&p, sac, size); + + ArenaLeave(arena); + + if (res != ResOK) + 
return (mps_res_t)res; + *p_o = (mps_addr_t)p; + return (mps_res_t)res; +} + + +/* mps_sac_empty -- free an object, and perhaps empty the cache */ + +void mps_sac_empty(mps_sac_t mps_sac, mps_addr_t p, size_t size) +{ + SAC sac = SACOfExternalSAC(mps_sac); + Arena arena; + + AVER(TESTT(SAC, sac)); + arena = SACArena(sac); + + ArenaEnter(arena); + + SACEmpty(sac, (Addr)p, (Size)size); + + ArenaLeave(arena); +} + + +/* mps_sac_alloc -- alloc an object, using cached space if possible */ + +mps_res_t mps_sac_alloc(mps_addr_t *p_o, mps_sac_t mps_sac, size_t size, + mps_bool_t unused) +{ + Res res; + + AVER(p_o != NULL); + AVER(TESTT(SAC, SACOfExternalSAC(mps_sac))); + AVER(size > 0); + + MPS_SAC_ALLOC_FAST(res, *p_o, mps_sac, size, (unused != 0)); + return (mps_res_t)res; +} + + +/* mps_sac_free -- free an object, to the cache if possible */ + +void mps_sac_free(mps_sac_t mps_sac, mps_addr_t p, size_t size) +{ + AVER(TESTT(SAC, SACOfExternalSAC(mps_sac))); + /* Can't check p outside arena lock */ + AVER(size > 0); + + MPS_SAC_FREE_FAST(mps_sac, p, size); +} + + +/* Roots */ + + +mps_res_t mps_root_create(mps_root_t *mps_root_o, mps_arena_t arena, + mps_rank_t mps_rank, mps_rm_t mps_rm, + mps_root_scan_t mps_root_scan, void *p, size_t s) +{ + Rank rank = (Rank)mps_rank; + Root root; + Res res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + AVER(mps_rm == (mps_rm_t)0); + + /* See .root-mode. 
*/ + res = RootCreateFun(&root, arena, rank, mps_root_scan, p, s); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + +mps_res_t mps_root_create_table(mps_root_t *mps_root_o, mps_arena_t arena, + mps_rank_t mps_rank, mps_rm_t mps_rm, + mps_addr_t *base, size_t size) +{ + Rank rank = (Rank)mps_rank; + Root root; + RootMode mode = (RootMode)mps_rm; + Res res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + AVER(base != NULL); + AVER(size > 0); + + /* .root.table-size: size is the length of the array at base, not + the size in bytes. However, RootCreateArea expects base and limit + pointers. Be careful. Avoid type punning by casting through + void *. */ + + res = RootCreateArea(&root, arena, rank, mode, + (void *)base, (void *)(base + size), + mps_scan_area, NULL); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + +mps_res_t mps_root_create_area(mps_root_t *mps_root_o, + mps_arena_t arena, + mps_rank_t mps_rank, mps_rm_t mps_rm, + void *base, void *limit, + mps_area_scan_t scan_area, + void *closure) +{ + Rank rank = (Rank)mps_rank; + Root root; + RootMode mode = (RootMode)mps_rm; + Res res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + AVER(base != NULL); + AVER(limit != NULL); + AVER(base < limit); + AVER(FUNCHECK(scan_area)); + /* Can't check anything about closure */ + + res = RootCreateArea(&root, arena, rank, mode, + base, limit, + scan_area, closure); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + +mps_res_t mps_root_create_area_tagged(mps_root_t *mps_root_o, + mps_arena_t arena, + mps_rank_t mps_rank, + mps_rm_t mps_rm, + void *base, + void *limit, + mps_area_scan_t scan_area, + mps_word_t mask, + mps_word_t pattern) +{ + Rank rank = (Rank)mps_rank; + Root root; + RootMode mode = (RootMode)mps_rm; + Res 
res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + AVER(base != NULL); + AVER(limit != NULL); + AVER(base < limit); + AVER(FUNCHECK(scan_area)); + /* Can't check anything about mask or pattern, as they could mean + anything to scan_area. */ + + res = RootCreateAreaTagged(&root, arena, rank, mode, + base, limit, + scan_area, mask, pattern); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + + +mps_res_t mps_root_create_table_masked(mps_root_t *mps_root_o, + mps_arena_t arena, + mps_rank_t mps_rank, mps_rm_t mps_rm, + mps_addr_t *base, size_t size, + mps_word_t mask) +{ + return mps_root_create_area_tagged(mps_root_o, arena, mps_rank, mps_rm, + base, base + size, + mps_scan_area_tagged, + mask, 0); +} + +mps_res_t mps_root_create_fmt(mps_root_t *mps_root_o, mps_arena_t arena, + mps_rank_t mps_rank, mps_rm_t mps_rm, + mps_fmt_scan_t scan, + mps_addr_t base, mps_addr_t limit) +{ + Rank rank = (Rank)mps_rank; + Root root; + RootMode mode = (RootMode)mps_rm; + Res res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + + res = RootCreateFmt(&root, arena, rank, mode, scan, (Addr)base, (Addr)limit); + + ArenaLeave(arena); + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + +mps_res_t mps_root_create_reg(mps_root_t *mps_root_o, mps_arena_t arena, + mps_rank_t mps_rank, mps_rm_t mps_rm, + mps_thr_t thread, mps_reg_scan_t mps_reg_scan, + void *cold, size_t mps_size) +{ + Rank rank = (Rank)mps_rank; + Root root; + Res res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + AVER(mps_reg_scan != NULL); + AVER(mps_reg_scan == mps_stack_scan_ambig); /* .reg.scan */ + AVER(cold != NULL); + AVER(AddrIsAligned(cold, sizeof(Word))); + AVER(rank == mps_rank_ambig()); + AVER(mps_rm == (mps_rm_t)0); + + UNUSED(mps_size); + + /* See .root-mode. 
*/ + res = RootCreateThreadTagged(&root, arena, rank, thread, + mps_scan_area_tagged, + sizeof(mps_word_t) - 1, 0, + (Word *)cold); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + + +mps_res_t mps_root_create_thread(mps_root_t *mps_root_o, + mps_arena_t arena, + mps_thr_t thread, + void *stack) +{ + return mps_root_create_thread_tagged(mps_root_o, + arena, + mps_rank_ambig(), + (mps_rm_t)0, + thread, + mps_scan_area_tagged, + sizeof(mps_word_t) - 1, + 0, + stack); +} + + +mps_res_t mps_root_create_thread_scanned(mps_root_t *mps_root_o, + mps_arena_t arena, + mps_rank_t mps_rank, + mps_rm_t mps_rm, + mps_thr_t thread, + mps_area_scan_t scan_area, + void *closure, + void *cold) +{ + Rank rank = (Rank)mps_rank; + Root root; + Res res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + AVER(cold != NULL); + AVER(AddrIsAligned(cold, sizeof(Word))); + AVER(rank == mps_rank_ambig()); + AVER(mps_rm == (mps_rm_t)0); + AVER(FUNCHECK(scan_area)); + /* Can't check anything about closure. */ + + /* See .root-mode. */ + res = RootCreateThread(&root, arena, rank, thread, + scan_area, closure, + (Word *)cold); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + + +mps_res_t mps_root_create_thread_tagged(mps_root_t *mps_root_o, + mps_arena_t arena, + mps_rank_t mps_rank, + mps_rm_t mps_rm, + mps_thr_t thread, + mps_area_scan_t scan_area, + mps_word_t mask, + mps_word_t pattern, + void *cold) +{ + Rank rank = (Rank)mps_rank; + Root root; + Res res; + + ArenaEnter(arena); + + AVER(mps_root_o != NULL); + AVER(cold != NULL); + AVER(AddrIsAligned(cold, sizeof(Word))); + AVER(rank == mps_rank_ambig()); + AVER(mps_rm == (mps_rm_t)0); + AVER(FUNCHECK(scan_area)); + /* Can't check anything about mask or pattern, as they could mean + anything to scan_area. */ + + /* See .root-mode. 
*/ + res = RootCreateThreadTagged(&root, arena, rank, thread, + scan_area, mask, pattern, + cold); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_root_o = (mps_root_t)root; + return MPS_RES_OK; +} + + +/* mps_stack_scan_ambig -- scan the thread state ambiguously + * + * This is a helper function for the deprecated mps_root_create_reg + * and should no longer be reached since that has been reimplemented + * in terms of the more general RootCreateThreadTagged. + */ + +mps_res_t mps_stack_scan_ambig(mps_ss_t mps_ss, + mps_thr_t thread, void *p, size_t s) +{ + UNUSED(mps_ss); + UNUSED(thread); + UNUSED(p); + UNUSED(s); + + NOTREACHED; + + return ResUNIMPL; +} + + +void mps_root_destroy(mps_root_t mps_root) +{ + Root root = (Root)mps_root; + Arena arena; + + arena = RootArena(root); + + ArenaEnter(arena); + + RootDestroy(root); + + ArenaLeave(arena); +} + + +mps_res_t mps_thread_reg(mps_thr_t *mps_thr_o, mps_arena_t arena) +{ + Thread thread; + Res res; + + ArenaEnter(arena); + + AVER(mps_thr_o != NULL); + AVERT(Arena, arena); + + res = ThreadRegister(&thread, arena); + + ArenaLeave(arena); + + if (res != ResOK) + return (mps_res_t)res; + *mps_thr_o = (mps_thr_t)thread; + return MPS_RES_OK; +} + +void mps_thread_dereg(mps_thr_t thread) +{ + Arena arena; + + AVER(ThreadCheckSimple(thread)); + arena = ThreadArena(thread); + + ArenaEnter(arena); + + ThreadDeregister(thread, arena); + + ArenaLeave(arena); +} + +void mps_ld_reset(mps_ld_t ld, mps_arena_t arena) +{ + ArenaEnter(arena); + LDReset(ld, arena); + ArenaLeave(arena); +} + + +/* mps_ld_add -- add a reference to a location dependency + * + * . */ + +void mps_ld_add(mps_ld_t ld, mps_arena_t arena, mps_addr_t addr) +{ + LDAdd(ld, arena, (Addr)addr); +} + + +/* mps_ld_merge -- merge two location dependencies + * + * . 
*/ + +void mps_ld_merge(mps_ld_t ld, mps_arena_t arena, + mps_ld_t from) +{ + LDMerge(ld, arena, from); +} + + +/* mps_ld_isstale -- check whether a location dependency is "stale" + * + * . */ + +mps_bool_t mps_ld_isstale(mps_ld_t ld, mps_arena_t arena, + mps_addr_t addr) +{ + Bool b; + + b = LDIsStale(ld, arena, (Addr)addr); + + return (mps_bool_t)b; +} + +mps_bool_t mps_ld_isstale_any(mps_ld_t ld, mps_arena_t arena) +{ + Bool b; + + b = LDIsStaleAny(ld, arena); + + return (mps_bool_t)b; +} + +mps_word_t mps_collections(mps_arena_t arena) +{ + return ArenaEpoch(arena); /* thread safe: see */ +} + + +/* mps_finalize -- register for finalization */ + +mps_res_t mps_finalize(mps_arena_t arena, mps_addr_t *refref) +{ + Res res; + Addr object; + + ArenaEnter(arena); + + object = (Addr)ArenaPeek(arena, (Ref *)refref); + res = ArenaFinalize(arena, object); + + ArenaLeave(arena); + return (mps_res_t)res; +} + + +/* mps_definalize -- deregister for finalization */ + +mps_res_t mps_definalize(mps_arena_t arena, mps_addr_t *refref) +{ + Res res; + Addr object; + + ArenaEnter(arena); + + object = (Addr)ArenaPeek(arena, (Ref *)refref); + res = ArenaDefinalize(arena, object); + + ArenaLeave(arena); + return (mps_res_t)res; +} + + +/* Messages */ + + +void mps_message_type_enable(mps_arena_t arena, + mps_message_type_t mps_type) +{ + MessageType type = (MessageType)mps_type; + + ArenaEnter(arena); + + MessageTypeEnable(arena, type); + + ArenaLeave(arena); +} + +void mps_message_type_disable(mps_arena_t arena, + mps_message_type_t mps_type) +{ + MessageType type = (MessageType)mps_type; + + ArenaEnter(arena); + + MessageTypeDisable(arena, type); + + ArenaLeave(arena); +} + +mps_bool_t mps_message_poll(mps_arena_t arena) +{ + Bool b; + + ArenaEnter(arena); + + b = MessagePoll(arena); + + ArenaLeave(arena); + return b; +} + +mps_bool_t mps_message_queue_type(mps_message_type_t *mps_message_type_return, + mps_arena_t arena) +{ + MessageType type; + Bool b; + + ArenaEnter(arena); + + 
b = MessageQueueType(&type, arena); + + ArenaLeave(arena); + if (b) { + *mps_message_type_return = (mps_message_type_t)type; + } + return b; +} + +mps_bool_t mps_message_get(mps_message_t *mps_message_return, + mps_arena_t arena, + mps_message_type_t mps_type) +{ + Bool b; + MessageType type = (MessageType)mps_type; + Message message; + + ArenaEnter(arena); + + b = MessageGet(&message, arena, type); + + ArenaLeave(arena); + if (b) { + *mps_message_return = (mps_message_t)message; + } + return b; +} + +void mps_message_discard(mps_arena_t arena, + mps_message_t message) +{ + ArenaEnter(arena); + + MessageDiscard(arena, message); + + ArenaLeave(arena); +} + + +/* Message Methods */ + +/* -- All Message Types */ + +mps_message_type_t mps_message_type(mps_arena_t arena, + mps_message_t message) +{ + MessageType type; + + ArenaEnter(arena); + + type = MessageGetType(message); + + ArenaLeave(arena); + + return (mps_message_type_t)type; +} + +mps_clock_t mps_message_clock(mps_arena_t arena, + mps_message_t message) +{ + Clock postedClock; + + ArenaEnter(arena); + + postedClock = MessageGetClock(message); + + ArenaLeave(arena); + + return (mps_clock_t)postedClock; +} + + +/* -- mps_message_type_finalization */ + +void mps_message_finalization_ref(mps_addr_t *mps_addr_return, + mps_arena_t arena, + mps_message_t message) +{ + Ref ref; + + AVER(mps_addr_return != NULL); + + ArenaEnter(arena); + + AVERT(Arena, arena); + MessageFinalizationRef(&ref, arena, message); + ArenaPoke(arena, (Ref *)mps_addr_return, ref); + + ArenaLeave(arena); +} + +/* -- mps_message_type_gc */ + +size_t mps_message_gc_live_size(mps_arena_t arena, + mps_message_t message) +{ + Size size; + + ArenaEnter(arena); + + AVERT(Arena, arena); + size = MessageGCLiveSize(message); + + ArenaLeave(arena); + return (size_t)size; +} + +size_t mps_message_gc_condemned_size(mps_arena_t arena, + mps_message_t message) +{ + Size size; + + ArenaEnter(arena); + + AVERT(Arena, arena); + size = 
MessageGCCondemnedSize(message); + + ArenaLeave(arena); + return (size_t)size; +} + +size_t mps_message_gc_not_condemned_size(mps_arena_t arena, + mps_message_t message) +{ + Size size; + + ArenaEnter(arena); + + AVERT(Arena, arena); + size = MessageGCNotCondemnedSize(message); + + ArenaLeave(arena); + return (size_t)size; +} + +/* -- mps_message_type_gc_start */ + +const char *mps_message_gc_start_why(mps_arena_t arena, + mps_message_t message) +{ + const char *s; + + ArenaEnter(arena); + + AVERT(Arena, arena); + + s = MessageGCStartWhy(message); + + ArenaLeave(arena); + + return s; +} + + +/* Telemetry */ + +/* TODO: need to consider locking. See job003387, job003388. */ + +void mps_telemetry_set(mps_word_t setMask) +{ + (void)EventControl((Word)setMask, (Word)setMask); +} + +void mps_telemetry_reset(mps_word_t resetMask) +{ + (void)EventControl((Word)resetMask, 0); +} + +mps_word_t mps_telemetry_get(void) +{ + return EventControl(0, 0); +} + +mps_label_t mps_telemetry_intern(const char *label) +{ + AVER(label != NULL); + return (mps_label_t)EventInternString(label); +} + +void mps_telemetry_label(mps_addr_t addr, mps_label_t intern_id) +{ + EventLabelAddr((Addr)addr, (Word)intern_id); +} + +void mps_telemetry_flush(void) +{ + /* Telemetry does its own concurrency control, so none here. */ + EventSync(); +} + + +/* Allocation Patterns */ + + +mps_alloc_pattern_t mps_alloc_pattern_ramp(void) +{ + return (mps_alloc_pattern_t)AllocPatternRamp(); +} + +mps_alloc_pattern_t mps_alloc_pattern_ramp_collect_all(void) +{ + return (mps_alloc_pattern_t)AllocPatternRampCollectAll(); +} + + +/* mps_ap_alloc_pattern_begin -- signal start of an allocation pattern + * + * .ramp.hack: There are only two allocation patterns, both ramps. So + * we assume it's a ramp, and call BufferRampBegin/End directly, without + * dispatching. No point in creating a mechanism for that. 
*/ + +mps_res_t mps_ap_alloc_pattern_begin(mps_ap_t mps_ap, + mps_alloc_pattern_t alloc_pattern) +{ + Buffer buf; + Arena arena; + + AVER(mps_ap != NULL); + buf = BufferOfAP(mps_ap); + AVER(TESTT(Buffer, buf)); + + arena = BufferArena(buf); + ArenaEnter(arena); + + BufferRampBegin(buf, (AllocPattern)alloc_pattern); + + ArenaLeave(arena); + return MPS_RES_OK; +} + + +mps_res_t mps_ap_alloc_pattern_end(mps_ap_t mps_ap, + mps_alloc_pattern_t alloc_pattern) +{ + Arena arena; + Res res; + + AVER(mps_ap != NULL); + AVER(TESTT(Buffer, BufferOfAP(mps_ap))); + UNUSED(alloc_pattern); /* .ramp.hack */ + + arena = BufferArena(BufferOfAP(mps_ap)); + + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + res = BufferRampEnd(BufferOfAP(mps_ap)); + ArenaPoll(ArenaGlobals(arena)); /* .poll */ + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + + return (mps_res_t)res; +} + + +mps_res_t mps_ap_alloc_pattern_reset(mps_ap_t mps_ap) +{ + Arena arena; + + AVER(mps_ap != NULL); + AVER(TESTT(Buffer, BufferOfAP(mps_ap))); + + arena = BufferArena(BufferOfAP(mps_ap)); + + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + BufferRampReset(BufferOfAP(mps_ap)); + ArenaPoll(ArenaGlobals(arena)); /* .poll */ + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + + return MPS_RES_OK; +} + + +/* Chains */ + + +/* mps_chain_create -- create a chain */ + +mps_res_t mps_chain_create(mps_chain_t *chain_o, mps_arena_t arena, + size_t gen_count, mps_gen_param_s *params) +{ + Chain chain; + Res res; + + ArenaEnter(arena); + + AVER(gen_count > 0); + res = ChainCreate(&chain, arena, gen_count, (GenParamStruct *)params); + + ArenaLeave(arena); + if (res != ResOK) + return (mps_res_t)res; + *chain_o = (mps_chain_t)chain; + return MPS_RES_OK; +} + + +/* mps_chain_destroy -- destroy a chain */ + +void mps_chain_destroy(mps_chain_t chain) +{ + Arena arena; + + AVER(TESTT(Chain, chain)); + arena = chain->arena; + + ArenaEnter(arena); + ChainDestroy(chain); + ArenaLeave(arena); +} + + +/* _mps_args_set_key -- set 
the key for a keyword argument + * + * This sets the key for the i'th keyword argument in the array args, + * with bounds checking on i. It is used by the MPS_ARGS_BEGIN, + * MPS_ARGS_ADD, and MPS_ARGS_DONE macros in mps.h. + * + * We implement this in a function here, rather than in a macro in + * mps.h, so that we can use AVER to do the bounds checking. + */ + +void _mps_args_set_key(mps_arg_s args[MPS_ARGS_MAX], unsigned i, + mps_key_t key) +{ + AVER(i < MPS_ARGS_MAX); + args[i].key = key; +} + + +/* Transforms */ + + +mps_res_t mps_transform_create(mps_transform_t *mps_transform_o, + mps_arena_t arena) +{ + Transform transform = NULL; + Res res; + + AVER(mps_transform_o != NULL); + + ArenaEnter(arena); + res = TransformCreate(&transform, arena); + ArenaLeave(arena); + if (res != ResOK) + return res; + + *mps_transform_o = (mps_transform_t)transform; + return MPS_RES_OK; +} + + +mps_res_t mps_transform_add_oldnew(mps_transform_t transform, + mps_addr_t *mps_old_list, + mps_addr_t *mps_new_list, + size_t mps_count) +{ + Ref *old_list = (Ref *)mps_old_list; + Ref *new_list = (Ref *)mps_new_list; + Count count = mps_count; + Arena arena; + Res res; + + AVER(mps_old_list != NULL); + AVER(mps_new_list != NULL); + /* count: cannot check */ + + arena = TransformArena(transform); + + ArenaEnter(arena); + res = TransformAddOldNew(transform, old_list, new_list, count); + ArenaLeave(arena); + + return res; +} + + +mps_res_t mps_transform_apply(mps_bool_t *applied_o, + mps_transform_t transform) +{ + Arena arena; + Res res; + + AVER(applied_o != NULL); + + arena = TransformArena(transform); + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + res = TransformApply(applied_o, transform); + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + + return res; +} + + +void mps_transform_destroy(mps_transform_t transform) +{ + Arena arena; + + arena = TransformArena(transform); + + ArenaEnter(arena); + TransformDestroy(transform); + ArenaLeave(arena); +} + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2023 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpsicv.c b/mps/code/mpsicv.c new file mode 100644 index 00000000000..7c5a04ac791 --- /dev/null +++ b/mps/code/mpsicv.c @@ -0,0 +1,641 @@ +/* mpsicv.c: MPSI COVERAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. 
+ */ + +#include "testlib.h" +#include "mpslib.h" +#include "mpscamc.h" +#include "mpsavm.h" +#include "mpscmvff.h" +#include "fmthe.h" +#include "fmtdy.h" +#include "fmtdytst.h" +#include "mps.h" +#include "mpstd.h" + +#include /* printf */ + + +#define exactRootsCOUNT 49 +#define ambigRootsCOUNT 49 +#define OBJECTS 100000 +#define patternFREQ 100 + +/* objNULL needs to be odd so that it's ignored in exactRoots. */ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) +#define FILLER_OBJECT_SIZE 1023 + +#define genCOUNT 2 +static mps_gen_param_s testChain[genCOUNT] = { + { 150, 0.85 }, { 170, 0.45 } }; + + +static mps_pool_t amcpool; +static mps_ap_t ap; +static size_t ap_headerSIZE = 0; +/* For this ap.... */ +/* Auto_header format + * + * [ auto_header ][===object===] + * ^pMps ^pCli + * <-----------sizeMps---------> + * <---sizeCli--> + * + * Note: pMps < pCli; sizeMps > sizeCli. + */ +#define PtrMps2Cli(n) ((char*)n + ap_headerSIZE) +#define PtrCli2Mps(n) ((char*)n - ap_headerSIZE) +#define SizeMps2Cli(n) (n - ap_headerSIZE) +#define SizeCli2Mps(n) (n + ap_headerSIZE) +#define HeaderInit(pMps) do { \ + if(ap_headerSIZE != 0) { \ + mps_addr_t pMps_MACROCOPY = (pMps); /* macro hygiene */ \ + ((int*)pMps_MACROCOPY)[0] = realHeader; \ + ((int*)pMps_MACROCOPY)[1] = 0xED0ED; \ + } \ + } while(0) + +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; + + +/* Types for alignment tests */ + +#define hasLONG_LONG 1 + +#ifdef _MSC_VER +#define long_long_t __int64 +#else +#define long_long_t long long +#endif + +struct tdouble { + double d; +}; + +struct tlong { + long d; +}; + +#ifdef HAS_LONG_LONG +struct tlonglong { + long_long_t d; +}; +#endif + + +/* alignmentTest -- test default alignment is acceptable */ + +static void alignmentTest(mps_arena_t arena) +{ + mps_pool_t pool; + void *p; + int dummy = 0; + size_t j, size; + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, 0x1000); + MPS_ARGS_ADD(args, 
MPS_KEY_MEAN_SIZE, 1024); + MPS_ARGS_ADD(args, MPS_KEY_MAX_SIZE, 16384); + die(mps_pool_create_k(&pool, arena, mps_class_mvff(), args), + "alignment pool create"); + } MPS_ARGS_END(args); + + size = max(sizeof(double), sizeof(long)); +#ifdef HAS_LONG_LONG + size = max(size, sizeof(long_long_t)); +#endif + for(j = 0; j <= size + (size_t)1; ++j) { + die(mps_alloc(&p, pool, size + 1), "alignment alloc"); + +#define access(type, p) *(type*)(p) = (type)dummy; dummy += (int)*(type*)(p); + + access(double, p); + access(long, p); +#ifdef HAS_LONG_LONG + access(long_long_t, p); +#endif + } + mps_pool_destroy(pool); +} + + +/* make -- allocate an object */ + +static mps_addr_t make(void) +{ + size_t length = rnd() % 20; + size_t sizeCli = (length+2)*sizeof(mps_word_t); + size_t sizeMps = SizeCli2Mps(sizeCli); + mps_addr_t pMps, pCli; + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, pMps, ap, sizeMps); + if (res != MPS_RES_OK) + die(res, "MPS_RESERVE_BLOCK"); + HeaderInit(pMps); + pCli = PtrMps2Cli(pMps); + res = dylan_init(pCli, sizeCli, exactRoots, exactRootsCOUNT); + if (res != MPS_RES_OK) + die(res, "dylan_init"); + } while(!mps_commit(ap, pMps, sizeMps)); + + return pCli; +} + + +/* make_no_inline -- allocate an object, using non-inlined interface */ + +static mps_addr_t make_no_inline(void) +{ + size_t length = rnd() % 20; + size_t sizeCli = (length+2)*sizeof(mps_word_t); + size_t sizeMps = SizeCli2Mps(sizeCli); + mps_addr_t pMps, pCli; + mps_res_t res; + + do { + res = (mps_reserve)(&pMps, ap, sizeMps); + if (res != MPS_RES_OK) + die(res, "(mps_reserve)"); + HeaderInit(pMps); + pCli = PtrMps2Cli(pMps); + res = dylan_init(pCli, sizeCli, exactRoots, exactRootsCOUNT); + if (res != MPS_RES_OK) + die(res, "dylan_init"); + } while(!(mps_commit)(ap, pMps, sizeMps)); + + return pCli; +} + + +static void pool_create_v_test(mps_arena_t arena, ...) 
+{ + va_list args; + + va_start(args, arena); + die(mps_pool_create_v(&amcpool, arena, mps_class_amc(), args), + "pool_create_v(amc)"); + va_end(args); +} + +static void ap_create_v_test(mps_pool_t pool, ...) +{ + mps_ap_t apt; + va_list args; + + va_start(args, pool); + die(mps_ap_create_v(&apt, pool, args), "ap_create_v"); + va_end(args); + mps_ap_destroy(apt); +} + + +/* addr_pool_test + * + * intended to test: + * mps_arena_has_addr + * mps_addr_pool + * mps_addr_fmt + */ + +static void addr_pool_test(mps_arena_t arena, + mps_addr_t obj1, /* unformatted */ + mps_pool_t pool1, + mps_addr_t obj2, /* formatted */ + mps_pool_t pool2, + mps_fmt_t fmt2) +{ + /* Things we might test. An addr might be: + * 0- a valid reference to an MPS-managed object; + * 1- interior pointer to an MPS-managed object; + * 2- pointer into some other part of a Seg owned by a Pool; + * ^^^(mps_addr_pool returns TRUE for these) + * 3- pointer to some MPS memory that's not a Seg; + * 4- pointer to unmapped memory; + * 5- pointer to memory not in any Chunk. + * ^^^(mps_addr_pool returns FALSE for these) + * + * We actually test case 0 (for both unformatted and formatted + * objects), and case 5. + */ + + mps_bool_t b; + mps_addr_t addr; + /* DISTInguished values are to observe overwrites. */ + mps_pool_t poolDistinguished = (mps_pool_t)MPS_WORD_CONST(0x000d1521); + mps_pool_t pool = poolDistinguished; + mps_fmt_t fmtDistinguished = (mps_fmt_t)MPS_WORD_CONST(0x000d1521); + mps_fmt_t fmt = fmtDistinguished; + + /* 0a -- obj1 in pool1 (unformatted) */ + addr = obj1; + pool = poolDistinguished; + fmt = fmtDistinguished; + cdie(mps_arena_has_addr(arena, addr), "mps_arena_has_addr 0a"); + b = mps_addr_pool(&pool, arena, addr); + /* printf("b %d; pool %p; sig %lx\n", b, (void *)pool, + b ? ((mps_word_t*)pool)[0] : (mps_word_t)0); */ + cdie(b == TRUE && pool == pool1, "mps_addr_pool 0a"); + b = mps_addr_fmt(&fmt, arena, addr); + /* printf("b %d; fmt %p; sig %lx\n", b, (void *)fmt, + b ? 
((mps_word_t*)fmt)[0] : (mps_word_t)0); */ + cdie(b == FALSE && fmt == fmtDistinguished, "mps_addr_fmt 0a"); + + /* 0b -- obj2 in pool2, with fmt2 */ + addr = obj2; + pool = poolDistinguished; + fmt = fmtDistinguished; + cdie(mps_arena_has_addr(arena, addr), "mps_arena_has_addr 0b"); + b = mps_addr_pool(&pool, arena, addr); + /* printf("b %d; pool %p; sig %lx\n", b, (void *)pool, + b ? ((mps_word_t*)pool)[0] : (mps_word_t)0); */ + cdie(b == TRUE && pool == pool2, "mps_addr_pool 0b"); + b = mps_addr_fmt(&fmt, arena, addr); + /* printf("b %d; fmt %p; sig %lx\n", b, (void *)fmt, + b ? ((mps_word_t*)fmt)[0] : (mps_word_t)0); */ + cdie(b == TRUE && fmt == fmt2, "mps_addr_fmt 0b"); + + /* 5 */ + addr = &pool; /* point at stack, not in any chunk */ + pool = poolDistinguished; + fmt = fmtDistinguished; + cdie(mps_arena_has_addr(arena, addr) == FALSE, "mps_arena_has_addr 5"); + b = mps_addr_pool(&pool, arena, addr); + cdie(b == FALSE && pool == poolDistinguished, "mps_addr_pool 5"); + b = mps_addr_fmt(&fmt, arena, addr); + cdie(b == FALSE && fmt == fmtDistinguished, "mps_addr_fmt 5"); +} + + +static mps_res_t root_single(mps_ss_t ss, void *p, size_t s) +{ + mps_res_t res; + mps_addr_t *ref = p; + testlib_unused(s); + + MPS_SCAN_BEGIN(ss) { + res = MPS_FIX12(ss, ref); + } MPS_SCAN_END(ss); + + return res; +} + + +/* arena_commit_test + * + * intended to test: + * MPS_RES_COMMIT_LIMIT + * mps_arena_commit_limit + * mps_arena_commit_limit_set + * mps_arena_committed + * mps_arena_reserved + * mps_arena_spare + * mps_arena_spare_committed + * mps_arena_spare_set + * incidentally tests: + * mps_alloc + * mps_arena_commit_limit_set + * mps_class_mvff + * mps_pool_create + * mps_pool_destroy + */ + +static void arena_commit_test(mps_arena_t arena) +{ + mps_pool_t pool; + size_t committed; + size_t reserved; + size_t spare_committed; + size_t limit; + double spare; + void *p; + mps_res_t res; + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, 0x1000); + 
MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, 1024); + MPS_ARGS_ADD(args, MPS_KEY_MAX_SIZE, 16384); + die(mps_pool_create_k(&pool, arena, mps_class_mvff(), args), + "commit pool create"); + } MPS_ARGS_END(args); + + limit = mps_arena_commit_limit(arena); + committed = mps_arena_committed(arena); + spare = mps_arena_spare(arena); + spare_committed = mps_arena_spare_committed(arena); + reserved = mps_arena_reserved(arena); + Insist(0.0 <= spare); + Insist(spare <= 1.0); + Insist((double)spare_committed <= spare * (double)committed); + Insist(spare_committed < committed); + Insist(committed <= reserved); + Insist(committed <= limit); + die(mps_arena_commit_limit_set(arena, committed), "commit_limit_set before"); + do { + res = mps_alloc(&p, pool, FILLER_OBJECT_SIZE); + } while (res == MPS_RES_OK); + die_expect(res, MPS_RES_COMMIT_LIMIT, "Commit limit allocation"); + die(mps_arena_commit_limit_set(arena, limit), "commit_limit_set after"); + res = mps_alloc(&p, pool, FILLER_OBJECT_SIZE); + die_expect(res, MPS_RES_OK, "Allocation failed after raising commit_limit"); + mps_arena_spare_set(arena, 0.0); + Insist(mps_arena_spare(arena) == 0.0); + Insist(mps_arena_spare_committed(arena) == 0); + mps_pool_destroy(pool); +} + + +static void test(mps_arena_t arena) +{ + mps_fmt_t format; + mps_chain_t chain; + mps_root_t exactAreaRoot, exactTableRoot, ambigAreaRoot, ambigTableRoot, + singleRoot, fmtRoot; + unsigned long i; + /* Leave arena clamped until we have allocated this many objects. + is 0 when arena has not been clamped. 
*/ + unsigned long clamp_until = 0; + size_t j; + mps_word_t collections; + mps_pool_t mv; + mps_addr_t alloced_obj; + size_t asize = 32; /* size of alloced obj */ + mps_addr_t obj; + mps_ld_s ld; + mps_alloc_pattern_t ramp = mps_alloc_pattern_ramp(); + size_t rampCount = 0; + mps_res_t res; + + if (rnd() & 1) { + printf("Using auto_header format.\n"); + die(EnsureHeaderFormat(&format, arena), "EnsureHeaderFormat"); + ap_headerSIZE = headerSIZE; /* from fmthe.h */ + } else { + printf("Using normal format (no implicit object header: client pointers point at start of storage).\n"); + die(dylan_fmt(&format, arena), "fmt_create"); + ap_headerSIZE = 0; + } + + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, 0x10000); + MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, 32); + MPS_ARGS_ADD(args, MPS_KEY_MAX_SIZE, 0x10000); + die(mps_pool_create_k(&mv, arena, mps_class_mvff(), args), + "pool_create(mv)"); + } MPS_ARGS_END(args); + + pool_create_v_test(arena, format, chain); /* creates amc pool */ + + ap_create_v_test(amcpool); + + die(mps_ap_create(&ap, amcpool), "ap_create"); + + for(j = 0; j < exactRootsCOUNT; ++j) { + exactRoots[j] = objNULL; + } + for(j = 0; j < ambigRootsCOUNT; ++j) { + ambigRoots[j] = rnd_addr(); + } + + die(mps_root_create_area_tagged(&exactAreaRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], + &exactRoots[exactRootsCOUNT / 2], + mps_scan_area_tagged, + MPS_WORD_CONST(1), 0), + "root_create_area_tagged(exact)"); + die(mps_root_create_table_masked(&exactTableRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[exactRootsCOUNT / 2], + (exactRootsCOUNT + 1) / 2, + MPS_WORD_CONST(1)), + "root_create_table_masked(exact)"); + die(mps_root_create_area(&ambigAreaRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], + &ambigRoots[ambigRootsCOUNT / 2], + mps_scan_area, NULL), + "root_create_area(ambig)"); + 
die(mps_root_create_table(&ambigTableRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[ambigRootsCOUNT / 2], + (ambigRootsCOUNT + 1) / 2), + "root_create_table(ambig)"); + + obj = objNULL; + + die(mps_root_create(&singleRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &root_single, &obj, 0), + "root_create(single)"); + + /* test non-inlined reserve/commit */ + obj = make_no_inline(); + + die(mps_alloc(&alloced_obj, mv, asize), "mps_alloc"); + die(dylan_init(alloced_obj, asize, exactRoots, exactRootsCOUNT), + "dylan_init(alloced_obj)"); + + addr_pool_test(arena, alloced_obj, mv, make(), amcpool, format); + + die(mps_root_create_fmt(&fmtRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + dylan_fmt_A()->scan, + alloced_obj, + (mps_addr_t)(((char*)alloced_obj)+asize)), + "root_create_fmt"); + + mps_ld_reset(&ld, arena); + mps_ld_add(&ld, arena, obj); + + if (mps_ld_isstale(&ld, arena, obj)) { + cdie(mps_ld_isstale_any(&ld, arena), "mps_ld_isstale_any"); + mps_ld_reset(&ld, arena); + mps_ld_add(&ld, arena, obj); + } + + collections = mps_collections(arena); + + for(i = 0; i < OBJECTS; ++i) { + mps_word_t c; + size_t r; + + Insist(!mps_arena_busy(arena)); + + c = mps_collections(arena); + + if(collections != c) { + collections = c; + printf("Collection %"PRIuLONGEST", %lu objects.\n", (ulongest_t)c, i); + for(r = 0; r < exactRootsCOUNT; ++r) { + cdie(exactRoots[r] == objNULL || dylan_check(exactRoots[r]), + "all roots check"); + } + if(collections == 1) { + mps_arena_clamp(arena); + clamp_until = i + 10000; + } + if(collections % 3 == 2) { + mps_arena_park(arena); + mps_arena_release(arena); + } + } + + if(clamp_until && i >= clamp_until) { + mps_arena_release(arena); + clamp_until = 0; + } + + if (rnd() % patternFREQ == 0) { + switch(rnd() % 4) { + case 0: /* fall through */ + case 1: + die(mps_ap_alloc_pattern_begin(ap, ramp), "alloc_pattern_begin"); + ++rampCount; + break; + case 2: + res = mps_ap_alloc_pattern_end(ap, ramp); + cdie(rampCount > 0 ? 
res == MPS_RES_OK : res == MPS_RES_FAIL, + "alloc_pattern_end"); + if (rampCount > 0) { + --rampCount; + } + break; + default: + die(mps_ap_alloc_pattern_reset(ap), "alloc_pattern_reset"); + rampCount = 0; + break; + } + } + + if (rnd() & 1) { + exactRoots[rnd() % exactRootsCOUNT] = make(); + } else { + ambigRoots[rnd() % ambigRootsCOUNT] = make(); + } + + r = rnd() % exactRootsCOUNT; + if (exactRoots[r] != objNULL) { + cdie(dylan_check(exactRoots[r]), "random root check"); + } + } + + arena_commit_test(arena); + alignmentTest(arena); + + die(mps_arena_collect(arena), "collect"); + mps_arena_release(arena); + + mps_free(mv, alloced_obj, 32); + + mps_arena_park(arena); + mps_pool_destroy(mv); + mps_ap_destroy(ap); + mps_root_destroy(fmtRoot); + mps_root_destroy(singleRoot); + mps_root_destroy(exactAreaRoot); + mps_root_destroy(exactTableRoot); + mps_root_destroy(ambigAreaRoot); + mps_root_destroy(ambigTableRoot); + mps_pool_destroy(amcpool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); +} + + +#define TEST_ARENA_SIZE ((size_t)16<<20) + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_thr_t thread; + mps_root_t reg_root; + void *marker = ▮ + + testlib_init(argc, argv); + + MPS_ARGS_BEGIN(args) { + /* Randomize pause time as a regression test for job004011. 
*/ + MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, rnd_pause_time()); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, TEST_ARENA_SIZE); + MPS_ARGS_ADD(args, MPS_KEY_SPARE, rnd_double()); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), + "arena_create"); + } MPS_ARGS_END(args); + die(mps_thread_reg(&thread, arena), "thread_reg"); + + switch (rnd() % 3) { + default: + case 0: + die(mps_root_create_reg(®_root, arena, + mps_rank_ambig(), (mps_rm_t)0, + thread, &mps_stack_scan_ambig, + marker, (size_t)0), + "root_create_reg"); + break; + case 1: + die(mps_root_create_thread(®_root, arena, thread, marker), + "root_create_thread"); + break; + case 2: + die(mps_root_create_thread_scanned(®_root, arena, mps_rank_ambig(), + (mps_rm_t)0, thread, mps_scan_area, + NULL, marker), + "root_create_thread"); + break; + } + + test(arena); + switch (rnd() % 2) { + default: + case 0: + mps_root_destroy(reg_root); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + break; + case 1: + mps_arena_postmortem(arena); + break; + } + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpsio.h b/mps/code/mpsio.h new file mode 100644 index 00000000000..69480aca78f --- /dev/null +++ b/mps/code/mpsio.h @@ -0,0 +1,55 @@ +/* mpsio.h: RAVENBROOK MEMORY POOL SYSTEM I/O INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: For MPS client application developers, MPS developers. + * .sources: + */ + +#ifndef mpsio_h +#define mpsio_h + +#include "mps.h" /* for mps_res_t */ + + +typedef struct mps_io_s *mps_io_t; + +extern mps_res_t mps_io_create(mps_io_t *); +extern void mps_io_destroy(mps_io_t); + +extern mps_res_t mps_io_write(mps_io_t, void *, size_t); +extern mps_res_t mps_io_flush(mps_io_t); + + +#endif /* mpsio_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpsioan.c b/mps/code/mpsioan.c new file mode 100644 index 00000000000..40362207b32 --- /dev/null +++ b/mps/code/mpsioan.c @@ -0,0 +1,128 @@ +/* mpsioan.c: RAVENBROOK MEMORY POOL SYSTEM I/O IMPLEMENTATION (ANSI) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: For MPS client application developers and MPS developers. + * .sources: + */ + +#include "mpsio.h" +#include "mpstd.h" + +/* We don't want to use the ANSI assert() to check that the interface + * is being used correctly, because it's not controlled by the MPS + * variety mechanism: we might end up with assertions being turned on + * in the HOT variety or turned off in the COOL variety (depending on + * whether or not the client program compiles the MPS with NDEBUG + * defined). So we include "check.h" and use AVER() instead. See + * job003504. If you are developing your own plinth, you should + * consider whether to use your own preferred assertion mechanism + * instead. 
+ */ +#include "check.h" +#include "config.h" /* to get platform configurations */ + +#include +#include + + +static FILE *ioFile = NULL; + +#ifdef MPS_BUILD_MV +/* MSVC warning 4996 = stdio / C runtime 'unsafe' */ +/* Objects to: fopen. See job001934. */ +#pragma warning( disable : 4996 ) +#endif + +mps_res_t mps_io_create(mps_io_t *mps_io_r) +{ + FILE *f; + const char *filename; + + if(ioFile != NULL) /* See */ + return MPS_RES_LIMIT; /* Cannot currently open more than one log */ + + filename = getenv("MPS_TELEMETRY_FILENAME"); + if(filename == NULL) + filename = "mpsio.log"; + + f = fopen(filename, "wb"); + if(f == NULL) + return MPS_RES_IO; + + *mps_io_r = (mps_io_t)f; + ioFile = f; + return MPS_RES_OK; +} + + +void mps_io_destroy(mps_io_t mps_io) +{ + FILE *f = (FILE *)mps_io; + AVER(f == ioFile); + AVER(f != NULL); + + ioFile = NULL; + (void)fclose(f); +} + + +mps_res_t mps_io_write(mps_io_t mps_io, void *buf, size_t size) +{ + FILE *f = (FILE *)mps_io; + size_t n; + AVER(f == ioFile); + AVER(f != NULL); + + n = fwrite(buf, size, 1, f); + if(n != 1) + return MPS_RES_IO; + + return MPS_RES_OK; +} + + +mps_res_t mps_io_flush(mps_io_t mps_io) +{ + FILE *f = (FILE *)mps_io; + int e; + AVER(f == ioFile); + AVER(f != NULL); + + e = fflush(f); + if(e == EOF) + return MPS_RES_IO; + + return MPS_RES_OK; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpsiw3.c b/mps/code/mpsiw3.c new file mode 100644 index 00000000000..2f3a9d169d0 --- /dev/null +++ b/mps/code/mpsiw3.c @@ -0,0 +1,65 @@ +/* mpsiw3.c: WIN32 MEMORY POOL SYSTEM INTERFACE LAYER EXTRAS + * + * $Id$ + * + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mpm.h" + +#if !defined(MPS_OS_W3) +#error "mpsiw3.c is specific to MPS_OS_W3" +#endif + +#include "mps.h" +#include "mpswin.h" + +SRCID(mpsiw3, "$Id$"); + + +/* This is defined in protw3.c */ +extern LONG WINAPI ProtSEHfilter(LPEXCEPTION_POINTERS info); + +LONG mps_SEH_filter(LPEXCEPTION_POINTERS info, + void **hp_o, size_t *hs_o) +{ + UNUSED(hp_o); + UNUSED(hs_o); + return ProtSEHfilter(info); +} + +void mps_SEH_handler(void *p, size_t s) +{ + UNUSED(p); UNUSED(s); + NOTREACHED; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpslib.h b/mps/code/mpslib.h new file mode 100644 index 00000000000..be0fbde385a --- /dev/null +++ b/mps/code/mpslib.h @@ -0,0 +1,106 @@ +/* mpslib.h: RAVENBROOK MEMORY POOL SYSTEM LIBRARY INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS client application developers, MPS developers. + * .sources: + * + * .purpose: The purpose of this file is to declare the functions and types + * required for the MPS library interface. + */ + +#ifndef mpslib_h +#define mpslib_h + +#include <stddef.h> +#include "mps.h" /* mps_clock_t */ + +/* Return the token that will be returned by I/O functions when the end + of file is reached. Analogous to `EOF` from stdio.h. 
*/ +extern int mps_lib_get_EOF(void); +#define mps_lib_EOF (mps_lib_get_EOF()) + +/* An anonymous structure type used to represent files. Analogous to + `FILE *` from stdio.h. */ +typedef struct mps_lib_stream_s mps_lib_FILE; + +/* Return the standard output and standard error streams. Analogous to + `stdout` and `stderr` from stdio.h. */ +extern mps_lib_FILE *mps_lib_get_stderr(void); +extern mps_lib_FILE *mps_lib_get_stdout(void); +#define mps_lib_stderr (mps_lib_get_stderr()) +#define mps_lib_stdout (mps_lib_get_stdout()) + +/* Send a character or string to a stream. Analogous to `fputc` and `fputs` + from stdio.h. */ +extern int mps_lib_fputc(int, mps_lib_FILE *); +extern int mps_lib_fputs(const char *, mps_lib_FILE *); + +/* Assertion handler. When the MPS detects an illegal condition, it calls + `mps_lib_assert_fail` with the source code filename, line number, and + a string representing the condition. That function should log or report + the condition, and preferably allow for debugging, though in a production + environment it can return and the MPS will attempt to continue, though + this may cause failure of the process soon after. */ +extern void mps_lib_assert_fail(const char *, unsigned, const char *); + +/* The default ANSI plinth in mpsliban.c allows the assertion handler to be + replaced by passing a replacement to `mps_lib_assert_fail_install`, + which returns the previous handler. This is for convenience so that + a complete replacement plinth need not be supplied just to achieve the + same thing. The MPS itself does not use `mps_lib_assert_fail_install` + and so it need not be supplied by the plinth. */ +typedef void (*mps_lib_assert_fail_t)(const char *, unsigned, const char *); +extern mps_lib_assert_fail_t mps_lib_assert_fail_install(mps_lib_assert_fail_t); + + +/* Set, copy, or compare memory. Analogous to `memset`, `memcpy`, and + `memcmp` from string.h. 
*/ +extern void *(mps_lib_memset)(void *, int, size_t); +extern void *(mps_lib_memcpy)(void *, const void *, size_t); +extern int (mps_lib_memcmp)(const void *, const void *, size_t); + +/* Return a measure of time since process start. Equivalent to `clock` + from time.h. */ +extern mps_clock_t mps_clock(void); +extern mps_clock_t mps_clocks_per_sec(void); + + +/* Return a telemetry control word from somewhere. This controls which kinds + of events get output to the telemetry stream. Each bit in the word + switches on the corresponding EventKind defined in eventcom.h. */ +extern unsigned long mps_lib_telemetry_control(void); + + +#endif /* mpslib_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpsliban.c b/mps/code/mpsliban.c new file mode 100644 index 00000000000..a7927c80cd2 --- /dev/null +++ b/mps/code/mpsliban.c @@ -0,0 +1,234 @@ +/* mpsliban.c: RAVENBROOK MEMORY POOL SYSTEM LIBRARY INTERFACE (ANSI) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * .purpose: The purpose of this code is + * 1. to connect the MPS Library Interface to the ANSI C libraries, + * where they exist, and + * 2. to provide an example of how to implement the MPS Library + * Interface. + * + * .readership: For MPS client application developers and MPS developers. + * .sources: + * + * + * TRANSGRESSIONS (rule.impl.trans) + * + * .trans.file: The ANSI standard says (in section 7.9.1) that FILE is an + * object type, and hence the casts between FILE and mps_lib_FILE (an + * incomplete type) are not necessarily valid. We assume that this trick + * works, however, in all current environments. 
+ */ + +#include "mpslib.h" + +#include "mpstd.h" +#include "event.h" + +#include +#include +#include +#include +#include +#include + + +int mps_lib_get_EOF(void) +{ + return EOF; +} + +mps_lib_FILE *mps_lib_get_stderr(void) +{ + return (mps_lib_FILE *)stderr; /* see .trans.file */ +} + +mps_lib_FILE *mps_lib_get_stdout(void) +{ + return (mps_lib_FILE *)stdout; /* see .trans.file */ +} + +int mps_lib_fputc(int c, mps_lib_FILE *stream) +{ + return fputc(c, (FILE *)stream); /* see .trans.file */ +} + +int mps_lib_fputs(const char *s, mps_lib_FILE *stream) +{ + return fputs(s, (FILE *)stream); /* see .trans.file */ +} + + +static void mps_lib_assert_fail_default(const char *file, unsigned line, + const char *condition) +{ + /* Synchronize with stdout. */ + (void)fflush(stdout); + (void)fprintf(stderr, + "The MPS detected a problem!\n" + "%s:%u: MPS ASSERTION FAILED: %s\n" + "See the \"Assertions\" section in the reference manual:\n" + "https://www.ravenbrook.com/project/mps/master/manual/html/topic/error.html#assertions\n", + file, line, condition); + /* Ensure the message is output even if stderr is buffered. 
*/ + (void)fflush(stderr); + mps_telemetry_flush(); + ASSERT_ABORT(); /* see config.h */ +} + +static mps_lib_assert_fail_t mps_lib_assert_handler = mps_lib_assert_fail_default; + +void mps_lib_assert_fail(const char *file, + unsigned line, + const char *condition) +{ + mps_lib_assert_handler(file, line, condition); +} + +mps_lib_assert_fail_t mps_lib_assert_fail_install(mps_lib_assert_fail_t handler) +{ + mps_lib_assert_fail_t old_handler = mps_lib_assert_handler; + mps_lib_assert_handler = handler; + return old_handler; +} + + +void *(mps_lib_memset)(void *s, int c, size_t n) +{ + return memset(s, c, n); +} + +void *(mps_lib_memcpy)(void *s1, const void *s2, size_t n) +{ + return memcpy(s1, s2, n); +} + +int (mps_lib_memcmp)(const void *s1, const void *s2, size_t n) +{ + return memcmp(s1, s2, n); +} + + +/* If your platform has a low-resolution clock(), and there are + * higher-resolution clocks readily available, then using one of those + * will improve MPS scheduling decisions and the quality of telemetry + * output. For instance, with getrusage(): + * + * #include + * struct rusage s; + * int res = getrusage(RUSAGE_SELF, &s); + * if (res != 0) { + * ... + * } + * return ((mps_clock_t)s.ru_utime.tv_sec) * 1000000 + s.ru_utime.tv_usec; + */ + +mps_clock_t mps_clock(void) +{ + /* The clock values need to fit in mps_clock_t. If your platform + has a very wide clock type, trim or truncate it. */ + assert(sizeof(mps_clock_t) >= sizeof(clock_t)); + + return (mps_clock_t)clock(); +} + + +mps_clock_t mps_clocks_per_sec(void) +{ + /* must correspond to whatever mps_clock() does */ + return (mps_clock_t)CLOCKS_PER_SEC; +} + + +/* mps_lib_telemetry_control -- get and interpret MPS_TELEMETRY_CONTROL */ + +#ifdef MPS_BUILD_MV +/* MSVC warning 4996 = stdio / C runtime 'unsafe' */ +/* Objects to: getenv. See job001934. 
*/ +#pragma warning( disable : 4996 ) +#endif + +/* Simple case-insensitive string comparison */ +static int striequal(const char *s0, const char *s1) +{ + int c; + do { + c = *s0; + if (tolower(c) != tolower(*s1)) /* note: works for '\0' */ + return 0; + ++s0; + ++s1; + } while (c != '\0'); + return 1; +} + +unsigned long mps_lib_telemetry_control(void) +{ + char *s; + char **null = NULL; + unsigned long mask; + char buf[256]; + char *word; + const char *sep = " "; + + s = getenv("MPS_TELEMETRY_CONTROL"); + if (s == NULL) + return 0; + + /* If the value can be read as a number, use it. */ + mask = strtoul(s, null, 0); + if (mask != 0) + return mask; + + /* copy the envar to a buffer so we can mess with it. */ + strncpy(buf, s, sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; + + /* Split the value at spaces and try to match the words against the names + of event kinds, enabling them if there's a match. */ + for (word = strtok(buf, sep); word != NULL; word = strtok(NULL, sep)) { + if (striequal(word, "all")) { + mask = (unsigned long)-1; + return mask; + } +#define TELEMATCH(X, name, rowDoc) \ + if (striequal(word, #name)) \ + mask |= (1ul << EventKind##name); + EventKindENUM(TELEMATCH, X) +#undef TELEMATCH + } + + return mask; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpstd.h b/mps/code/mpstd.h new file mode 100644 index 00000000000..7df1c2a12f6 --- /dev/null +++ b/mps/code/mpstd.h @@ -0,0 +1,430 @@ +/* mpstd.h: RAVENBROOK MEMORY POOL SYSTEM TARGET DETECTION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2001 Global Graphics Software. + * + * Detect the target platform using predefined preprocessor symbols + * defined by the build environment. The symbols are derived from the + * documentation, or, in the case of GCC, from the compiler itself. + * References to the documentation appear above each detection line. + * + * For more details on how this file fits into the MPS build system, + * and an explanation of all the MPS_* defines, see design.config.pf + * "MPS Configuration" <../design/config.txt> + * + * .macos.ppc.align: MacOS / PowerPC requires 8 bytes alignment (in + * general). See "Mac OS Runtime Architecture", table 4-2. + * + * mpstd.h fails if it cannot detect the platform (even if CONFIG_PF_* + * is specified). This is intentional. 
mpstd.h does *not* allow + * CONFIG_PF_* to override the platform as detected from preprocessor + * symbols. This is intentional. [This needs justifying. RB 2013-05-11] + */ + +#ifndef mpstd_h +#define mpstd_h + + +/* Visual C++ 2.0, Books Online, C/C++ Book, Preprocessor Reference, + * Chapter 1: The Preprocessor, Macros, Predefined Macros. + * Alignment of 4 would work, but the MS library uses 8 bytes for + * doubles and __int64, so we choose that. The actual granularity of + * VC malloc is 16! + * + * PellesC /Ze (Microsoft compatibility mode) defines _MSC_VER but + * isn't compatible enough for MPS purposes. + */ + +#if defined(_MSC_VER) && defined(_WIN32) && defined(_M_IX86) && !defined(__POCC__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_W3I3MV) +#error "specified CONFIG_PF_... inconsistent with detected w3i3mv" +#endif +#define MPS_PF_W3I3MV +#define MPS_PF_STRING "w3i3mv" +#define MPS_OS_W3 +#define MPS_ARCH_I3 +#define MPS_BUILD_MV +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 32 +#define MPS_WORD_SHIFT 5 +#define MPS_PF_ALIGN 8 + + +/* "Predefined Macros" from "Visual Studio 2010" on MSDN + * . + * Note that Win32 includes 64-bit Windows! + * We use the same alignment as MS malloc: 16, which is used for XMM + * operations. + * See MSDN -> x64 Software Conventions -> Overview of x64 Calling Conventions + * + */ + +#elif defined(_MSC_VER) && defined(_WIN32) && defined(_WIN64) && defined(_M_X64) && !defined(__POCC__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_W3I6MV) +#error "specified CONFIG_PF_... 
inconsistent with detected w3i6mv" +#endif +#define MPS_PF_W3I6MV +#define MPS_PF_STRING "w3i6mv" +#define MPS_OS_W3 +#define MPS_ARCH_I6 +#define MPS_BUILD_MV +#define MPS_T_WORD unsigned __int64 +#define MPS_T_ULONGEST unsigned __int64 +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 16 + + +/* PellesC version 7.00.25 with /Ze option (Microsoft compatibility mode) + * Help node "Predefined preprocessor symbols (POCC)" + */ + +#elif defined(__POCC__) && defined(_WIN32) && defined(_M_IX86) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_W3I3PC) +#error "specified CONFIG_PF_... inconsistent with detected w3i3pc" +#endif +#define MPS_PF_W3I3PC +#define MPS_PF_STRING "w3i3pc" +#define MPS_OS_W3 +#define MPS_ARCH_I3 +#define MPS_BUILD_PC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 32 +#define MPS_WORD_SHIFT 5 +#define MPS_PF_ALIGN 8 + + +/* PellesC version 7.00.25 with /Ze option (Microsoft compatibility mode) + * and /Tarm64-coff (Create a COFF object file for a X64 processor). + * Help node "Predefined preprocessor symbols (POCC)" + */ + +#elif defined(__POCC__) && defined(_WIN32) && defined(_WIN64) && defined(_M_X64) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_W3I6PC) +#error "specified CONFIG_PF_... inconsistent with detected w3i6pc" +#endif +#define MPS_PF_W3I6PC +#define MPS_PF_STRING "w3i6pc" +#define MPS_OS_W3 +#define MPS_ARCH_I6 +#define MPS_BUILD_PC +#define MPS_T_WORD unsigned __int64 +#define MPS_T_ULONGEST unsigned __int64 +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 16 + + +/* GCC 4.0.1 (As supplied by Apple on Mac OS X 10.4.8 on an Intel Mac), + * gcc -E -dM + * And above for xcppgc. + * Note that Clang also defines __GNUC__ since it's generally GCC compatible, + * but that doesn't fit our system so we exclude Clang here. 
+ */ + +#elif defined(__APPLE__) && defined(__i386__) && defined(__MACH__) \ + && defined(__GNUC__) && !defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_XCI3GC) +#error "specified CONFIG_PF_... inconsistent with detected xci3gc" +#endif +#define MPS_PF_XCI3GC +#define MPS_PF_STRING "xci3gc" +#define MPS_OS_XC +#define MPS_ARCH_I3 +#define MPS_BUILD_GC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 32 +#define MPS_WORD_SHIFT 5 +#define MPS_PF_ALIGN 4 /* I'm just guessing. */ + + +/* gcc-mp-4.7 (MacPorts gcc47 4.7.4_5) 4.7.4 + * gcc -E -dM + * Note that Clang also defines __GNUC__ since it's generally GCC compatible, + * but that doesn't fit our system so we exclude Clang here. + */ + +#elif defined(__APPLE__) && defined(__x86_64__) && defined(__MACH__) \ + && defined(__GNUC__) && !defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_XCI6GC) +#error "specified CONFIG_PF_... inconsistent with detected xci6gc" +#endif +#define MPS_PF_XCI6GC +#define MPS_PF_STRING "xci6gc" +#define MPS_OS_XC +#define MPS_ARCH_I6 +#define MPS_BUILD_GC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +/* Apple clang version 12.0, clang -E -dM */ + +#elif defined(__APPLE__) && defined(__arm64__) && defined(__MACH__) \ + && defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_XCA6LL) +#error "specified CONFIG_PF_... 
inconsistent with detected xca6ll" +#endif +#define MPS_PF_XCA6LL +#define MPS_PF_STRING "xca6ll" +#define MPS_OS_XC +#define MPS_ARCH_A6 +#define MPS_BUILD_LL +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +/* Apple clang version 3.1, clang -E -dM */ + +#elif defined(__APPLE__) && defined(__i386__) && defined(__MACH__) \ + && defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_XCI3LL) +#error "specified CONFIG_PF_... inconsistent with detected xci3ll" +#endif +#define MPS_PF_XCI3LL +#define MPS_PF_STRING "xci3ll" +#define MPS_OS_XC +#define MPS_ARCH_I3 +#define MPS_BUILD_LL +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 32 +#define MPS_WORD_SHIFT 5 +#define MPS_PF_ALIGN 4 /* I'm just guessing. */ + + +/* Apple clang version 3.1, clang -E -dM */ + +#elif defined(__APPLE__) && defined(__x86_64__) && defined(__MACH__) \ + && defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_XCI6LL) +#error "specified CONFIG_PF_... inconsistent with detected xci6ll" +#endif +#define MPS_PF_XCI6LL +#define MPS_PF_STRING "xci6ll" +#define MPS_OS_XC +#define MPS_ARCH_I6 +#define MPS_BUILD_LL +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +/* GCC 7.5, gcc -E -dM */ + +#elif defined(__linux__) && defined(__aarch64__) && defined(__GNUC__) \ + && !defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_LIA6GC) +#error "specified CONFIG_PF_... 
inconsistent with detected lia6gc" +#endif +#define MPS_PF_LIA6GC +#define MPS_PF_STRING "lia6gc" +#define MPS_OS_LI +#define MPS_ARCH_A6 +#define MPS_BUILD_GC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +/* Clang/LLVM 10.0, clang -E -dM */ + +#elif defined(__linux__) && defined(__aarch64__) && defined(__GNUC__) \ + && defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_LIA6LL) +#error "specified CONFIG_PF_... inconsistent with detected lia6ll" +#endif +#define MPS_PF_LIA6LL +#define MPS_PF_STRING "lia6ll" +#define MPS_OS_LI +#define MPS_ARCH_A6 +#define MPS_BUILD_LL +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +/* GCC 2.6.3, gcc -E -dM + * The actual granularity of GNU malloc is 8, but field alignments are + * all 4. + * Note that Clang also defines __GNUC__ since it's generally GCC compatible, + * but that doesn't fit our system so we exclude Clang here. + */ + +#elif defined(__linux__) && defined(__i386__) && defined(__GNUC__) \ + && !defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_LII3GC) +#error "specified CONFIG_PF_... inconsistent with detected lii3gc" +#endif +#define MPS_PF_LII3GC +#define MPS_PF_STRING "lii3gc" +#define MPS_OS_LI +#define MPS_ARCH_I3 +#define MPS_BUILD_GC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 32 +#define MPS_WORD_SHIFT 5 +#define MPS_PF_ALIGN 4 + + +/* GCC 4.6.3, gcc -E -dM */ + +#elif defined(__linux__) && defined(__x86_64) && defined(__GNUC__) \ + && !defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_LII6GC) +#error "specified CONFIG_PF_... 
inconsistent with detected lii6gc" +#endif +#define MPS_PF_LII6GC +#define MPS_PF_STRING "lii6gc" +#define MPS_OS_LI +#define MPS_ARCH_I6 +#define MPS_BUILD_GC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +/* Clang/LLVM 3.0, clang -E -dM */ + +#elif defined(__linux__) && defined(__x86_64) && defined(__GNUC__) \ + && defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_LII6LL) +#error "specified CONFIG_PF_... inconsistent with detected lii6ll" +#endif +#define MPS_PF_LII6LL +#define MPS_PF_STRING "lii6ll" +#define MPS_OS_LI +#define MPS_ARCH_I6 +#define MPS_BUILD_LL +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +/* GCC 2.95.3, gcc -E -dM */ + +#elif defined(__FreeBSD__) && defined (__i386__) && defined (__GNUC__) \ + && !defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_FRI3GC) +#error "specified CONFIG_PF_... inconsistent with detected fri3gc" +#endif +#define MPS_PF_FRI3GC +#define MPS_PF_STRING "fri3gc" +#define MPS_OS_FR +#define MPS_ARCH_I3 +#define MPS_BUILD_GC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 32 +#define MPS_WORD_SHIFT 5 +#define MPS_PF_ALIGN 4 + + +#elif defined(__FreeBSD__) && defined (__i386__) && defined (__GNUC__) \ + && defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_FRI3LL) +#error "specified CONFIG_PF_... 
inconsistent with detected fri3ll" +#endif +#define MPS_PF_FRI3LL +#define MPS_PF_STRING "fri3ll" +#define MPS_OS_FR +#define MPS_ARCH_I3 +#define MPS_BUILD_LL +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 32 +#define MPS_WORD_SHIFT 5 +#define MPS_PF_ALIGN 4 + + +#elif defined(__FreeBSD__) && defined (__x86_64__) && defined (__GNUC__) \ + && !defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_FRI6GC) +#error "specified CONFIG_PF_... inconsistent with detected fri6gc" +#endif +#define MPS_PF_FRI6GC +#define MPS_PF_STRING "fri6gc" +#define MPS_OS_FR +#define MPS_ARCH_I6 +#define MPS_BUILD_GC +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +#elif defined(__FreeBSD__) && defined (__x86_64__) && defined (__GNUC__) \ + && defined(__clang__) +#if defined(CONFIG_PF_STRING) && ! defined(CONFIG_PF_FRI6LL) +#error "specified CONFIG_PF_... inconsistent with detected fri6ll" +#endif +#define MPS_PF_FRI6LL +#define MPS_PF_STRING "fri6ll" +#define MPS_OS_FR +#define MPS_ARCH_I6 +#define MPS_BUILD_LL +#define MPS_T_WORD unsigned long +#define MPS_T_ULONGEST unsigned long +#define MPS_WORD_WIDTH 64 +#define MPS_WORD_SHIFT 6 +#define MPS_PF_ALIGN 8 + + +#else +#error "The MPS Kit does not have a configuration for this platform out of the box; see manual/build.txt" +#endif + + +#endif /* mpstd_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mpswin.h b/mps/code/mpswin.h new file mode 100644 index 00000000000..39e3231c62e --- /dev/null +++ b/mps/code/mpswin.h @@ -0,0 +1,56 @@ +/* mpswin.h: RAVENBROOK MEMORY POOL SYSTEM WINDOWS.H INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: For MPS client application developers, MPS developers. + * + * .purpose: Shared file for the incantations needed to include windows.h. + */ + +#ifndef mpswin_h +#define mpswin_h + +#ifndef WIN32_LEAN_AND_MEAN +/* Speed up the build process by excluding parts of windows.h that we + * don't use. See + * + */ +#define WIN32_LEAN_AND_MEAN +#include +#undef WIN32_LEAN_AND_MEAN +#else +#include +#endif + +#endif /* mpswin_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/mv.nmk b/mps/code/mv.nmk new file mode 100644 index 00000000000..08597ddb8e5 --- /dev/null +++ b/mps/code/mv.nmk @@ -0,0 +1,69 @@ +# -*- makefile -*- +# +# mv.nmk: NMAKE FRAGMENT FOR MICROSOFT VISUAL C/C++ +# +# $Id$ +# Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. +# +# This file is included by platform nmake files that use the Microsoft +# Visual C/C+ compiler. It defines the compiler-specific variables +# that the common nmake file fragment () requires. 
+ +CC = cl +LIBMAN = lib +LINKER = link + +# /D_CRT_SECURE_NO_WARNINGS suppresses "This function or variable may +# be unsafe" warnings for standard C library functions fopen, getenv, +# snprintf, sscanf, strcpy, and so on. +# +# /Gs appears to be necessary to suppress stack checks. Stack checks +# (if not suppressed) generate a dependency on the C library, __chkesp, +# which causes the linker step to fail when building the DLL, mpsdy.dll. +# +# /W4 /WX might be enabled by clients . +CFLAGSCOMMONPRE = $(CFLAGSCOMMONPRE) /D_CRT_SECURE_NO_WARNINGS /W4 /WX /Gs /Fd$(PFM)\$(VARIETY) +LIBFLAGSCOMMON = $(LIBFLAGSCOMMON) /nologo + +# /MD means compile for multi-threaded environment with separate C library DLL. +# /MT means compile for multi-threaded environment. +# /ML means compile for single-threaded environment. +# A 'd' at the end means compile for debugging. +CRTFLAGSHOT = $(CRTFLAGSHOT) /MT +CRTFLAGSCOOL = $(CRTFLAGSCOOL) /MTd + +CFLAGSCOOL = $(CFLAGSCOOL) /Od + +LINKFLAGSCOMMON = $(LINKFLAGSCOMMON) /PDB:$*.pdb +LINKFLAGSHOT = $(LINKFLAGSHOT) libcmt.lib +LINKFLAGSCOOL = $(LINKFLAGSCOOL) libcmtd.lib + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/mv2test.c b/mps/code/mv2test.c new file mode 100644 index 00000000000..ee7d29841ce --- /dev/null +++ b/mps/code/mv2test.c @@ -0,0 +1,250 @@ +/* mv2test.c: POOLMVT STRESS TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include +#include +#include +#include + +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpscmvt.h" +#include "mpslib.h" +#include "mpstd.h" +#include "testlib.h" + +/* expdev() -- exponentially distributed random deviates + * + * From + * + * Returns an exponentially distributed, positive, random deviate of + * unit mean, using rnd_double() as the source of uniform deviates. + */ + +static double expdev(void) +{ + double dum; + do + dum=rnd_double(); + while (dum == 0.0); + return (float)-log(dum); +} + + +static size_t size_min; +static size_t size_mean; +static size_t size_max; +static int verbose = 0; +static mps_pool_t pool; + +static size_t randomSize(unsigned long i) +{ + /* Distribution centered on mean. 
Verify that allocations + below min and above max are handled correctly */ + size_t s = (size_max - size_mean)/4; + size_t m = size_mean; + double r; + double x; + + testlib_unused(i); + + /* per SGR */ + do { + r = expdev(); + x = (double)s * sqrt(2 * r); + x += (double)m; + } while (x <= 1.0); + + return (size_t)x; + +} + + +#define testArenaSIZE ((size_t)64<<20) +#define TEST_SET_SIZE 1234 +#define TEST_LOOPS 27 + +static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size, mps_align_t align) +{ + mps_res_t res; + + size = alignUp(size, align); + + do { + MPS_RESERVE_BLOCK(res, *p, ap, size); + if(res != MPS_RES_OK) + return res; + } while(!mps_commit(ap, *p, size)); + + return MPS_RES_OK; +} + + +static mps_res_t stress(mps_arena_t arena, mps_align_t align, + size_t (*size)(unsigned long i), + mps_pool_class_t pool_class, mps_arg_s args[]) +{ + mps_res_t res; + mps_ap_t ap; + unsigned long i, k; + int *ps[TEST_SET_SIZE]; + size_t ss[TEST_SET_SIZE]; + + res = mps_pool_create_k(&pool, arena, pool_class, args); + if (res != MPS_RES_OK) + return res; + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + + /* allocate a load of objects */ + for(i=0; i 0) { + mps_free(pool, (mps_addr_t)ps[i], ss[i]); + ss[i] = 0; + } + } + /* allocate some new objects */ + for(i=x; i. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/nailboard.c b/mps/code/nailboard.c new file mode 100644 index 00000000000..35fc5176c7b --- /dev/null +++ b/mps/code/nailboard.c @@ -0,0 +1,487 @@ +/* nailboard.c: NAILBOARD IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * .sources: . + */ + +#include "bt.h" +#include "check.h" +#include "mpm.h" +#include "nailboard.h" + +SRCID(nailboard, "$Id$"); + + +/* Log2 of scale factor between levels. . */ +#define LEVEL_SHIFT MPS_WORD_SHIFT + + +/* nailboardLevels -- return the number of levels in a nailboard with + * the given number of nails. + * + * + */ + +static Count nailboardLevels(Count nails) +{ + return (SizeFloorLog2((Size)nails) + LEVEL_SHIFT) / LEVEL_SHIFT; +} + + +/* nailboardNails -- return the total number of nails in the board */ + +static Count nailboardNails(Nailboard board) +{ + return RangeSize(&board->range) >> board->alignShift; +} + + +/* nailboardLevelBits -- return the number of bits in the bit table + * for the given level. 
+ */ + +static Count nailboardLevelBits(Count nails, Index level) +{ + Shift shift = (Shift)(level * LEVEL_SHIFT); + return (nails + ((Count)1 << shift) - 1) >> shift; +} + +Bool NailboardCheck(Nailboard board) +{ + Index i; + Count nails; + CHECKS(Nailboard, board); + CHECKL(RangeCheck(&board->range)); + CHECKL(0 < board->levels); + nails = nailboardNails(board); + CHECKL(board->levels == nailboardLevels(nails)); + CHECKL(nails == nailboardLevelBits(nails, 0)); + CHECKL(nailboardLevelBits(nails, board->levels - 1) != 0); + CHECKL(nailboardLevelBits(nails, board->levels) == 1); + CHECKL(BoolCheck(board->newNails)); + for (i = 0; i < board->levels; ++i) { + CHECKL(board->level[i] != NULL); + } + return TRUE; +} + + +/* nailboardStructSize -- return the size of the nailboard structure, + * plus the array of pointers to levels. + */ + +static Size nailboardStructSize(Count levels) +{ + return offsetof(NailboardStruct, level) + sizeof(BT *) * levels; +} + + +/* nailboardSize -- return the total size of the nailboard + * + * This is the size of the nailboard structure plus the combined sizes + * of the bit tables. + */ + +static Size nailboardSize(Count nails, Count levels) +{ + Index i; + Size size; + size = nailboardStructSize(levels); + for (i = 0; i < levels; ++i) { + size += BTSize(nailboardLevelBits(nails, i)); + } + return size; +} + + +/* NailboardCreate -- allocate a nailboard + * + * Allocate a nailboard in the control pool for arena, to cover the + * range of addresses from base to limit (which must be non-empty). If + * successful, set *boardReturn to point to the nailboard and return + * ResOK. Otherwise, return a result code to indicate failure. + * + * alignment specifies the granularity of the nails: that is, the + * number of bytes covered by each nail. 
+ */ + +Res NailboardCreate(Nailboard *boardReturn, Arena arena, Align alignment, + Addr base, Addr limit) +{ + void *p; + Nailboard board; + Shift alignShift; + Count nails, levels; + Index i; + Res res; + + AVER(boardReturn != NULL); + AVERT(Arena, arena); + AVERT(Align, alignment); + AVER(base < limit); + AVER(AddrIsAligned(base, alignment)); + AVER(AddrIsAligned(limit, alignment)); + + alignShift = SizeLog2((Size)alignment); + nails = AddrOffset(base, limit) >> alignShift; + levels = nailboardLevels(nails); + res = ControlAlloc(&p, arena, nailboardSize(nails, levels)); + if (res != ResOK) + return res; + + board = p; + RangeInit(&board->range, base, limit); + board->levels = levels; + board->alignShift = alignShift; + board->newNails = FALSE; + + p = PointerAdd(p, nailboardStructSize(levels)); + for (i = 0; i < levels; ++i) { + Count levelBits = nailboardLevelBits(nails, i); + AVER(levelBits > 0); + board->level[i] = p; + BTResRange(board->level[i], 0, levelBits); + p = PointerAdd(p, BTSize(levelBits)); + } + + board->sig = NailboardSig; + AVERT(Nailboard, board); + *boardReturn = board; + return ResOK; +} + + +/* NailboardDestroy -- destroy a nailboard */ + +void NailboardDestroy(Nailboard board, Arena arena) +{ + Count nails; + Size size; + + AVERT(Nailboard, board); + AVERT(Arena, arena); + + nails = nailboardNails(board); + size = nailboardSize(nails, board->levels); + + board->sig = SigInvalid; + ControlFree(arena, board, size); +} + + +/* NailboardClearNewNails -- clear the "new nails" flag */ + +void (NailboardClearNewNails)(Nailboard board) +{ + AVERT(Nailboard, board); + NailboardClearNewNails(board); +} + + +/* NailboardNewNails -- return the "new nails" flag + * + * Return TRUE if any new nails have been set in the nailboard since + * the last call to NailboardClearNewNails (or since the nailboard was + * created, if there have never been any such calls), FALSE otherwise. 
+ */ + +Bool (NailboardNewNails)(Nailboard board) +{ + AVERT(Nailboard, board); + return NailboardNewNails(board); +} + + +/* nailboardIndex -- return the index of the nail corresponding to + * addr in the given level. + */ + +static Index nailboardIndex(Nailboard board, Index level, Addr addr) +{ + Index i = AddrOffset(RangeBase(&board->range), addr) + >> (board->alignShift + level * LEVEL_SHIFT); + AVER_CRITICAL(i < nailboardLevelBits(nailboardNails(board), level)); + return i; +} + + +/* nailboardAddr -- return the address corresponding to the index in + * the given level. + */ + +static Addr nailboardAddr(Nailboard board, Index level, Index index) +{ + return AddrAdd(RangeBase(&board->range), + index << (board->alignShift + level * LEVEL_SHIFT)); +} + + +/* nailboardIndexRange -- update *ibaseReturn and *ilimitReturn to be + * the interval of indexes of nails in the given level, corresponding + * to the interval of addresses base and limit. See + * . + */ + +static void nailboardIndexRange(Index *ibaseReturn, Index *ilimitReturn, + Nailboard board, Index level, + Addr base, Addr limit) +{ + *ibaseReturn = nailboardIndex(board, level, base); + *ilimitReturn = nailboardIndex(board, level, AddrSub(limit, 1)) + 1; +} + + +/* NailboardGet -- return nail corresponding to address + * + * Return the nail in the nailboard corresponding to the address addr. + * It is an error if addr does not lie in the range of addresses + * covered by the nailboard. + */ + +Bool NailboardGet(Nailboard board, Addr addr) +{ + AVERT(Nailboard, board); + AVER(RangeContains(&board->range, addr)); + return BTGet(board->level[0], nailboardIndex(board, 0, addr)); +} + + +/* NailboardSet -- set nail corresponding to address + * + * Set the nail in the nailboard corresponding to the address addr. + * Return the old nail at that position. It is an error if addr does + * not lie in the range of addresses covered by the nailboard. 
+ * + * This function is on the critical path because it is called for + * every fix of an ambiguous reference to an address in an AMC pool. + */ + +Bool NailboardSet(Nailboard board, Addr addr) +{ + Index i, j; + + AVERT_CRITICAL(Nailboard, board); + AVER_CRITICAL(RangeContains(&board->range, addr)); + + j = nailboardIndex(board, 0, addr); + if (BTGet(board->level[0], j)) + return TRUE; + board->newNails = TRUE; + BTSet(board->level[0], j); + + for (i = 1; i < board->levels; ++i) { + j = nailboardIndex(board, i, addr); + if (BTGet(board->level[i], j)) + break; + BTSet(board->level[i], j); + } + return FALSE; +} + + +/* NailboardSetRange -- set all nails in range + * + * Set all nails in the nailboard corresponding to the range between + * base and limit. It is an error if any part of the range is not + * covered by the nailboard, or if any nail in the range is set. + */ + +void NailboardSetRange(Nailboard board, Addr base, Addr limit) +{ + Index i, ibase, ilimit; + nailboardIndexRange(&ibase, &ilimit, board, 0, base, limit); + AVER(BTIsResRange(board->level[0], ibase, ilimit)); + BTSetRange(board->level[0], ibase, ilimit); + for (i = 1; i < board->levels; ++i) { + nailboardIndexRange(&ibase, &ilimit, board, i, base, limit); + BTSetRange(board->level[i], ibase, ilimit); + } +} + + +/* NailboardIsSetRange -- test if all nails are set in a range + * + * Return TRUE if all nails are set in the range between base and + * limit, or FALSE if any nail is unset. It is an error if any part of + * the range is not covered by the nailboard. + * + * This function is not expected to be efficient because it is only + * used in an AVER in AMCWhiten to check that the unused part of the + * buffer for a nailboarded segment has in fact been nailed. 
+ */ + +Bool NailboardIsSetRange(Nailboard board, Addr base, Addr limit) +{ + Index ibase, ilimit; + AVERT(Nailboard, board); + nailboardIndexRange(&ibase, &ilimit, board, 0, base, limit); + return BTIsSetRange(board->level[0], ibase, ilimit); +} + + +/* NailboardIsResRange -- test if all nails are reset in a range + * + * Return TRUE if no nails are set in the range between base and + * limit, or FALSE if any nail is set. It is an error if any part of + * the range is not covered by the nailboard. + * + * This function is on the critical path as it is called for every + * object in every nailed segment. It must take time that is no more + * than logarithmic in the size of the range. + * + * . + */ + +Bool NailboardIsResRange(Nailboard board, Addr base, Addr limit) +{ + Index i, ibase, ilimit; + Index j, jbase, jlimit; + Addr leftLimit, rightBase; + + AVERT_CRITICAL(Nailboard, board); + + /* Descend levels until ibase and ilimit are two or more bits apart: + * that is, until there is an "inner" part to the range. */ + i = board->levels; + do { + -- i; + nailboardIndexRange(&ibase, &ilimit, board, i, base, limit); + if (BTIsResRange(board->level[i], ibase, ilimit)) + /* The entire range was clear. This is expected to be the common + * case. */ + return TRUE; + if (i == 0) + /* At level 0 there is only one nail per bit so the set bit is known + * to be within the range. */ + return FALSE; + } while (ibase + 1 >= ilimit - 1); + + /* At this point we know there is an "inner" part. Are there any + * bits set in it? */ + if (!BTIsResRange(board->level[i], ibase + 1, ilimit - 1)) + return FALSE; + + /* At this point we know that in level i, there is is a bit set at + * ibase or at ilimit - 1 (or both), and everything between them is + * reset. 
*/ + AVER_CRITICAL(BTGet(board->level[i], ibase) + || BTGet(board->level[i], ilimit - 1)); + + /* Left splinter */ + j = i; + jbase = ibase; + for (;;) { + leftLimit = nailboardAddr(board, j, jbase + 1); + AVER_CRITICAL(base < leftLimit); + AVER_CRITICAL(leftLimit < limit); + -- j; + nailboardIndexRange(&jbase, &jlimit, board, j, base, leftLimit); + if (jbase + 1 < jlimit && !BTIsResRange(board->level[j], jbase + 1, jlimit)) + return FALSE; /* */ + if (!BTGet(board->level[j], jbase)) + break; + if (j == 0) + return FALSE; + } + + /* Right splinter */ + j = i; + jlimit = ilimit; + for (;;) { + rightBase = nailboardAddr(board, j, jlimit - 1); + AVER_CRITICAL(base < rightBase); + AVER_CRITICAL(rightBase < limit); + -- j; + nailboardIndexRange(&jbase, &jlimit, board, j, rightBase, limit); + if (jbase < jlimit - 1 && !BTIsResRange(board->level[j], jbase, jlimit - 1)) + return FALSE; /* */ + if (!BTGet(board->level[j], jlimit - 1)) + break; + if (j == 0) + return FALSE; + } + + return TRUE; +} + + +Res NailboardDescribe(Nailboard board, mps_lib_FILE *stream, Count depth) +{ + Index i, j; + Res res; + + if (!TESTT(Nailboard, board)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "Nailboard $P {\n", (WriteFP)board, + " levels: $U\n", (WriteFU)board->levels, + " newNails: $S\n", WriteFYesNo(board->newNails), + " alignShift: $U\n", (WriteFU)board->alignShift, + NULL); + if (res != ResOK) + return res; + + res = RangeDescribe(&board->range, stream, depth + 2); + if (res != ResOK) + return res; + + for(i = 0; i < board->levels; ++i) { + Count levelNails = nailboardLevelBits(nailboardNails(board), i); + Count resetNails = BTCountResRange(board->level[i], 0, levelNails); + res = WriteF(stream, depth + 2, "Level $U ($U bits, $U set): ", + (WriteFU)i, (WriteFU)levelNails, + (WriteFU)(levelNails - resetNails), + NULL); + if (res != ResOK) + return res; + for (j = 0; j < levelNails; ++j) { + char c = BTGet(board->level[i], j) ? 
'*' : '.'; + res = WriteF(stream, 0, "$C", (WriteFC)c, NULL); + if (res != ResOK) + return res; + } + res = WriteF(stream, 0, "\n", NULL); + if (res != ResOK) + return res; + } + res = WriteF(stream, depth, "} Nailboard $P\n", (WriteFP)board, NULL); + if (res != ResOK) + return res; + + return ResOK; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/nailboard.h b/mps/code/nailboard.h new file mode 100644 index 00000000000..6f3b9587171 --- /dev/null +++ b/mps/code/nailboard.h @@ -0,0 +1,80 @@ +/* nailboard.h: NAILBOARD INTERFACE + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. 
See end of file for license. + * + * .source: . + */ + +#ifndef nailboard_h +#define nailboard_h + +#include "mpmtypes.h" +#include "range.h" + +typedef struct NailboardStruct *Nailboard; + +/* NOTE: we could reduce the size of this structure using bitfields. + * levels can be at most MPS_WORD_WIDTH / LEVEL_SHIFT + 1, which is 11 + * on 64-bit, so it would fit in 4 bits. (Or it could be recalculated + * from range each time it's needed.) alignShift is at most + * MPS_WORD_SHIFT so would fit in 3 bits. (Or it could be supplied in + * each function call by the owner.) newNails would fit in 1 bit. + */ +typedef struct NailboardStruct { + Sig sig; /* design.mps.sig.field */ + RangeStruct range; /* range of addresses covered by nailboard */ + Count levels; /* number of levels */ + Shift alignShift; /* shift due to address alignment */ + Bool newNails; /* set to TRUE if a new nail is set */ + BT level[1]; /* bit tables for each level */ +} NailboardStruct; + +#define NailboardSig ((Sig)0x5194A17B) /* SIGnature NAILBoard */ + +#define NailboardClearNewNails(board) ((board)->newNails = FALSE) +#define NailboardNewNails(board) RVALUE((board)->newNails) + +extern Bool NailboardCheck(Nailboard board); +extern Res NailboardCreate(Nailboard *boardReturn, Arena arena, Align alignment, Addr base, Addr limit); +extern void NailboardDestroy(Nailboard board, Arena arena); +extern void (NailboardClearNewNails)(Nailboard board); +extern Bool (NailboardNewNails)(Nailboard board); +extern Bool NailboardGet(Nailboard board, Addr addr); +extern Bool NailboardSet(Nailboard board, Addr addr); +extern void NailboardSetRange(Nailboard board, Addr base, Addr limit); +extern Bool NailboardIsSetRange(Nailboard board, Addr base, Addr limit); +extern Bool NailboardIsResRange(Nailboard board, Addr base, Addr limit); +extern Res NailboardDescribe(Nailboard board, mps_lib_FILE *stream, Count depth); + +#endif /* nailboard.h */ + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/nailboardtest.c b/mps/code/nailboardtest.c new file mode 100644 index 00000000000..0a82a425374 --- /dev/null +++ b/mps/code/nailboardtest.c @@ -0,0 +1,102 @@ +/* nailboardtest.c: NAILBOARD TEST + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. 
+ * + */ + +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "testlib.h" +#include "bt.h" +#include "nailboard.h" + +#include /* printf */ + + +static void test(mps_arena_t arena) +{ + BT bt; + Nailboard board; + Align align; + Count nails; + Addr base, limit; + Index i, j, k; + + align = (Align)1 << (rnd() % 10); + nails = (Count)1 << (rnd() % 16); + nails += rnd() % nails; + base = AddrAlignUp(0, align); + limit = AddrAdd(base, nails * align); + + die(BTCreate(&bt, arena, nails), "BTCreate"); + BTResRange(bt, 0, nails); + die(NailboardCreate(&board, arena, align, base, limit), "NailboardCreate"); + + for (i = 0; i <= nails / 8; ++i) { + Bool old; + j = rnd() % nails; + old = BTGet(bt, j); + BTSet(bt, j); + cdie(NailboardSet(board, AddrAdd(base, j * align)) == old, "NailboardSet"); + for (k = 0; k < nails / 8; ++k) { + Index b, l; + b = rnd() % nails; + l = b + rnd() % (nails - b) + 1; + cdie(BTIsResRange(bt, b, l) + == NailboardIsResRange(board, AddrAdd(base, b * align), + AddrAdd(base, l * align)), + "NailboardIsResRange"); + } + } + + die(NailboardDescribe(board, mps_lib_get_stdout(), 0), "NailboardDescribe"); +} + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + + testlib_init(argc, argv); + + die(mps_arena_create(&arena, mps_arena_class_vm(), 1024 * 1024), + "mps_arena_create"); + + test(arena); + + mps_arena_destroy(arena); + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + diff --git a/mps/code/pc.nmk b/mps/code/pc.nmk new file mode 100644 index 00000000000..bc01bedf379 --- /dev/null +++ b/mps/code/pc.nmk @@ -0,0 +1,48 @@ +# -*- makefile -*- +# +# pc.nmk: NMAKE FRAGMENT FOR PELLES C +# +# $Id$ +# Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. +# +# This file is included by platform nmake files that use the Pelles C +# compiler. It defines the compiler-specific variables that the common +# nmake file fragment () requires. + +CC = pocc +LIBMAN = polib +LINKER = polink + +CFLAGSCOMMONPRE = $(CFLAGSCOMMONPRE) /Ze /W2 +CRTFLAGSHOT = /MT +CRTFLAGSCOOL = /MT + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2014-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/policy.c b/mps/code/policy.c new file mode 100644 index 00000000000..54b9867e620 --- /dev/null +++ b/mps/code/policy.c @@ -0,0 +1,442 @@ +/* policy.c: POLICY DECISIONS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This module collects the decision-making code for the MPS, so that + * policy can be maintained and adjusted. + * + * .sources: . + */ + +#include "locus.h" +#include "mpm.h" + +SRCID(policy, "$Id$"); + + +/* PolicyAlloc -- allocation policy + * + * This is the code responsible for making decisions about where to allocate + * memory. + * + * pref describes the address space preferences for the allocation. + * size is the amount of memory requested to be allocated, in bytes. + * pool is the pool that is requesting the memory. 
 *
 * If successful, update *tractReturn to point to the initial tract of
 * the allocated memory and return ResOK. Otherwise return a result
 * code describing the problem.
 */

Res PolicyAlloc(Tract *tractReturn, Arena arena, LocusPref pref,
                Size size, Pool pool)
{
  Res res;
  Tract tract;
  ZoneSet zones, moreZones, evenMoreZones;

  AVER(tractReturn != NULL);
  AVERT(Arena, arena);
  AVERT(LocusPref, pref);
  AVER(size > (Size)0);
  AVER(SizeIsArenaGrains(size, arena));
  AVERT(Pool, pool);
  AVER(arena == PoolArena(pool));

  /* Don't attempt to allocate if doing so would definitely exceed the
   * commit limit.  The second disjunct detects wrap-around of the sum
   * (assumes Size is an unsigned type -- TODO confirm against mpmtypes). */
  if (arena->spareCommitted < size) {
    Size necessaryCommitIncrease = size - arena->spareCommitted;
    if (arena->committed + necessaryCommitIncrease > arena->commitLimit
        || arena->committed + necessaryCommitIncrease < arena->committed) {
      return ResCOMMIT_LIMIT;
    }
  }

  /* Plan A: allocate from the free land in the requested zones */
  zones = ZoneSetDiff(pref->zones, pref->avoid);
  if (zones != ZoneSetEMPTY) {
    res = ArenaFreeLandAlloc(&tract, arena, zones, pref->high, size, pool);
    if (res == ResOK)
      goto found;
  }

  /* Plan B: add free zones that aren't blacklisted */
  /* TODO: Pools without ambiguous roots might not care about the blacklist. */
  /* TODO: zones are precious and (currently) never deallocated, so we
   * should consider extending the arena first if address space is plentiful.
   * See also job003384. */
  moreZones = ZoneSetUnion(pref->zones, ZoneSetDiff(arena->freeZones, pref->avoid));
  if (moreZones != zones) {
    res = ArenaFreeLandAlloc(&tract, arena, moreZones, pref->high, size, pool);
    if (res == ResOK)
      goto found;
  }

  /* Plan C: Extend the arena, then try A and B again.
   */
  if (moreZones != ZoneSetEMPTY) {
    res = Method(Arena, arena, grow)(arena, pref, size);
    /* If we can't extend because we hit the commit limit, try purging
       some spare committed memory and try again. */
    /* TODO: This would be a good time to *remap* VM instead of
       returning it to the OS. */
    if (res == ResCOMMIT_LIMIT) {
      if (Method(Arena, arena, purgeSpare)(arena, size) >= size)
        res = Method(Arena, arena, grow)(arena, pref, size);
    }
    if (res == ResOK) {
      /* Retry Plan A, then Plan B, in the now-larger arena. */
      if (zones != ZoneSetEMPTY) {
        res = ArenaFreeLandAlloc(&tract, arena, zones, pref->high, size, pool);
        if (res == ResOK)
          goto found;
      }
      if (moreZones != zones) {
        res = ArenaFreeLandAlloc(&tract, arena, moreZones, pref->high,
                                 size, pool);
        if (res == ResOK)
          goto found;
      }
    }
    /* TODO: Log an event here, since something went wrong, before
       trying the next plan anyway. */
  }

  /* Plan D: add every zone that isn't blacklisted. This might mix GC'd
   * objects with those from other generations, causing the zone check
   * to give false positives and slowing down the collector. */
  /* TODO: log an event for this */
  evenMoreZones = ZoneSetDiff(ZoneSetUNIV, pref->avoid);
  if (evenMoreZones != moreZones) {
    res = ArenaFreeLandAlloc(&tract, arena, evenMoreZones, pref->high,
                             size, pool);
    if (res == ResOK)
      goto found;
  }

  /* Last resort: try anywhere. This might put GC'd objects in zones where
   * common ambiguous bit patterns pin them down, causing the zone check
   * to give even more false positives permanently, and possibly retaining
   * garbage indefinitely. */
  res = ArenaFreeLandAlloc(&tract, arena, ZoneSetUNIV, pref->high, size, pool);
  if (res == ResOK)
    goto found;

  /* Uh oh.  Every plan failed; report the last failure to the caller.
   */
  return res;

found:
  *tractReturn = tract;
  return ResOK;
}


/* policyCollectionTime -- estimate time to collect the world, in seconds
 *
 * Estimates from the arena's measured tracing rate when available,
 * falling back on a configured default rate, and adds a fixed
 * per-collection overhead. */

static double policyCollectionTime(Arena arena)
{
  Size collectableSize;
  double collectionRate;   /* bytes of tracing work per second */
  double collectionTime;   /* seconds */

  AVERT(Arena, arena);

  collectableSize = ArenaCollectable(arena);
  /* The condition arena->tracedTime >= 1.0 ensures that the division
   * can't overflow. */
  if (arena->tracedTime >= 1.0)
    collectionRate = arena->tracedWork / arena->tracedTime;
  else
    collectionRate = ARENA_DEFAULT_COLLECTION_RATE;
  collectionTime = (double)collectableSize / collectionRate;
  collectionTime += ARENA_DEFAULT_COLLECTION_OVERHEAD;

  return collectionTime;
}


/* PolicyShouldCollectWorld -- should we collect the world now?
 *
 * Return TRUE if we should try collecting the world now, FALSE if
 * not.
 *
 * This is the policy behind mps_arena_step, and so the client
 * must have provided us with enough time to collect the world, and
 * enough time must have passed since the last time we did that
 * opportunistically.
 */

Bool PolicyShouldCollectWorld(Arena arena, double availableTime,
                              Clock now, Clock clocks_per_sec)
{
  Size collectableSize;
  double collectionTime, sinceLastWorldCollect;

  AVERT(Arena, arena);
  /* Can't collect the world if we're already collecting. */
  AVER(arena->busyTraces == TraceSetEMPTY);

  if (availableTime <= 0.0)
    /* Can't collect the world if we're not given any time. */
    return FALSE;

  /* Don't collect the world if it's very small. */
  collectableSize = ArenaCollectable(arena);
  if (collectableSize < ARENA_MINIMUM_COLLECTABLE_SIZE)
    return FALSE;

  /* How long would it take to collect the world? */
  collectionTime = policyCollectionTime(arena);

  /* How long since we last collected the world?
   */
  sinceLastWorldCollect = (double)(now - arena->lastWorldCollect) /
    (double)clocks_per_sec;

  /* Offered enough time, and long enough since we last did it? */
  return availableTime > collectionTime
    && sinceLastWorldCollect > collectionTime / ARENA_MAX_COLLECT_FRACTION;
}


/* policyCondemnChain -- condemn appropriate parts of this chain
 *
 * If successful, set *mortalityReturn to an estimate of the mortality
 * of the condemned parts of this chain and return ResOK.
 *
 * This is only called if ChainDeferral returned a value sufficiently
 * low that we decided to start the collection. (Usually such values
 * are less than zero; see .)
 */

static Res policyCondemnChain(double *mortalityReturn, Chain chain, Trace trace)
{
  size_t topCondemnedGen, i;
  GenDesc gen;

  AVER(mortalityReturn != NULL);
  AVERT(Chain, chain);
  AVERT(Trace, trace);
  AVER(chain->arena == trace->arena);

  /* Find the highest generation that's over capacity. We will condemn
   * this and all lower generations in the chain. */
  topCondemnedGen = chain->genCount;
  for (;;) {
    /* It's an error to call this function unless some generation is
     * over capacity as reported by ChainDeferral.  The AVER catches
     * this in checking builds; the return is the hot-build backstop. */
    AVER(topCondemnedGen > 0);
    if (topCondemnedGen == 0)
      return ResFAIL;
    --topCondemnedGen;
    gen = &chain->gens[topCondemnedGen];
    AVERT(GenDesc, gen);
    if (GenDescNewSize(gen) >= gen->capacity)
      break;
  }

  /* At this point, we've decided to condemn topCondemnedGen and all
   * lower generations. */
  TraceCondemnStart(trace);
  for (i = 0; i <= topCondemnedGen; ++i) {
    gen = &chain->gens[i];
    AVERT(GenDesc, gen);
    GenDescStartTrace(gen, trace);
  }
  EVENT5(ChainCondemnAuto, chain->arena, chain, trace, topCondemnedGen,
         chain->genCount);
  return TraceCondemnEnd(mortalityReturn, trace);
}


/* PolicyStartTrace -- consider starting a trace
 *
 * If collectWorldAllowed is TRUE, consider starting a collection of
 * the world. Otherwise, consider only starting collections of
 * individual chains or generations.
 *
 * If a collection of the world was started, set *collectWorldReturn
 * to TRUE. Otherwise leave it unchanged.
 *
 * If a trace was started, update *traceReturn and return TRUE.
 * Otherwise, leave *traceReturn unchanged and return FALSE.
 */

Bool PolicyStartTrace(Trace *traceReturn, Bool *collectWorldReturn,
                      Arena arena, Bool collectWorldAllowed)
{
  Res res;
  Trace trace;
  double TraceWorkFactor = 0.25;
  /* Fix the mortality of the world to avoid runaway feedback between the
     dynamic criterion and the mortality of the arena's top generation,
     leading to all traces collecting the world. This is a (hopefully)
     temporary hack, pending an improved scheduling algorithm. */
  double TraceWorldMortality = 0.5;

  AVER(traceReturn != NULL);
  AVERT(Arena, arena);

  if (collectWorldAllowed) {
    Size sFoundation, sCondemned, sSurvivors, sConsTrace;
    double tTracePerScan; /* tTrace/cScan */
    double dynamicDeferral;

    /* Compute dynamic criterion. See strategy.lisp-machine. */
    sFoundation = (Size)0; /* condemning everything, only roots @@@@ */
    /* @@@@ sCondemned should be scannable only */
    sCondemned = ArenaCommitted(arena) - ArenaSpareCommitted(arena);
    sSurvivors = (Size)((double)sCondemned * (1 - TraceWorldMortality));
    tTracePerScan = (double)sFoundation
      + ((double)sSurvivors * (1 + TraceCopyScanRATIO));
    AVER(TraceWorkFactor >= 0);
    AVER((double)sSurvivors + tTracePerScan * TraceWorkFactor
         <= (double)SizeMAX);
    sConsTrace = (Size)((double)sSurvivors + tTracePerScan * TraceWorkFactor);
    dynamicDeferral = (double)ArenaAvail(arena) - (double)sConsTrace;

    if (dynamicDeferral < 0.0) {
      /* Start full collection.
       */
      res = TraceStartCollectAll(&trace, arena, TraceStartWhyDYNAMICCRITERION);
      if (res != ResOK)
        goto failStart;
      *collectWorldReturn = TRUE;
      *traceReturn = trace;
      return TRUE;
    }
  }
  {
    /* Find the chain most over its capacity (most negative deferral). */
    Ring node, nextNode;
    double firstTime = 0.0;
    Chain firstChain = NULL;

    RING_FOR(node, &arena->chainRing, nextNode) {
      Chain chain = RING_ELT(Chain, chainRing, node);
      double time;

      AVERT(Chain, chain);
      time = ChainDeferral(chain);
      if (time < firstTime) {
        firstTime = time; firstChain = chain;
      }
    }

    /* If one was found, start collection on that chain. */
    if (firstTime < 0) {
      double mortality;

      /* NOTE(review): a TraceCreate failure is only caught by the AVER
         in checking builds; hot builds would use an uninitialized
         trace -- confirm TraceCreate cannot fail here. */
      res = TraceCreate(&trace, arena, TraceStartWhyCHAIN_GEN0CAP);
      AVER(res == ResOK);
      res = policyCondemnChain(&mortality, firstChain, trace);
      if (res != ResOK) /* should try some other trace, really @@@@ */
        goto failCondemn;
      if (TraceIsEmpty(trace))
        goto nothingCondemned;
      res = TraceStart(trace, mortality,
                       (double)trace->condemned * TraceWorkFactor);
      /* We don't expect normal GC traces to fail to start. */
      AVER(res == ResOK);
      *traceReturn = trace;
      return TRUE;
    }
  } /* (dynamicDeferral > 0.0) */
  return FALSE;

nothingCondemned:
failCondemn:
  TraceDestroyInit(trace);
failStart:
  return FALSE;
}


/* PolicyPoll -- do some tracing work?
 *
 * Return TRUE if the MPS should do some tracing work; FALSE if it
 * should return to the mutator.
 */

Bool PolicyPoll(Arena arena)
{
  Globals globals;
  AVERT(Arena, arena);
  globals = ArenaGlobals(arena);
  return globals->pollThreshold <= globals->fillMutatorSize;
}


/* PolicyPollAgain -- do another unit of work?
 *
 * Return TRUE if the MPS should do another unit of work; FALSE if it
 * should return to the mutator.
 *
 * start is the clock time when the MPS was entered.
 * moreWork and tracedWork are the results of the last call to TracePoll.
 */

Bool PolicyPollAgain(Arena arena, Clock start, Bool moreWork, Work tracedWork)
{
  Bool moreTime;
  Globals globals;
  double nextPollThreshold;

  AVERT(Arena, arena);
  UNUSED(tracedWork);

  /* In an emergency, keep working regardless of the pause budget. */
  if (ArenaEmergency(arena))
    return TRUE;

  /* Is there more work to do and more time to do it in? */
  moreTime = (double)(ClockNow() - start)
    < ArenaPauseTime(arena) * (double)ClocksPerSec();
  if (moreWork && moreTime)
    return TRUE;

  /* We're not going to do more work now, so calculate when to come back. */

  globals = ArenaGlobals(arena);

  if (moreWork) {
    /* We did one quantum of work; consume one unit of 'time'. */
    nextPollThreshold = globals->pollThreshold + ArenaPollALLOCTIME;
  } else {
    /* No more work to do. Sleep until NOW + a bit.  ('Time' here is
       mutator allocation, measured by fillMutatorSize.) */
    nextPollThreshold = globals->fillMutatorSize + ArenaPollALLOCTIME;
  }

  /* Advance pollThreshold; check: enough precision? */
  AVER(nextPollThreshold > globals->pollThreshold);
  globals->pollThreshold = nextPollThreshold;

  return FALSE;
}


/* C. COPYRIGHT AND LICENSE
 *
 * Copyright (C) 2001-2020 Ravenbrook Limited .
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/pool.c b/mps/code/pool.c new file mode 100644 index 00000000000..6e7d71dcd05 --- /dev/null +++ b/mps/code/pool.c @@ -0,0 +1,468 @@ +/* pool.c: POOL IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2001 Global Graphics Software. + * + * DESIGN + * + * .design: . + * + * PURPOSE + * + * .purpose: This is the implementation of the generic pool interface. + * There are three sorts of functions provided: + * .purpose.support: Support functions for manipulating and accessing + * Pool and PoolClass objects (create, destroy, check, various + * accessors, and other miscellaneous functions). + * .purpose.dispatch: Dispatch functions that implement the generic + * function dispatch mechanism for Pool Classes (PoolAlloc, PoolFree, + * etc.). + * + * SOURCES + * + * .source: See .design also. PoolStruct and PoolClassStruct, the + * central types for this module, are defined in , the + * corresponding abstract types in . Declarations and + * prototypes are in . Several functions have macro versions + * defined in . 
+ */ + +#include "mpm.h" + +SRCID(pool, "$Id$"); + + +/* PoolClassCheck -- check a pool class */ + +Bool PoolClassCheck(PoolClass klass) +{ + CHECKD(InstClass, &klass->instClassStruct); + CHECKL(klass->size >= sizeof(PoolStruct)); + CHECKL(AttrCheck(klass->attr)); + CHECKL(!(klass->attr & AttrMOVINGGC) || (klass->attr & AttrGC)); + CHECKL(FUNCHECK(klass->varargs)); + CHECKL(FUNCHECK(klass->init)); + CHECKL(FUNCHECK(klass->alloc)); + CHECKL(FUNCHECK(klass->free)); + CHECKL(FUNCHECK(klass->segPoolGen)); + CHECKL(FUNCHECK(klass->bufferFill)); + CHECKL(FUNCHECK(klass->bufferEmpty)); + CHECKL(FUNCHECK(klass->rampBegin)); + CHECKL(FUNCHECK(klass->rampEnd)); + CHECKL(FUNCHECK(klass->framePush)); + CHECKL(FUNCHECK(klass->framePop)); + CHECKL(FUNCHECK(klass->freewalk)); + CHECKL(FUNCHECK(klass->bufferClass)); + CHECKL(FUNCHECK(klass->debugMixin)); + CHECKL(FUNCHECK(klass->totalSize)); + CHECKL(FUNCHECK(klass->freeSize)); + CHECKL(FUNCHECK(klass->addrObject)); + + /* Check that pool classes override sets of related methods. 
*/ + CHECKL((klass->init == PoolAbsInit) == + (klass->instClassStruct.finish == PoolAbsFinish)); + CHECKL((klass->bufferFill == PoolNoBufferFill) == + (klass->bufferEmpty == PoolNoBufferEmpty)); + CHECKL((klass->framePush == PoolNoFramePush) == + (klass->framePop == PoolNoFramePop)); + CHECKL((klass->rampBegin == PoolNoRampBegin) == + (klass->rampEnd == PoolNoRampEnd)); + + CHECKS(PoolClass, klass); + return TRUE; +} + + +/* PoolCheck -- check the generic part of a pool */ + +Bool PoolCheck(Pool pool) +{ + PoolClass klass; + /* Checks ordered as per struct decl in */ + CHECKS(Pool, pool); + CHECKC(AbstractPool, pool); + /* Break modularity for checking efficiency */ + CHECKL(pool->serial < ArenaGlobals(pool->arena)->poolSerial); + klass = ClassOfPoly(Pool, pool); + CHECKD(PoolClass, klass); + CHECKU(Arena, pool->arena); + CHECKD_NOSIG(Ring, &pool->arenaRing); + CHECKD_NOSIG(Ring, &pool->bufferRing); + /* Cannot check pool->bufferSerial */ + CHECKD_NOSIG(Ring, &pool->segRing); + CHECKL(AlignCheck(pool->alignment)); + CHECKL(ShiftCheck(pool->alignShift)); + CHECKL(pool->alignment == PoolGrainsSize(pool, (Align)1)); + if (pool->format != NULL) + CHECKD(Format, pool->format); + return TRUE; +} + + +/* Common keywords to PoolInit */ + +ARG_DEFINE_KEY(FORMAT, Format); +ARG_DEFINE_KEY(CHAIN, Chain); +ARG_DEFINE_KEY(GEN, Cant); +ARG_DEFINE_KEY(RANK, Rank); +ARG_DEFINE_KEY(EXTEND_BY, Size); +ARG_DEFINE_KEY(LARGE_SIZE, Size); +ARG_DEFINE_KEY(MIN_SIZE, Size); +ARG_DEFINE_KEY(MEAN_SIZE, Size); +ARG_DEFINE_KEY(MAX_SIZE, Size); +ARG_DEFINE_KEY(ALIGN, Align); +ARG_DEFINE_KEY(SPARE, double); +ARG_DEFINE_KEY(INTERIOR, Bool); + + +/* PoolInit -- initialize a pool + * + * Initialize the generic fields of the pool and calls class-specific + * init. . 
+ */ + +Res PoolInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + Res res; + + AVERT(PoolClass, klass); + + res = klass->init(pool, arena, klass, args); + if (res != ResOK) + return res; + + EVENT4(PoolInit, pool, PoolArena(pool), ClassOfPoly(Pool, pool), + pool->serial); + + return ResOK; +} + + +/* PoolCreate: Allocate and initialise pool */ + +Res PoolCreate(Pool *poolReturn, Arena arena, + PoolClass klass, ArgList args) +{ + Res res; + Pool pool; + void *base; + + AVER(poolReturn != NULL); + AVERT(Arena, arena); + AVERT(PoolClass, klass); + + /* .space.alloc: Allocate the pool instance structure with the size */ + /* requested in the pool class. See .space.free */ + res = ControlAlloc(&base, arena, klass->size); + if (res != ResOK) + goto failControlAlloc; + pool = (Pool)base; + + /* Initialize the pool. */ + res = PoolInit(pool, arena, klass, args); + if (res != ResOK) + goto failPoolInit; + + *poolReturn = pool; + return ResOK; + +failPoolInit: + ControlFree(arena, base, klass->size); +failControlAlloc: + return res; +} + + +/* PoolFinish -- Finish pool including class-specific and generic fields. */ + +void PoolFinish(Pool pool) +{ + AVERT(Pool, pool); + Method(Inst, pool, finish)(MustBeA(Inst, pool)); +} + + +/* PoolDestroy -- Finish and free pool. */ + +void PoolDestroy(Pool pool) +{ + Arena arena; + Size size; + + AVERT(Pool, pool); + arena = pool->arena; + size = ClassOfPoly(Pool, pool)->size; + PoolFinish(pool); + + /* .space.free: Free the pool instance structure. See .space.alloc */ + ControlFree(arena, pool, size); +} + + +/* PoolDefaultBufferClass -- return the buffer class used by the pool */ + +BufferClass PoolDefaultBufferClass(Pool pool) +{ + AVERT(Pool, pool); + return Method(Pool, pool, bufferClass)(); +} + + +/* PoolAlloc -- allocate a block of memory from a pool + * + * .alloc.critical: In manual-allocation-bound programs this is on the + * critical path via mps_alloc. 
 */

Res PoolAlloc(Addr *pReturn, Pool pool, Size size)
{
  Res res;

  /* AVER_CRITICAL: cheap checks only, per .alloc.critical. */
  AVER_CRITICAL(pReturn != NULL);
  AVERT_CRITICAL(Pool, pool);
  AVER_CRITICAL(size > 0);

  res = Method(Pool, pool, alloc)(pReturn, pool, size);
  if (res != ResOK)
    return res;
  /* Make sure that the allocated address was in the pool's memory. */
  AVER_CRITICAL(PoolHasAddr(pool, *pReturn));
  /* All allocations should be aligned to the pool's alignment */
  AVER_CRITICAL(AddrIsAligned(*pReturn, pool->alignment));

  /* All PoolAllocs should advance the allocation clock, so we count */
  /* it all in the fillMutatorSize field. */
  ArenaGlobals(PoolArena(pool))->fillMutatorSize += (double)size;

  EVENT_CRITICAL3(PoolAlloc, pool, *pReturn, size);

  return ResOK;
}


/* PoolFree -- deallocate a block of memory allocated from the pool */

/* (Parenthesized name suppresses the macro version from mpm.h.) */
void (PoolFree)(Pool pool, Addr old, Size size)
{
  AVERT(Pool, pool);
  AVER(old != NULL);
  /* The pool methods should check that old is in pool. */
  AVER(size > 0);
  AVER(AddrIsAligned(old, pool->alignment));
  AVER(PoolHasRange(pool, old, AddrAdd(old, size)));

  PoolFreeMacro(pool, old, size);

  EVENT3(PoolFree, pool, old, size);
}


/* PoolSegPoolGen -- get pool generation for a segment */

PoolGen PoolSegPoolGen(Pool pool, Seg seg)
{
  AVERT(Pool, pool);
  AVERT(Seg, seg);
  AVER(pool == SegPool(seg));
  return Method(Pool, pool, segPoolGen)(pool, seg);
}


/* PoolFreeWalk -- walk free blocks in this pool
 *
 * PoolFreeWalk is not required to find all free blocks.
 */

void PoolFreeWalk(Pool pool, FreeBlockVisitor f, void *p)
{
  AVERT(Pool, pool);
  AVER(FUNCHECK(f));
  /* p is arbitrary closure data, hence can't be checked.
   */

  Method(Pool, pool, freewalk)(pool, f, p);
}


/* PoolTotalSize -- return total memory allocated from arena */

Size PoolTotalSize(Pool pool)
{
  AVERT(Pool, pool);

  return Method(Pool, pool, totalSize)(pool);
}


/* PoolFreeSize -- return free memory (unused by client program) */

Size PoolFreeSize(Pool pool)
{
  AVERT(Pool, pool);

  return Method(Pool, pool, freeSize)(pool);
}


/* PoolAddrObject -- return base pointer from interior pointer
 *
 * Note: addr is not necessarily inside the pool, even though
 * mps_addr_object dispatches via the tract table. This allows this
 * function to be used more generally internally. The pool should
 * check (it has to anyway).
 */

Res PoolAddrObject(Addr *pReturn, Pool pool, Addr addr)
{
  AVER(pReturn != NULL);
  AVERT(Pool, pool);
  return Method(Pool, pool, addrObject)(pReturn, pool, addr);
}


/* PoolDescribe -- describe a pool */

Res PoolDescribe(Pool pool, mps_lib_FILE *stream, Count depth)
{
  return Method(Inst, pool, describe)(MustBeA(Inst, pool), stream, depth);
}


/* PoolFormat -- get the format of a pool, if any
 *
 * Returns the format of the pool (the format of objects in the pool).
 * If the pool is unformatted or doesn't declare a format then this
 * function returns FALSE and does not update *formatReturn. Otherwise
 * this function returns TRUE and *formatReturn is updated to be the
 * pool's format.
 */

Bool PoolFormat(Format *formatReturn, Pool pool)
{
  AVER(formatReturn != NULL);
  AVERT(Pool, pool);

  if (pool->format) {
    *formatReturn = pool->format;
    return TRUE;
  }
  return FALSE;
}


/* PoolOfAddr -- return the pool containing the given address
 *
 * If the address points to a tract assigned to a pool, this returns TRUE
 * and sets *poolReturn to that pool. Otherwise, it returns FALSE, and
 * *poolReturn is unchanged.
+ */ + +Bool PoolOfAddr(Pool *poolReturn, Arena arena, Addr addr) +{ + Tract tract; + + AVER(poolReturn != NULL); + AVERT(Arena, arena); + + if (TractOfAddr(&tract, arena, addr)) { + *poolReturn = TractPool(tract); + return TRUE; + } + + return FALSE; +} + + +/* PoolOfRange -- return the pool containing a given range + * + * If all addresses in the range [base, limit) are owned by a single + * pool, update *poolReturn to that pool and return TRUE. Otherwise, + * leave *poolReturn unchanged and return FALSE. + */ +Bool PoolOfRange(Pool *poolReturn, Arena arena, Addr base, Addr limit) +{ + Bool havePool = FALSE; + Pool pool = NULL; + Tract tract; + Addr addr, alignedBase, alignedLimit; + + AVER(poolReturn != NULL); + AVERT(Arena, arena); + AVER(base < limit); + + alignedBase = AddrArenaGrainDown(base, arena); + alignedLimit = AddrArenaGrainUp(limit, arena); + + TRACT_FOR(tract, addr, arena, alignedBase, alignedLimit) { + Pool p = TractPool(tract); + if (havePool && pool != p) + return FALSE; + pool = p; + havePool = TRUE; + } + + if (havePool) { + *poolReturn = pool; + return TRUE; + } else { + return FALSE; + } +} + + +Bool PoolHasAddr(Pool pool, Addr addr) +{ + Pool addrPool; + Arena arena; + Bool managed; + + AVERT(Pool, pool); + + arena = PoolArena(pool); + managed = PoolOfAddr(&addrPool, arena, addr); + return (managed && addrPool == pool); +} + + +Bool PoolHasRange(Pool pool, Addr base, Addr limit) +{ + Pool rangePool; + Arena arena; + Bool managed; + + AVERT_CRITICAL(Pool, pool); + AVER_CRITICAL(base < limit); + + arena = PoolArena(pool); + managed = PoolOfRange(&rangePool, arena, base, limit); + return (managed && rangePool == pool); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolabs.c b/mps/code/poolabs.c new file mode 100644 index 00000000000..d6eae2dd046 --- /dev/null +++ b/mps/code/poolabs.c @@ -0,0 +1,517 @@ +/* poolabs.c: ABSTRACT POOL CLASSES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * PURPOSE + * + * .purpose: This defines the abstract pool classes, giving + * a single-inheritance framework which concrete classes + * may utilize. The purpose is to reduce the fragility of class + * definitions for pool implementations when small changes are + * made to the pool protocol. For now, the class hierarchy for + * the abstract classes is intended to be useful, but not to + * represent any particular design for pool inheritance. 
+ * + * HIERARCHY + * + * .hierarchy: define the following hierarchy of abstract pool classes: + * AbstractPoolClass - implements init, finish, describe + * AbstractBufferPoolClass - implements the buffer protocol + * AbstractSegBufPoolClass - uses SegBuf buffer class + * AbstractCollectPoolClass - implements basic GC + */ + +#include "mpm.h" + +SRCID(poolabs, "$Id$"); + + +/* Mixins: + * + * For now (at least) we're avoiding multiple inheritance. + * However, there is a significant use of multiple inheritance + * in practice amongst the pool classes, as there are several + * orthogonal sub-protocols included in the pool protocol. + * The following mixin functions help to provide the inheritance + * via a simpler means than real multiple inheritance. + */ + + +/* PoolClassMixInBuffer -- mix in the protocol for buffer reserve / commit */ + +void PoolClassMixInBuffer(PoolClass klass) +{ + /* Can't check klass because it's not initialized yet */ + klass->bufferFill = PoolTrivBufferFill; + klass->bufferEmpty = PoolTrivBufferEmpty; + /* By default, buffered pools treat frame operations as NOOPs */ + klass->framePush = PoolTrivFramePush; + klass->framePop = PoolTrivFramePop; + klass->bufferClass = BufferClassGet; +} + + +/* PoolClassMixInCollect -- mix in the protocol for GC */ + +void PoolClassMixInCollect(PoolClass klass) +{ + /* Can't check klass because it's not initialized yet */ + klass->attr |= AttrGC; + klass->rampBegin = PoolTrivRampBegin; + klass->rampEnd = PoolTrivRampEnd; +} + + +/* Classes */ + + +/* PoolAbsInit -- initialize an abstract pool instance */ + +Res PoolAbsInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + ArgStruct arg; + + AVER(pool != NULL); + AVERT(Arena, arena); + UNUSED(args); + UNUSED(klass); /* used for debug pools only */ + + /* Superclass init */ + InstInit(CouldBeA(Inst, pool)); + + pool->arena = arena; + RingInit(&pool->arenaRing); + RingInit(&pool->bufferRing); + RingInit(&pool->segRing); + pool->bufferSerial = 
(Serial)0; + pool->alignment = MPS_PF_ALIGN; + pool->alignShift = SizeLog2(pool->alignment); + pool->format = NULL; + + if (ArgPick(&arg, args, MPS_KEY_FORMAT)) { + Format format = arg.val.format; + AVERT(Format, format); + AVER(FormatArena(format) == arena); + pool->format = format; + /* .init.format: Increment reference count on the format for + consistency checking. See .finish.format. */ + ++pool->format->poolCount; + } else { + pool->format = NULL; + } + + pool->serial = ArenaGlobals(arena)->poolSerial; + ++ArenaGlobals(arena)->poolSerial; + + /* Initialise signature last; see design.mps.sig.init */ + SetClassOfPoly(pool, CLASS(AbstractPool)); + pool->sig = PoolSig; + AVERT(Pool, pool); + + /* Add initialized pool to list of pools in arena. */ + RingAppend(ArenaPoolRing(arena), PoolArenaRing(pool)); + + return ResOK; +} + + +/* PoolAbsFinish -- finish an abstract pool instance */ + +void PoolAbsFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + + EVENT2(PoolFinish, pool, PoolArena(pool)); + + /* Detach the pool from the arena and format, and unsig it. */ + RingRemove(PoolArenaRing(pool)); + + /* .finish.format: Decrement the reference count on the format for + consistency checking. See .format.init. 
     */
  if (pool->format) {
    AVER(pool->format->poolCount > 0);
    --pool->format->poolCount;
    pool->format = NULL;
  }

  pool->sig = SigInvalid;
  InstFinish(CouldBeA(Inst, pool));

  /* Finish the rings in reverse order of their initialization in
     PoolAbsInit. */
  RingFinish(&pool->segRing);
  RingFinish(&pool->bufferRing);
  RingFinish(&pool->arenaRing);
}

DEFINE_CLASS(Inst, PoolClass, klass)
{
  INHERIT_CLASS(klass, PoolClass, InstClass);
  AVERT(InstClass, klass);
}

/* AbstractPool -- root of the pool class hierarchy; every method
   defaults to a PoolNo* (must override) or PoolTriv* (no-op) stub. */
DEFINE_CLASS(Pool, AbstractPool, klass)
{
  INHERIT_CLASS(&klass->instClassStruct, AbstractPool, Inst);
  klass->instClassStruct.describe = PoolAbsDescribe;
  klass->instClassStruct.finish = PoolAbsFinish;
  klass->size = sizeof(PoolStruct);
  klass->attr = 0;
  klass->varargs = ArgTrivVarargs;
  klass->init = PoolAbsInit;
  klass->alloc = PoolNoAlloc;
  klass->free = PoolNoFree;
  klass->bufferFill = PoolNoBufferFill;
  klass->bufferEmpty = PoolNoBufferEmpty;
  klass->rampBegin = PoolNoRampBegin;
  klass->rampEnd = PoolNoRampEnd;
  klass->framePush = PoolNoFramePush;
  klass->framePop = PoolNoFramePop;
  klass->segPoolGen = PoolNoSegPoolGen;
  klass->freewalk = PoolTrivFreeWalk;
  klass->bufferClass = PoolNoBufferClass;
  klass->debugMixin = PoolNoDebugMixin;
  klass->totalSize = PoolNoSize;
  klass->freeSize = PoolNoSize;
  klass->addrObject = PoolTrivAddrObject;
  klass->sig = PoolClassSig;
  AVERT(PoolClass, klass);
}

/* AbstractBufferPool -- adds the buffer reserve/commit protocol. */
DEFINE_CLASS(Pool, AbstractBufferPool, klass)
{
  INHERIT_CLASS(klass, AbstractBufferPool, AbstractPool);
  PoolClassMixInBuffer(klass);
  AVERT(PoolClass, klass);
}

/* AbstractSegBufPool -- buffered pool backed by SegBuf buffers. */
DEFINE_CLASS(Pool, AbstractSegBufPool, klass)
{
  INHERIT_CLASS(klass, AbstractSegBufPool, AbstractBufferPool);
  klass->bufferClass = SegBufClassGet;
  klass->bufferEmpty = PoolSegBufferEmpty;
  AVERT(PoolClass, klass);
}

/* AbstractCollectPool -- adds the basic GC protocol. */
DEFINE_CLASS(Pool, AbstractCollectPool, klass)
{
  INHERIT_CLASS(klass, AbstractCollectPool, AbstractSegBufPool);
  PoolClassMixInCollect(klass);
  AVERT(PoolClass, klass);
}


/* PoolNo*, PoolTriv* -- Trivial and non-methods for Pool Classes
 *
 * 
and + */ + +Res PoolNoAlloc(Addr *pReturn, Pool pool, Size size) +{ + AVER(pReturn != NULL); + AVERT(Pool, pool); + AVER(size > 0); + NOTREACHED; + return ResUNIMPL; +} + +Res PoolTrivAlloc(Addr *pReturn, Pool pool, Size size) +{ + AVER(pReturn != NULL); + AVERT(Pool, pool); + AVER(size > 0); + return ResLIMIT; +} + +void PoolNoFree(Pool pool, Addr old, Size size) +{ + AVERT(Pool, pool); + AVER(old != NULL); + AVER(size > 0); + NOTREACHED; +} + +void PoolTrivFree(Pool pool, Addr old, Size size) +{ + AVERT(Pool, pool); + AVER(old != NULL); + AVER(size > 0); + NOOP; /* trivial free has no effect */ +} + +PoolGen PoolNoSegPoolGen(Pool pool, Seg seg) +{ + AVERT(Pool, pool); + AVERT(Seg, seg); + AVER(pool == SegPool(seg)); + NOTREACHED; + return NULL; +} + +Res PoolNoBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Pool, pool); + AVERT(Buffer, buffer); + AVER(size > 0); + NOTREACHED; + return ResUNIMPL; +} + +Res PoolTrivBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + Res res; + Addr p; + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Pool, pool); + AVERT(Buffer, buffer); + AVER(size > 0); + + res = PoolAlloc(&p, pool, size); + if (res != ResOK) + return res; + + *baseReturn = p; + *limitReturn = AddrAdd(p, size); + return ResOK; +} + + +void PoolNoBufferEmpty(Pool pool, Buffer buffer) +{ + AVERT(Pool, pool); + AVERT(Buffer, buffer); + AVER(BufferIsReady(buffer)); + NOTREACHED; +} + +void PoolTrivBufferEmpty(Pool pool, Buffer buffer) +{ + Addr init, limit; + + AVERT(Pool, pool); + AVERT(Buffer, buffer); + AVER(BufferIsReady(buffer)); + + init = BufferGetInit(buffer); + limit = BufferLimit(buffer); + AVER(init <= limit); + if (limit > init) + PoolFree(pool, init, AddrOffset(init, limit)); +} + +void PoolSegBufferEmpty(Pool pool, Buffer buffer) +{ + Seg seg; + + AVERT(Pool, pool); + AVERT(Buffer, buffer); + 
AVER(BufferIsReady(buffer)); + seg = BufferSeg(buffer); + AVERT(Seg, seg); + + Method(Seg, seg, bufferEmpty)(seg, buffer); +} + + +Res PoolAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + Res res; + Ring node, nextNode; + + if (!TESTC(AbstractPool, pool)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = InstDescribe(CouldBeA(Inst, pool), stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "serial $U\n", (WriteFU)pool->serial, + "arena $P ($U)\n", + (WriteFP)pool->arena, (WriteFU)pool->arena->serial, + "alignment $W\n", (WriteFW)pool->alignment, + "alignShift $W\n", (WriteFW)pool->alignShift, + NULL); + if (res != ResOK) + return res; + + if (pool->format != NULL) { + res = FormatDescribe(pool->format, stream, depth + 2); + if (res != ResOK) + return res; + } + + RING_FOR(node, &pool->bufferRing, nextNode) { + Buffer buffer = RING_ELT(Buffer, poolRing, node); + res = BufferDescribe(buffer, stream, depth + 2); + if (res != ResOK) + return res; + } + + return ResOK; +} + + +Res PoolNoTraceBegin(Pool pool, Trace trace) +{ + AVERT(Pool, pool); + AVERT(Trace, trace); + AVER(PoolArena(pool) == trace->arena); + NOTREACHED; + return ResUNIMPL; +} + +Res PoolTrivTraceBegin(Pool pool, Trace trace) +{ + AVERT(Pool, pool); + AVERT(Trace, trace); + AVER(PoolArena(pool) == trace->arena); + return ResOK; +} + +void PoolNoRampBegin(Pool pool, Buffer buf, Bool collectAll) +{ + AVERT(Pool, pool); + AVERT(Buffer, buf); + AVERT(Bool, collectAll); + NOTREACHED; +} + + +void PoolNoRampEnd(Pool pool, Buffer buf) +{ + AVERT(Pool, pool); + AVERT(Buffer, buf); + NOTREACHED; +} + + +void PoolTrivRampBegin(Pool pool, Buffer buf, Bool collectAll) +{ + AVERT(Pool, pool); + AVERT(Buffer, buf); + AVERT(Bool, collectAll); +} + + +void PoolTrivRampEnd(Pool pool, Buffer buf) +{ + AVERT(Pool, pool); + AVERT(Buffer, buf); +} + + +Res PoolNoFramePush(AllocFrame *frameReturn, Pool pool, 
Buffer buf) +{ + AVER(frameReturn != NULL); + AVERT(Pool, pool); + AVERT(Buffer, buf); + NOTREACHED; + return ResUNIMPL; +} + + +Res PoolNoFramePop(Pool pool, Buffer buf, AllocFrame frame) +{ + AVERT(Pool, pool); + AVERT(Buffer, buf); + /* frame is of an abstract type & can't be checked */ + UNUSED(frame); + NOTREACHED; + return ResUNIMPL; +} + + +Res PoolTrivFramePush(AllocFrame *frameReturn, Pool pool, Buffer buf) +{ + AVER(frameReturn != NULL); + AVERT(Pool, pool); + AVERT(Buffer, buf); + return ResOK; +} + + +Res PoolTrivFramePop(Pool pool, Buffer buf, AllocFrame frame) +{ + AVERT(Pool, pool); + AVERT(Buffer, buf); + /* frame is of an abstract type & can't be checked */ + UNUSED(frame); + return ResOK; +} + + +void PoolTrivFreeWalk(Pool pool, FreeBlockVisitor f, void *p) +{ + AVERT(Pool, pool); + AVER(FUNCHECK(f)); + /* p is arbitrary, hence can't be checked */ + UNUSED(p); + + /* FreeWalk doesn't have be perfect, so just pretend you didn't find any. */ + NOOP; +} + + +BufferClass PoolNoBufferClass(void) +{ + NOTREACHED; + return NULL; +} + + +Size PoolNoSize(Pool pool) +{ + AVERT(Pool, pool); + NOTREACHED; + return UNUSED_SIZE; +} + + +Res PoolTrivAddrObject(Addr *pReturn, Pool pool, Addr addr) +{ + AVERT(Pool, pool); + AVER(pReturn != NULL); + UNUSED(addr); + + return ResUNIMPL; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolamc.c b/mps/code/poolamc.c new file mode 100644 index 00000000000..b712ce52370 --- /dev/null +++ b/mps/code/poolamc.c @@ -0,0 +1,2236 @@ +/* poolamc.c: AUTOMATIC MOSTLY-COPYING MEMORY POOL CLASS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .sources: . + */ + +#include "mpscamc.h" +#include "locus.h" +#include "bt.h" +#include "mpm.h" +#include "nailboard.h" + +SRCID(poolamc, "$Id$"); + +typedef struct AMCStruct *AMC; +typedef struct amcGenStruct *amcGen; + +/* Function returning TRUE if block in nailboarded segment is pinned. 
*/ +typedef Bool (*amcPinnedFunction)(AMC amc, Nailboard board, Addr base, Addr limit); + +/* forward declarations */ + +static void amcSegBufferEmpty(Seg seg, Buffer buffer); +static Res amcSegWhiten(Seg seg, Trace trace); +static Res amcSegScan(Bool *totalReturn, Seg seg, ScanState ss); +static void amcSegReclaim(Seg seg, Trace trace); +static Bool amcSegHasNailboard(Seg seg); +static Nailboard amcSegNailboard(Seg seg); +static Bool AMCCheck(AMC amc); +static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO); +static Res amcSegFixEmergency(Seg seg, ScanState ss, Ref *refIO); +static void amcSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s); + +/* local class declarations */ + +typedef AMC AMCZPool; +#define AMCZPoolCheck AMCCheck +DECLARE_CLASS(Pool, AMCZPool, AbstractCollectPool); + +typedef AMC AMCPool; +DECLARE_CLASS(Pool, AMCPool, AMCZPool); + +DECLARE_CLASS(Buffer, amcBuf, SegBuf); +DECLARE_CLASS(Seg, amcSeg, MutatorSeg); + + +/* amcGenStruct -- pool AMC generation descriptor */ + +#define amcGenSig ((Sig)0x519A3C9E) /* SIGnature AMC GEn */ + +typedef struct amcGenStruct { + PoolGenStruct pgen; + RingStruct amcRing; /* link in list of gens in pool */ + Buffer forward; /* forwarding buffer */ + Sig sig; /* design.mps.sig.field.end.outer */ +} amcGenStruct; + +#define amcGenAMC(amcgen) MustBeA(AMCZPool, (amcgen)->pgen.pool) +#define amcGenPool(amcgen) ((amcgen)->pgen.pool) + +#define amcGenNr(amcgen) ((amcgen)->pgen.nr) + + +#define RAMP_RELATION(X) \ + X(RampOUTSIDE, "outside ramp") \ + X(RampBEGIN, "begin ramp") \ + X(RampRAMPING, "ramping") \ + X(RampFINISH, "finish ramp") \ + X(RampCOLLECTING, "collecting ramp") + +#define RAMP_ENUM(e, s) e, +enum { + RAMP_RELATION(RAMP_ENUM) + RampLIMIT +}; +#undef RAMP_ENUM + + +/* amcSegStruct -- AMC-specific fields appended to GCSegStruct + * + * .seg.accounted-as-buffered: The "accountedAsBuffered" flag is TRUE + * if the segment has an attached buffer and is accounted against the + * pool 
generation's bufferedSize. But note that if this is FALSE, the + * segment might still have an attached buffer -- this happens if the + * segment was condemned while the buffer was attached. + * + * .seg.old: The "old" flag is TRUE if the segment has been collected + * at least once, and so its size is accounted against the pool + * generation's oldSize. + * + * .seg.deferred: The "deferred" flag is TRUE if its size accounting + * in the pool generation has been deferred. This is set if the + * segment was created in ramping mode (and so we don't want it to + * contribute to the pool generation's newSize and so provoke a + * collection via TracePoll), and by hash array allocations (where we + * don't want the allocation to provoke a collection that makes the + * location dependency stale immediately). + */ + +typedef struct amcSegStruct *amcSeg; + +#define amcSegSig ((Sig)0x519A3C59) /* SIGnature AMC SeG */ + +typedef struct amcSegStruct { + GCSegStruct gcSegStruct; /* superclass fields must come first */ + amcGen gen; /* generation this segment belongs to */ + Nailboard board; /* nailboard for this segment or NULL if none */ + Size forwarded[TraceLIMIT]; /* size of objects forwarded for each trace */ + BOOLFIELD(accountedAsBuffered); /* .seg.accounted-as-buffered */ + BOOLFIELD(old); /* .seg.old */ + BOOLFIELD(deferred); /* .seg.deferred */ + Sig sig; /* design.mps.sig.field.end.outer */ +} amcSegStruct; + + +ATTRIBUTE_UNUSED +static Bool amcSegCheck(amcSeg amcseg) +{ + CHECKS(amcSeg, amcseg); + CHECKD(GCSeg, &amcseg->gcSegStruct); + CHECKU(amcGen, amcseg->gen); + if (amcseg->board) { + CHECKD(Nailboard, amcseg->board); + CHECKL(SegNailed(MustBeA(Seg, amcseg)) != TraceSetEMPTY); + } + /* CHECKL(BoolCheck(amcseg->accountedAsBuffered)); */ + /* CHECKL(BoolCheck(amcseg->old)); */ + /* CHECKL(BoolCheck(amcseg->deferred)); */ + return TRUE; +} + + +/* AMCSegInit -- initialise an AMC segment */ + +ARG_DEFINE_KEY(amc_seg_gen, Pointer); +#define amcKeySegGen 
(&_mps_key_amc_seg_gen) + +static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + amcGen amcgen; + amcSeg amcseg; + Res res; + ArgStruct arg; + + ArgRequire(&arg, args, amcKeySegGen); + amcgen = arg.val.p; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, amcSeg, init)(seg, pool, base, size, args); + if(res != ResOK) + return res; + amcseg = CouldBeA(amcSeg, seg); + + amcseg->gen = amcgen; + amcseg->board = NULL; + amcseg->accountedAsBuffered = FALSE; + amcseg->old = FALSE; + amcseg->deferred = FALSE; + + SetClassOfPoly(seg, CLASS(amcSeg)); + amcseg->sig = amcSegSig; + AVERC(amcSeg, amcseg); + + return ResOK; +} + + +/* amcSegFinish -- finish an AMC segment */ + +static void amcSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + amcSeg amcseg = MustBeA(amcSeg, seg); + + amcseg->sig = SigInvalid; + + /* finish the superclass fields last */ + NextMethod(Inst, amcSeg, finish)(inst); +} + + +/* AMCSegSketch -- summarise the segment state for a human reader + * + * Write a short human-readable text representation of the segment + * state into storage indicated by pbSketch+cbSketch. + * + * A typical sketch is "bGW_", meaning the seg has a nailboard, has + * some Grey and some White objects, and has no buffer attached. 
+ */ + +static void AMCSegSketch(Seg seg, char *pbSketch, size_t cbSketch) +{ + Buffer buffer; + + AVER(pbSketch); + AVER(cbSketch >= 5); + + if(SegNailed(seg) == TraceSetEMPTY) { + pbSketch[0] = 'm'; /* mobile */ + } else if (amcSegHasNailboard(seg)) { + pbSketch[0] = 'b'; /* boarded */ + } else { + pbSketch[0] = 's'; /* stuck */ + } + + if(SegGrey(seg) == TraceSetEMPTY) { + pbSketch[1] = '_'; + } else { + pbSketch[1] = 'G'; /* Grey */ + } + + if(SegWhite(seg) == TraceSetEMPTY) { + pbSketch[2] = '_'; + } else { + pbSketch[2] = 'W'; /* White */ + } + + if (!SegBuffer(&buffer, seg)) { + pbSketch[3] = '_'; + } else { + Bool mut = BufferIsMutator(buffer); + Bool flipped = ((buffer->mode & BufferModeFLIPPED) != 0); + Bool trapped = BufferIsTrapped(buffer); + Bool limitzeroed = (buffer->ap_s.limit == 0); + + pbSketch[3] = 'X'; /* I don't know what's going on! */ + + if((flipped == trapped) && (trapped == limitzeroed)) { + if(mut) { + if(flipped) { + pbSketch[3] = 's'; /* stalo */ + } else { + pbSketch[3] = 'n'; /* neo */ + } + } else { + if(!flipped) { + pbSketch[3] = 'f'; /* forwarding */ + } + } + } else { + /* I don't know what's going on! */ + } + } + + pbSketch[4] = '\0'; + AVER(4 < cbSketch); +} + + +/* AMCSegDescribe -- describe the contents of a segment + * + * . 
+ */ +static Res AMCSegDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + amcSeg amcseg = CouldBeA(amcSeg, inst); + Seg seg = CouldBeA(Seg, amcseg); + Res res; + Pool pool; + Addr i, p, base, limit, init; + Align step; + Size row; + char abzSketch[5]; + Buffer buffer; + + if (!TESTC(amcSeg, amcseg)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + /* Describe the superclass fields first via next-method call */ + res = NextMethod(Inst, amcSeg, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + pool = SegPool(seg); + step = PoolAlignment(pool); + row = step * 64; + + base = SegBase(seg); + p = AddrAdd(base, pool->format->headerSize); + limit = SegLimit(seg); + + if (amcSegHasNailboard(seg)) { + res = WriteF(stream, depth + 2, "Boarded\n", NULL); + } else if (SegNailed(seg) == TraceSetEMPTY) { + res = WriteF(stream, depth + 2, "Mobile\n", NULL); + } else { + res = WriteF(stream, depth + 2, "Stuck\n", NULL); + } + if(res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "Map: *===:object @+++:nails bbbb:buffer\n", NULL); + if (res != ResOK) + return res; + + if (SegBuffer(&buffer, seg)) + init = BufferGetInit(buffer); + else + init = limit; + + for (i = base; i < limit; i = AddrAdd(i, row)) { + Addr j; + char c; + + res = WriteF(stream, depth + 2, "$A ", (WriteFA)i, NULL); + if (res != ResOK) + return res; + + /* @@@@ This misses a header-sized pad at the end. */ + for (j = i; j < AddrAdd(i, row); j = AddrAdd(j, step)) { + if (j >= limit) + c = ' '; /* if seg is not a whole number of print rows */ + else if (j >= init) + c = 'b'; + else { + Bool nailed = amcSegHasNailboard(seg) + && NailboardGet(amcSegNailboard(seg), j); + if (j == p) { + c = (nailed ? '@' : '*'); + p = (pool->format->skip)(p); + } else { + c = (nailed ? 
'+' : '='); + } + } + res = WriteF(stream, 0, "$C", (WriteFC)c, NULL); + if (res != ResOK) + return res; + } + + res = WriteF(stream, 0, "\n", NULL); + if (res != ResOK) + return res; + } + + AMCSegSketch(seg, abzSketch, NELEMS(abzSketch)); + res = WriteF(stream, depth + 2, "Sketch: $S\n", (WriteFS)abzSketch, NULL); + if(res != ResOK) + return res; + + return ResOK; +} + + +/* amcSegClass -- Class definition for AMC segments */ + +DEFINE_CLASS(Seg, amcSeg, klass) +{ + INHERIT_CLASS(klass, amcSeg, MutatorSeg); + SegClassMixInNoSplitMerge(klass); /* no support for this (yet) */ + klass->instClassStruct.describe = AMCSegDescribe; + klass->instClassStruct.finish = amcSegFinish; + klass->size = sizeof(amcSegStruct); + klass->init = AMCSegInit; + klass->bufferEmpty = amcSegBufferEmpty; + klass->whiten = amcSegWhiten; + klass->scan = amcSegScan; + klass->fix = amcSegFix; + klass->fixEmergency = amcSegFixEmergency; + klass->reclaim = amcSegReclaim; + klass->walk = amcSegWalk; + AVERT(SegClass, klass); +} + + + +/* amcSegHasNailboard -- test whether the segment has a nailboard + * + * . + */ +static Bool amcSegHasNailboard(Seg seg) +{ + amcSeg amcseg = MustBeA(amcSeg, seg); + return amcseg->board != NULL; +} + + +/* amcSegNailboard -- get the nailboard for this segment */ + +static Nailboard amcSegNailboard(Seg seg) +{ + amcSeg amcseg = MustBeA(amcSeg, seg); + AVER(amcSegHasNailboard(seg)); + return amcseg->board; +} + + +/* amcSegGen -- get the generation structure for this segment */ + +static amcGen amcSegGen(Seg seg) +{ + amcSeg amcseg = MustBeA(amcSeg, seg); + return amcseg->gen; +} + + +/* AMCStruct -- pool AMC descriptor + * + * . 
+ */ + +#define AMCSig ((Sig)0x519A3C99) /* SIGnature AMC */ + +typedef struct AMCStruct { /* */ + PoolStruct poolStruct; /* generic pool structure */ + RankSet rankSet; /* rankSet for entire pool */ + RingStruct genRing; /* ring of generations */ + Bool gensBooted; /* used during boot (init) */ + size_t gens; /* number of generations */ + amcGen *gen; /* (pointer to) array of generations */ + amcGen nursery; /* the default mutator generation */ + amcGen rampGen; /* the ramp generation */ + amcGen afterRampGen; /* the generation after rampGen */ + unsigned rampCount; /* */ + int rampMode; /* */ + amcPinnedFunction pinned; /* function determining if block is pinned */ + Size extendBy; /* segment size to extend pool by */ + Size largeSize; /* min size of "large" segments */ + Sig sig; /* design.mps.sig.field.end.outer */ +} AMCStruct; + + +/* amcGenCheck -- check consistency of a generation structure */ + +ATTRIBUTE_UNUSED +static Bool amcGenCheck(amcGen gen) +{ + AMC amc; + + CHECKS(amcGen, gen); + CHECKD(PoolGen, &gen->pgen); + amc = amcGenAMC(gen); + CHECKU(AMC, amc); + CHECKD(Buffer, gen->forward); + CHECKD_NOSIG(Ring, &gen->amcRing); + + return TRUE; +} + + +/* amcBufStruct -- AMC Buffer subclass + * + * This subclass of SegBuf records a link to a generation. 
+ */ + +#define amcBufSig ((Sig)0x519A3CBF) /* SIGnature AMC BuFfer */ + +typedef struct amcBufStruct *amcBuf; + +typedef struct amcBufStruct { + SegBufStruct segbufStruct; /* superclass fields must come first */ + amcGen gen; /* The AMC generation */ + Bool forHashArrays; /* allocates hash table arrays, see AMCBufferFill */ + Sig sig; /* design.mps.sig.field.end.outer */ +} amcBufStruct; + + +/* amcBufCheck -- check consistency of an amcBuf */ + +ATTRIBUTE_UNUSED +static Bool amcBufCheck(amcBuf amcbuf) +{ + CHECKS(amcBuf, amcbuf); + CHECKD(SegBuf, &amcbuf->segbufStruct); + if(amcbuf->gen != NULL) + CHECKD(amcGen, amcbuf->gen); + CHECKL(BoolCheck(amcbuf->forHashArrays)); + /* hash array buffers only created by mutator */ + CHECKL(BufferIsMutator(MustBeA(Buffer, amcbuf)) || !amcbuf->forHashArrays); + return TRUE; +} + + +/* amcBufGen -- Return the AMC generation of an amcBuf */ + +static amcGen amcBufGen(Buffer buffer) +{ + return MustBeA(amcBuf, buffer)->gen; +} + + +/* amcBufSetGen -- Set the AMC generation of an amcBuf */ + +static void amcBufSetGen(Buffer buffer, amcGen gen) +{ + amcBuf amcbuf = MustBeA(amcBuf, buffer); + if (gen != NULL) + AVERT(amcGen, gen); + amcbuf->gen = gen; +} + + +ARG_DEFINE_KEY(ap_hash_arrays, Bool); + +#define amcKeyAPHashArrays (&_mps_key_ap_hash_arrays) + +/* AMCBufInit -- Initialize an amcBuf */ + +static Res AMCBufInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args) +{ + AMC amc = MustBeA(AMCZPool, pool); + amcBuf amcbuf; + Res res; + Bool forHashArrays = FALSE; + ArgStruct arg; + + if (ArgPick(&arg, args, amcKeyAPHashArrays)) + forHashArrays = arg.val.b; + + res = NextMethod(Buffer, amcBuf, init)(buffer, pool, isMutator, args); + if(res != ResOK) + return res; + amcbuf = CouldBeA(amcBuf, buffer); + + if (BufferIsMutator(buffer)) { + /* Set up the buffer to be allocating in the nursery. */ + amcbuf->gen = amc->nursery; + } else { + /* No gen yet -- see . 
*/ + amcbuf->gen = NULL; + } + amcbuf->forHashArrays = forHashArrays; + + SetClassOfPoly(buffer, CLASS(amcBuf)); + amcbuf->sig = amcBufSig; + AVERC(amcBuf, amcbuf); + + BufferSetRankSet(buffer, amc->rankSet); + + return ResOK; +} + + +/* AMCBufFinish -- Finish an amcBuf */ + +static void AMCBufFinish(Inst inst) +{ + Buffer buffer = MustBeA(Buffer, inst); + amcBuf amcbuf = MustBeA(amcBuf, buffer); + amcbuf->sig = SigInvalid; + NextMethod(Inst, amcBuf, finish)(inst); +} + + +/* amcBufClass -- The class definition */ + +DEFINE_CLASS(Buffer, amcBuf, klass) +{ + INHERIT_CLASS(klass, amcBuf, SegBuf); + klass->instClassStruct.finish = AMCBufFinish; + klass->size = sizeof(amcBufStruct); + klass->init = AMCBufInit; + AVERT(BufferClass, klass); +} + + +/* amcGenCreate -- create a generation */ + +static Res amcGenCreate(amcGen *genReturn, AMC amc, GenDesc gen) +{ + Pool pool = MustBeA(AbstractPool, amc); + Arena arena; + Buffer buffer; + amcGen amcgen; + Res res; + void *p; + + arena = pool->arena; + + res = ControlAlloc(&p, arena, sizeof(amcGenStruct)); + if(res != ResOK) + goto failControlAlloc; + amcgen = (amcGen)p; + + res = BufferCreate(&buffer, CLASS(amcBuf), pool, FALSE, argsNone); + if(res != ResOK) + goto failBufferCreate; + + res = PoolGenInit(&amcgen->pgen, gen, pool); + if(res != ResOK) + goto failGenInit; + RingInit(&amcgen->amcRing); + amcgen->forward = buffer; + amcgen->sig = amcGenSig; + + AVERT(amcGen, amcgen); + + RingAppend(&amc->genRing, &amcgen->amcRing); + + *genReturn = amcgen; + return ResOK; + +failGenInit: + BufferDestroy(buffer); +failBufferCreate: + ControlFree(arena, p, sizeof(amcGenStruct)); +failControlAlloc: + return res; +} + + +/* amcGenDestroy -- destroy a generation */ + +static void amcGenDestroy(amcGen gen) +{ + Arena arena; + + AVERT(amcGen, gen); + + arena = PoolArena(amcGenPool(gen)); + gen->sig = SigInvalid; + RingRemove(&gen->amcRing); + RingFinish(&gen->amcRing); + PoolGenFinish(&gen->pgen); + BufferDestroy(gen->forward); + 
ControlFree(arena, gen, sizeof(amcGenStruct)); +} + + +/* amcGenDescribe -- describe an AMC generation */ + +static Res amcGenDescribe(amcGen gen, mps_lib_FILE *stream, Count depth) +{ + Res res; + + if(!TESTT(amcGen, gen)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "amcGen $P {\n", (WriteFP)gen, + " buffer $P\n", (WriteFP)gen->forward, NULL); + if (res != ResOK) + return res; + + res = PoolGenDescribe(&gen->pgen, stream, depth + 2); + if (res != ResOK) + return res; + + res = WriteF(stream, depth, "} amcGen $P\n", (WriteFP)gen, NULL); + return res; +} + + +/* amcSegCreateNailboard -- create nailboard for segment */ + +static Res amcSegCreateNailboard(Seg seg) +{ + amcSeg amcseg = MustBeA(amcSeg, seg); + Pool pool = SegPool(seg); + Nailboard board; + Arena arena; + Res res; + + AVER(!amcSegHasNailboard(seg)); + arena = PoolArena(pool); + + res = NailboardCreate(&board, arena, pool->alignment, + SegBase(seg), SegLimit(seg)); + if (res != ResOK) + return res; + + amcseg->board = board; + + return ResOK; +} + + +/* amcPinnedInterior -- block is pinned by any nail */ + +static Bool amcPinnedInterior(AMC amc, Nailboard board, Addr base, Addr limit) +{ + Size headerSize = MustBeA(AbstractPool, amc)->format->headerSize; + return !NailboardIsResRange(board, AddrSub(base, headerSize), + AddrSub(limit, headerSize)); +} + + +/* amcPinnedBase -- block is pinned only if base is nailed */ + +static Bool amcPinnedBase(AMC amc, Nailboard board, Addr base, Addr limit) +{ + UNUSED(amc); + UNUSED(limit); + return NailboardGet(board, base); +} + + +/* amcVarargs -- decode obsolete varargs */ + +static void AMCVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_FORMAT; + args[0].val.format = va_arg(varargs, Format); + args[1].key = MPS_KEY_CHAIN; + args[1].val.chain = va_arg(varargs, Chain); + args[2].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + +/* amcInitComm -- initialize AMC/Z pool + * + * . 
+ * Shared by AMCInit and AMCZinit. + */ +static Res amcInitComm(Pool pool, Arena arena, PoolClass klass, + RankSet rankSet, ArgList args) +{ + AMC amc; + Res res; + Index i; + size_t genArraySize; + size_t genCount; + Bool interior = AMC_INTERIOR_DEFAULT; + Chain chain; + Size extendBy = AMC_EXTEND_BY_DEFAULT; + Size largeSize = AMC_LARGE_SIZE_DEFAULT; + ArgStruct arg; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + AVERT(PoolClass, klass); + AVER(IsSubclass(klass, AMCZPool)); + + if (ArgPick(&arg, args, MPS_KEY_CHAIN)) + chain = arg.val.chain; + else + chain = ArenaGlobals(arena)->defaultChain; + if (ArgPick(&arg, args, MPS_KEY_INTERIOR)) + interior = arg.val.b; + if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY)) + extendBy = arg.val.size; + if (ArgPick(&arg, args, MPS_KEY_LARGE_SIZE)) + largeSize = arg.val.size; + + AVERT(Chain, chain); + AVER(chain->arena == arena); + AVER(extendBy > 0); + AVER(largeSize > 0); + /* TODO: it would be nice to be able to manage large objects that + * are smaller than the extendBy, but currently this results in + * unacceptable fragmentation due to the padding objects. This + * assertion catches this bad case. */ + AVER(largeSize >= extendBy); + + res = NextMethod(Pool, AMCZPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + amc = CouldBeA(AMCZPool, pool); + + /* Ensure a format was supplied in the argument list. */ + AVER(pool->format != NULL); + + pool->alignment = pool->format->alignment; + pool->alignShift = SizeLog2(pool->alignment); + amc->rankSet = rankSet; + + RingInit(&amc->genRing); + /* amc gets checked before the generations get created, but they */ + /* do get created later in this function. 
*/ + amc->gen = NULL; + amc->nursery = NULL; + amc->rampGen = NULL; + amc->afterRampGen = NULL; + amc->gensBooted = FALSE; + + amc->rampCount = 0; + amc->rampMode = RampOUTSIDE; + + if (interior) { + amc->pinned = amcPinnedInterior; + } else { + amc->pinned = amcPinnedBase; + } + /* .extend-by.aligned: extendBy is aligned to the arena alignment. */ + amc->extendBy = SizeArenaGrains(extendBy, arena); + amc->largeSize = largeSize; + + SetClassOfPoly(pool, klass); + amc->sig = AMCSig; + AVERC(AMCZPool, amc); + + /* Init generations. */ + genCount = ChainGens(chain); + { + void *p; + + /* One gen for each one in the chain plus dynamic gen. */ + genArraySize = sizeof(amcGen) * (genCount + 1); + res = ControlAlloc(&p, arena, genArraySize); + if(res != ResOK) + goto failGensAlloc; + amc->gen = p; + for (i = 0; i <= genCount; ++i) { + res = amcGenCreate(&amc->gen[i], amc, ChainGen(chain, i)); + if (res != ResOK) + goto failGenAlloc; + } + /* Set up forwarding buffers. */ + for(i = 0; i < genCount; ++i) { + amcBufSetGen(amc->gen[i]->forward, amc->gen[i+1]); + } + /* Dynamic gen forwards to itself. */ + amcBufSetGen(amc->gen[genCount]->forward, amc->gen[genCount]); + } + amc->nursery = amc->gen[0]; + amc->rampGen = amc->gen[genCount-1]; /* last ephemeral gen */ + amc->afterRampGen = amc->gen[genCount]; + amc->gensBooted = TRUE; + + AVERT(AMC, amc); + if(rankSet == RankSetEMPTY) + EVENT2(PoolInitAMCZ, pool, pool->format); + else + EVENT2(PoolInitAMC, pool, pool->format); + return ResOK; + +failGenAlloc: + while(i > 0) { + --i; + amcGenDestroy(amc->gen[i]); + } + ControlFree(arena, amc->gen, genArraySize); +failGensAlloc: + NextMethod(Inst, AMCZPool, finish)(MustBeA(Inst, pool)); +failNextInit: + AVER(res != ResOK); + return res; +} + +/* TODO: AMCInit should call AMCZInit (its superclass) then + specialize, but amcInitComm creates forwarding buffers that copy + the rank set from the pool, making this awkward. 
*/ + +static Res AMCInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + UNUSED(klass); /* used for debug pools only */ + return amcInitComm(pool, arena, CLASS(AMCPool), RankSetSingle(RankEXACT), args); +} + +static Res AMCZInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + UNUSED(klass); /* used for debug pools only */ + return amcInitComm(pool, arena, CLASS(AMCZPool), RankSetEMPTY, args); +} + + +/* AMCFinish -- finish AMC pool + * + * . + */ +static void AMCFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + AMC amc = MustBeA(AMCZPool, pool); + Ring ring; + Ring node, nextNode; + + /* @@@@ Make sure that segments aren't buffered by forwarding */ + /* buffers. This is a hack which allows the pool to be destroyed */ + /* while it is collecting. Note that there aren't any mutator */ + /* buffers by this time. */ + RING_FOR(node, &amc->genRing, nextNode) { + amcGen gen = RING_ELT(amcGen, amcRing, node); + BufferDetach(gen->forward, pool); + } + + ring = PoolSegRing(pool); + RING_FOR(node, ring, nextNode) { + Seg seg = SegOfPoolRing(node); + amcGen gen = amcSegGen(seg); + amcSeg amcseg = MustBeA(amcSeg, seg); + AVERT(amcSeg, amcseg); + AVER(!amcseg->accountedAsBuffered); + PoolGenFree(&gen->pgen, seg, + 0, + amcseg->old ? SegSize(seg) : 0, + amcseg->old ? 0 : SegSize(seg), + amcseg->deferred); + } + + /* Disassociate forwarding buffers from gens before they are */ + /* destroyed. */ + ring = &amc->genRing; + RING_FOR(node, ring, nextNode) { + amcGen gen = RING_ELT(amcGen, amcRing, node); + amcBufSetGen(gen->forward, NULL); + } + RING_FOR(node, ring, nextNode) { + amcGen gen = RING_ELT(amcGen, amcRing, node); + amcGenDestroy(gen); + } + + amc->sig = SigInvalid; + + NextMethod(Inst, AMCZPool, finish)(inst); +} + + +/* AMCBufferFill -- refill an allocation buffer + * + * . 
+ */ +static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + Seg seg; + AMC amc = MustBeA(AMCZPool, pool); + Res res; + Addr base, limit; + Arena arena; + Size grainsSize; + amcGen gen; + PoolGen pgen; + amcBuf amcbuf = MustBeA(amcBuf, buffer); + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Buffer, buffer); + AVER(BufferIsReset(buffer)); + AVER(size > 0); + AVER(SizeIsAligned(size, PoolAlignment(pool))); + + arena = PoolArena(pool); + gen = amcBufGen(buffer); + AVERT(amcGen, gen); + pgen = &gen->pgen; + + /* Create and attach segment. The location of this segment is */ + /* expressed via the pool generation. We rely on the arena to */ + /* organize locations appropriately. */ + if (size < amc->extendBy) { + grainsSize = amc->extendBy; /* .extend-by.aligned */ + } else { + grainsSize = SizeArenaGrains(size, arena); + } + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD_FIELD(args, amcKeySegGen, p, gen); + res = PoolGenAlloc(&seg, pgen, CLASS(amcSeg), grainsSize, args); + } MPS_ARGS_END(args); + if(res != ResOK) + return res; + AVER(grainsSize == SegSize(seg)); + + /* */ + if(BufferRankSet(buffer) == RankSetEMPTY) + SegSetRankAndSummary(seg, BufferRankSet(buffer), RefSetEMPTY); + else + SegSetRankAndSummary(seg, BufferRankSet(buffer), RefSetUNIV); + + /* If ramping, or if the buffer is intended for allocating hash + * table arrays, defer the size accounting. */ + if ((amc->rampMode == RampRAMPING + && buffer == amc->rampGen->forward + && gen == amc->rampGen) + || amcbuf->forHashArrays) + { + MustBeA(amcSeg, seg)->deferred = TRUE; + } + + base = SegBase(seg); + if (size < amc->largeSize) { + /* Small or Medium segment: give the buffer the entire seg. */ + limit = AddrAdd(base, grainsSize); + AVER(limit == SegLimit(seg)); + } else { + /* Large segment: ONLY give the buffer the size requested, and */ + /* pad the remainder of the segment: see job001811. 
*/ + Size padSize; + + limit = AddrAdd(base, size); + AVER(limit <= SegLimit(seg)); + + padSize = grainsSize - size; + AVER(SizeIsAligned(padSize, PoolAlignment(pool))); + AVER(AddrAdd(limit, padSize) == SegLimit(seg)); + if(padSize > 0) { + ShieldExpose(arena, seg); + (*pool->format->pad)(limit, padSize); + ShieldCover(arena, seg); + } + } + + PoolGenAccountForFill(pgen, SegSize(seg)); + MustBeA(amcSeg, seg)->accountedAsBuffered = TRUE; + + *baseReturn = base; + *limitReturn = limit; + return ResOK; +} + + +/* amcSegBufferEmpty -- free from buffer to segment + * + * . + */ +static void amcSegBufferEmpty(Seg seg, Buffer buffer) +{ + amcSeg amcseg = MustBeA(amcSeg, seg); + Pool pool = SegPool(seg); + Arena arena = PoolArena(pool); + AMC amc = MustBeA(AMCZPool, pool); + Addr base, init, limit; + TraceId ti; + Trace trace; + + AVERT(Seg, seg); + AVERT(Buffer, buffer); + base = BufferBase(buffer); + init = BufferGetInit(buffer); + limit = BufferLimit(buffer); + AVER(SegBase(seg) <= base); + AVER(base <= init); + AVER(init <= limit); + if(SegSize(seg) < amc->largeSize) { + /* Small or Medium segment: buffer had the entire seg. */ + AVER(limit == SegLimit(seg)); + } else { + /* Large segment: buffer had only the size requested; job001811. */ + AVER(limit <= SegLimit(seg)); + } + + /* */ + if (init < limit) { + ShieldExpose(arena, seg); + (*pool->format->pad)(init, AddrOffset(init, limit)); + ShieldCover(arena, seg); + } + + /* Any allocation in the buffer (including the padding object just + * created) is white, so needs to be accounted as condemned for all + * traces for which this segment is white. */ + TRACE_SET_ITER(ti, trace, seg->white, arena) + GenDescCondemned(amcseg->gen->pgen.gen, trace, + AddrOffset(base, limit)); + TRACE_SET_ITER_END(ti, trace, seg->white, arena); + + if (amcseg->accountedAsBuffered) { + /* Account the entire buffer (including the padding object) as used. 
*/ + PoolGenAccountForEmpty(&amcseg->gen->pgen, SegSize(seg), 0, + amcseg->deferred); + amcseg->accountedAsBuffered = FALSE; + } +} + + +/* AMCRampBegin -- note an entry into a ramp pattern */ + +static void AMCRampBegin(Pool pool, Buffer buf, Bool collectAll) +{ + AMC amc = MustBeA(AMCZPool, pool); + + AVERT(Buffer, buf); + AVERT(Bool, collectAll); + UNUSED(collectAll); /* obsolete */ + + AVER(amc->rampCount < UINT_MAX); + ++amc->rampCount; + if(amc->rampCount == 1) { + if(amc->rampMode != RampFINISH) + amc->rampMode = RampBEGIN; + } +} + + +/* AMCRampEnd -- note an exit from a ramp pattern */ + +static void AMCRampEnd(Pool pool, Buffer buf) +{ + AMC amc = MustBeA(AMCZPool, pool); + + AVERT(Buffer, buf); + + AVER(amc->rampCount > 0); + --amc->rampCount; + if(amc->rampCount == 0) { + PoolGen pgen = &amc->rampGen->pgen; + Ring node, nextNode; + + switch(amc->rampMode) { + case RampRAMPING: + /* We were ramping, so clean up. */ + amc->rampMode = RampFINISH; + break; + case RampBEGIN: + /* short-circuit for short ramps */ + amc->rampMode = RampOUTSIDE; + break; + case RampCOLLECTING: + /* we have finished a circuit of the state machine */ + amc->rampMode = RampOUTSIDE; + break; + case RampFINISH: + /* stay in FINISH because we need to pass through COLLECTING */ + break; + default: + /* can't get here if already OUTSIDE */ + NOTREACHED; + } + + /* Now all the segments in the ramp generation contribute to the + * pool generation's sizes. */ + RING_FOR(node, PoolSegRing(pool), nextNode) { + Seg seg = SegOfPoolRing(node); + amcSeg amcseg = MustBeA(amcSeg, seg); + if(amcSegGen(seg) == amc->rampGen + && amcseg->deferred + && SegWhite(seg) == TraceSetEMPTY) + { + if (!amcseg->accountedAsBuffered) + PoolGenUndefer(pgen, + amcseg->old ? SegSize(seg) : 0, + amcseg->old ? 
0 : SegSize(seg)); + amcseg->deferred = FALSE; + } + } + } +} + + +/* amcSegPoolGen -- get pool generation for a segment */ + +static PoolGen amcSegPoolGen(Pool pool, Seg seg) +{ + amcSeg amcseg = MustBeA(amcSeg, seg); + AVERT(Pool, pool); + AVER(pool == SegPool(seg)); + return &amcseg->gen->pgen; +} + + +/* amcSegWhiten -- condemn the segment for the trace + * + * If the segment has a mutator buffer on it, we nail the buffer, + * because we can't scan or reclaim uncommitted buffers. + */ +static Res amcSegWhiten(Seg seg, Trace trace) +{ + Size condemned = 0; + amcGen gen; + Buffer buffer; + amcSeg amcseg = MustBeA(amcSeg, seg); + Pool pool = SegPool(seg); + AMC amc = MustBeA(AMCZPool, pool); + Res res; + + AVERT(Trace, trace); + + if (SegBuffer(&buffer, seg)) { + AVERT(Buffer, buffer); + + if(!BufferIsMutator(buffer)) { /* forwarding buffer */ + AVER(BufferIsReady(buffer)); + BufferDetach(buffer, pool); + } else { /* mutator buffer */ + if(BufferScanLimit(buffer) == SegBase(seg)) { + /* There's nothing but the buffer, don't condemn. */ + return ResOK; + } + /* [The following else-if section is just a comment added in */ + /* 1998-10-08. It has never worked. RHSK 2007-01-16] */ + /* else if (BufferScanLimit(buffer) == BufferLimit(buffer)) { */ + /* The buffer is full, so it won't be used by the mutator. */ + /* @@@@ We should detach it, but can't for technical */ + /* reasons. */ + /* BufferDetach(buffer, pool); */ + /* } */ + else { + Addr bufferScanLimit = BufferScanLimit(buffer); + /* There is an active buffer, make sure it's nailed. */ + if(!amcSegHasNailboard(seg)) { + if(SegNailed(seg) == TraceSetEMPTY) { + res = amcSegCreateNailboard(seg); + if(res != ResOK) { + /* Can't create nailboard, don't condemn. 
*/ + return ResOK; + } + if (bufferScanLimit != BufferLimit(buffer)) { + NailboardSetRange(amcSegNailboard(seg), + bufferScanLimit, + BufferLimit(buffer)); + } + STATISTIC(++trace->nailCount); + SegSetNailed(seg, TraceSetSingle(trace)); + } else { + /* Segment is nailed already, cannot create a nailboard */ + /* (see .nail.new), just give up condemning. */ + return ResOK; + } + } else { + /* We have a nailboard, the buffer must be nailed already. */ + AVER(bufferScanLimit == BufferLimit(buffer) + || NailboardIsSetRange(amcSegNailboard(seg), + bufferScanLimit, + BufferLimit(buffer))); + /* Nail it for this trace as well. */ + SegSetNailed(seg, TraceSetAdd(SegNailed(seg), trace)); + } + /* Move the buffer's base up to the scan limit, so that we can + * detect allocation that happens during the trace, and + * account for it correctly in amcSegBufferEmpty and + * amcSegReclaimNailed. */ + buffer->base = bufferScanLimit; + /* We didn't condemn the buffer, subtract it from the count. */ + /* Relies on unsigned arithmetic wrapping round */ + /* on under- and overflow (which it does). */ + condemned -= AddrOffset(BufferBase(buffer), BufferLimit(buffer)); + } + } + } + + gen = amcSegGen(seg); + AVERT(amcGen, gen); + if (!amcseg->old) { + amcseg->old = TRUE; + if (amcseg->accountedAsBuffered) { + /* Note that the segment remains buffered but the buffer contents + * are accounted as old. See .seg.accounted-as-buffered. */ + amcseg->accountedAsBuffered = FALSE; + PoolGenAccountForAge(&gen->pgen, SegSize(seg), 0, amcseg->deferred); + } else + PoolGenAccountForAge(&gen->pgen, 0, SegSize(seg), amcseg->deferred); + } + + amcseg->forwarded[trace->ti] = 0; + SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + GenDescCondemned(gen->pgen.gen, trace, condemned + SegSize(seg)); + + /* Ensure we are forwarding into the right generation. */ + + /* see */ + /* This switching needs to be more complex for multiple traces. 
*/ + AVER(TraceSetIsSingle(PoolArena(pool)->busyTraces)); + if(amc->rampMode == RampBEGIN && gen == amc->rampGen) { + BufferDetach(gen->forward, pool); + amcBufSetGen(gen->forward, gen); + amc->rampMode = RampRAMPING; + } else if(amc->rampMode == RampFINISH && gen == amc->rampGen) { + BufferDetach(gen->forward, pool); + amcBufSetGen(gen->forward, amc->afterRampGen); + amc->rampMode = RampCOLLECTING; + } + + return ResOK; +} + + +/* amcSegScanNailedRange -- make one scanning pass over a range of + * addresses in a nailed segment. + * + * *totalReturn is set to FALSE if not all the objects between base and + * limit have been scanned. It is not touched otherwise. + */ +static Res amcSegScanNailedRange(Bool *totalReturn, Bool *moreReturn, + ScanState ss, AMC amc, Nailboard board, + Addr base, Addr limit) +{ + Format format; + Size headerSize; + Addr p, clientLimit; + Pool pool = MustBeA(AbstractPool, amc); + format = pool->format; + headerSize = format->headerSize; + p = AddrAdd(base, headerSize); + clientLimit = AddrAdd(limit, headerSize); + while (p < clientLimit) { + Addr q; + q = (*format->skip)(p); + if ((*amc->pinned)(amc, board, p, q)) { + Res res = TraceScanFormat(ss, p, q); + if(res != ResOK) { + *totalReturn = FALSE; + *moreReturn = TRUE; + return res; + } + } else { + *totalReturn = FALSE; + } + AVER(p < q); + p = q; + } + AVER(p == clientLimit); + return ResOK; +} + + +/* amcSegScanNailedOnce -- make one scanning pass over a nailed segment + * + * *totalReturn is set to TRUE iff all objects in segment scanned. + * *moreReturn is set to FALSE only if there are no more objects + * on the segment that need scanning (which is normally the case). + * It is set to TRUE if scanning had to be abandoned early on, and + * also if during emergency fixing any new marks got added to the + * nailboard. 
+ */ +static Res amcSegScanNailedOnce(Bool *totalReturn, Bool *moreReturn, + ScanState ss, Seg seg, AMC amc) +{ + Addr p, limit; + Nailboard board; + Res res; + Buffer buffer; + + *totalReturn = TRUE; + board = amcSegNailboard(seg); + NailboardClearNewNails(board); + + p = SegBase(seg); + while (SegBuffer(&buffer, seg)) { + limit = BufferScanLimit(buffer); + if(p >= limit) { + AVER(p == limit); + goto returnGood; + } + res = amcSegScanNailedRange(totalReturn, moreReturn, + ss, amc, board, p, limit); + if (res != ResOK) + return res; + p = limit; + } + + limit = SegLimit(seg); + /* @@@@ Shouldn't p be set to BufferLimit here?! */ + res = amcSegScanNailedRange(totalReturn, moreReturn, + ss, amc, board, p, limit); + if (res != ResOK) + return res; + +returnGood: + *moreReturn = NailboardNewNails(board); + return ResOK; +} + + +/* amcSegScanNailed -- scan a nailed segment */ + +static Res amcSegScanNailed(Bool *totalReturn, ScanState ss, Pool pool, + Seg seg, AMC amc) +{ + Bool total, moreScanning; + size_t loops = 0; + + do { + Res res; + res = amcSegScanNailedOnce(&total, &moreScanning, ss, seg, amc); + if(res != ResOK) { + *totalReturn = FALSE; + return res; + } + loops += 1; + } while(moreScanning); + + if(loops > 1) { + RefSet refset; + + AVER(ArenaEmergency(PoolArena(pool))); + + /* Looped: fixed refs (from 1st pass) were seen by MPS_FIX1 + * (in later passes), so the "ss.unfixedSummary" is _not_ + * purely unfixed. In this one case, unfixedSummary is not + * accurate, and cannot be used to verify the SegSummary (see + * impl/trace/#verify.segsummary). Use ScanStateSetSummary to + * store ScanStateSummary in ss.fixedSummary and reset + * ss.unfixedSummary. See job001548. + */ + + refset = ScanStateSummary(ss); + + /* A rare event, which might prompt a rare defect to appear. 
*/ + EVENT6(AMCScanNailed, loops, SegSummary(seg), ScanStateWhite(ss), + ScanStateUnfixedSummary(ss), ss->fixedSummary, refset); + + ScanStateSetSummary(ss, refset); + } + + *totalReturn = total; + return ResOK; +} + + +/* amcSegScan -- scan a single seg, turning it black + * + * . + */ +static Res amcSegScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + Addr base, limit; + Format format; + Pool pool; + AMC amc; + Res res; + Buffer buffer; + + AVER(totalReturn != NULL); + AVERT(Seg, seg); + AVERT(ScanState, ss); + + pool = SegPool(seg); + amc = MustBeA(AMCZPool, pool); + format = pool->format; + + if(amcSegHasNailboard(seg)) { + return amcSegScanNailed(totalReturn, ss, pool, seg, amc); + } + + base = AddrAdd(SegBase(seg), format->headerSize); + /* */ + while (SegBuffer(&buffer, seg)) { + limit = AddrAdd(BufferScanLimit(buffer), + format->headerSize); + if(base >= limit) { + /* @@@@ Are we sure we don't need scan the rest of the */ + /* segment? */ + AVER(base == limit); + *totalReturn = TRUE; + return ResOK; + } + res = TraceScanFormat(ss, base, limit); + if(res != ResOK) { + *totalReturn = FALSE; + return res; + } + base = limit; + } + + /* @@@@ base? */ + limit = AddrAdd(SegLimit(seg), format->headerSize); + AVER(SegBase(seg) <= base); + AVER(base <= AddrAdd(SegLimit(seg), format->headerSize)); + if(base < limit) { + res = TraceScanFormat(ss, base, limit); + if(res != ResOK) { + *totalReturn = FALSE; + return res; + } + } + + *totalReturn = TRUE; + return ResOK; +} + + +/* amcSegFixInPlace -- fix a reference without moving the object + * + * Usually this function is used for ambiguous references, but during + * emergency tracing may be used for references of any rank. + * + * If the segment has a nailboard then we use that to record the fix. + * Otherwise we simply grey and nail the entire segment. + */ +static void amcSegFixInPlace(Seg seg, ScanState ss, Ref *refIO) +{ + Addr ref; + + ref = (Addr)*refIO; + /* An ambiguous reference can point before the header. 
*/ + AVER(SegBase(seg) <= ref); + /* .ref-limit: A reference passed to Fix can't be beyond the */ + /* segment, because then TraceFix would not have picked this */ + /* segment. */ + AVER(ref < SegLimit(seg)); + + if(amcSegHasNailboard(seg)) { + Bool wasMarked = NailboardSet(amcSegNailboard(seg), ref); + /* If there are no new marks (i.e., no new traces for which we */ + /* are marking, and no new mark bits set) then we can return */ + /* immediately, without changing colour. */ + if(TraceSetSub(ss->traces, SegNailed(seg)) && wasMarked) + return; + } else if(TraceSetSub(ss->traces, SegNailed(seg))) { + return; + } + SegSetNailed(seg, TraceSetUnion(SegNailed(seg), ss->traces)); + /* AMCZ segments don't contain references and so don't need to */ + /* become grey */ + if(SegRankSet(seg) != RankSetEMPTY) + SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces)); +} + + +/* amcSegFixEmergency -- fix a reference, without allocating + * + * . + */ +static Res amcSegFixEmergency(Seg seg, ScanState ss, Ref *refIO) +{ + Arena arena; + Addr newRef; + Pool pool; + + AVERT(Seg, seg); + AVERT(ScanState, ss); + AVER(refIO != NULL); + + pool = SegPool(seg); + arena = PoolArena(pool); + + if(ss->rank == RankAMBIG) + goto fixInPlace; + + ShieldExpose(arena, seg); + newRef = (*pool->format->isMoved)(*refIO); + ShieldCover(arena, seg); + if(newRef != (Addr)0) { + /* Object has been forwarded already, so snap-out pointer. */ + /* TODO: Implement weak pointer semantics in emergency fixing. This + would be a good idea since we really want to reclaim as much as + possible in an emergency. */ + *refIO = newRef; + return ResOK; + } + +fixInPlace: /* see .Nailboard.emergency */ + amcSegFixInPlace(seg, ss, refIO); + return ResOK; +} + + +/* amcSegFix -- fix a reference to the segment + * + * . 
+ */ +static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO) +{ + Arena arena; + Pool pool; + AMC amc; + Res res; + Format format; /* cache of pool->format */ + Size headerSize; /* cache of pool->format->headerSize */ + Ref ref; /* reference to be fixed */ + Addr base; /* base address of reference */ + Ref newRef; /* new location, if moved */ + Addr newBase; /* base address of new copy */ + Size length; /* length of object to be relocated */ + Buffer buffer; /* buffer to allocate new copy into */ + amcGen gen; /* generation of old copy of object */ + TraceSet grey; /* greyness of object being relocated */ + Seg toSeg; /* segment to which object is being relocated */ + TraceId ti; + Trace trace; + + /* */ + AVERT_CRITICAL(ScanState, ss); + AVERT_CRITICAL(Seg, seg); + AVER_CRITICAL(refIO != NULL); + + /* If the reference is ambiguous, set up the datastructures for */ + /* managing a nailed segment. This involves marking the segment */ + /* as nailed, and setting up a per-word mark table */ + if(ss->rank == RankAMBIG) { + /* .nail.new: Check to see whether we need a Nailboard for */ + /* this seg. We use "SegNailed(seg) == TraceSetEMPTY" */ + /* rather than "!amcSegHasNailboard(seg)" because this avoids */ + /* setting up a new nailboard when the segment was nailed, but */ + /* had no nailboard. This must be avoided because otherwise */ + /* assumptions in amcSegFixEmergency will be wrong (essentially */ + /* we will lose some pointer fixes because we introduced a */ + /* nailboard). 
*/ + if(SegNailed(seg) == TraceSetEMPTY) { + res = amcSegCreateNailboard(seg); + if(res != ResOK) + return res; + STATISTIC(++ss->nailCount); + SegSetNailed(seg, TraceSetUnion(SegNailed(seg), ss->traces)); + } + amcSegFixInPlace(seg, ss, refIO); + return ResOK; + } + + pool = SegPool(seg); + amc = MustBeA_CRITICAL(AMCZPool, pool); + AVERT_CRITICAL(AMC, amc); + format = pool->format; + headerSize = format->headerSize; + ref = *refIO; + AVER_CRITICAL(AddrAdd(SegBase(seg), headerSize) <= ref); + base = AddrSub(ref, headerSize); + AVER_CRITICAL(AddrIsAligned(base, PoolAlignment(pool))); + AVER_CRITICAL(ref < SegLimit(seg)); /* see .ref-limit */ + arena = pool->arena; + + /* .exposed.seg: Statements tagged ".exposed.seg" below require */ + /* that "seg" (that is: the 'from' seg) has been ShieldExposed. */ + ShieldExpose(arena, seg); + newRef = (*format->isMoved)(ref); /* .exposed.seg */ + + if(newRef == (Addr)0) { + Addr clientQ; + clientQ = (*format->skip)(ref); + + /* If object is nailed already then we mustn't copy it: */ + if (SegNailed(seg) != TraceSetEMPTY + && !(amcSegHasNailboard(seg) + && !(*amc->pinned)(amc, amcSegNailboard(seg), ref, clientQ))) + { + /* Segment only needs greying if there are new traces for */ + /* which we are nailing. */ + if(!TraceSetSub(ss->traces, SegNailed(seg))) { + if(SegRankSet(seg) != RankSetEMPTY) /* not for AMCZ */ + SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces)); + SegSetNailed(seg, TraceSetUnion(SegNailed(seg), ss->traces)); + } + res = ResOK; + goto returnRes; + } else if(ss->rank == RankWEAK) { + /* Object is not preserved (neither moved, nor nailed) */ + /* hence, reference should be splatted. */ + goto updateReference; + } + /* Object is not preserved yet (neither moved, nor nailed) */ + /* so should be preserved by forwarding. */ + + ss->wasMarked = FALSE; /* */ + + /* Get the forwarding buffer from the object's generation. 
*/ + gen = amcSegGen(seg); + buffer = gen->forward; + AVER_CRITICAL(buffer != NULL); + + length = AddrOffset(ref, clientQ); /* .exposed.seg */ + STATISTIC(++ss->forwardedCount); + do { + res = BUFFER_RESERVE(&newBase, buffer, length); + if (res != ResOK) + goto returnRes; + newRef = AddrAdd(newBase, headerSize); + + toSeg = BufferSeg(buffer); + ShieldExpose(arena, toSeg); + + /* Since we're moving an object from one segment to another, */ + /* union the greyness and the summaries together. */ + grey = SegGrey(seg); + if(SegRankSet(seg) != RankSetEMPTY) { /* not for AMCZ */ + grey = TraceSetUnion(grey, ss->traces); + SegSetSummary(toSeg, RefSetUnion(SegSummary(toSeg), SegSummary(seg))); + } else { + AVER_CRITICAL(SegRankSet(toSeg) == RankSetEMPTY); + } + SegSetGrey(toSeg, TraceSetUnion(SegGrey(toSeg), grey)); + + /* */ + (void)AddrCopy(newBase, base, length); /* .exposed.seg */ + + ShieldCover(arena, toSeg); + } while (!BUFFER_COMMIT(buffer, newBase, length)); + + STATISTIC(ss->copiedSize += length); + TRACE_SET_ITER(ti, trace, ss->traces, ss->arena) + MustBeA(amcSeg, seg)->forwarded[ti] += length; + TRACE_SET_ITER_END(ti, trace, ss->traces, ss->arena); + + (*format->move)(ref, newRef); /* .exposed.seg */ + } else { + /* reference to broken heart (which should be snapped out -- */ + /* consider adding to (non-existent) snap-out cache here) */ + STATISTIC(++ss->snapCount); + } + + /* .fix.update: update the reference to whatever the above code */ + /* decided it should be */ +updateReference: + *refIO = newRef; + res = ResOK; + +returnRes: + ShieldCover(arena, seg); /* .exposed.seg */ + return res; +} + + +/* amcSegReclaimNailed -- reclaim what you can from a nailed segment */ + +static void amcSegReclaimNailed(Pool pool, Trace trace, Seg seg) +{ + Addr p, limit; + Arena arena; + Format format; + STATISTIC_DECL(Size bytesReclaimed = (Size)0) + Count preservedInPlaceCount = (Count)0; + Size preservedInPlaceSize = (Size)0; + AMC amc = MustBeA(AMCZPool, pool); + PoolGen 
pgen; + Size headerSize; + Addr padBase; /* base of next padding object */ + Size padLength; /* length of next padding object */ + Buffer buffer; + + /* All arguments AVERed by AMCReclaim */ + + format = pool->format; + + arena = PoolArena(pool); + AVERT(Arena, arena); + + /* see for improvements */ + headerSize = format->headerSize; + ShieldExpose(arena, seg); + p = SegBase(seg); + limit = SegBufferScanLimit(seg); + padBase = p; + padLength = 0; + while(p < limit) { + Addr clientP, q, clientQ; + Size length; + Bool preserve; + clientP = AddrAdd(p, headerSize); + clientQ = (*format->skip)(clientP); + q = AddrSub(clientQ, headerSize); + length = AddrOffset(p, q); + if(amcSegHasNailboard(seg)) { + preserve = (*amc->pinned)(amc, amcSegNailboard(seg), clientP, clientQ); + } else { + /* There's no nailboard, so preserve everything that hasn't been + * forwarded. In this case, preservedInPlace* become somewhat + * overstated. */ + preserve = !(*format->isMoved)(clientP); + } + if(preserve) { + ++preservedInPlaceCount; + preservedInPlaceSize += length; + if (padLength > 0) { + /* Replace run of forwarding pointers and unreachable objects + * with a padding object. */ + (*format->pad)(padBase, padLength); + STATISTIC(bytesReclaimed += padLength); + padLength = 0; + } + padBase = q; + } else { + padLength += length; + } + + AVER(p < q); + p = q; + } + AVER(p == limit); + AVER(AddrAdd(padBase, padLength) == limit); + if (padLength > 0) { + /* Replace final run of forwarding pointers and unreachable + * objects with a padding object. 
*/ + (*format->pad)(padBase, padLength); + STATISTIC(bytesReclaimed += padLength); + } + ShieldCover(arena, seg); + + SegSetNailed(seg, TraceSetDel(SegNailed(seg), trace)); + SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); + if(SegNailed(seg) == TraceSetEMPTY && amcSegHasNailboard(seg)) { + NailboardDestroy(amcSegNailboard(seg), arena); + MustBeA(amcSeg, seg)->board = NULL; + } + + STATISTIC(AVER(bytesReclaimed <= SegSize(seg))); + STATISTIC(trace->reclaimSize += bytesReclaimed); + STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount); + pgen = &amcSegGen(seg)->pgen; + if (SegBuffer(&buffer, seg)) { + /* Any allocation in the buffer was white, so needs to be + * accounted as condemned now. */ + GenDescCondemned(pgen->gen, trace, + AddrOffset(BufferBase(buffer), BufferLimit(buffer))); + } + GenDescSurvived(pgen->gen, trace, MustBeA(amcSeg, seg)->forwarded[trace->ti], + preservedInPlaceSize); + + /* Free the seg if we can; fixes .nailboard.limitations.middle. */ + if(preservedInPlaceCount == 0 + && (!SegHasBuffer(seg)) + && (SegNailed(seg) == TraceSetEMPTY)) { + + /* We may not free a buffered seg. */ + AVER(!SegHasBuffer(seg)); + + PoolGenFree(pgen, seg, 0, SegSize(seg), 0, MustBeA(amcSeg, seg)->deferred); + } +} + + +/* amcSegReclaim -- recycle a segment if it is still white + * + * . + */ +static void amcSegReclaim(Seg seg, Trace trace) +{ + amcSeg amcseg = MustBeA_CRITICAL(amcSeg, seg); + Pool pool = SegPool(seg); + AMC amc = MustBeA_CRITICAL(AMCZPool, pool); + amcGen gen; + + AVERT_CRITICAL(Trace, trace); + gen = amcSegGen(seg); + AVERT_CRITICAL(amcGen, gen); + + /* This switching needs to be more complex for multiple traces. 
*/ + AVER_CRITICAL(TraceSetIsSingle(PoolArena(pool)->busyTraces)); + if(amc->rampMode == RampCOLLECTING) { + if(amc->rampCount > 0) { + /* Entered ramp mode before previous one was cleaned up */ + amc->rampMode = RampBEGIN; + } else { + amc->rampMode = RampOUTSIDE; + } + } + + if(SegNailed(seg) != TraceSetEMPTY) { + amcSegReclaimNailed(pool, trace, seg); + return; + } + + /* We may not free a buffered seg. (But all buffered + condemned */ + /* segs should have been nailed anyway). */ + AVER(!SegHasBuffer(seg)); + + STATISTIC(trace->reclaimSize += SegSize(seg)); + + GenDescSurvived(gen->pgen.gen, trace, amcseg->forwarded[trace->ti], 0); + PoolGenFree(&gen->pgen, seg, 0, SegSize(seg), 0, amcseg->deferred); +} + + +/* amcSegWalk -- Apply function to (black) objects in segment */ + +static void amcSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s) +{ + AVERT(Seg, seg); + AVERT(Format, format); + AVER(FUNCHECK(f)); + /* p and s are arbitrary closures so can't be checked */ + + /* Avoid applying the function to grey or white objects. */ + /* White objects might not be alive, and grey objects */ + /* may have pointers to old-space. */ + + /* NB, segments containing a mix of colours (i.e., nailed segs) */ + /* are not handled properly: No objects are walked. See */ + /* job001682. */ + if(SegWhite(seg) == TraceSetEMPTY && SegGrey(seg) == TraceSetEMPTY + && SegNailed(seg) == TraceSetEMPTY) + { + Addr object, nextObject, limit; + Pool pool = SegPool(seg); + + limit = AddrAdd(SegBufferScanLimit(seg), format->headerSize); + object = AddrAdd(SegBase(seg), format->headerSize); + while(object < limit) { + /* Check not a broken heart. 
*/ + AVER((*format->isMoved)(object) == NULL); + (*f)(object, format, pool, p, s); + nextObject = (*format->skip)(object); + AVER(nextObject > object); + object = nextObject; + } + AVER(object == limit); + } +} + + +/* amcWalkAll -- Apply a function to all (black) objects in a pool */ + +static void amcWalkAll(Pool pool, FormattedObjectsVisitor f, void *p, size_t s) +{ + Arena arena; + Ring ring, next, node; + Format format = NULL; + Bool b; + + AVER(IsA(AMCZPool, pool)); + b = PoolFormat(&format, pool); + AVER(b); + + arena = PoolArena(pool); + ring = PoolSegRing(pool); + node = RingNext(ring); + RING_FOR(node, ring, next) { + Seg seg = SegOfPoolRing(node); + + ShieldExpose(arena, seg); + amcSegWalk(seg, format, f, p, s); + ShieldCover(arena, seg); + } +} + +/* AMCAddrObject -- return base pointer from interior pointer + * + * amcAddrObjectSearch implements the scan for an object containing + * the interior pointer by skipping using format methods. + * + * AMCAddrObject locates the segment containing the interior pointer + * and wraps amcAddrObjectSearch in the necessary shield operations to + * give it access. 
+ */ + +static Res amcAddrObjectSearch(Addr *pReturn, + Pool pool, + Addr objBase, + Addr searchLimit, + Addr addr) +{ + Format format; + Size hdrSize; + + AVER(pReturn != NULL); + AVERT(Pool, pool); + AVER(objBase <= searchLimit); + + format = pool->format; + hdrSize = format->headerSize; + while (objBase < searchLimit) { + Addr objRef = AddrAdd(objBase, hdrSize); + Addr objLimit = AddrSub((*format->skip)(objRef), hdrSize); + AVER(objBase < objLimit); + + if (addr < objLimit) { + AVER(objBase <= addr); + AVER(addr < objLimit); + + /* Don't return base pointer if object is moved */ + if (NULL == (*format->isMoved)(objRef)) { + *pReturn = objRef; + return ResOK; + } + break; + } + objBase = objLimit; + } + return ResFAIL; +} + +static Res AMCAddrObject(Addr *pReturn, Pool pool, Addr addr) +{ + Res res; + Arena arena; + Addr base, limit; + Buffer buffer; + Seg seg; + + AVER(pReturn != NULL); + AVERT(Pool, pool); + + arena = PoolArena(pool); + if (!SegOfAddr(&seg, arena, addr) || SegPool(seg) != pool) + return ResFAIL; + + base = SegBase(seg); + if (SegBuffer(&buffer, seg)) + /* We use BufferGetInit here (and not BufferScanLimit) because we + * want to be able to find objects that have been allocated and + * committed since the last flip. These objects lie between the + * addresses returned by BufferScanLimit (which returns the value + * of init at the last flip) and BufferGetInit. + * + * Strictly speaking we only need a limit that is at least the + * maximum of the objects on the segments. This is because addr + * *must* point inside a live object and we stop skipping once we + * have found it. The init pointer serves this purpose. 
+ */ + limit = BufferGetInit(buffer); + else + limit = SegLimit(seg); + + ShieldExpose(arena, seg); + res = amcAddrObjectSearch(pReturn, pool, base, limit, addr); + ShieldCover(arena, seg); + return res; +} + + +/* AMCTotalSize -- total memory allocated from the arena */ + +static Size AMCTotalSize(Pool pool) +{ + AMC amc = MustBeA(AMCZPool, pool); + Size size = 0; + Ring node, nextNode; + + RING_FOR(node, &amc->genRing, nextNode) { + amcGen gen = RING_ELT(amcGen, amcRing, node); + AVERT(amcGen, gen); + size += gen->pgen.totalSize; + } + + return size; +} + + +/* AMCFreeSize -- free memory (unused by client program) */ + +static Size AMCFreeSize(Pool pool) +{ + AMC amc = MustBeA(AMCZPool, pool); + Size size = 0; + Ring node, nextNode; + + RING_FOR(node, &amc->genRing, nextNode) { + amcGen gen = RING_ELT(amcGen, amcRing, node); + AVERT(amcGen, gen); + size += gen->pgen.freeSize; + } + + return size; +} + + +/* AMCDescribe -- describe the contents of the AMC pool + * + * . + */ + +static Res AMCDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + AMC amc = CouldBeA(AMCZPool, pool); + Res res; + Ring node, nextNode; + const char *rampmode; + + if (!TESTC(AMCZPool, amc)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, AMCZPool, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + switch(amc->rampMode) { +#define RAMP_DESCRIBE(e, s) \ + case e: \ + rampmode = s; \ + break; + RAMP_RELATION(RAMP_DESCRIBE) +#undef RAMP_DESCRIBE + default: + rampmode = "unknown ramp mode"; + break; + } + res = WriteF(stream, depth + 2, + rampmode, " ($U)\n", (WriteFU)amc->rampCount, + NULL); + if(res != ResOK) + return res; + + RING_FOR(node, &amc->genRing, nextNode) { + amcGen gen = RING_ELT(amcGen, amcRing, node); + res = amcGenDescribe(gen, stream, depth + 2); + if(res != ResOK) + return res; + } + + if (0) { + /* SegDescribes */ + RING_FOR(node, &pool->segRing, nextNode) { + Seg 
seg = RING_ELT(Seg, poolRing, node); + res = SegDescribe(seg, stream, depth + 2); + if(res != ResOK) + return res; + } + } + + return ResOK; +} + + +/* AMCZPoolClass -- the class definition */ + +DEFINE_CLASS(Pool, AMCZPool, klass) +{ + INHERIT_CLASS(klass, AMCZPool, AbstractCollectPool); + klass->instClassStruct.describe = AMCDescribe; + klass->instClassStruct.finish = AMCFinish; + klass->size = sizeof(AMCStruct); + klass->attr |= AttrMOVINGGC; + klass->varargs = AMCVarargs; + klass->init = AMCZInit; + klass->bufferFill = AMCBufferFill; + klass->rampBegin = AMCRampBegin; + klass->rampEnd = AMCRampEnd; + klass->segPoolGen = amcSegPoolGen; + klass->bufferClass = amcBufClassGet; + klass->totalSize = AMCTotalSize; + klass->freeSize = AMCFreeSize; + klass->addrObject = AMCAddrObject; + AVERT(PoolClass, klass); +} + + +/* AMCPoolClass -- the class definition */ + +DEFINE_CLASS(Pool, AMCPool, klass) +{ + INHERIT_CLASS(klass, AMCPool, AMCZPool); + klass->init = AMCInit; + AVERT(PoolClass, klass); +} + + +/* mps_class_amc -- return the pool class descriptor to the client */ + +mps_pool_class_t mps_class_amc(void) +{ + return (mps_pool_class_t)CLASS(AMCPool); +} + +/* mps_class_amcz -- return the pool class descriptor to the client */ + +mps_pool_class_t mps_class_amcz(void) +{ + return (mps_pool_class_t)CLASS(AMCZPool); +} + + +/* mps_amc_apply -- apply function to all objects in pool + * + * The iterator that is passed by the client is stored in a closure + * structure which is passed to a local iterator in order to ensure + * that any type conversion necessary between Addr and mps_addr_t + * happen. They are almost certainly the same on all platforms, but + * this is the correct way to do it. 
+*/ + +typedef struct mps_amc_apply_closure_s { + mps_amc_apply_stepper_t f; + void *p; + size_t s; +} mps_amc_apply_closure_s; + +static void mps_amc_apply_iter(Addr addr, Format format, Pool pool, + void *p, size_t s) +{ + mps_amc_apply_closure_s *closure = p; + /* Can't check addr */ + AVERT(Format, format); + AVERT(Pool, pool); + /* We could check that s is the sizeof *p, but it would be slow */ + UNUSED(format); + UNUSED(pool); + UNUSED(s); + (*closure->f)(addr, closure->p, closure->s); +} + +void mps_amc_apply(mps_pool_t mps_pool, + mps_amc_apply_stepper_t f, + void *p, size_t s) +{ + Pool pool = (Pool)mps_pool; + mps_amc_apply_closure_s closure_s; + Arena arena; + + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + ArenaEnter(arena); + AVERT(Pool, pool); + + closure_s.f = f; + closure_s.p = p; + closure_s.s = s; + amcWalkAll(pool, mps_amc_apply_iter, &closure_s, sizeof(closure_s)); + + ArenaLeave(arena); +} + + +/* AMCCheck -- check consistency of the AMC pool + * + * . + */ + +ATTRIBUTE_UNUSED +static Bool AMCCheck(AMC amc) +{ + CHECKS(AMC, amc); + CHECKC(AMCZPool, amc); + CHECKD(Pool, MustBeA(AbstractPool, amc)); + CHECKL(RankSetCheck(amc->rankSet)); + CHECKD_NOSIG(Ring, &amc->genRing); + CHECKL(BoolCheck(amc->gensBooted)); + if(amc->gensBooted) { + CHECKD(amcGen, amc->nursery); + CHECKL(amc->gen != NULL); + CHECKD(amcGen, amc->rampGen); + CHECKD(amcGen, amc->afterRampGen); + } + + CHECKL(amc->rampMode >= RampOUTSIDE); + CHECKL(amc->rampMode <= RampCOLLECTING); + + /* if OUTSIDE, count must be zero. */ + CHECKL((amc->rampCount == 0) || (amc->rampMode != RampOUTSIDE)); + /* if BEGIN or RAMPING, count must not be zero. */ + CHECKL((amc->rampCount != 0) || ((amc->rampMode != RampBEGIN) && + (amc->rampMode != RampRAMPING))); + + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolams.c b/mps/code/poolams.c new file mode 100644 index 00000000000..a76c5a86d8a --- /dev/null +++ b/mps/code/poolams.c @@ -0,0 +1,1882 @@ +/* poolams.c: AUTOMATIC MARK & SWEEP POOL CLASS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * + * .design: . + * + * + * TRANSGRESSSIONS + * + * .no-check.local: We have decided to omit checks in local functions of + * structure arguments that are simply passed down through the caller + * (as opposed to being constructed by the caller). 
+ */ + +#include "poolams.h" +#include "dbgpool.h" +#include "mpm.h" +#include + +SRCID(poolams, "$Id$"); + + +#define AMSSig ((Sig)0x519A3599) /* SIGnature AMS */ +#define AMSSegSig ((Sig)0x519A3559) /* SIGnature AMS SeG */ + +static Bool amsSegBufferFill(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet); +static void amsSegBufferEmpty(Seg seg, Buffer buffer); +static void amsSegBlacken(Seg seg, TraceSet traceSet); +static Res amsSegWhiten(Seg seg, Trace trace); +static Res amsSegScan(Bool *totalReturn, Seg seg, ScanState ss); +static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO); +static void amsSegReclaim(Seg seg, Trace trace); +static void amsSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s); + + +/* AMSDebugStruct -- structure for a debug subclass */ + +typedef struct AMSDebugStruct { + AMSStruct amsStruct; /* AMS structure */ + PoolDebugMixinStruct debug; /* debug mixin */ +} AMSDebugStruct; + +typedef struct AMSDebugStruct *AMSDebug; + + +#define AMS2AMSDebug(ams) PARENT(AMSDebugStruct, amsStruct, ams) +#define AMSDebug2AMS(amsd) (&((amsd)->amsStruct)) + + + +/* AMSSegCheck -- check an AMS segment */ + +Bool AMSSegCheck(AMSSeg amsseg) +{ + Seg seg = MustBeA(Seg, amsseg); + Pool pool = SegPool(seg); + CHECKS(AMSSeg, amsseg); + CHECKD(GCSeg, &amsseg->gcSegStruct); + CHECKU(AMS, amsseg->ams); + CHECKL(AMSPool(amsseg->ams) == SegPool(seg)); + + CHECKL(amsseg->grains == PoolSizeGrains(pool, SegSize(seg))); + CHECKL(amsseg->grains > 0); + CHECKL(amsseg->grains == amsseg->freeGrains + amsseg->bufferedGrains + + amsseg->oldGrains + amsseg->newGrains); + + CHECKL(BoolCheck(amsseg->allocTableInUse)); + if (!amsseg->allocTableInUse) + CHECKL(amsseg->firstFree <= amsseg->grains); + CHECKD_NOSIG(BT, amsseg->allocTable); + + if (SegWhite(seg) != TraceSetEMPTY) { + /* */ + CHECKL(TraceSetIsSingle(SegWhite(seg))); + CHECKL(amsseg->colourTablesInUse); + } + + CHECKL(BoolCheck(amsseg->marksChanged)); + 
CHECKL(BoolCheck(amsseg->ambiguousFixes)); + CHECKL(BoolCheck(amsseg->colourTablesInUse)); + CHECKD_NOSIG(BT, amsseg->nongreyTable); + CHECKD_NOSIG(BT, amsseg->nonwhiteTable); + + /* If tables are shared, they mustn't both be in use. */ + CHECKL(!(amsseg->ams->shareAllocTable + && amsseg->allocTableInUse + && amsseg->colourTablesInUse)); + + return TRUE; +} + + +/* AMSSegFreeWalk -- walk the free space in a segment */ + +void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockVisitor f, void *p) +{ + Pool pool; + Seg seg; + + AVERT(AMSSeg, amsseg); + pool = SegPool(AMSSeg2Seg(amsseg)); + seg = AMSSeg2Seg(amsseg); + + if (amsseg->freeGrains == 0) + return; + if (amsseg->allocTableInUse) { + Index base, limit, next; + + next = 0; + while (next < amsseg->grains) { + Bool found = BTFindLongResRange(&base, &limit, amsseg->allocTable, + next, amsseg->grains, 1); + if (!found) + break; + (*f)(PoolAddrOfIndex(SegBase(seg), pool, base), + PoolAddrOfIndex(SegBase(seg), pool, limit), pool, p); + next = limit + 1; + } + } else if (amsseg->firstFree < amsseg->grains) + (*f)(PoolAddrOfIndex(SegBase(seg), pool, amsseg->firstFree), + SegLimit(seg), pool, p); +} + + +/* AMSSegFreeCheck -- check the free space in a segment */ + +static void amsFreeBlockCheckStep(Addr base, Addr limit, Pool pool, void *p) +{ + UNUSED(p); + DebugPoolFreeCheck(pool, base, limit); +} + +void AMSSegFreeCheck(AMSSeg amsseg) +{ + Pool pool; + PoolDebugMixin debug; + + AVERT(AMSSeg, amsseg); + + if (amsseg->freeGrains == 0) + return; + + /* If it's not a debug class, don't bother walking. 
 */
  pool = SegPool(AMSSeg2Seg(amsseg));
  AVERT(Pool, pool);
  debug = Method(Pool, pool, debugMixin)(pool);
  if (debug == NULL)
    return;

  AMSSegFreeWalk(amsseg, amsFreeBlockCheckStep, NULL);
}


/* amsCreateTables -- create the tables for an AMS seg
 *
 * Allocates the allocation, non-grey and non-white bit tables.  When
 * the pool is configured to share, the non-white table aliases the
 * allocation table instead of being allocated separately.
 */

static Res amsCreateTables(AMS ams, BT *allocReturn,
                           BT *nongreyReturn, BT *nonwhiteReturn,
                           Arena arena, Count length)
{
  Res res;
  BT allocTable, nongreyTable, nonwhiteTable;

  AVER(allocReturn != NULL);
  AVER(nongreyReturn != NULL);
  AVER(nonwhiteReturn != NULL);
  AVERT(Arena, arena);
  AVER(length > 0);

  res = BTCreate(&allocTable, arena, length);
  if (res != ResOK)
    goto failAlloc;
  res = BTCreate(&nongreyTable, arena, length);
  if (res != ResOK)
    goto failGrey;
  if (ams->shareAllocTable)
    /* Shared configuration: non-white table is the alloc table. */
    nonwhiteTable = allocTable;
  else {
    res = BTCreate(&nonwhiteTable, arena, length);
    if (res != ResOK)
      goto failWhite;
  }

#if defined(AVER_AND_CHECK_ALL)
  /* Invalidate the colour tables in checking varieties. The algorithm
   * is designed not to depend on the initial values of these tables,
   * so by invalidating them we get some checking of this.
   */
  BTResRange(nongreyTable, 0, length);
  BTSetRange(nonwhiteTable, 0, length);
#endif

  *allocReturn = allocTable;
  *nongreyReturn = nongreyTable;
  *nonwhiteReturn = nonwhiteTable;
  return ResOK;

failWhite:
  BTDestroy(nongreyTable, arena, length);
failGrey:
  BTDestroy(allocTable, arena, length);
failAlloc:
  return res;
}


/* amsDestroyTables -- destroy the tables for an AMS seg
 *
 * Inverse of amsCreateTables; skips the non-white table when it is
 * shared (it aliases the alloc table, which is destroyed last).
 */

static void amsDestroyTables(AMS ams, BT allocTable,
                             BT nongreyTable, BT nonwhiteTable,
                             Arena arena, Count length)
{
  AVER(allocTable != NULL);
  AVER(nongreyTable != NULL);
  AVER(nonwhiteTable != NULL);
  AVERT(Arena, arena);
  AVER(length > 0);

  if (!ams->shareAllocTable)
    BTDestroy(nonwhiteTable, arena, length);
  BTDestroy(nongreyTable, arena, length);
  BTDestroy(allocTable, arena, length);
}


/* AMSSegInit -- Init method for AMS segments */

static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
{
  AMSSeg amsseg;
  Res res;
  Arena arena;
  AMS ams;

  /* Initialize the superclass fields first via next-method call */
  res = NextMethod(Seg, AMSSeg, init)(seg, pool, base, size, args);
  if (res != ResOK)
    goto failNextMethod;
  amsseg = CouldBeA(AMSSeg, seg);

  AVERT(Pool, pool);
  ams = PoolAMS(pool);
  AVERT(AMS, ams);
  arena = PoolArena(pool);
  /* no useful checks for base and size */

  /* A fresh segment is entirely free. */
  amsseg->grains = PoolSizeGrains(pool, size);
  amsseg->freeGrains = amsseg->grains;
  amsseg->bufferedGrains = (Count)0;
  amsseg->newGrains = (Count)0;
  amsseg->oldGrains = (Count)0;
  amsseg->marksChanged = FALSE; /* (design reference lost in extraction) */
  amsseg->ambiguousFixes = FALSE;

  res = amsCreateTables(ams, &amsseg->allocTable,
                        &amsseg->nongreyTable, &amsseg->nonwhiteTable,
                        arena, amsseg->grains);
  if (res != ResOK)
    goto failCreateTables;

  /* start off using firstFree, see (design reference lost) */
  amsseg->allocTableInUse = FALSE;
  amsseg->firstFree = 0;
  amsseg->colourTablesInUse = FALSE;
  amsseg->ams = ams;
  SetClassOfPoly(seg, CLASS(AMSSeg));
  amsseg->sig = AMSSegSig;
  AVERC(AMSSeg, amsseg);

  return ResOK;

failCreateTables:
  NextMethod(Inst, AMSSeg, finish)(MustBeA(Inst, seg));
failNextMethod:
  AVER(res != ResOK);
  return res;
}


/* AMSSegFinish -- Finish method for AMS segments */

static void AMSSegFinish(Inst inst)
{
  Seg seg = MustBeA(Seg, inst);
  AMSSeg amsseg = MustBeA(AMSSeg, seg);
  AMS ams = amsseg->ams;
  Arena arena = PoolArena(AMSPool(ams));

  AVERT(AMSSeg, amsseg);
  AVER(!SegHasBuffer(seg));

  /* keep the destructions in step with AMSSegInit failure cases */
  amsDestroyTables(ams, amsseg->allocTable, amsseg->nongreyTable,
                   amsseg->nonwhiteTable, arena, amsseg->grains);

  amsseg->sig = SigInvalid;

  /* finish the superclass fields last */
  NextMethod(Inst, AMSSeg, finish)(inst);
}


/* AMSSegMerge & AMSSegSplit -- AMSSeg split & merge methods
 *
 * .empty: segment merging and splitting is limited to simple cases
 * where the high segment is empty.
 *
 * .grain-align: segment merging and splitting is limited to cases
 * where the join is aligned with the grain alignment
 *
 * .alloc-early: Allocations are performed before calling the
 * next method to simplify the fail cases.
 *
 * .table-names: The names of local variables holding the new
 * allocation and colour tables are chosen to have names which
 * are derivable from the field names for tables in AMSSegStruct.
 * (I.e. allocTable, nongreyTable, nonwhiteTable). This simplifies
 * processing of all such tables by a macro.
 */

static Res AMSSegMerge(Seg seg, Seg segHi,
                       Addr base, Addr mid, Addr limit)
{
  Count loGrains, hiGrains, allGrains;
  AMSSeg amsseg, amssegHi;
  Pool pool;
  Arena arena;
  AMS ams;
  BT allocTable, nongreyTable, nonwhiteTable; /* .table-names */
  Res res;

  AVERT(Seg, seg);
  AVERT(Seg, segHi);
  amsseg = Seg2AMSSeg(seg);
  amssegHi = Seg2AMSSeg(segHi);
  AVERT(AMSSeg, amsseg);
  AVERT(AMSSeg, amssegHi);
  /* other parameters are checked by next-method */
  pool = SegPool(seg);
  arena = PoolArena(pool);
  ams = PoolAMS(pool);

  loGrains = amsseg->grains;
  hiGrains = amssegHi->grains;
  allGrains = loGrains + hiGrains;

  /* checks for .grain-align */
  AVER(allGrains == PoolSizeGrains(pool, AddrOffset(base, limit)));
  /* checks for .empty */
  AVER(amssegHi->freeGrains == hiGrains);
  AVER(!amssegHi->marksChanged);

  /* .alloc-early: create the merged tables before the next-method
     call so that a table-allocation failure needs no unwinding. */
  res = amsCreateTables(ams, &allocTable, &nongreyTable, &nonwhiteTable,
                        arena, allGrains);
  if (res != ResOK)
    goto failCreateTables;

  /* Merge the superclass fields via next-method call */
  res = NextMethod(Seg, AMSSeg, merge)(seg, segHi, base, mid, limit);
  if (res != ResOK)
    goto failSuper;

  /* Update fields of seg. Finish segHi.
 */

/* Copy the low segment's bits into the new table, initialize the high
 * (empty) range via setHighRangeFn, destroy both old tables and
 * install the new one.  Relies on .table-names for field access. */
#define MERGE_TABLES(table, setHighRangeFn) \
  /* Implementation depends on .table-names */ \
  BEGIN \
    BTCopyRange(amsseg->table, (table), 0, loGrains); \
    setHighRangeFn((table), loGrains, allGrains); \
    BTDestroy(amsseg->table, arena, loGrains); \
    BTDestroy(amssegHi->table, arena, hiGrains); \
    amsseg->table = (table); \
  END

  /* High range is free, hence alloc bits reset, colour bits set. */
  MERGE_TABLES(allocTable, BTResRange);
  MERGE_TABLES(nongreyTable, BTSetRange);
  if (!ams->shareAllocTable)
    MERGE_TABLES(nonwhiteTable, BTSetRange);

  amsseg->grains = allGrains;
  amsseg->freeGrains = amsseg->freeGrains + amssegHi->freeGrains;
  amsseg->bufferedGrains = amsseg->bufferedGrains + amssegHi->bufferedGrains;
  amsseg->newGrains = amsseg->newGrains + amssegHi->newGrains;
  amsseg->oldGrains = amsseg->oldGrains + amssegHi->oldGrains;
  /* other fields in amsseg are unaffected */

  amssegHi->sig = SigInvalid;

  AVERT(AMSSeg, amsseg);
  PoolGenAccountForSegMerge(ams->pgen);
  return ResOK;

failSuper:
  amsDestroyTables(ams, allocTable, nongreyTable, nonwhiteTable,
                   arena, allGrains);
failCreateTables:
  AVERT(AMSSeg, amsseg);
  AVERT(AMSSeg, amssegHi);
  return res;
}


static Res AMSSegSplit(Seg seg, Seg segHi,
                       Addr base, Addr mid, Addr limit)
{
  Count loGrains, hiGrains, allGrains;
  AMSSeg amsseg, amssegHi;
  Pool pool;
  Arena arena;
  AMS ams;
  BT allocTableLo, nongreyTableLo, nonwhiteTableLo; /* .table-names */
  BT allocTableHi, nongreyTableHi, nonwhiteTableHi; /* .table-names */
  Res res;

  AVERT(Seg, seg);
  AVER(segHi != NULL); /* can't check fully, it's not initialized */
  amsseg = Seg2AMSSeg(seg);
  amssegHi = Seg2AMSSeg(segHi);
  AVERT(AMSSeg, amsseg);
  /* other parameters are checked by next-method */
  pool = SegPool(seg);
  arena = PoolArena(pool);
  ams = PoolAMS(pool);

  loGrains = PoolSizeGrains(pool, AddrOffset(base, mid));
  hiGrains = PoolSizeGrains(pool, AddrOffset(mid, limit));
  allGrains = loGrains + hiGrains;

  /* checks for .grain-align */
  AVER(allGrains == amsseg->grains);
  /* checks for .empty: the range to be split off must be free */
  AVER(amsseg->freeGrains >= hiGrains);
  if (amsseg->allocTableInUse) {
    AVER(BTIsResRange(amsseg->allocTable, loGrains, allGrains));
  } else {
    AVER(amsseg->firstFree <= loGrains);
  }

  /* .alloc-early */
  res = amsCreateTables(ams, &allocTableLo, &nongreyTableLo, &nonwhiteTableLo,
                        arena, loGrains);
  if (res != ResOK)
    goto failCreateTablesLo;
  res = amsCreateTables(ams, &allocTableHi, &nongreyTableHi, &nonwhiteTableHi,
                        arena, hiGrains);
  if (res != ResOK)
    goto failCreateTablesHi;

  /* Split the superclass fields via next-method call */
  res = NextMethod(Seg, AMSSeg, split)(seg, segHi, base, mid, limit);
  if (res != ResOK)
    goto failSuper;

  /* Update seg. Full initialization for segHi. */

/* Copy the low bits into the Lo table, initialize the Hi table (whose
 * range is known free) via setHighRangeFn, destroy the old combined
 * table, and install both new tables.  Relies on .table-names. */
#define SPLIT_TABLES(table, setHighRangeFn) \
  /* Implementation depends on .table-names */ \
  BEGIN \
    BTCopyRange(amsseg->table, table ## Lo, 0, loGrains); \
    setHighRangeFn(table ## Hi, 0, hiGrains); \
    BTDestroy(amsseg->table, arena, allGrains); \
    amsseg->table = table ## Lo; \
    amssegHi->table = table ## Hi; \
  END

  SPLIT_TABLES(nonwhiteTable, BTSetRange);
  SPLIT_TABLES(nongreyTable, BTSetRange);
  SPLIT_TABLES(allocTable, BTResRange);

  amsseg->grains = loGrains;
  amssegHi->grains = hiGrains;
  AVER(amsseg->freeGrains >= hiGrains);
  amsseg->freeGrains -= hiGrains;
  amssegHi->freeGrains = hiGrains;
  amssegHi->bufferedGrains = (Count)0;
  amssegHi->newGrains = (Count)0;
  amssegHi->oldGrains = (Count)0;
  amssegHi->marksChanged = FALSE; /* (design reference lost) */
  amssegHi->ambiguousFixes = FALSE;

  /* start off using firstFree, see (design reference lost) */
  amssegHi->allocTableInUse = FALSE;
  amssegHi->firstFree = 0;
  /* use colour tables if the segment is white */
  amssegHi->colourTablesInUse = (SegWhite(segHi) != TraceSetEMPTY);
  amssegHi->ams = ams;
  amssegHi->sig = AMSSegSig;
  AVERT(AMSSeg, amsseg);
  AVERT(AMSSeg, amssegHi);
  PoolGenAccountForSegSplit(ams->pgen);
  return ResOK;

failSuper:
  amsDestroyTables(ams, allocTableHi, nongreyTableHi, nonwhiteTableHi,
                   arena, hiGrains);
failCreateTablesHi:
  amsDestroyTables(ams, allocTableLo, nongreyTableLo, nonwhiteTableLo,
                   arena, loGrains);
failCreateTablesLo:
  AVERT(AMSSeg, amsseg);
  return res;
}


/* AMSSegDescribe -- describe an AMS segment */

/* Emit a marker character when grain index i coincides with the given
 * buffer boundary (base '[', init '|', alloc '>', scan-limit '<',
 * limit ']'). */
#define WRITE_BUFFER_LIMIT(i, accessor, code) \
  BEGIN \
    if (hasBuffer && \
        (i) == PoolIndexOfAddr(SegBase(seg), SegPool(seg), accessor(buffer))) \
    { \
      Res _res = WriteF(stream, 0, code, NULL); \
      if (_res != ResOK) return _res; \
    } \
  END

static Res AMSSegDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
{
  AMSSeg amsseg = CouldBeA(AMSSeg, inst);
  Seg seg = CouldBeA(Seg, amsseg);
  Res res;
  Buffer buffer;
  Bool hasBuffer;
  Index i;

  if (!TESTC(AMSSeg, amsseg))
    return ResPARAM;
  if (stream == NULL)
    return ResPARAM;

  /* Describe the superclass fields first via next-method call */
  res = NextMethod(Inst, AMSSeg, describe)(inst, stream, depth);
  if (res != ResOK)
    return res;

  hasBuffer = SegBuffer(&buffer, seg);

  /* NOTE(review): "buffferedGrains" below looks like a typo in the
     output label — confirm whether the spelling is relied upon. */
  res = WriteF(stream, depth + 2,
               "AMS $P\n", (WriteFP)amsseg->ams,
               "grains $W\n", (WriteFW)amsseg->grains,
               "freeGrains $W\n", (WriteFW)amsseg->freeGrains,
               "buffferedGrains $W\n", (WriteFW)amsseg->bufferedGrains,
               "newGrains $W\n", (WriteFW)amsseg->newGrains,
               "oldGrains $W\n", (WriteFW)amsseg->oldGrains,
               NULL);
  if (res != ResOK)
    return res;
  if (amsseg->allocTableInUse)
    res = WriteF(stream, depth,
                 "alloctable $P\n", (WriteFP)amsseg->allocTable,
                 NULL);
  else
    res = WriteF(stream, depth,
                 "firstFree $W\n", (WriteFW)amsseg->firstFree,
                 NULL);
  if (res != ResOK)
    return res;
  res = WriteF(stream, depth,
               "tables: nongrey $P, nonwhite $P\n",
               (WriteFP)amsseg->nongreyTable,
               (WriteFP)amsseg->nonwhiteTable,
               "map:",
               NULL);
  if (res != ResOK)
    return res;

  /* One character per grain, 64 grains per output line. */
  for (i=0; i < amsseg->grains; ++i) {
    char c = 0;

    if (i % 64 == 0) {
      res = WriteF(stream, 0, "\n",
                   NULL);
      if (res != ResOK)
        return res;
      res = WriteF(stream, depth, " ", NULL);
      if (res != ResOK)
        return res;
    }

    WRITE_BUFFER_LIMIT(i, BufferBase, "[");
    WRITE_BUFFER_LIMIT(i, BufferGetInit, "|");
    WRITE_BUFFER_LIMIT(i, BufferAlloc, ">");

    /* Grain colour map: '!' invalid, '-' white, '+' grey, '*' black,
       '.' allocated (no colour tables), ' ' free. */
    if (AMS_ALLOCED(seg, i)) {
      if (amsseg->colourTablesInUse) {
        if (AMS_IS_INVALID_COLOUR(seg, i))
          c = '!';
        else if (AMS_IS_WHITE(seg, i))
          c = '-';
        else if (AMS_IS_GREY(seg, i))
          c = '+';
        else /* must be black */
          c = '*';
      } else
        c = '.';
    } else
      c = ' ';
    res = WriteF(stream, 0, "$C", (WriteFC)c, NULL);
    if (res != ResOK)
      return res;

    WRITE_BUFFER_LIMIT(i+1, BufferScanLimit, "<");
    WRITE_BUFFER_LIMIT(i+1, BufferLimit, "]");
  }

  return ResOK;
}


/* AMSSegClass -- Class definition for AMS segments */

DEFINE_CLASS(Seg, AMSSeg, klass)
{
  INHERIT_CLASS(klass, AMSSeg, MutatorSeg);
  klass->instClassStruct.describe = AMSSegDescribe;
  klass->instClassStruct.finish = AMSSegFinish;
  klass->size = sizeof(AMSSegStruct);
  klass->init = AMSSegInit;
  klass->bufferFill = amsSegBufferFill;
  klass->bufferEmpty = amsSegBufferEmpty;
  klass->merge = AMSSegMerge;
  klass->split = AMSSegSplit;
  klass->whiten = amsSegWhiten;
  klass->blacken = amsSegBlacken;
  klass->scan = amsSegScan;
  klass->fix = amsSegFix;
  klass->fixEmergency = amsSegFix;  /* no separate emergency fix path */
  klass->reclaim = amsSegReclaim;
  klass->walk = amsSegWalk;
  AVERT(SegClass, klass);
}


/* AMSSegSizePolicy
 *
 * Picks a segment size. This policy simply rounds the size
 * up to the arena grain size.
+ */ +static Res AMSSegSizePolicy(Size *sizeReturn, + Pool pool, Size size, RankSet rankSet) +{ + Arena arena; + + AVER(sizeReturn != NULL); + AVERT(Pool, pool); + AVER(size > 0); + AVERT(RankSet, rankSet); + + arena = PoolArena(pool); + + size = SizeArenaGrains(size, arena); + if (size == 0) { + /* overflow */ + return ResMEMORY; + } + *sizeReturn = size; + return ResOK; +} + + +/* AMSSegCreate -- create a single AMSSeg */ + +static Res AMSSegCreate(Seg *segReturn, Pool pool, Size size, + RankSet rankSet) +{ + Seg seg; + AMS ams; + Res res; + Arena arena; + Size prefSize; + + AVER(segReturn != NULL); + AVERT(Pool, pool); + AVER(size > 0); + AVERT(RankSet, rankSet); + + ams = PoolAMS(pool); + AVERT(AMS,ams); + arena = PoolArena(pool); + + res = ams->segSize(&prefSize, pool, size, rankSet); + if (res != ResOK) + goto failSize; + + res = PoolGenAlloc(&seg, ams->pgen, (*ams->segClass)(), prefSize, + argsNone); + if (res != ResOK) { /* try to allocate one that's just large enough */ + Size minSize = SizeArenaGrains(size, arena); + if (minSize == prefSize) + goto failSeg; + res = PoolGenAlloc(&seg, ams->pgen, (*ams->segClass)(), prefSize, + argsNone); + if (res != ResOK) + goto failSeg; + } + + /* see */ + if (rankSet != RankSetEMPTY) { + SegSetRankAndSummary(seg, rankSet, RefSetUNIV); + } else { + SegSetRankAndSummary(seg, rankSet, RefSetEMPTY); + } + DebugPoolFreeSplat(pool, SegBase(seg), SegLimit(seg)); + + AVERT(AMSSeg, Seg2AMSSeg(seg)); + + *segReturn = seg; + return ResOK; + +failSeg: +failSize: + return res; +} + + +/* AMSSegsDestroy -- destroy all the segments */ + +static void AMSSegsDestroy(AMS ams) +{ + Pool pool = AMSPool(ams); + Ring ring, node, next; /* for iterating over the segments */ + + ring = PoolSegRing(AMSPool(ams)); + RING_FOR(node, ring, next) { + Seg seg = SegOfPoolRing(node); + AMSSeg amsseg = Seg2AMSSeg(seg); + AVER(!SegHasBuffer(seg)); + AVERT(AMSSeg, amsseg); + AVER(amsseg->ams == ams); + AVER(amsseg->bufferedGrains == 0); + 
    AMSSegFreeCheck(amsseg);
    PoolGenFree(ams->pgen, seg,
                PoolGrainsSize(pool, amsseg->freeGrains),
                PoolGrainsSize(pool, amsseg->oldGrains),
                PoolGrainsSize(pool, amsseg->newGrains),
                FALSE);
  }
}


/* AMSVarargs -- decode obsolete varargs
 *
 * Translates the legacy (format, chain, supportAmbiguous) varargs
 * protocol into a keyword argument list.
 */

static void AMSVarargs(ArgStruct args[MPS_ARGS_MAX - 1], va_list varargs)
{
  args[0].key = MPS_KEY_FORMAT;
  args[0].val.format = va_arg(varargs, Format);
  args[1].key = MPS_KEY_CHAIN;
  args[1].val.chain = va_arg(varargs, Chain);
  args[2].key = MPS_KEY_AMS_SUPPORT_AMBIGUOUS;
  args[2].val.b = va_arg(varargs, Bool);
  args[3].key = MPS_KEY_ARGS_END;
  AVER(MPS_ARGS_MAX - 1 > 3);
  AVERT(ArgList, args);
}

/* AMSDebugVarargs -- as AMSVarargs, with a leading debug-options
 * argument; delegates the remainder to AMSVarargs. */
static void AMSDebugVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs)
{
  args[0].key = MPS_KEY_POOL_DEBUG_OPTIONS;
  args[0].val.pool_debug_options = va_arg(varargs, mps_pool_debug_option_s *);
  AMSVarargs(args + 1, varargs);
}


/* AMSInit -- the pool class initialization method
 *
 * Takes one additional argument: the format of the objects
 * allocated in the pool.
 */

ARG_DEFINE_KEY(AMS_SUPPORT_AMBIGUOUS, Bool);

static Res AMSInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
{
  Res res;
  Chain chain;
  Bool supportAmbiguous = AMS_SUPPORT_AMBIGUOUS_DEFAULT;
  unsigned gen = AMS_GEN_DEFAULT;
  ArgStruct arg;
  AMS ams;

  AVER(pool != NULL);
  AVERT(Arena, arena);
  AVERT(ArgList, args);
  UNUSED(klass); /* used for debug pools only */

  if (ArgPick(&arg, args, MPS_KEY_CHAIN))
    chain = arg.val.chain;
  else {
    chain = ArenaGlobals(arena)->defaultChain;
    gen = 1; /* avoid the nursery of the default chain by default */
  }
  if (ArgPick(&arg, args, MPS_KEY_GEN))
    gen = arg.val.u;
  if (ArgPick(&arg, args, MPS_KEY_AMS_SUPPORT_AMBIGUOUS))
    supportAmbiguous = arg.val.b;

  AVERT(Chain, chain);
  AVER(gen <= ChainGens(chain));
  AVER(chain->arena == arena);

  res = NextMethod(Pool, AMSPool, init)(pool, arena, klass, args);
  if (res != ResOK)
    goto failNextInit;
  ams = CouldBeA(AMSPool, pool);

  /* Ensure a format was supplied in the argument list. */
  AVER(pool->format != NULL);
  pool->alignment = pool->format->alignment;
  pool->alignShift = SizeLog2(pool->alignment);
  /* .ambiguous.noshare: If the pool is required to support ambiguous */
  /* references, the alloc and white tables cannot be shared. */
  ams->shareAllocTable = !supportAmbiguous;
  ams->pgen = NULL;

  /* The next three might be overridden by a subclass.
 */
  ams->segSize = AMSSegSizePolicy;
  ams->segsDestroy = AMSSegsDestroy;
  ams->segClass = AMSSegClassGet;

  SetClassOfPoly(pool, CLASS(AMSPool));
  ams->sig = AMSSig;
  AVERC(AMS, ams);

  res = PoolGenInit(&ams->pgenStruct, ChainGen(chain, gen), pool);
  if (res != ResOK)
    goto failGenInit;
  ams->pgen = &ams->pgenStruct;

  EVENT2(PoolInitAMS, pool, pool->format);

  return ResOK;

failGenInit:
  NextMethod(Inst, AMSPool, finish)(MustBeA(Inst, pool));
failNextInit:
  AVER(res != ResOK);
  return res;
}


/* AMSFinish -- the pool class finishing method
 *
 * Destroys all the segs in the pool. Can't invalidate the AMS until
 * we've destroyed all the segments, as it may be checked.
 */
void AMSFinish(Inst inst)
{
  Pool pool = MustBeA(AbstractPool, inst);
  AMS ams = MustBeA(AMSPool, pool);

  AVERT(AMS, ams);

  ams->segsDestroy(ams);
  /* can't invalidate the AMS until we've destroyed all the segs */
  ams->sig = SigInvalid;
  PoolGenFinish(ams->pgen);
  ams->pgen = NULL;

  NextMethod(Inst, AMSPool, finish)(inst);
}


/* amsSegBufferFill -- try filling buffer from segment
 *
 * Returns TRUE iff a free run of at least the requested size was
 * found and reserved; on success the run's bounds are returned and
 * the grain accounting moves the run from free to buffered.
 */

static Bool amsSegBufferFill(Addr *baseReturn, Addr *limitReturn,
                             Seg seg, Size size, RankSet rankSet)
{
  Index baseIndex, limitIndex;
  AMSSeg amsseg = MustBeA(AMSSeg, seg);
  Pool pool = SegPool(seg);
  Count requestedGrains, segGrains, allocatedGrains;
  Addr segBase, base, limit;

  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  AVER(SizeIsAligned(size, PoolAlignment(pool)));
  AVER(size > 0);
  AVERT(RankSet, rankSet);

  requestedGrains = PoolSizeGrains(pool, size);
  if (amsseg->freeGrains < requestedGrains)
    /* Not enough space to satisfy the request.
 */
    return FALSE;

  if (SegHasBuffer(seg))
    /* Don't bother trying to allocate from a buffered segment */
    return FALSE;

  if (RefSetUnion(SegWhite(seg), SegGrey(seg)) != TraceSetEMPTY)
    /* Can't use a white or grey segment, see (design reference lost) */
    return FALSE;

  if (rankSet != SegRankSet(seg))
    /* Can't satisfy required rank set. */
    return FALSE;

  segGrains = PoolSizeGrains(pool, SegSize(seg));
  if (amsseg->freeGrains == segGrains) {
    /* Whole segment is free: no need for a search. */
    baseIndex = 0;
    limitIndex = segGrains;
    goto found;
  }

  /* We don't place buffers on white segments, so no need to adjust colour. */
  AVER(!amsseg->colourTablesInUse);

  if (amsseg->allocTableInUse) {
    if (!BTFindLongResRange(&baseIndex, &limitIndex, amsseg->allocTable,
                            0, segGrains, requestedGrains))
      return FALSE;
  } else {
    /* no underflow: requestedGrains <= freeGrains <= segGrains */
    if (amsseg->firstFree > segGrains - requestedGrains)
      return FALSE;
    baseIndex = amsseg->firstFree;
    limitIndex = segGrains;
  }

found:
  AVER(baseIndex < limitIndex);
  if (amsseg->allocTableInUse) {
    BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    amsseg->firstFree = limitIndex;
  }
  /* Account: the whole found run becomes buffered, even if larger
     than requested. */
  allocatedGrains = limitIndex - baseIndex;
  AVER(requestedGrains <= allocatedGrains);
  AVER(amsseg->freeGrains >= allocatedGrains);
  amsseg->freeGrains -= allocatedGrains;
  amsseg->bufferedGrains += allocatedGrains;

  segBase = SegBase(seg);
  base = PoolAddrOfIndex(segBase, pool, baseIndex);
  limit = PoolAddrOfIndex(segBase, pool, limitIndex);
  PoolGenAccountForFill(PoolSegPoolGen(pool, seg), AddrOffset(base, limit));
  DebugPoolFreeCheck(pool, base, limit);

  *baseReturn = base;
  *limitReturn = limit;
  return TRUE;
}


/* AMSBufferFill -- the pool class buffer fill method
 *
 * Iterates over the segments looking for space.
 */
static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
                         Pool pool, Buffer buffer, Size size)
{
  Res res;
  Ring node, nextNode;
  RankSet rankSet;
  Seg seg;
  Bool b;

  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  AVERC(Buffer, buffer);
  AVER(BufferIsReset(buffer));
  AVER(size > 0);
  AVER(SizeIsAligned(size, PoolAlignment(pool)));

  /* Check that we're not in the grey mutator phase */
  /* (design reference lost in extraction) */
  AVER(PoolArena(pool)->busyTraces == PoolArena(pool)->flippedTraces);

  /* First try to satisfy the request from an existing segment. */
  rankSet = BufferRankSet(buffer);
  RING_FOR(node, &pool->segRing, nextNode) {
    seg = SegOfPoolRing(node);
    if (SegBufferFill(baseReturn, limitReturn, seg, size, rankSet))
      return ResOK;
  }

  /* No segment had enough space, so make a new one. */
  res = AMSSegCreate(&seg, pool, size, BufferRankSet(buffer));
  if (res != ResOK)
    return res;
  /* A freshly created segment must be able to satisfy the request. */
  b = SegBufferFill(baseReturn, limitReturn, seg, size, rankSet);
  AVER(b);
  return ResOK;
}


/* amsSegBufferEmpty -- empty buffer to segment
 *
 * Frees the unused part of the buffer. The colour of the area doesn't
 * need to be changed.
 */
static void amsSegBufferEmpty(Seg seg, Buffer buffer)
{
  AMSSeg amsseg = MustBeA(AMSSeg, seg);
  Pool pool = SegPool(seg);
  Addr segBase, bufferBase, init, limit;
  Index initIndex, limitIndex;
  Count usedGrains, unusedGrains;

  AVERT(Seg, seg);
  AVERT(Buffer, buffer);
  segBase = SegBase(seg);
  bufferBase = BufferBase(buffer);
  init = BufferGetInit(buffer);
  limit = BufferLimit(buffer);
  /* buffer must lie within the segment: base <= init <= limit */
  AVER(segBase <= bufferBase);
  AVER(bufferBase <= init);
  AVER(init <= limit);
  AVER(limit <= SegLimit(seg));

  initIndex = PoolIndexOfAddr(segBase, pool, init);
  limitIndex = PoolIndexOfAddr(segBase, pool, limit);

  if (initIndex < limitIndex) {
    AMS ams = MustBeA(AMSPool, pool);

    /* Tripped allocations might have scribbled on it, need to splat again.
 */
    DebugPoolFreeSplat(pool, init, limit);

    if (amsseg->allocTableInUse) {
      /* check that it's allocated */
      AVER(BTIsSetRange(amsseg->allocTable, initIndex, limitIndex));
      BTResRange(amsseg->allocTable, initIndex, limitIndex);
    } else {
      /* check that it's allocated */
      AVER(limitIndex <= amsseg->firstFree);
      if (limitIndex == amsseg->firstFree) /* is it at the end? */ {
        amsseg->firstFree = initIndex;
      } else if (ams->shareAllocTable && amsseg->colourTablesInUse) {
        /* The nonwhiteTable is shared with allocTable and in use, so we
         * mustn't start using allocTable. In this case we know: 1. the
         * segment has been condemned (because colour tables are turned on
         * in amsSegWhiten); 2. the segment has not yet been reclaimed
         * (because colour tables are turned off in amsSegReclaim); 3. the
         * unused portion of the buffer is black (see amsSegWhiten). So we
         * need to whiten the unused portion of the buffer. The allocTable
         * will be turned back on (if necessary) in amsSegReclaim, when we
         * know that the nonwhite grains are exactly the allocated grains.
         * (The whitening itself happens below, in the common
         * colourTablesInUse case.)
 */
      } else {
        /* start using allocTable */
        amsseg->allocTableInUse = TRUE;
        BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
        if (amsseg->firstFree < amsseg->grains)
          BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
        BTResRange(amsseg->allocTable, initIndex, limitIndex);
      }
    }

    if (amsseg->colourTablesInUse)
      AMS_RANGE_WHITEN(seg, initIndex, limitIndex);
  }

  /* Account the used part as new, return the unused part to free. */
  unusedGrains = limitIndex - initIndex;
  AVER(unusedGrains <= amsseg->bufferedGrains);
  usedGrains = amsseg->bufferedGrains - unusedGrains;
  amsseg->freeGrains += unusedGrains;
  amsseg->bufferedGrains = 0;
  amsseg->newGrains += usedGrains;

  PoolGenAccountForEmpty(PoolSegPoolGen(pool, seg),
                         PoolGrainsSize(pool, usedGrains),
                         PoolGrainsSize(pool, unusedGrains), FALSE);
}


/* amsSegPoolGen -- get pool generation for an AMS segment */

static PoolGen amsSegPoolGen(Pool pool, Seg seg)
{
  AMS ams = MustBeA(AMSPool, pool);
  AVERT(Seg, seg);
  return ams->pgen;
}


/* amsSegRangeWhiten -- Condemn a part of an AMS segment
 * Allow calling it with base = limit, to simplify the callers.
 */
static void amsSegRangeWhiten(Seg seg, Index base, Index limit)
{
  if (base != limit) {
    AMSSeg amsseg = Seg2AMSSeg(seg);

    AVER(base < limit);
    AVER(limit <= amsseg->grains);

    AMS_RANGE_WHITEN(seg, base, limit);
  }
}


/* amsSegWhiten -- the pool class segment condemning method */

static Res amsSegWhiten(Seg seg, Trace trace)
{
  Buffer buffer; /* the seg's buffer, if it has one */
  Count agedGrains, uncondemnedGrains;
  AMSSeg amsseg = MustBeA(AMSSeg, seg);
  Pool pool = SegPool(seg);
  PoolGen pgen = PoolSegPoolGen(pool, seg);

  AVERT(Trace, trace);

  /* A segment may only be condemned for one trace at a time. */
  AVER(SegWhite(seg) == TraceSetEMPTY);
  AVER(!amsseg->colourTablesInUse);

  amsseg->colourTablesInUse = TRUE;

  /* Init allocTable, if necessary.
*/ + if (!amsseg->allocTableInUse) { + if (0 < amsseg->firstFree) + BTSetRange(amsseg->allocTable, 0, amsseg->firstFree); + if (amsseg->firstFree < amsseg->grains) + BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains); + } + + /* Start using allocTable as the white table, if so configured. */ + if (amsseg->ams->shareAllocTable) { + if (amsseg->allocTableInUse) { + /* During the collection, it can't use allocTable for AMS_ALLOCED, so */ + /* make it use firstFree. */ + amsseg->allocTableInUse = FALSE; + /* Could find a better value for firstFree, but probably not worth it. */ + amsseg->firstFree = amsseg->grains; + } + } else { /* Otherwise, use it as alloc table. */ + amsseg->allocTableInUse = TRUE; + } + + if (SegBuffer(&buffer, seg)) { /* */ + Index scanLimitIndex, limitIndex; + scanLimitIndex = PoolIndexOfAddr(SegBase(seg), pool, BufferScanLimit(buffer)); + limitIndex = PoolIndexOfAddr(SegBase(seg), pool, BufferLimit(buffer)); + + amsSegRangeWhiten(seg, 0, scanLimitIndex); + if (scanLimitIndex < limitIndex) + AMS_RANGE_BLACKEN(seg, scanLimitIndex, limitIndex); + amsSegRangeWhiten(seg, limitIndex, amsseg->grains); + /* We didn't condemn the buffer, subtract it from the count. */ + uncondemnedGrains = limitIndex - scanLimitIndex; + } else { /* condemn whole seg */ + amsSegRangeWhiten(seg, 0, amsseg->grains); + uncondemnedGrains = (Count)0; + } + + /* The unused part of the buffer remains buffered: the rest becomes old. 
*/ + AVER(amsseg->bufferedGrains >= uncondemnedGrains); + agedGrains = amsseg->bufferedGrains - uncondemnedGrains; + PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains), + PoolGrainsSize(pool, amsseg->newGrains), FALSE); + amsseg->oldGrains += agedGrains + amsseg->newGrains; + amsseg->bufferedGrains = uncondemnedGrains; + amsseg->newGrains = 0; + amsseg->marksChanged = FALSE; /* */ + amsseg->ambiguousFixes = FALSE; + + if (amsseg->oldGrains > 0) { + GenDescCondemned(pgen->gen, trace, + PoolGrainsSize(pool, amsseg->oldGrains)); + SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + } else { + amsseg->colourTablesInUse = FALSE; + } + + return ResOK; +} + + +/* AMSObjectFunction is the type of the method that an */ +/* amsIterate applies to each object in a segment. */ +typedef Res (*AMSObjectFunction)( + /* the segment */ Seg seg, + /* the object grain index */ Index i, + /* the address of the object */Addr p, + /* " " after the object */Addr next, + /* the iteration closure */ void *closure); + +#define AMSObjectFunctionCheck(f) \ + ((f) != NULL) /* that's the best we can do */ + + +/* semSegIterate -- applies a function to each object in a segment + * + * semSegIterate(seg, f, closure) applies f to all the objects in the + * segment. It skips the buffer, if any (from BufferScanLimit to + * BufferLimit). */ + +static Res semSegIterate(Seg seg, AMSObjectFunction f, void *closure) +{ + Res res; + Pool pool; + AMSSeg amsseg; + Format format; + Align alignment; + Index i; + Addr p, next, limit; + Buffer buffer; + Bool hasBuffer; + + AVERT(Seg, seg); + AVERT(AMSObjectFunction, f); + /* Can't check closure */ + + amsseg = Seg2AMSSeg(seg); + AVERT(AMSSeg, amsseg); + pool = SegPool(seg); + AVERT(Pool, pool); + format = pool->format; + AVERT(Format, format); + alignment = PoolAlignment(pool); + + /* If we're using the alloc table as a white table, we can't use it to */ + /* determine where there are objects. 
*/ + AVER(!amsseg->ams->shareAllocTable || !amsseg->colourTablesInUse); + + p = SegBase(seg); + limit = SegLimit(seg); + hasBuffer = SegBuffer(&buffer, seg); + + while (p < limit) { /* loop over the objects in the segment */ + if (hasBuffer && p == BufferScanLimit(buffer) && p != BufferLimit(buffer)) { + /* skip buffer */ + next = BufferLimit(buffer); + AVER(AddrIsAligned(next, alignment)); + } else { + AVER(!hasBuffer + || (p < BufferScanLimit(buffer)) + || (p >= BufferLimit(buffer))); /* not in the buffer */ + + i = PoolIndexOfAddr(SegBase(seg), pool, p); + if (!AMS_ALLOCED(seg, i)) { /* no object here */ + if (amsseg->allocTableInUse) { + Index dummy, nextIndex; + Bool more; + + /* Find out how large the free block is. */ + more = BTFindLongResRange(&dummy, &nextIndex, amsseg->allocTable, + i, amsseg->grains, 1); + AVER(more); + AVER(dummy == i); + next = PoolAddrOfIndex(SegBase(seg), pool, nextIndex); + } else { + /* If there's no allocTable, this is the free block at the end. */ + next = limit; + } + } else { /* there is an object here */ + if (format->skip != NULL) { + next = (*format->skip)(AddrAdd(p, format->headerSize)); + next = AddrSub(next, format->headerSize); + } else { + next = AddrAdd(p, alignment); + } + AVER(AddrIsAligned(next, alignment)); + res = (*f)(seg, i, p, next, closure); + if (res != ResOK) + return res; + } + } + AVER(next > p); /* make sure we make progress */ + p = next; + } + AVER(p == limit); + return ResOK; +} + + +/* amsScanObject -- scan a single object + * + * This is the object function passed to semSegIterate by amsSegScan. */ + +struct amsScanClosureStruct { + ScanState ss; + Bool scanAllObjects; +}; + +typedef struct amsScanClosureStruct *amsScanClosure; + +static Res amsScanObject(Seg seg, Index i, Addr p, Addr next, void *clos) +{ + amsScanClosure closure; + AMSSeg amsseg; + Format format; + Res res; + + amsseg = Seg2AMSSeg(seg); + /* seg & amsseg have already been checked, in semSegIterate. 
*/ + AVER(i < amsseg->grains); + AVER(p != 0); + AVER(p < next); + AVER(clos != NULL); + closure = (amsScanClosure)clos; + AVERT(ScanState, closure->ss); + AVERT(Bool, closure->scanAllObjects); + + format = AMSPool(amsseg->ams)->format; + AVERT(Format, format); + + /* @@@@ This isn't quite right for multiple traces. */ + if (closure->scanAllObjects || AMS_IS_GREY(seg, i)) { + res = TraceScanFormat(closure->ss, + AddrAdd(p, format->headerSize), + AddrAdd(next, format->headerSize)); + if (res != ResOK) + return res; + if (!closure->scanAllObjects) { + Index j = PoolIndexOfAddr(SegBase(seg), SegPool(seg), next); + AVER(!AMS_IS_INVALID_COLOUR(seg, i)); + AMS_GREY_BLACKEN(seg, i); + if (i+1 < j) + AMS_RANGE_WHITE_BLACKEN(seg, i+1, j); + } + } + + return ResOK; +} + + +/* amsSegScan -- the segment scanning method + * + * + */ +static Res amsSegScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + Res res; + AMSSeg amsseg = MustBeA(AMSSeg, seg); + Pool pool = SegPool(seg); + AMS ams = MustBeA(AMSPool, pool); + Arena arena = PoolArena(pool); + struct amsScanClosureStruct closureStruct; + Format format; + Align alignment; + + AVER(totalReturn != NULL); + AVERT(ScanState, ss); + + /* Check that we're not in the grey mutator phase */ + /* . */ + AVER(TraceSetSub(ss->traces, arena->flippedTraces)); + + closureStruct.scanAllObjects = + (TraceSetDiff(ss->traces, SegWhite(seg)) != TraceSetEMPTY); + closureStruct.ss = ss; + /* @@@@ This isn't quite right for multiple traces. */ + if (closureStruct.scanAllObjects) { + /* The whole seg (except the buffer) is grey for some trace. 
*/ + res = semSegIterate(seg, amsScanObject, &closureStruct); + if (res != ResOK) { + *totalReturn = FALSE; + return res; + } + *totalReturn = TRUE; + } else { + AVER(amsseg->marksChanged); /* something must have changed */ + AVER(amsseg->colourTablesInUse); + format = pool->format; + AVERT(Format, format); + alignment = PoolAlignment(AMSPool(ams)); + do { /* */ + amsseg->marksChanged = FALSE; /* */ + /* */ + if (amsseg->ambiguousFixes) { + res = semSegIterate(seg, amsScanObject, &closureStruct); + if (res != ResOK) { + /* */ + amsseg->marksChanged = TRUE; + *totalReturn = FALSE; + return res; + } + } else { + Index i, j = 0; + Addr p, next; + + while(j < amsseg->grains + && AMSFindGrey(&i, &j, seg, j, amsseg->grains)) { + Addr clientP, clientNext; + AVER(!AMS_IS_INVALID_COLOUR(seg, i)); + p = PoolAddrOfIndex(SegBase(seg), pool, i); + clientP = AddrAdd(p, format->headerSize); + if (format->skip != NULL) { + clientNext = (*format->skip)(clientP); + next = AddrSub(clientNext, format->headerSize); + } else { + clientNext = AddrAdd(clientP, alignment); + next = AddrAdd(p, alignment); + } + j = PoolIndexOfAddr(SegBase(seg), pool, next); + res = TraceScanFormat(ss, clientP, clientNext); + if (res != ResOK) { + /* */ + amsseg->marksChanged = TRUE; + *totalReturn = FALSE; + return res; + } + /* Check that there haven't been any ambiguous fixes during the */ + /* scan, because AMSFindGrey won't work otherwise. 
*/ + AVER_CRITICAL(!amsseg->ambiguousFixes); + AMS_GREY_BLACKEN(seg, i); + if (i+1 < j) + AMS_RANGE_WHITE_BLACKEN(seg, i+1, j); + } + } + } while(amsseg->marksChanged); + *totalReturn = FALSE; + } + + return ResOK; +} + + +/* amsSegFix -- the segment fixing method */ + +static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO) +{ + AMSSeg amsseg = MustBeA_CRITICAL(AMSSeg, seg); + Pool pool = SegPool(seg); + Index i; /* the index of the fixed grain */ + Addr base; + Ref clientRef; + Format format; + + AVERT_CRITICAL(ScanState, ss); + AVER_CRITICAL(refIO != NULL); + + format = pool->format; + AVERT_CRITICAL(Format, format); + + amsseg = Seg2AMSSeg(seg); + AVERT_CRITICAL(AMSSeg, amsseg); + /* It's a white seg, so it must have colour tables. */ + AVER_CRITICAL(amsseg->colourTablesInUse); + + /* @@@@ We should check that we're not in the grey mutator phase */ + /* , but there's no way of */ + /* doing that here (this can be called from RootScan, during flip). */ + + clientRef = *refIO; + AVER_CRITICAL(SegBase(seg) <= clientRef); + AVER_CRITICAL(clientRef < SegLimit(seg)); /* see .ref-limit */ + base = AddrSub((Addr)clientRef, format->headerSize); + + /* Not a real reference if out of bounds. This can happen if an + ambiguous reference is closer to the base of the segment than the + header size. */ + if (base < SegBase(seg)) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + /* Not a real reference if unaligned. */ + if (!AddrIsAligned(base, PoolAlignment(pool))) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + i = PoolIndexOfAddr(SegBase(seg), pool, base); + AVER_CRITICAL(i < amsseg->grains); + AVER_CRITICAL(!AMS_IS_INVALID_COLOUR(seg, i)); + + /* Not a real reference if unallocated. 
*/ + if (!AMS_ALLOCED(seg, i)) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + switch (ss->rank) { + case RankAMBIG: + if (PoolAMS(pool)->shareAllocTable) + /* In this state, the pool doesn't support ambiguous references (see */ + /* .ambiguous.noshare), so this is not a reference. */ + break; + amsseg->ambiguousFixes = TRUE; + /* falls through */ + case RankEXACT: + case RankFINAL: + case RankWEAK: + if (AMS_IS_WHITE(seg, i)) { + ss->wasMarked = FALSE; /* */ + if (ss->rank == RankWEAK) { /* then splat the reference */ + *refIO = (Ref)0; + } else { + STATISTIC(++ss->preservedInPlaceCount); /* Size updated on reclaim */ + if (SegRankSet(seg) == RankSetEMPTY && ss->rank != RankAMBIG) { + /* */ + Addr clientNext, next; + + ShieldExpose(PoolArena(pool), seg); + clientNext = (*pool->format->skip)(clientRef); + ShieldCover(PoolArena(pool), seg); + next = AddrSub(clientNext, format->headerSize); + /* Part of the object might be grey, because of ambiguous */ + /* fixes, but that's OK, because scan will ignore that. */ + AMS_RANGE_WHITE_BLACKEN(seg, i, PoolIndexOfAddr(SegBase(seg), pool, next)); + } else { /* turn it grey */ + AMS_WHITE_GREYEN(seg, i); + SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces)); + /* mark it for scanning - */ + amsseg->marksChanged = TRUE; + } + } + } + break; + default: + NOTREACHED; + } + + return ResOK; +} + + +/* amsSegBlacken -- the segment blackening method + * + * Turn all grey objects black. */ + +static Res amsSegBlackenObject(Seg seg, Index i, Addr p, Addr next, void *clos) +{ + UNUSED(p); + AVER(clos == UNUSED_POINTER); + /* Do what amsScanObject does, minus the scanning. 
*/ + if (AMS_IS_GREY(seg, i)) { + Index j = PoolIndexOfAddr(SegBase(seg), SegPool(seg), next); + AVER(!AMS_IS_INVALID_COLOUR(seg, i)); + AMS_GREY_BLACKEN(seg, i); + if (i+1 < j) + AMS_RANGE_BLACKEN(seg, i+1, j); + } + return ResOK; +} + +static void amsSegBlacken(Seg seg, TraceSet traceSet) +{ + Res res; + + AVERT(TraceSet, traceSet); + AVERT(Seg, seg); + + /* If it's white for any of these traces, turn grey to black without scanning. */ + if (TraceSetInter(traceSet, SegWhite(seg)) != TraceSetEMPTY) { + AMSSeg amsseg = Seg2AMSSeg(seg); + AVERT(AMSSeg, amsseg); + AVER(amsseg->marksChanged); /* there must be something grey */ + amsseg->marksChanged = FALSE; + res = semSegIterate(seg, amsSegBlackenObject, UNUSED_POINTER); + AVER(res == ResOK); + } +} + + +/* amsSegReclaim -- the segment reclamation method */ + +static void amsSegReclaim(Seg seg, Trace trace) +{ + AMSSeg amsseg = MustBeA(AMSSeg, seg); + Pool pool = SegPool(seg); + PoolGen pgen = PoolSegPoolGen(pool, seg); + Count nowFree, grains, reclaimedGrains; + Size preservedInPlaceSize; + PoolDebugMixin debug; + + AVERT(Trace, trace); + + /* It's a white seg, so it must have colour tables. */ + AVER(amsseg->colourTablesInUse); + AVER(!amsseg->marksChanged); /* there must be nothing grey */ + grains = amsseg->grains; + + /* Loop over all white blocks and splat them, if it's a debug class. */ + debug = Method(Pool, pool, debugMixin)(pool); + if (debug != NULL) { + Index i, j = 0; + + while(j < grains && AMS_FIND_WHITE_RANGE(&i, &j, seg, j, grains)) { + AVER(!AMS_IS_INVALID_COLOUR(seg, i)); + DebugPoolFreeSplat(pool, PoolAddrOfIndex(SegBase(seg), pool, i), + PoolAddrOfIndex(SegBase(seg), pool, j)); + ++j; /* we know next grain is not white */ + } + } + + nowFree = BTCountResRange(amsseg->nonwhiteTable, 0, grains); + + /* If the free space is all after firstFree, keep on using firstFree. */ + /* It could have a more complicated condition, but not worth the trouble. 
*/ + if (!amsseg->allocTableInUse && amsseg->firstFree + nowFree == grains) { + AVER(amsseg->firstFree == grains + || BTIsResRange(amsseg->nonwhiteTable, + amsseg->firstFree, grains)); + } else { + if (amsseg->ams->shareAllocTable) { + /* Stop using allocTable as the white table. */ + amsseg->allocTableInUse = TRUE; + } else { + AVER(amsseg->allocTableInUse); + BTCopyRange(amsseg->nonwhiteTable, amsseg->allocTable, 0, grains); + } + } + + reclaimedGrains = nowFree - amsseg->freeGrains; + AVER(amsseg->oldGrains >= reclaimedGrains); + amsseg->oldGrains -= reclaimedGrains; + amsseg->freeGrains += reclaimedGrains; + PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE); + STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains)); + /* preservedInPlaceCount is updated on fix */ + preservedInPlaceSize = PoolGrainsSize(pool, amsseg->oldGrains); + GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize); + + /* Ensure consistency of segment even if are just about to free it */ + amsseg->colourTablesInUse = FALSE; + SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); + + if (amsseg->freeGrains == grains && !SegHasBuffer(seg)) { + /* No survivors */ + AVER(amsseg->bufferedGrains == 0); + PoolGenFree(pgen, seg, + PoolGrainsSize(pool, amsseg->freeGrains), + PoolGrainsSize(pool, amsseg->oldGrains), + PoolGrainsSize(pool, amsseg->newGrains), + FALSE); + } +} + + +/* amsSegWalk -- walk formatted objects in AMC segment */ + +static void amsSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s) +{ + AMSSeg amsseg = MustBeA(AMSSeg, seg); + Pool pool = SegPool(seg); + Addr object, base, limit; + + AVERT(Format, format); + AVER(FUNCHECK(f)); + /* p and s are arbitrary closures and can't be checked */ + + base = SegBase(seg); + object = base; + limit = SegLimit(seg); + + while (object < limit) { + /* object is a slight misnomer because it might point to a free grain */ + Addr next; + Index i; + Buffer buffer; + + if 
(SegBuffer(&buffer, seg)) { + if (object == BufferScanLimit(buffer) + && BufferScanLimit(buffer) != BufferLimit(buffer)) { + /* skip over buffered area */ + object = BufferLimit(buffer); + continue; + } + /* since we skip over the buffered area we are always */ + /* either before the buffer, or after it, never in it */ + AVER(object < BufferGetInit(buffer) || BufferLimit(buffer) <= object); + } + i = PoolIndexOfAddr(SegBase(seg), pool, object); + if (!AMS_ALLOCED(seg, i)) { + /* This grain is free */ + object = AddrAdd(object, PoolAlignment(pool)); + continue; + } + object = AddrAdd(object, format->headerSize); + next = format->skip(object); + next = AddrSub(next, format->headerSize); + AVER(AddrIsAligned(next, PoolAlignment(pool))); + if (!amsseg->colourTablesInUse || !AMS_IS_WHITE(seg, i)) + (*f)(object, pool->format, pool, p, s); + object = next; + } +} + + +/* AMSFreeWalk -- free block walking method of the pool class */ + +static void AMSFreeWalk(Pool pool, FreeBlockVisitor f, void *p) +{ + AMS ams; + Ring node, ring, nextNode; /* for iterating over the segments */ + + AVERT(Pool, pool); + ams = PoolAMS(pool); + AVERT(AMS, ams); + + ring = PoolSegRing(AMSPool(ams)); + RING_FOR(node, ring, nextNode) { + AMSSegFreeWalk(Seg2AMSSeg(SegOfPoolRing(node)), f, p); + } +} + + +/* AMSTotalSize -- total memory allocated from the arena */ + +static Size AMSTotalSize(Pool pool) +{ + AMS ams; + + AVERT(Pool, pool); + ams = PoolAMS(pool); + AVERT(AMS, ams); + + return ams->pgen->totalSize; +} + + +/* AMSFreeSize -- free memory (unused by client program) */ + +static Size AMSFreeSize(Pool pool) +{ + AMS ams; + + AVERT(Pool, pool); + ams = PoolAMS(pool); + AVERT(AMS, ams); + + return ams->pgen->freeSize; +} + + +/* AMSDescribe -- the pool class description method + * + * Iterates over the segments, describing all of them. 
+ */ + +static Res AMSDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + AMS ams = CouldBeA(AMSPool, pool); + Ring ring, node, nextNode; + Res res; + + if (!TESTC(AMSPool, ams)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, AMSPool, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "segments: * black + grey - white . alloc ! bad\n" + "buffers: [ base < scan limit | init > alloc ] limit\n", + NULL); + if (res != ResOK) + return res; + + ring = PoolSegRing(AMSPool(ams)); + RING_FOR(node, ring, nextNode) { + res = SegDescribe(SegOfPoolRing(node), stream, depth + 2); + if (res != ResOK) + return res; + } + + return ResOK; +} + + +/* AMSPoolClass -- the class definition */ + +/* contains the type definition. Hence the use */ +/* of DEFINE_CLASS rather than DEFINE_POOL_CLASS */ + +DEFINE_CLASS(Pool, AMSPool, klass) +{ + INHERIT_CLASS(klass, AMSPool, AbstractCollectPool); + klass->instClassStruct.describe = AMSDescribe; + klass->instClassStruct.finish = AMSFinish; + klass->size = sizeof(AMSStruct); + klass->varargs = AMSVarargs; + klass->init = AMSInit; + klass->bufferClass = RankBufClassGet; + klass->bufferFill = AMSBufferFill; + klass->segPoolGen = amsSegPoolGen; + klass->freewalk = AMSFreeWalk; + klass->totalSize = AMSTotalSize; + klass->freeSize = AMSFreeSize; + AVERT(PoolClass, klass); +} + + +/* AMSDebugMixin - find debug mixin in class AMSDebug */ + +static PoolDebugMixin AMSDebugMixin(Pool pool) +{ + AMS ams; + + AVERT(Pool, pool); + ams = PoolAMS(pool); + AVERT(AMS, ams); + /* Can't check AMSDebug, because this is called during init */ + return &(AMS2AMSDebug(ams)->debug); +} + + +/* AMSDebugPoolClass -- the class definition for the debug version */ + +DEFINE_CLASS(Pool, AMSDebugPool, klass) +{ + INHERIT_CLASS(klass, AMSDebugPool, AMSPool); + PoolClassMixInDebug(klass); + klass->size = sizeof(AMSDebugStruct); + 
klass->varargs = AMSDebugVarargs; + klass->debugMixin = AMSDebugMixin; + AVERT(PoolClass, klass); +} + + +/* mps_class_ams -- return the AMS pool class descriptor */ + +mps_pool_class_t mps_class_ams(void) +{ + return (mps_pool_class_t)CLASS(AMSPool); +} + + +/* mps_class_ams_debug -- return the AMS (debug) pool class descriptor */ + +mps_pool_class_t mps_class_ams_debug(void) +{ + return (mps_pool_class_t)CLASS(AMSDebugPool); +} + + +/* AMSCheck -- the check method for an AMS */ + +Bool AMSCheck(AMS ams) +{ + CHECKS(AMS, ams); + CHECKC(AMSPool, ams); + CHECKD(Pool, AMSPool(ams)); + CHECKL(IsA(AMSPool, ams)); + CHECKL(PoolAlignment(AMSPool(ams)) == AMSPool(ams)->format->alignment); + if (ams->pgen != NULL) { + CHECKL(ams->pgen == &ams->pgenStruct); + CHECKD(PoolGen, ams->pgen); + } + CHECKL(FUNCHECK(ams->segSize)); + CHECKL(FUNCHECK(ams->segsDestroy)); + CHECKL(FUNCHECK(ams->segClass)); + + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolams.h b/mps/code/poolams.h new file mode 100644 index 00000000000..f27957b73bd --- /dev/null +++ b/mps/code/poolams.h @@ -0,0 +1,208 @@ +/* poolams.h: AUTOMATIC MARK & SWEEP POOL CLASS INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .purpose: Internal interface to AMS functionality. */ + +#ifndef poolams_h +#define poolams_h + +#include "mpmtypes.h" +#include "mpm.h" +#include "mpmst.h" +#include "ring.h" +#include "bt.h" +#include "mpscams.h" +#include + + +typedef struct AMSStruct *AMS; +typedef struct AMSSegStruct *AMSSeg; + + +/* AMSRingFunction is the type of the method to find the ring that */ +/* the AMS pool is allocating on. */ +typedef Ring (*AMSRingFunction)(AMS ams, RankSet rankSet, Size size); +/* AMSSegClassFunction is the type of the method to indicate */ +/* the segment class of an AMS pool. Returns a subclass of AMSSegClass. */ +/* The type is congruent with SegClassGet functions. */ +typedef SegClass (*AMSSegClassFunction)(void); +/* AMSSegsDestroyFunction is the type of the method to destroy all */ +/* segs of an AMS pool. */ +typedef void (*AMSSegsDestroyFunction)(AMS ams); +/* AMSSegSizePolicyFunction is the type of the method which picks */ +/* a segment size given an object size. 
*/ +typedef Res (*AMSSegSizePolicyFunction)(Size *sizeReturn, + Pool pool, Size size, + RankSet rankSet); + + +typedef struct AMSStruct { + PoolStruct poolStruct; /* generic pool structure */ + PoolGenStruct pgenStruct; /* generation representing the pool */ + PoolGen pgen; /* NULL or pointer to pgenStruct field */ + Size size; /* total segment size of the pool */ + AMSSegSizePolicyFunction segSize; /* SegSize policy */ + AMSSegsDestroyFunction segsDestroy; + AMSSegClassFunction segClass;/* fn to get the class for segments */ + Bool shareAllocTable; /* the alloc table is also used as white table */ + Sig sig; /* design.mps.sig.field.end.outer */ +} AMSStruct; + + +typedef struct AMSSegStruct { + GCSegStruct gcSegStruct; /* superclass fields must come first */ + AMS ams; /* owning ams */ + Count grains; /* total grains */ + Count freeGrains; /* free grains */ + Count bufferedGrains; /* grains in buffers */ + Count newGrains; /* grains allocated since last collection */ + Count oldGrains; /* grains allocated prior to last collection */ + Bool allocTableInUse; /* allocTable is used */ + Index firstFree; /* 1st free grain, if allocTable is not used */ + BT allocTable; /* set if grain is allocated */ + /* */ + Bool marksChanged; /* seg has been marked since last scan */ + Bool ambiguousFixes; /* seg has been ambiguously marked since last scan */ + Bool colourTablesInUse;/* the colour tables are in use */ + BT nonwhiteTable; /* set if grain not white */ + BT nongreyTable; /* set if not first grain of grey object */ + Sig sig; /* design.mps.sig.field.end.outer */ +} AMSSegStruct; + + +/* macros to get between child and parent structures */ + +#define Seg2AMSSeg(seg) ((AMSSeg)(seg)) +#define AMSSeg2Seg(amsseg) ((Seg)(amsseg)) + +#define PoolAMS(pool) PARENT(AMSStruct, poolStruct, pool) +#define AMSPool(ams) (&(ams)->poolStruct) + + +/* colour ops */ + +#define AMS_IS_WHITE(seg, index) \ + (!BTGet(Seg2AMSSeg(seg)->nonwhiteTable, index)) + +#define AMS_IS_GREY(seg, index) \ 
  (!BTGet(Seg2AMSSeg(seg)->nongreyTable, index))

/* Colour encoding: a grain is white iff its bit is clear in
 * nonwhiteTable, grey iff its bit is clear in nongreyTable.  Black is
 * therefore "neither white nor grey"; a grain that is grey but not
 * white is an invalid combination used only for checking. */

#define AMS_IS_BLACK(seg, index) \
  (!AMS_IS_GREY(seg, index) && !AMS_IS_WHITE(seg, index))

#define AMS_IS_INVALID_COLOUR(seg, index) \
  (AMS_IS_GREY(seg, index) && !AMS_IS_WHITE(seg, index))

#define AMS_WHITE_GREYEN(seg, index) \
  BEGIN \
    BTRes(Seg2AMSSeg(seg)->nongreyTable, index); \
  END

#define AMS_GREY_BLACKEN(seg, index) \
  BEGIN \
    BTSet(Seg2AMSSeg(seg)->nongreyTable, index); \
    BTSet(Seg2AMSSeg(seg)->nonwhiteTable, index); \
  END

#define AMS_WHITE_BLACKEN(seg, index) \
  BEGIN \
    BTSet(Seg2AMSSeg(seg)->nonwhiteTable, index); \
  END

#define AMS_RANGE_WHITE_BLACKEN(seg, base, limit) \
  BEGIN \
    BTSetRange(Seg2AMSSeg(seg)->nonwhiteTable, base, limit); \
  END

#define AMS_RANGE_BLACKEN(seg, base, limit) \
  BEGIN \
    BTSetRange(Seg2AMSSeg(seg)->nonwhiteTable, base, limit); \
    BTSetRange(Seg2AMSSeg(seg)->nongreyTable, base, limit); \
  END

#define AMS_RANGE_WHITEN(seg, base, limit) \
  BEGIN \
    BTResRange(Seg2AMSSeg(seg)->nonwhiteTable, base, limit); \
    BTSetRange(Seg2AMSSeg(seg)->nongreyTable, base, limit); \
  END

/* Search macros: find the next grey/white grain (short range of length
 * 1) or the next maximal run of white grains (long range). */

#define AMSFindGrey(pos, dummy, seg, base, limit) \
  BTFindShortResRange(pos, dummy, Seg2AMSSeg(seg)->nongreyTable, \
                      base, limit, 1)

#define AMSFindWhite(pos, dummy, seg, base, limit) \
  BTFindShortResRange(pos, dummy, Seg2AMSSeg(seg)->nonwhiteTable, \
                      base, limit, 1)

#define AMS_FIND_WHITE_RANGE(baseOut, limitOut, seg, base, limit) \
  BTFindLongResRange(baseOut, limitOut, Seg2AMSSeg(seg)->nonwhiteTable, \
                     base, limit, 1)

/* A grain is allocated either per the alloc bit table or, when the
 * table is not in use, by being below the firstFree watermark. */

#define AMS_ALLOCED(seg, index) \
  (Seg2AMSSeg(seg)->allocTableInUse \
   ?
BTGet(Seg2AMSSeg(seg)->allocTable, index) \ + : (Seg2AMSSeg(seg)->firstFree > (index))) + + +/* the rest */ + +extern Res AMSInitInternal(AMS ams, Arena arena, PoolClass klass, + Chain chain, unsigned gen, + Bool shareAllocTable, ArgList args); +extern void AMSFinish(Inst inst); +extern Bool AMSCheck(AMS ams); + +#define AMSChain(ams) ((ams)->chain) + +extern void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockVisitor f, void *p); + +extern void AMSSegFreeCheck(AMSSeg amsseg); + +extern Bool AMSSegCheck(AMSSeg seg); + + +/* class declarations */ + +typedef AMS AMSPool; +DECLARE_CLASS(Pool, AMSPool, AbstractCollectPool); + +typedef AMS AMSDebugPool; +DECLARE_CLASS(Pool, AMSDebugPool, AMSPool); + +DECLARE_CLASS(Seg, AMSSeg, MutatorSeg); + + +#endif /* poolams_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolawl.c b/mps/code/poolawl.c new file mode 100644 index 00000000000..77b8fb27bd3 --- /dev/null +++ b/mps/code/poolawl.c @@ -0,0 +1,1308 @@ +/* poolawl.c: AUTOMATIC WEAK LINKED POOL CLASS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * + * DESIGN + * + * .design: . This is Dylan-specific pool. + * + * + * ASSUMPTIONS (about when to scan single references on accesses) + * + * .assume.purpose: The purpose of scanning refs singly is to limit the + * amount of scanning of weak references which must be performed when + * the mutator hits a barrier. Weak references which are scanned at this + * time are not "weak splatted". Minimizing any loss of weak splats + * potentially reduces conservatism in the collector. + * + * .assume.noweak: It follows (from .assume.purpose) that there is no + * benefit from scanning single refs on barrier accesses for segments + * which don't contain any weak references. However, if a segment + * contains either all weak refs or a mixture of weak and non-weak + * references then there is a potential benefit. + * + * .assume.mixedrank: If a segment contains a mixture of references + * at different ranks (e.g. weak and strong references), there is + * no way to determine whether or not references at a rank other than + * the scan state rank will be scanned as a result of normal + * (non-barrier) scanning activity. 
(@@@@ This is a deficiency in MPS). + * Assume that such references will, in fact, be scanned at the + * incorrect rank. + * + * .assume.samerank: The pool doesn't support segments with mixed + * rank segments in any case (despite .assume.mixedrank). + * + * .assume.alltraceable: The pool assumes that all objects are entirely + * traceable. This must be documented elsewhere for the benefit of the + * client. + */ + +#include "mpscawl.h" +#include "mpm.h" +#include "locus.h" + +SRCID(poolawl, "$Id$"); + + +#define AWLSig ((Sig)0x519B7A37) /* SIGnature PooL AWL */ + +static Bool awlSegBufferFill(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet); +static void awlSegBufferEmpty(Seg seg, Buffer buffer); +static Res awlSegAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context); +static Res awlSegWhiten(Seg seg, Trace trace); +static void awlSegGreyen(Seg seg, Trace trace); +static void awlSegBlacken(Seg seg, TraceSet traceSet); +static Res awlSegScan(Bool *totalReturn, Seg seg, ScanState ss); +static Res awlSegFix(Seg seg, ScanState ss, Ref *refIO); +static void awlSegReclaim(Seg seg, Trace trace); +static void awlSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s); + + +/* awlStat* -- Statistics gathering about instruction emulation + * + * To support change.dylan.2.0.160044. 
 */


/* Per-segment statistics maintained between segment scans */

typedef struct awlStatSegStruct {
  Count sameAccesses;  /* accesses involving same address as last access */
  Addr lastAccess;     /* the address of last access */
} awlStatSegStruct, *awlStatSeg;

/* Per-pool statistics updated at segment scans */

typedef struct awlStatTotalStruct {
  Count goodScans;     /* total times a segment scanned at proper rank */
  Count badScans;      /* total times a segment scanned at improper rank */
  Count savedScans;    /* total times an entire segment scan was avoided */
  Count savedAccesses; /* total single references leading to a saved scan */
  Count declined;      /* number of declined single accesses */
} awlStatTotalStruct, *awlStatTotal;

/* the type of a function to find an object's dependent object */

typedef Addr (*FindDependentFunction)(Addr object);

/* AWLStruct -- AWL pool structure
 *
 *
 */

typedef struct AWLPoolStruct {
  PoolStruct poolStruct;       /* generic pool structure */
  PoolGenStruct pgenStruct;    /* generation representing the pool */
  PoolGen pgen;                /* NULL or pointer to pgenStruct */
  Count succAccesses;          /* number of successive single accesses */
  FindDependentFunction findDependent; /* to find a dependent object */
  awlStatTotalStruct stats;    /* whole-pool access/scan statistics */
  Sig sig;                     /* design.mps.sig.field.end.outer */
} AWLPoolStruct, *AWL;


static Bool AWLCheck(AWL awl);


typedef AWL AWLPool;
#define AWLPoolCheck AWLCheck
DECLARE_CLASS(Pool, AWLPool, AbstractCollectPool);


/* AWLSegStruct -- AWL segment subclass
 *
 * Colour is represented as follows:
 * Black: +alloc +mark +scanned
 * White: +alloc -mark -scanned
 * Grey: +alloc +mark -scanned
 * Free: -alloc ?mark ?scanned
 */

#define AWLSegSig ((Sig)0x519A3759) /* SIGnature AWL SeG */

/* */
typedef struct AWLSegStruct {
  GCSegStruct gcSegStruct;  /* superclass fields must come first */
  BT mark;                  /* mark bit table (see colour key above) */
  BT scanned;               /* scanned bit table (see colour key above) */
  BT alloc;                 /* set if grain is allocated */
  Count grains;             /* total grains in the segment */
  Count freeGrains;         /* free grains */
  Count bufferedGrains;     /* 
grains in buffers */ + Count newGrains; /* grains allocated since last collection */ + Count oldGrains; /* grains allocated prior to last collection */ + Count singleAccesses; /* number of accesses processed singly */ + awlStatSegStruct stats; + Sig sig; /* design.mps.sig.field.end.outer */ +} AWLSegStruct, *AWLSeg; + +DECLARE_CLASS(Seg, AWLSeg, MutatorSeg); + +ATTRIBUTE_UNUSED +static Bool AWLSegCheck(AWLSeg awlseg) +{ + CHECKS(AWLSeg, awlseg); + CHECKD(GCSeg, &awlseg->gcSegStruct); + CHECKL(awlseg->mark != NULL); + CHECKL(awlseg->scanned != NULL); + CHECKL(awlseg->alloc != NULL); + CHECKL(awlseg->grains > 0); + CHECKL(awlseg->grains == awlseg->freeGrains + awlseg->bufferedGrains + + awlseg->newGrains + awlseg->oldGrains); + return TRUE; +} + + +/* Management of statistics for monitoring protection-driven accesses */ + +static void awlStatSegInit(AWLSeg awlseg) +{ + awlseg->stats.sameAccesses = 0; + awlseg->stats.lastAccess = NULL; +} + +static void awlStatTotalInit(AWL awl) +{ + awl->stats.goodScans = 0; + awl->stats.badScans = 0; + awl->stats.savedAccesses = 0; + awl->stats.savedScans = 0; + awl->stats.declined = 0; +} + + +/* AWLSegInit -- Init method for AWL segments */ + +ARG_DEFINE_KEY(awl_seg_rank_set, RankSet); +#define awlKeySegRankSet (&_mps_key_awl_seg_rank_set) + +static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + AWLSeg awlseg; + Arena arena; + RankSet rankSet; + Count bits; /* number of grains */ + Res res; + Size tableSize; + void *v; + ArgStruct arg; + + ArgRequire(&arg, args, awlKeySegRankSet); + rankSet = arg.val.u; + AVERT(RankSet, rankSet); + /* .assume.samerank */ + /* AWL only accepts two ranks */ + AVER(RankSetSingle(RankEXACT) == rankSet + || RankSetSingle(RankWEAK) == rankSet); + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, AWLSeg, init)(seg, pool, base, size, args); + if (res != ResOK) + goto failSuperInit; + awlseg = CouldBeA(AWLSeg, seg); + + AVERT(Pool, 
pool); + arena = PoolArena(pool); + /* no useful checks for base and size */ + + bits = PoolSizeGrains(pool, size); + tableSize = BTSize(bits); + res = ControlAlloc(&v, arena, 3 * tableSize); + if (res != ResOK) + goto failControlAlloc; + awlseg->mark = v; + awlseg->scanned = PointerAdd(v, tableSize); + awlseg->alloc = PointerAdd(v, 2 * tableSize); + awlseg->grains = bits; + BTResRange(awlseg->mark, 0, bits); + BTResRange(awlseg->scanned, 0, bits); + BTResRange(awlseg->alloc, 0, bits); + SegSetRankAndSummary(seg, rankSet, RefSetUNIV); + awlseg->freeGrains = bits; + awlseg->bufferedGrains = (Count)0; + awlseg->newGrains = (Count)0; + awlseg->oldGrains = (Count)0; + awlseg->singleAccesses = 0; + awlStatSegInit(awlseg); + + SetClassOfPoly(seg, CLASS(AWLSeg)); + awlseg->sig = AWLSegSig; + AVERC(AWLSeg, awlseg); + + return ResOK; + +failControlAlloc: + NextMethod(Inst, AWLSeg, finish)(MustBeA(Inst, seg)); +failSuperInit: + AVER(res != ResOK); + return res; +} + + +/* AWLSegFinish -- Finish method for AWL segments */ + +static void AWLSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + AWLSeg awlseg = MustBeA(AWLSeg, seg); + Pool pool = SegPool(seg); + Arena arena = PoolArena(pool); + Size tableSize; + Count segGrains; + + /* This is one of the few places where it is easy to check */ + /* awlseg->grains, so we do */ + segGrains = PoolSizeGrains(pool, SegSize(seg)); + AVER(segGrains == awlseg->grains); + tableSize = BTSize(segGrains); + ControlFree(arena, awlseg->mark, 3 * tableSize); + awlseg->sig = SigInvalid; + + /* finish the superclass fields last */ + NextMethod(Inst, AWLSeg, finish)(inst); +} + + +/* AWLSegClass -- Class definition for AWL segments */ + +DEFINE_CLASS(Seg, AWLSeg, klass) +{ + INHERIT_CLASS(klass, AWLSeg, MutatorSeg); + SegClassMixInNoSplitMerge(klass); /* no support for this (yet) */ + klass->instClassStruct.finish = AWLSegFinish; + klass->size = sizeof(AWLSegStruct); + klass->init = AWLSegInit; + klass->bufferFill = awlSegBufferFill; + 
klass->bufferEmpty = awlSegBufferEmpty; + klass->access = awlSegAccess; + klass->whiten = awlSegWhiten; + klass->greyen = awlSegGreyen; + klass->blacken = awlSegBlacken; + klass->scan = awlSegScan; + klass->fix = awlSegFix; + klass->fixEmergency = awlSegFix; + klass->reclaim = awlSegReclaim; + klass->walk = awlSegWalk; + AVERT(SegClass, klass); +} + + +/* Single access pattern control parameters + * + * These control the number of expensive emulated single-accesses we allow + * before we give up and scan a segment at whatever rank, possibly causing + * retention of weak objects. + * + * AWLSegSALimit is the number of accesses for a single segment in a GC cycle. + * AWLTotalSALimit is the total number of accesses during a GC cycle. + * + * These should be set in config.h, but are here in global variables so that + * it's possible to tweak them in a debugger. + */ + +extern Count AWLSegSALimit; +Count AWLSegSALimit = AWL_SEG_SA_LIMIT; +extern Bool AWLHaveSegSALimit; +Bool AWLHaveSegSALimit = AWL_HAVE_SEG_SA_LIMIT; + +extern Count AWLTotalSALimit; +Count AWLTotalSALimit = AWL_TOTAL_SA_LIMIT; +extern Bool AWLHaveTotalSALimit; +Bool AWLHaveTotalSALimit = AWL_HAVE_TOTAL_SA_LIMIT; + + +/* Determine whether to permit scanning a single ref. */ + +static Bool awlSegCanTrySingleAccess(Arena arena, Seg seg, Addr addr) +{ + AWLSeg awlseg; + AWL awl; + + AVERT(Arena, arena); + AVERT(Seg, seg); + AVER(addr != NULL); + + /* .assume.noweak */ + /* .assume.alltraceable */ + if (!RankSetIsMember(SegRankSet(seg), RankWEAK)) + return FALSE; + + /* If there are no traces in progress then the segment isn't read + protected and this is just an ordinary write barrier hit. No need to + scan at all. */ + if (arena->flippedTraces == TraceSetEMPTY) { + AVER(!(SegSM(seg) & AccessREAD)); + return FALSE; + } + + /* The trace is already in the weak band, so we can scan the whole + segment without retention anyway. Go for it. 
*/ + if (TraceRankForAccess(arena, seg) == RankWEAK) + return FALSE; + + awlseg = MustBeA(AWLSeg, seg); + awl = MustBeA(AWLPool, SegPool(seg)); + + /* If there have been too many single accesses in a row then don't + keep trying them, even if it means retaining objects. */ + if(AWLHaveTotalSALimit) { + if(awl->succAccesses >= AWLTotalSALimit) { + STATISTIC(awl->stats.declined++); + EVENT2(AWLDeclineTotal, seg, (EventFU)awl->succAccesses); + return FALSE; /* decline single access because of total limit */ + } + } + + /* If there have been too many single accesses to this segment + then don't keep trying them, even if it means retaining objects. + (Observed behaviour in Open Dylan 2012-09-10 by RB.) */ + if(AWLHaveSegSALimit) { + if(awlseg->singleAccesses >= AWLSegSALimit) { + STATISTIC(awl->stats.declined++); + EVENT2(AWLDeclineSeg, seg, (EventFU)awlseg->singleAccesses); + return FALSE; /* decline single access because of segment limit */ + } + } + + return TRUE; +} + + +/* Record an access to a segment which required scanning a single ref */ + +static void AWLNoteRefAccess(AWL awl, Seg seg, Addr addr) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + + AVERT(AWL, awl); + AVER(addr != NULL); + + awlseg->singleAccesses++; /* increment seg count of ref accesses */ + STATISTIC({ + if (addr == awlseg->stats.lastAccess) { + /* If this is a repeated access, increment count */ + ++ awlseg->stats.sameAccesses; + } + awlseg->stats.lastAccess = addr; + }); + awl->succAccesses++; /* Note a new successive access */ +} + + +/* Record an access to a segment which required scanning the entire seg */ + +static void AWLNoteSegAccess(AWL awl, Seg seg, Addr addr) +{ + AVERT(AWL, awl); + AVERT(Seg, seg); + AVER(addr != NULL); + + awl->succAccesses = 0; /* reset count of successive accesses */ +} + + +/* Record a scan of a segment which wasn't provoked by an access */ + +static void AWLNoteScan(Seg seg, ScanState ss) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + UNUSED(ss); + + /* 
.assume.mixedrank */ + /* .assume.samerank */ + if (RankSetIsMember(SegRankSet(seg), RankWEAK)) { + STATISTIC({ + /* If this segment has any RankWEAK references, then record + * statistics about whether weak splatting is being lost. */ + AWL awl = MustBeA(AWLPool, SegPool(seg)); + if (RankWEAK == ss->rank) { + /* This is "successful" scan at proper rank. */ + ++ awl->stats.goodScans; + if (0 < awlseg->singleAccesses) { + /* Accesses have been processed singly. Record that we + * genuinely did save a protection-provoked scan */ + ++ awl->stats.savedScans; + awl->stats.savedAccesses += awlseg->singleAccesses; + } + } else { + /* This is "failed" scan at improper rank. */ + ++ awl->stats.badScans; + } + awlStatSegInit(awlseg); + }); + /* Reinitialize the segment statistics */ + awlseg->singleAccesses = 0; + } +} + + +/* awlSegBufferFill -- try filling buffer from segment */ + +static Bool awlSegBufferFill(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet) +{ + Index baseIndex, limitIndex; + AWLSeg awlseg = MustBeA(AWLSeg, seg); + Pool pool = SegPool(seg); + Count requestedGrains, segGrains, allocatedGrains; + Addr segBase, base, limit; + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVER(SizeIsAligned(size, PoolAlignment(pool))); + AVER(size > 0); + AVERT(RankSet, rankSet); + + requestedGrains = PoolSizeGrains(pool, size); + if (awlseg->freeGrains < requestedGrains) + /* Not enough space to satisfy the request. */ + return FALSE; + + if (SegHasBuffer(seg)) + /* Don't bother trying to allocate from a buffered segment */ + return FALSE; + + if (rankSet != SegRankSet(seg)) + /* Can't satisfy required rank set. */ + return FALSE; + + segGrains = PoolSizeGrains(pool, SegSize(seg)); + if (awlseg->freeGrains == segGrains) { + /* Whole segment is free: no need for a search. 
*/ + baseIndex = 0; + limitIndex = segGrains; + goto found; + } + + if (!BTFindLongResRange(&baseIndex, &limitIndex, awlseg->alloc, + 0, segGrains, requestedGrains)) + return FALSE; + +found: + AVER(baseIndex < limitIndex); + allocatedGrains = limitIndex - baseIndex; + AVER(requestedGrains <= allocatedGrains); + AVER(BTIsResRange(awlseg->alloc, baseIndex, limitIndex)); + BTSetRange(awlseg->alloc, baseIndex, limitIndex); + /* Objects are allocated black. */ + /* TODO: This should depend on trace phase. */ + BTSetRange(awlseg->mark, baseIndex, limitIndex); + BTSetRange(awlseg->scanned, baseIndex, limitIndex); + AVER(awlseg->freeGrains >= allocatedGrains); + awlseg->freeGrains -= allocatedGrains; + awlseg->bufferedGrains += allocatedGrains; + + segBase = SegBase(seg); + base = PoolAddrOfIndex(segBase, pool, baseIndex); + limit = PoolAddrOfIndex(segBase, pool, limitIndex); + PoolGenAccountForFill(PoolSegPoolGen(pool, seg), AddrOffset(base, limit)); + + *baseReturn = base; + *limitReturn = limit; + return TRUE; +} + + +/* AWLVarargs -- decode obsolete varargs */ + +static void AWLVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_FORMAT; + args[0].val.format = va_arg(varargs, Format); + args[1].key = MPS_KEY_AWL_FIND_DEPENDENT; + args[1].val.addr_method = va_arg(varargs, mps_awl_find_dependent_t); + args[2].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + +/* awlNoDependent -- no dependent object */ + +static Addr awlNoDependent(Addr addr) +{ + UNUSED(addr); + return NULL; +} + + +/* AWLInit -- initialize an AWL pool */ + +ARG_DEFINE_KEY(AWL_FIND_DEPENDENT, Fun); + +static Res AWLInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + AWL awl; + FindDependentFunction findDependent = awlNoDependent; + Chain chain; + Res res; + ArgStruct arg; + unsigned gen = AWL_GEN_DEFAULT; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + UNUSED(klass); /* used for debug pools only */ + + if (ArgPick(&arg, args, 
MPS_KEY_AWL_FIND_DEPENDENT)) + findDependent = (FindDependentFunction)arg.val.addr_method; + if (ArgPick(&arg, args, MPS_KEY_CHAIN)) + chain = arg.val.chain; + else { + chain = ArenaGlobals(arena)->defaultChain; + gen = 1; /* avoid the nursery of the default chain by default */ + } + if (ArgPick(&arg, args, MPS_KEY_GEN)) + gen = arg.val.u; + + res = NextMethod(Pool, AWLPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + awl = CouldBeA(AWLPool, pool); + + /* Ensure a format was supplied in the argument list. */ + AVER(pool->format != NULL); + pool->alignment = pool->format->alignment; + pool->alignShift = SizeLog2(pool->alignment); + + AVER(FUNCHECK(findDependent)); + awl->findDependent = findDependent; + + AVERT(Chain, chain); + AVER(gen <= ChainGens(chain)); + AVER(chain->arena == PoolArena(pool)); + + awl->pgen = NULL; + + awl->succAccesses = 0; + awlStatTotalInit(awl); + + SetClassOfPoly(pool, CLASS(AWLPool)); + awl->sig = AWLSig; + AVERC(AWLPool, awl); + + res = PoolGenInit(&awl->pgenStruct, ChainGen(chain, gen), pool); + if (res != ResOK) + goto failGenInit; + awl->pgen = &awl->pgenStruct; + + EVENT2(PoolInitAWL, pool, pool->format); + + return ResOK; + +failGenInit: + NextMethod(Inst, AWLPool, finish)(MustBeA(Inst, pool)); +failNextInit: + AVER(res != ResOK); + return res; +} + + +/* AWLFinish -- finish an AWL pool */ + +static void AWLFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + AWL awl = MustBeA(AWLPool, pool); + Ring ring, node, nextNode; + + ring = &pool->segRing; + RING_FOR(node, ring, nextNode) { + Seg seg = SegOfPoolRing(node); + AWLSeg awlseg = MustBeA(AWLSeg, seg); + AVER(!SegHasBuffer(seg)); + AVERT(AWLSeg, awlseg); + AVER(awlseg->bufferedGrains == 0); + PoolGenFree(awl->pgen, seg, + PoolGrainsSize(pool, awlseg->freeGrains), + PoolGrainsSize(pool, awlseg->oldGrains), + PoolGrainsSize(pool, awlseg->newGrains), + FALSE); + } + awl->sig = SigInvalid; + PoolGenFinish(awl->pgen); + + NextMethod(Inst, 
AWLPool, finish)(inst); +} + + +/* awlBufferFill -- BufferFill method for AWL */ + +static Res awlBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + AWL awl = MustBeA(AWLPool, pool); + Res res; + Ring node, nextNode; + RankSet rankSet; + Seg seg; + Bool b; + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERC(Buffer, buffer); + AVER(BufferIsReset(buffer)); + AVER(size > 0); + AVER(SizeIsAligned(size, PoolAlignment(pool))); + + rankSet = BufferRankSet(buffer); + RING_FOR(node, &pool->segRing, nextNode) { + seg = SegOfPoolRing(node); + if (SegBufferFill(baseReturn, limitReturn, seg, size, rankSet)) + return ResOK; + } + + /* No segment had enough space, so make a new one. */ + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD_FIELD(args, awlKeySegRankSet, u, BufferRankSet(buffer)); + res = PoolGenAlloc(&seg, awl->pgen, CLASS(AWLSeg), + SizeArenaGrains(size, PoolArena(pool)), args); + } MPS_ARGS_END(args); + if (res != ResOK) + return res; + b = SegBufferFill(baseReturn, limitReturn, seg, size, rankSet); + AVER(b); + return ResOK; +} + + +/* awlSegBufferEmpty -- empty buffer to segment */ + +static void awlSegBufferEmpty(Seg seg, Buffer buffer) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + Pool pool = SegPool(seg); + Addr segBase, bufferBase, init, limit; + Index initIndex, limitIndex; + Count unusedGrains, usedGrains; + + AVERT(Seg, seg); + AVERT(Buffer, buffer); + segBase = SegBase(seg); + bufferBase = BufferBase(buffer); + init = BufferGetInit(buffer); + limit = BufferLimit(buffer); + AVER(segBase <= bufferBase); + AVER(bufferBase <= init); + AVER(init <= limit); + AVER(limit <= SegLimit(seg)); + + initIndex = PoolIndexOfAddr(segBase, pool, init); + limitIndex = PoolIndexOfAddr(segBase, pool, limit); + + if (initIndex < limitIndex) + BTResRange(awlseg->alloc, initIndex, limitIndex); + + unusedGrains = limitIndex - initIndex; + AVER(unusedGrains <= awlseg->bufferedGrains); + usedGrains = awlseg->bufferedGrains - unusedGrains; 
+ awlseg->freeGrains += unusedGrains; + awlseg->bufferedGrains = 0; + awlseg->newGrains += usedGrains; + + PoolGenAccountForEmpty(PoolSegPoolGen(pool, seg), + PoolGrainsSize(pool, usedGrains), + PoolGrainsSize(pool, unusedGrains), FALSE); +} + + +/* awlSegPoolGen -- get pool generation for an AWL segment */ + +static PoolGen awlSegPoolGen(Pool pool, Seg seg) +{ + AWL awl = MustBeA(AWLPool, pool); + AVERT(Seg, seg); + return awl->pgen; +} + + +/* awlSegWhiten -- segment condemning method */ + +/* awlSegRangeWhiten -- helper function that works on a range. + * + * This function abstracts common code from awlSegWhiten. + */ +static void awlSegRangeWhiten(AWLSeg awlseg, Index base, Index limit) +{ + if(base != limit) { + AVER(base < limit); + AVER(limit <= awlseg->grains); + BTResRange(awlseg->mark, base, limit); + BTResRange(awlseg->scanned, base, limit); + } +} + +static Res awlSegWhiten(Seg seg, Trace trace) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + Pool pool = SegPool(seg); + PoolGen pgen = PoolSegPoolGen(pool, seg); + Buffer buffer; + Count agedGrains, uncondemnedGrains; + + /* All parameters checked by generic SegWhiten. */ + + /* Can only whiten for a single trace, */ + /* see */ + AVER(SegWhite(seg) == TraceSetEMPTY); + + if (!SegBuffer(&buffer, seg)) { + awlSegRangeWhiten(awlseg, 0, awlseg->grains); + uncondemnedGrains = (Count)0; + } else { + /* Whiten everything except the buffer. */ + Addr base = SegBase(seg); + Index scanLimitIndex = PoolIndexOfAddr(base, pool, BufferScanLimit(buffer)); + Index limitIndex = PoolIndexOfAddr(base, pool, BufferLimit(buffer)); + uncondemnedGrains = limitIndex - scanLimitIndex; + awlSegRangeWhiten(awlseg, 0, scanLimitIndex); + awlSegRangeWhiten(awlseg, limitIndex, awlseg->grains); + + /* Check the buffer is black. */ + /* This really ought to change when we have a non-trivial */ + /* pre-flip phase. 
@@@@ ('coz then we'll be allocating white) */ + if(scanLimitIndex != limitIndex) { + AVER(BTIsSetRange(awlseg->mark, scanLimitIndex, limitIndex)); + AVER(BTIsSetRange(awlseg->scanned, scanLimitIndex, limitIndex)); + } + } + + /* The unused part of the buffer remains buffered: the rest becomes old. */ + AVER(awlseg->bufferedGrains >= uncondemnedGrains); + agedGrains = awlseg->bufferedGrains - uncondemnedGrains; + PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains), + PoolGrainsSize(pool, awlseg->newGrains), FALSE); + awlseg->oldGrains += agedGrains + awlseg->newGrains; + awlseg->bufferedGrains = uncondemnedGrains; + awlseg->newGrains = 0; + + if (awlseg->oldGrains > 0) { + GenDescCondemned(pgen->gen, trace, + PoolGrainsSize(pool, awlseg->oldGrains)); + SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + } + + return ResOK; +} + + +/* awlSegGreyen -- Greyen method for AWL segments */ + +/* awlSegRangeGreyen -- subroutine for awlSegGreyen */ +static void awlSegRangeGreyen(AWLSeg awlseg, Index base, Index limit) +{ + /* AWLSeg not checked as that's already been done */ + AVER(limit <= awlseg->grains); + /* copes with degenerate case as that makes caller simpler */ + if (base < limit) { + BTSetRange(awlseg->mark, base, limit); + BTResRange(awlseg->scanned, base, limit); + } else { + AVER(base == limit); + } +} + +static void awlSegGreyen(Seg seg, Trace trace) +{ + Buffer buffer; + Pool pool; + + AVERT(Seg, seg); + AVERT(Trace, trace); + pool = SegPool(seg); + AVER(PoolArena(pool) == trace->arena); + + if (!TraceSetIsMember(SegWhite(seg), trace)) { + AWLSeg awlseg = MustBeA(AWLSeg, seg); + + SegSetGrey(seg, TraceSetAdd(SegGrey(seg), trace)); + if (SegBuffer(&buffer, seg)) { + Addr base = SegBase(seg); + + awlSegRangeGreyen(awlseg, + 0, + PoolIndexOfAddr(base, pool, BufferScanLimit(buffer))); + awlSegRangeGreyen(awlseg, + PoolIndexOfAddr(base, pool, BufferLimit(buffer)), + awlseg->grains); + } else { + awlSegRangeGreyen(awlseg, 0, awlseg->grains); + } + } +} + 
+ +/* awlSegBlacken -- Blacken method for AWL segments */ + +static void awlSegBlacken(Seg seg, TraceSet traceSet) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + + AVERT(TraceSet, traceSet); + + BTSetRange(awlseg->scanned, 0, awlseg->grains); +} + + +/* awlScanObject -- scan a single object */ +/* base and limit are both offset by the header size */ + +static Res awlScanObject(Arena arena, AWL awl, ScanState ss, + Addr base, Addr limit) +{ + Res res; + Bool dependent; /* is there a dependent object? */ + Addr dependentObject; /* base address of dependent object */ + Seg dependentSeg = NULL; /* segment of dependent object */ + + AVERT(Arena, arena); + AVERT(AWL, awl); + AVERT(ScanState, ss); + AVER(base != 0); + AVER(base < limit); + + dependentObject = awl->findDependent(base); + dependent = SegOfAddr(&dependentSeg, arena, dependentObject); + if (dependent) { + /* */ + ShieldExpose(arena, dependentSeg); + /* */ + SegSetSummary(dependentSeg, RefSetUNIV); + } + + res = TraceScanFormat(ss, base, limit); + + if (dependent) + ShieldCover(arena, dependentSeg); + + return res; +} + + +/* awlSegScanSinglePass -- a single scan pass over a segment */ + +static Res awlSegScanSinglePass(Bool *anyScannedReturn, ScanState ss, + Seg seg, Bool scanAllObjects) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + Pool pool = SegPool(seg); + AWL awl = MustBeA(AWLPool, pool); + Arena arena = PoolArena(pool); + Buffer buffer; + Format format = pool->format; + Addr base = SegBase(seg); + Addr limit = SegLimit(seg); + Addr bufferScanLimit; + Addr p; + Addr hp; + + AVERT(ScanState, ss); + AVERT(Bool, scanAllObjects); + + *anyScannedReturn = FALSE; + p = base; + if (SegBuffer(&buffer, seg) && BufferScanLimit(buffer) != BufferLimit(buffer)) + bufferScanLimit = BufferScanLimit(buffer); + else + bufferScanLimit = limit; + + while(p < limit) { + Index i; /* the index into the bit tables corresponding to p */ + Addr objectLimit; + + /* */ + if (p == bufferScanLimit) { + p = BufferLimit(buffer); + 
continue; + } + + i = PoolIndexOfAddr(base, pool, p); + if (!BTGet(awlseg->alloc, i)) { + p = AddrAdd(p, PoolAlignment(pool)); + continue; + } + hp = AddrAdd(p, format->headerSize); + objectLimit = (format->skip)(hp); + /* */ + if (scanAllObjects + || (BTGet(awlseg->mark, i) && !BTGet(awlseg->scanned, i))) { + Res res = awlScanObject(arena, awl, ss, + hp, objectLimit); + if (res != ResOK) + return res; + *anyScannedReturn = TRUE; + BTSet(awlseg->scanned, i); + } + objectLimit = AddrSub(objectLimit, format->headerSize); + AVER(p < objectLimit); + AVER(AddrIsAligned(objectLimit, PoolAlignment(pool))); + p = objectLimit; + } + AVER(p == limit); + + return ResOK; +} + + +/* awlSegScan -- segment scan method for AWL */ + +static Res awlSegScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + Bool anyScanned; + Bool scanAllObjects; + Res res; + + AVER(totalReturn != NULL); + AVERT(ScanState, ss); + AVERT(Seg, seg); + + /* If the scanner isn't going to scan all the objects then the */ + /* summary of the unscanned objects must be added into the scan */ + /* state summary, so that it's a valid summary of the entire */ + /* segment on return. */ + + /* This pool assumes disjoint white sets and maintains mark and */ + /* scanned tables (effectively non-white and black tables) with */ + /* respect to the trace with respect to which the segment is */ + /* white. For any other trace, we cannot tell which objects */ + /* are grey and must therefore scan them all. 
*/ + + scanAllObjects = + (TraceSetDiff(ss->traces, SegWhite(seg)) != TraceSetEMPTY); + + do { + res = awlSegScanSinglePass(&anyScanned, ss, seg, scanAllObjects); + if (res != ResOK) { + *totalReturn = FALSE; + return res; + } + /* we are done if we scanned all the objects or if we did a pass */ + /* and didn't scan any objects (since then, no new object can have */ + /* gotten fixed) */ + } while(!scanAllObjects && anyScanned); + + *totalReturn = scanAllObjects; + AWLNoteScan(seg, ss); + return ResOK; +} + + +/* awlSegFix -- Fix method for AWL segments */ + +static Res awlSegFix(Seg seg, ScanState ss, Ref *refIO) +{ + AWLSeg awlseg = MustBeA_CRITICAL(AWLSeg, seg); + Pool pool = SegPool(seg); + Ref clientRef; + Addr base; + Index i; + + AVERT_CRITICAL(ScanState, ss); + AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY); + AVER_CRITICAL(refIO != NULL); + + clientRef = *refIO; + + base = AddrSub((Addr)clientRef, pool->format->headerSize); + + /* Not a real reference if out of bounds. This can happen if an + ambiguous reference is closer to the base of the segment than the + header size. */ + if (base < SegBase(seg)) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + /* Not a real reference if unaligned. */ + if (!AddrIsAligned(base, PoolAlignment(pool))) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + i = PoolIndexOfAddr(SegBase(seg), pool, base); + + /* Not a real reference if unallocated. 
*/ + if (!BTGet(awlseg->alloc, i)) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + if (!BTGet(awlseg->mark, i)) { + ss->wasMarked = FALSE; /* */ + if (ss->rank == RankWEAK) { + *refIO = (Ref)0; + } else { + BTSet(awlseg->mark, i); + SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces)); + } + } + + return ResOK; +} + + +/* awlSegReclaim -- reclaim dead objects in an AWL segment */ + +static void awlSegReclaim(Seg seg, Trace trace) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + Pool pool = SegPool(seg); + PoolGen pgen = PoolSegPoolGen(pool, seg); + Addr base = SegBase(seg); + Buffer buffer; + Bool hasBuffer = SegBuffer(&buffer, seg); + Format format = pool->format; + Count reclaimedGrains = (Count)0; + STATISTIC_DECL(Count preservedInPlaceCount = (Count)0) + Size preservedInPlaceSize = (Size)0; + Index i; + + AVERT(Trace, trace); + + i = 0; + while(i < awlseg->grains) { + Addr p, q; + Index j; + + if(!BTGet(awlseg->alloc, i)) { + ++i; + continue; + } + p = PoolAddrOfIndex(base, pool, i); + if (hasBuffer + && p == BufferScanLimit(buffer) + && BufferScanLimit(buffer) != BufferLimit(buffer)) + { + i = PoolIndexOfAddr(base, pool, BufferLimit(buffer)); + continue; + } + q = format->skip(AddrAdd(p, format->headerSize)); + q = AddrSub(q, format->headerSize); + AVER(AddrIsAligned(q, PoolAlignment(pool))); + j = PoolIndexOfAddr(base, pool, q); + AVER(j <= awlseg->grains); + if(BTGet(awlseg->mark, i)) { + AVER(BTGet(awlseg->scanned, i)); + BTSetRange(awlseg->mark, i, j); + BTSetRange(awlseg->scanned, i, j); + STATISTIC(++preservedInPlaceCount); + preservedInPlaceSize += AddrOffset(p, q); + } else { + BTResRange(awlseg->mark, i, j); + BTSetRange(awlseg->scanned, i, j); + BTResRange(awlseg->alloc, i, j); + reclaimedGrains += j - i; + } + i = j; + } + AVER(i == awlseg->grains); + + AVER(reclaimedGrains <= awlseg->grains); + AVER(awlseg->oldGrains >= reclaimedGrains); + awlseg->oldGrains -= reclaimedGrains; + awlseg->freeGrains += reclaimedGrains; + 
PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE); + + STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains)); + STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount); + GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize); + SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); + + if (awlseg->freeGrains == awlseg->grains && !hasBuffer) { + /* No survivors */ + AVER(awlseg->bufferedGrains == 0); + PoolGenFree(pgen, seg, + PoolGrainsSize(pool, awlseg->freeGrains), + PoolGrainsSize(pool, awlseg->oldGrains), + PoolGrainsSize(pool, awlseg->newGrains), + FALSE); + } +} + + +/* awlSegAccess -- handle a barrier hit */ + +static Res awlSegAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context) +{ + AWL awl; + Res res; + + AVERT(Seg, seg); + AVER(SegBase(seg) <= addr); + AVER(addr < SegLimit(seg)); + AVERT(AccessSet, mode); + AVERT(MutatorContext, context); + + awl = MustBeA(AWLPool, SegPool(seg)); + + /* Attempt scanning a single reference if permitted */ + if(awlSegCanTrySingleAccess(arena, seg, addr)) { + res = SegSingleAccess(seg, arena, addr, mode, context); + switch(res) { + case ResOK: + AWLNoteRefAccess(awl, seg, addr); + return ResOK; + case ResFAIL: + /* Not all accesses can be managed singly. Default to segment */ + break; + default: + return res; + } + } + + /* Have to scan the entire seg anyway. 
*/ + res = SegWholeAccess(seg, arena, addr, mode, context); + if(ResOK == res) { + AWLNoteSegAccess(awl, seg, addr); + } + + return res; +} + + +/* awlSegWalk -- walk all objects */ + +static void awlSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s) +{ + AWLSeg awlseg = MustBeA(AWLSeg, seg); + Pool pool = SegPool(seg); + Addr object, base, limit; + + AVERT(Format, format); + AVER(FUNCHECK(f)); + /* p and s are arbitrary closures and can't be checked */ + + base = SegBase(seg); + object = base; + limit = SegLimit(seg); + + while(object < limit) { + /* object is a slight misnomer because it might point to a */ + /* free grain */ + Addr next; + Index i; + Buffer buffer; + + if (SegBuffer(&buffer, seg)) { + if (object == BufferScanLimit(buffer) + && BufferScanLimit(buffer) != BufferLimit(buffer)) { + /* skip over buffered area */ + object = BufferLimit(buffer); + continue; + } + /* since we skip over the buffered area we are always */ + /* either before the buffer, or after it, never in it */ + AVER(object < BufferGetInit(buffer) || BufferLimit(buffer) <= object); + } + i = PoolIndexOfAddr(base, pool, object); + if (!BTGet(awlseg->alloc, i)) { + /* This grain is free */ + object = AddrAdd(object, PoolAlignment(pool)); + continue; + } + object = AddrAdd(object, format->headerSize); + next = format->skip(object); + next = AddrSub(next, format->headerSize); + AVER(AddrIsAligned(next, PoolAlignment(pool))); + if (BTGet(awlseg->mark, i) && BTGet(awlseg->scanned, i)) + (*f)(object, pool->format, pool, p, s); + object = next; + } +} + + +/* AWLTotalSize -- total memory allocated from the arena */ +/* TODO: This code is repeated in AMS */ + +static Size AWLTotalSize(Pool pool) +{ + AWL awl = MustBeA(AWLPool, pool); + return awl->pgen->totalSize; +} + + +/* AWLFreeSize -- free memory (unused by client program) */ +/* TODO: This code is repeated in AMS */ + +static Size AWLFreeSize(Pool pool) +{ + AWL awl = MustBeA(AWLPool, pool); + return 
awl->pgen->freeSize; +} + + +/* AWLPoolClass -- the class definition */ + +DEFINE_CLASS(Pool, AWLPool, klass) +{ + INHERIT_CLASS(klass, AWLPool, AbstractCollectPool); + klass->instClassStruct.finish = AWLFinish; + klass->size = sizeof(AWLPoolStruct); + klass->varargs = AWLVarargs; + klass->init = AWLInit; + klass->bufferClass = RankBufClassGet; + klass->bufferFill = awlBufferFill; + klass->segPoolGen = awlSegPoolGen; + klass->totalSize = AWLTotalSize; + klass->freeSize = AWLFreeSize; + AVERT(PoolClass, klass); +} + + +mps_pool_class_t mps_class_awl(void) +{ + return (mps_pool_class_t)CLASS(AWLPool); +} + + +/* AWLCheck -- check an AWL pool */ + +ATTRIBUTE_UNUSED +static Bool AWLCheck(AWL awl) +{ + CHECKS(AWL, awl); + CHECKC(AWLPool, awl); + CHECKD(Pool, CouldBeA(Pool, awl)); + if (awl->pgen != NULL) + CHECKD(PoolGen, awl->pgen); + /* Nothing to check about succAccesses. */ + CHECKL(FUNCHECK(awl->findDependent)); + /* Don't bother to check stats. */ + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poollo.c b/mps/code/poollo.c new file mode 100644 index 00000000000..61311f049bc --- /dev/null +++ b/mps/code/poollo.c @@ -0,0 +1,846 @@ +/* poollo.c: LEAF POOL CLASS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * DESIGN + * + * .design: . This is a leaf pool class. + */ + +#include "mpsclo.h" +#include "mpm.h" +#include "mps.h" + +SRCID(poollo, "$Id$"); + + +/* LOStruct -- leaf object pool instance structure */ + +#define LOSig ((Sig)0x51970B07) /* SIGnature LO POoL */ + +typedef struct LOStruct *LO; + +typedef struct LOStruct { + PoolStruct poolStruct; /* generic pool structure */ + PoolGenStruct pgenStruct; /* generation representing the pool */ + PoolGen pgen; /* NULL or pointer to pgenStruct */ + Sig sig; /* design.mps.sig.field.end.outer */ +} LOStruct; + +typedef LO LOPool; +#define LOPoolCheck LOCheck +DECLARE_CLASS(Pool, LOPool, AbstractCollectPool); +DECLARE_CLASS(Seg, LOSeg, MutatorSeg); + + +/* forward declaration */ +static Bool LOCheck(LO lo); + + +/* LOSegStruct -- LO segment structure + * + * Colour is represented as follows: + * Black: +alloc +mark + * White: +alloc -mark + * Grey: objects have no references so can't be grey + * Free: -alloc ?mark + */ + +typedef struct LOSegStruct *LOSeg; + +#define LOSegSig ((Sig)0x519705E9) /* SIGnature LO SEG */ + +typedef struct LOSegStruct { + GCSegStruct gcSegStruct; /* superclass fields must come 
first */ + BT mark; /* mark bit table */ + BT alloc; /* alloc bit table */ + Count freeGrains; /* free grains */ + Count bufferedGrains; /* grains in buffers */ + Count newGrains; /* grains allocated since last collection */ + Count oldGrains; /* grains allocated prior to last collection */ + Sig sig; /* design.mps.sig.field.end.outer */ +} LOSegStruct; + + +/* forward decls */ +static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args); +static void loSegFinish(Inst inst); +static Count loSegGrains(LOSeg loseg); +static Bool loSegBufferFill(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet); +static void loSegBufferEmpty(Seg seg, Buffer buffer); +static Res loSegWhiten(Seg seg, Trace trace); +static Res loSegScan(Bool *totalReturn, Seg seg, ScanState ss); +static Res loSegFix(Seg seg, ScanState ss, Ref *refIO); +static void loSegReclaim(Seg seg, Trace trace); +static void loSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s); + + +/* LOSegClass -- Class definition for LO segments */ + +DEFINE_CLASS(Seg, LOSeg, klass) +{ + INHERIT_CLASS(klass, LOSeg, MutatorSeg); + SegClassMixInNoSplitMerge(klass); + klass->instClassStruct.finish = loSegFinish; + klass->size = sizeof(LOSegStruct); + klass->init = loSegInit; + klass->bufferFill = loSegBufferFill; + klass->bufferEmpty = loSegBufferEmpty; + klass->whiten = loSegWhiten; + klass->scan = loSegScan; + klass->fix = loSegFix; + klass->fixEmergency = loSegFix; + klass->reclaim = loSegReclaim; + klass->walk = loSegWalk; + AVERT(SegClass, klass); +} + + +/* LOSegCheck -- check an LO segment */ + +ATTRIBUTE_UNUSED +static Bool LOSegCheck(LOSeg loseg) +{ + Seg seg = MustBeA(Seg, loseg); + Pool pool = SegPool(seg); + CHECKS(LOSeg, loseg); + CHECKD(GCSeg, &loseg->gcSegStruct); + CHECKL(loseg->mark != NULL); + CHECKL(loseg->alloc != NULL); + /* Could check exactly how many bits are set in the alloc table. 
*/ + CHECKL(loseg->freeGrains + loseg->bufferedGrains + loseg->newGrains + + loseg->oldGrains + == PoolSizeGrains(pool, SegSize(seg))); + return TRUE; +} + + +/* loSegInit -- Init method for LO segments */ + +static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + LOSeg loseg; + Res res; + Size tableSize; /* # bytes in each control array */ + Arena arena = PoolArena(pool); + /* number of bits needed in each control array */ + Count grains; + void *p; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, LOSeg, init)(seg, pool, base, size, args); + if(res != ResOK) + goto failSuperInit; + loseg = CouldBeA(LOSeg, seg); + + AVER(SegWhite(seg) == TraceSetEMPTY); + + grains = PoolSizeGrains(pool, size); + tableSize = BTSize(grains); + res = ControlAlloc(&p, arena, 2 * tableSize); + if(res != ResOK) + goto failControlAlloc; + loseg->mark = p; + loseg->alloc = PointerAdd(p, tableSize); + BTResRange(loseg->alloc, 0, grains); + loseg->freeGrains = grains; + loseg->bufferedGrains = (Count)0; + loseg->newGrains = (Count)0; + loseg->oldGrains = (Count)0; + + SetClassOfPoly(seg, CLASS(LOSeg)); + loseg->sig = LOSegSig; + AVERC(LOSeg, loseg); + + return ResOK; + +failControlAlloc: + NextMethod(Inst, LOSeg, finish)(MustBeA(Inst, seg)); +failSuperInit: + AVER(res != ResOK); + return res; +} + + +/* loSegFinish -- Finish method for LO segments */ + +static void loSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + LOSeg loseg = MustBeA(LOSeg, seg); + Pool pool = SegPool(seg); + Arena arena = PoolArena(pool); + Size tablesize; + Count grains; + + loseg->sig = SigInvalid; + + grains = loSegGrains(loseg); + tablesize = BTSize(grains); + ControlFree(arena, loseg->mark, 2 * tablesize); + + NextMethod(Inst, LOSeg, finish)(inst); +} + + +ATTRIBUTE_UNUSED +static Count loSegGrains(LOSeg loseg) +{ + Seg seg = MustBeA(Seg, loseg); + Pool pool = SegPool(seg); + Size size = SegSize(seg); + return PoolSizeGrains(pool, size); 
+} + + +/* loSegBufferFill -- try filling buffer from segment */ + +static Bool loSegBufferFill(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet) +{ + Index baseIndex, limitIndex; + LOSeg loseg = MustBeA_CRITICAL(LOSeg, seg); + Pool pool = SegPool(seg); + Count requestedGrains, segGrains, allocatedGrains; + Addr segBase, base, limit; + + AVER_CRITICAL(baseReturn != NULL); + AVER_CRITICAL(limitReturn != NULL); + AVER_CRITICAL(SizeIsAligned(size, PoolAlignment(pool))); + AVER_CRITICAL(size > 0); + AVER_CRITICAL(rankSet == RankSetEMPTY); + + requestedGrains = PoolSizeGrains(pool, size); + if (loseg->freeGrains < requestedGrains) + /* Not enough space to satisfy the request. */ + return FALSE; + + if (SegHasBuffer(seg)) + /* Don't bother trying to allocate from a buffered segment */ + return FALSE; + + segGrains = PoolSizeGrains(pool, SegSize(seg)); + if (loseg->freeGrains == segGrains) { + /* Whole segment is free: no need for a search. */ + baseIndex = 0; + limitIndex = segGrains; + goto found; + } + + if (!BTFindLongResRange(&baseIndex, &limitIndex, loseg->alloc, + 0, segGrains, requestedGrains)) + return FALSE; + +found: + AVER(baseIndex < limitIndex); + allocatedGrains = limitIndex - baseIndex; + AVER(requestedGrains <= allocatedGrains); + AVER(BTIsResRange(loseg->alloc, baseIndex, limitIndex)); + /* Objects are allocated black. */ + /* TODO: This should depend on trace phase. 
*/ + BTSetRange(loseg->alloc, baseIndex, limitIndex); + BTSetRange(loseg->mark, baseIndex, limitIndex); + AVER(loseg->freeGrains >= allocatedGrains); + loseg->freeGrains -= allocatedGrains; + loseg->bufferedGrains += allocatedGrains; + + segBase = SegBase(seg); + base = PoolAddrOfIndex(segBase, pool, baseIndex); + limit = PoolAddrOfIndex(segBase, pool, limitIndex); + PoolGenAccountForFill(PoolSegPoolGen(pool, seg), AddrOffset(base, limit)); + + *baseReturn = base; + *limitReturn = limit; + return TRUE; +} + + +/* loSegBufferEmpty -- empty buffer to segment */ + +static void loSegBufferEmpty(Seg seg, Buffer buffer) +{ + LOSeg loseg = MustBeA(LOSeg, seg); + Pool pool = SegPool(seg); + Addr segBase, bufferBase, init, limit; + Index initIndex, limitIndex; + Count unusedGrains, usedGrains; + + AVERT(Seg, seg); + AVERT(Buffer, buffer); + segBase = SegBase(seg); + bufferBase = BufferBase(buffer); + init = BufferGetInit(buffer); + limit = BufferLimit(buffer); + AVER(segBase <= bufferBase); + AVER(bufferBase <= init); + AVER(init <= limit); + AVER(limit <= SegLimit(seg)); + + initIndex = PoolIndexOfAddr(segBase, pool, init); + limitIndex = PoolIndexOfAddr(segBase, pool, limit); + + if (initIndex < limitIndex) + BTResRange(loseg->alloc, initIndex, limitIndex); + + unusedGrains = limitIndex - initIndex; + AVER(unusedGrains <= loseg->bufferedGrains); + usedGrains = loseg->bufferedGrains - unusedGrains; + loseg->freeGrains += unusedGrains; + loseg->bufferedGrains = 0; + loseg->newGrains += usedGrains; + + PoolGenAccountForEmpty(PoolSegPoolGen(pool, seg), + PoolGrainsSize(pool, usedGrains), + PoolGrainsSize(pool, unusedGrains), FALSE); +} + + +/* loSegReclaim -- reclaim white objects in an LO segment + * + * Could consider implementing this using Walk. 
+ */ + +static void loSegReclaim(Seg seg, Trace trace) +{ + LOSeg loseg = MustBeA(LOSeg, seg); + Pool pool = SegPool(seg); + PoolGen pgen = PoolSegPoolGen(pool, seg); + Addr p, base, limit; + Buffer buffer; + Bool hasBuffer = SegBuffer(&buffer, seg); + Count reclaimedGrains = (Count)0; + Format format = NULL; /* suppress "may be used uninitialized" warning */ + STATISTIC_DECL(Count preservedInPlaceCount = (Count)0) + Size preservedInPlaceSize = (Size)0; + Bool b; + + AVERT(Trace, trace); + + base = SegBase(seg); + limit = SegLimit(seg); + + b = PoolFormat(&format, pool); + AVER(b); + + /* i is the index of the current pointer, + * p is the actual address that is being considered. + * j and q act similarly for a pointer which is used to + * point at the end of the current object. + */ + p = base; + while(p < limit) { + Addr q; + Index i; + + if (hasBuffer) { + if (p == BufferScanLimit(buffer) + && BufferScanLimit(buffer) != BufferLimit(buffer)) { + /* skip over buffered area */ + p = BufferLimit(buffer); + continue; + } + /* since we skip over the buffered area we are always */ + /* either before the buffer, or after it, never in it */ + AVER(p < BufferGetInit(buffer) || BufferLimit(buffer) <= p); + } + i = PoolIndexOfAddr(base, pool, p); + if(!BTGet(loseg->alloc, i)) { + /* This grain is free */ + p = AddrAdd(p, pool->alignment); + continue; + } + q = (*format->skip)(AddrAdd(p, format->headerSize)); + q = AddrSub(q, format->headerSize); + if(BTGet(loseg->mark, i)) { + STATISTIC(++preservedInPlaceCount); + preservedInPlaceSize += AddrOffset(p, q); + } else { + Index j = PoolIndexOfAddr(base, pool, q); + /* This object is not marked, so free it */ + BTResRange(loseg->alloc, i, j); + reclaimedGrains += j - i; + } + p = q; + } + AVER(p == limit); + + AVER(reclaimedGrains <= loSegGrains(loseg)); + AVER(loseg->oldGrains >= reclaimedGrains); + loseg->oldGrains -= reclaimedGrains; + loseg->freeGrains += reclaimedGrains; + PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, 
reclaimedGrains), FALSE); + + STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains)); + STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount); + GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize); + SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); + + if (loseg->freeGrains == PoolSizeGrains(pool, SegSize(seg)) && !hasBuffer) { + AVER(loseg->bufferedGrains == 0); + PoolGenFree(pgen, seg, + PoolGrainsSize(pool, loseg->freeGrains), + PoolGrainsSize(pool, loseg->oldGrains), + PoolGrainsSize(pool, loseg->newGrains), + FALSE); + } +} + +/* Walks over _all_ objects in the segnent: whether they are black or + * white, they are still validly formatted as this is a leaf pool, so + * there can't be any dangling references. + */ +static void loSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s) +{ + Addr base; + LOSeg loseg = MustBeA(LOSeg, seg); + Pool pool = SegPool(seg); + Index i, grains; + + AVERT(Format, format); + AVER(FUNCHECK(f)); + /* p and s are arbitrary closures and can't be checked */ + + base = SegBase(seg); + grains = loSegGrains(loseg); + i = 0; + + while(i < grains) { + /* object is a slight misnomer because it might point to a */ + /* free grain */ + Addr object = PoolAddrOfIndex(base, pool, i); + Addr next; + Index j; + Buffer buffer; + + if (SegBuffer(&buffer, seg)) { + if(object == BufferScanLimit(buffer) && + BufferScanLimit(buffer) != BufferLimit(buffer)) { + /* skip over buffered area */ + object = BufferLimit(buffer); + i = PoolIndexOfAddr(base, pool, object); + continue; + } + /* since we skip over the buffered area we are always */ + /* either before the buffer, or after it, never in it */ + AVER(object < BufferGetInit(buffer) || BufferLimit(buffer) <= object); + } + if(!BTGet(loseg->alloc, i)) { + /* This grain is free */ + ++i; + continue; + } + object = AddrAdd(object, format->headerSize); + next = (*format->skip)(object); + next = AddrSub(next, format->headerSize); + j = 
PoolIndexOfAddr(base, pool, next); + AVER(i < j); + (*f)(object, pool->format, pool, p, s); + i = j; + } +} + + +/* LOVarargs -- decode obsolete varargs */ + +static void LOVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_FORMAT; + args[0].val.format = va_arg(varargs, Format); + args[1].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + +/* LOInit -- initialize an LO pool */ + +static Res LOInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + LO lo; + Res res; + ArgStruct arg; + Chain chain; + unsigned gen = LO_GEN_DEFAULT; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + UNUSED(klass); /* used for debug pools only */ + + res = NextMethod(Pool, LOPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + lo = CouldBeA(LOPool, pool); + + /* Ensure a format was supplied in the argument list. */ + AVER(pool->format != NULL); + + if (ArgPick(&arg, args, MPS_KEY_CHAIN)) + chain = arg.val.chain; + else { + chain = ArenaGlobals(arena)->defaultChain; + gen = 1; /* avoid the nursery of the default chain by default */ + } + if (ArgPick(&arg, args, MPS_KEY_GEN)) + gen = arg.val.u; + + AVERT(Format, pool->format); + AVER(FormatArena(pool->format) == arena); + AVERT(Chain, chain); + AVER(gen <= ChainGens(chain)); + AVER(chain->arena == arena); + + pool->alignment = pool->format->alignment; + pool->alignShift = SizeLog2(pool->alignment); + + lo->pgen = NULL; + + SetClassOfPoly(pool, CLASS(LOPool)); + lo->sig = LOSig; + AVERC(LOPool, lo); + + res = PoolGenInit(&lo->pgenStruct, ChainGen(chain, gen), pool); + if (res != ResOK) + goto failGenInit; + lo->pgen = &lo->pgenStruct; + + EVENT2(PoolInitLO, pool, pool->format); + + return ResOK; + +failGenInit: + NextMethod(Inst, LOPool, finish)(MustBeA(Inst, pool)); +failNextInit: + AVER(res != ResOK); + return res; +} + + +/* LOFinish -- finish an LO pool */ + +static void LOFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + LO 
lo = MustBeA(LOPool, pool); + Ring node, nextNode; + + RING_FOR(node, &pool->segRing, nextNode) { + Seg seg = SegOfPoolRing(node); + LOSeg loseg = MustBeA(LOSeg, seg); + AVER(!SegHasBuffer(seg)); + AVERT(LOSeg, loseg); + AVER(loseg->bufferedGrains == 0); + PoolGenFree(lo->pgen, seg, + PoolGrainsSize(pool, loseg->freeGrains), + PoolGrainsSize(pool, loseg->oldGrains), + PoolGrainsSize(pool, loseg->newGrains), + FALSE); + } + PoolGenFinish(lo->pgen); + + lo->sig = SigInvalid; + + NextMethod(Inst, LOPool, finish)(inst); +} + + +static Res LOBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, + Size size) +{ + LO lo = MustBeA(LOPool, pool); + Res res; + Ring node, nextNode; + RankSet rankSet; + Seg seg; + Bool b; + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERC(Buffer, buffer); + AVER(BufferIsReset(buffer)); + AVER(BufferRankSet(buffer) == RankSetEMPTY); + AVER(size > 0); + AVER(SizeIsAligned(size, PoolAlignment(pool))); + + /* Try to find a segment with enough space already. */ + rankSet = BufferRankSet(buffer); + RING_FOR(node, PoolSegRing(pool), nextNode) { + seg = SegOfPoolRing(node); + if (SegBufferFill(baseReturn, limitReturn, seg, size, rankSet)) + return ResOK; + } + + /* No segment had enough space, so make a new one. */ + res = PoolGenAlloc(&seg, lo->pgen, CLASS(LOSeg), + SizeArenaGrains(size, PoolArena(pool)), + argsNone); + if (res != ResOK) + return res; + b = SegBufferFill(baseReturn, limitReturn, seg, size, rankSet); + AVER(b); + return ResOK; +} + + +/* Synchronise the buffer with the alloc Bit Table in the segment. 
*/ + + +/* loSegPoolGen -- get pool generation for an LO segment */ + +static PoolGen loSegPoolGen(Pool pool, Seg seg) +{ + LO lo = MustBeA(LOPool, pool); + AVERT(Seg, seg); + return lo->pgen; +} + + +/* loSegWhiten -- whiten a segment */ + +static Res loSegWhiten(Seg seg, Trace trace) +{ + LOSeg loseg = MustBeA(LOSeg, seg); + Pool pool = SegPool(seg); + PoolGen pgen = PoolSegPoolGen(pool, seg); + Buffer buffer; + Count grains, agedGrains, uncondemnedGrains; + + AVERT(Trace, trace); + AVER(SegWhite(seg) == TraceSetEMPTY); + + grains = loSegGrains(loseg); + + /* Whiten allocated objects; leave free areas black. */ + if (SegBuffer(&buffer, seg)) { + Addr base = SegBase(seg); + Index scanLimitIndex = PoolIndexOfAddr(base, pool, BufferScanLimit(buffer)); + Index limitIndex = PoolIndexOfAddr(base, pool, BufferLimit(buffer)); + uncondemnedGrains = limitIndex - scanLimitIndex; + if (0 < scanLimitIndex) + BTCopyInvertRange(loseg->alloc, loseg->mark, 0, scanLimitIndex); + if (limitIndex < grains) + BTCopyInvertRange(loseg->alloc, loseg->mark, limitIndex, grains); + } else { + uncondemnedGrains = (Count)0; + BTCopyInvertRange(loseg->alloc, loseg->mark, 0, grains); + } + + /* The unused part of the buffer remains buffered: the rest becomes old. 
*/ + AVER(loseg->bufferedGrains >= uncondemnedGrains); + agedGrains = loseg->bufferedGrains - uncondemnedGrains; + PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains), + PoolGrainsSize(pool, loseg->newGrains), FALSE); + loseg->oldGrains += agedGrains + loseg->newGrains; + loseg->bufferedGrains = uncondemnedGrains; + loseg->newGrains = 0; + + if (loseg->oldGrains > 0) { + GenDescCondemned(pgen->gen, trace, + PoolGrainsSize(pool, loseg->oldGrains)); + SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + } + + return ResOK; +} + + +static Res loSegScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + LOSeg loseg = MustBeA(LOSeg, seg); + Pool pool = SegPool(seg); + Addr p, base, limit; + Buffer buffer; + Bool hasBuffer = SegBuffer(&buffer, seg); + Format format = NULL; /* suppress "may be used uninitialized" warning */ + Bool b; + + AVER(totalReturn != NULL); + AVERT(Seg, seg); + AVERT(ScanState, ss); + + base = SegBase(seg); + limit = SegLimit(seg); + + b = PoolFormat(&format, pool); + AVER(b); + + p = base; + while (p < limit) { + Addr q; + Index i; + + if (hasBuffer) { + if (p == BufferScanLimit(buffer) + && BufferScanLimit(buffer) != BufferLimit(buffer)) { + /* skip over buffered area */ + p = BufferLimit(buffer); + continue; + } + /* since we skip over the buffered area we are always */ + /* either before the buffer, or after it, never in it */ + AVER(p < BufferGetInit(buffer) || BufferLimit(buffer) <= p); + } + i = PoolIndexOfAddr(base, pool, p); + if (!BTGet(loseg->alloc, i)) { + p = AddrAdd(p, PoolAlignment(pool)); + continue; + } + q = (*format->skip)(AddrAdd(p, format->headerSize)); + q = AddrSub(q, format->headerSize); + if (BTGet(loseg->mark, i)) { + Res res = TraceScanFormat(ss, p, q); + if (res != ResOK) + return res; + } + p = q; + } + AVER(p == limit); + + return ResOK; +} + + +static Res loSegFix(Seg seg, ScanState ss, Ref *refIO) +{ + LOSeg loseg = MustBeA_CRITICAL(LOSeg, seg); + Pool pool = SegPool(seg); + Ref clientRef; + Addr base; + Index 
i; + + AVERT_CRITICAL(ScanState, ss); + AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY); + AVER_CRITICAL(refIO != NULL); + + clientRef = *refIO; + base = AddrSub((Addr)clientRef, pool->format->headerSize); + + /* Not a real reference if out of bounds. This can happen if an + ambiguous reference is closer to the base of the segment than the + header size. */ + if (base < SegBase(seg)) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + /* Not a real reference if unaligned. */ + if (!AddrIsAligned(base, PoolAlignment(pool))) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + i = PoolIndexOfAddr(SegBase(seg), pool, base); + + /* Not a real reference if unallocated. */ + if (!BTGet(loseg->alloc, i)) { + AVER(ss->rank == RankAMBIG); + return ResOK; + } + + if(!BTGet(loseg->mark, i)) { + ss->wasMarked = FALSE; /* */ + if(ss->rank == RankWEAK) { + *refIO = (Addr)0; + } else { + BTSet(loseg->mark, i); + } + } + + return ResOK; +} + + +/* LOTotalSize -- total memory allocated from the arena */ +/* TODO: This code is repeated in AMS */ + +static Size LOTotalSize(Pool pool) +{ + LO lo = MustBeA(LOPool, pool); + return lo->pgen->totalSize; +} + + +/* LOFreeSize -- free memory (unused by client program) */ +/* TODO: This code is repeated in AMS */ + +static Size LOFreeSize(Pool pool) +{ + LO lo = MustBeA(LOPool, pool); + return lo->pgen->freeSize; +} + + +/* LOPoolClass -- the class definition */ + +DEFINE_CLASS(Pool, LOPool, klass) +{ + INHERIT_CLASS(klass, LOPool, AbstractCollectPool); + klass->instClassStruct.finish = LOFinish; + klass->size = sizeof(LOStruct); + klass->varargs = LOVarargs; + klass->init = LOInit; + klass->bufferFill = LOBufferFill; + klass->segPoolGen = loSegPoolGen; + klass->totalSize = LOTotalSize; + klass->freeSize = LOFreeSize; + AVERT(PoolClass, klass); +} + + +/* mps_class_lo -- the external interface to get the LO pool class */ + +mps_pool_class_t mps_class_lo(void) +{ + return (mps_pool_class_t)CLASS(LOPool); +} + 
+ +/* LOCheck -- check an LO pool */ + +ATTRIBUTE_UNUSED +static Bool LOCheck(LO lo) +{ + CHECKS(LO, lo); + CHECKC(LOPool, lo); + CHECKD(Pool, &lo->poolStruct); + CHECKC(LOPool, lo); + if (lo->pgen != NULL) { + CHECKL(lo->pgen == &lo->pgenStruct); + CHECKD(PoolGen, lo->pgen); + } + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolmfs.c b/mps/code/poolmfs.c new file mode 100644 index 00000000000..684d1481031 --- /dev/null +++ b/mps/code/poolmfs.c @@ -0,0 +1,424 @@ +/* poolmfs.c: MANUAL FIXED SMALL UNIT POOL + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. 
See end of file for license. + * + * This is the implementation of the MFS pool class. + * + * . + * + * .restriction: This pool cannot allocate from the arena control + * pool (as the control pool is an instance of PoolClassMV and MV uses + * MFS in its implementation), nor can it allocate sub-pools, as that + * causes allocation in the control pool. + * + * Notes + * + * .freelist.fragments: The simple freelist policy might lead to poor + * locality of allocation if the list gets fragmented. + * + * .buffer.not: This pool doesn't support fast cache allocation, which + * is a shame. + */ + +#include "mpscmfs.h" +#include "dbgpool.h" +#include "poolmfs.h" +#include "mpm.h" + +SRCID(poolmfs, "$Id$"); + + +/* ROUND -- Round up + * + * Rounds n up to the nearest multiple of unit. + */ + +#define ROUND(unit, n) ((n)+(unit)-1 - ((n)+(unit)-1)%(unit)) + + +/* HeaderStruct -- Freelist structure */ + +typedef struct MFSHeaderStruct { + struct MFSHeaderStruct *next; +} HeaderStruct, *Header; + + +#define UNIT_MIN sizeof(HeaderStruct) + + +/* MFSVarargs -- decode obsolete varargs */ + +static void MFSVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_EXTEND_BY; + args[0].val.size = va_arg(varargs, Size); + args[1].key = MPS_KEY_MFS_UNIT_SIZE; + args[1].val.size = va_arg(varargs, Size); + args[2].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + +ARG_DEFINE_KEY(MFS_UNIT_SIZE, Size); +ARG_DEFINE_KEY(MFSExtendSelf, Bool); + +static Res MFSInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + Size extendBy = MFS_EXTEND_BY_DEFAULT; + Bool extendSelf = TRUE; + Size unitSize, ringSize, minExtendBy; + MFS mfs; + ArgStruct arg; + Res res; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + UNUSED(klass); /* used for debug pools only */ + + ArgRequire(&arg, args, MPS_KEY_MFS_UNIT_SIZE); + unitSize = arg.val.size; + if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY)) + extendBy = arg.val.size; + if (ArgPick(&arg, args, 
MFSExtendSelf)) + extendSelf = arg.val.b; + + AVER(unitSize > 0); + AVER(extendBy > 0); + AVERT(Bool, extendSelf); + + res = NextMethod(Pool, MFSPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + mfs = CouldBeA(MFSPool, pool); + + mfs->unroundedUnitSize = unitSize; + + if (unitSize < UNIT_MIN) + unitSize = UNIT_MIN; + unitSize = SizeAlignUp(unitSize, MPS_PF_ALIGN); + ringSize = SizeAlignUp(sizeof(RingStruct), MPS_PF_ALIGN); + minExtendBy = ringSize + unitSize; + if (extendBy < minExtendBy) + extendBy = minExtendBy; + + extendBy = SizeArenaGrains(extendBy, arena); + + mfs->extendBy = extendBy; + mfs->extendSelf = extendSelf; + mfs->unitSize = unitSize; + mfs->freeList = NULL; + RingInit(&mfs->extentRing); + mfs->total = 0; + mfs->free = 0; + + SetClassOfPoly(pool, CLASS(MFSPool)); + mfs->sig = MFSSig; + AVERC(MFS, mfs); + + EVENT4(PoolInitMFS, pool, extendBy, BOOLOF(extendSelf), unitSize); + return ResOK; + +failNextInit: + AVER(res != ResOK); + return res; +} + + +void MFSFinishExtents(Pool pool, MFSExtentVisitor visitor, + void *closure) +{ + MFS mfs = MustBeA(MFSPool, pool); + Ring ring, node, next; + + AVER(FUNCHECK(visitor)); + /* Can't check closure */ + + ring = &mfs->extentRing; + node = RingNext(ring); + RING_FOR(node, ring, next) { + Addr base = (Addr)node; /* See .ring-node.at-base. 
*/ + RingRemove(node); + visitor(pool, base, mfs->extendBy, closure); + } +} + + +static void MFSExtentFreeVisitor(Pool pool, Addr base, Size size, + void *closure) +{ + AVER(closure == UNUSED_POINTER); + UNUSED(closure); + ArenaFree(base, size, pool); +} + + +static void MFSFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + MFS mfs = MustBeA(MFSPool, pool); + + MFSFinishExtents(pool, MFSExtentFreeVisitor, UNUSED_POINTER); + + mfs->sig = SigInvalid; + + NextMethod(Inst, MFSPool, finish)(inst); +} + + +void MFSExtend(Pool pool, Addr base, Addr limit) +{ + MFS mfs = MustBeA(MFSPool, pool); + Word i, unitsPerExtent; + Size size; + Size unitSize; + Size ringSize; + Header header = NULL; + Ring mfsRing; + + AVER(base < limit); + AVER(AddrOffset(base, limit) == mfs->extendBy); + + /* Ensure that the memory we're adding belongs to this pool. This is + automatic if it was allocated using ArenaAlloc, but if the memory is + being inserted from elsewhere then it must have been set up correctly. */ + AVER(PoolHasAddr(pool, base)); + + /* .ring-node.at-base: Store the extent ring node at the base of the + extent. This transgresses the rule that pools should allocate + control structures from another pool, because an MFS is required + during bootstrap when no other pools are available. See + */ + mfsRing = (Ring)base; + RingInit(mfsRing); + RingAppend(&mfs->extentRing, mfsRing); + + ringSize = SizeAlignUp(sizeof(RingStruct), MPS_PF_ALIGN); + base = AddrAdd(base, ringSize); + AVER(base < limit); + size = AddrOffset(base, limit); + + /* Update accounting */ + mfs->total += size; + mfs->free += size; + + /* Sew together all the new empty units in the region, working down */ + /* from the top so that they are in ascending order of address on the */ + /* free list. 
*/ + + unitSize = mfs->unitSize; + unitsPerExtent = size/unitSize; + AVER(unitsPerExtent > 0); + +#define SUB(b, s, i) ((Header)AddrAdd(b, (s)*(i))) + + for(i = 0; i < unitsPerExtent; ++i) + { + header = SUB(base, unitSize, unitsPerExtent-i - 1); + AVER(AddrIsAligned(header, pool->alignment)); + AVER(AddrAdd((Addr)header, unitSize) <= AddrAdd(base, size)); + header->next = mfs->freeList; + mfs->freeList = header; + } + +#undef SUB +} + + +/* == Allocate == + * + * Allocation simply involves taking a unit from the front of the freelist + * and returning it. If there are none, a new region is allocated from the + * arena. + */ + +static Res MFSAlloc(Addr *pReturn, Pool pool, Size size) +{ + MFS mfs = MustBeA(MFSPool, pool); + Header f; + Res res; + + AVER(pReturn != NULL); + AVER(size == mfs->unroundedUnitSize); + + f = mfs->freeList; + + /* If the free list is empty then extend the pool with a new region. */ + + if(f == NULL) + { + Addr base; + + /* . */ + if (!mfs->extendSelf) + return ResLIMIT; + + /* Create a new extent and attach it to the pool. */ + res = ArenaAlloc(&base, LocusPrefDefault(), mfs->extendBy, pool); + if(res != ResOK) + return res; + + MFSExtend(pool, base, AddrAdd(base, mfs->extendBy)); + + /* The first unit in the region is now the head of the new free list. */ + f = mfs->freeList; + } + + AVER(f != NULL); + + /* Detach the first free unit from the free list and return its address. */ + + mfs->freeList = f->next; + AVER(mfs->free >= mfs->unitSize); + mfs->free -= mfs->unitSize; + + *pReturn = (Addr)f; + return ResOK; +} + + +/* == Free == + * + * Freeing a unit simply involves pushing it onto the front of the + * freelist. 
+ */ + +static void MFSFree(Pool pool, Addr old, Size size) +{ + MFS mfs = MustBeA(MFSPool, pool); + Header h; + + AVER(old != (Addr)0); + AVER(size == mfs->unroundedUnitSize); + + /* .freelist.fragments */ + h = (Header)old; + h->next = mfs->freeList; + mfs->freeList = h; + mfs->free += mfs->unitSize; +} + + +/* MFSTotalSize -- total memory allocated from the arena */ + +static Size MFSTotalSize(Pool pool) +{ + MFS mfs = MustBeA(MFSPool, pool); + return mfs->total; +} + + +/* MFSFreeSize -- free memory (unused by client program) */ + +static Size MFSFreeSize(Pool pool) +{ + MFS mfs = MustBeA(MFSPool, pool); + return mfs->free; +} + + +static Res MFSDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + MFS mfs = CouldBeA(MFSPool, pool); + Res res; + + if (!TESTC(MFSPool, mfs)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, MFSPool, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + return WriteF(stream, depth + 2, + "unroundedUnitSize $W\n", (WriteFW)mfs->unroundedUnitSize, + "extendBy $W\n", (WriteFW)mfs->extendBy, + "extendSelf $S\n", WriteFYesNo(mfs->extendSelf), + "unitSize $W\n", (WriteFW)mfs->unitSize, + "freeList $P\n", (WriteFP)mfs->freeList, + "total $W\n", (WriteFW)mfs->total, + "free $W\n", (WriteFW)mfs->free, + NULL); +} + + +DEFINE_CLASS(Pool, MFSPool, klass) +{ + INHERIT_CLASS(klass, MFSPool, AbstractPool); + klass->instClassStruct.describe = MFSDescribe; + klass->instClassStruct.finish = MFSFinish; + klass->size = sizeof(MFSStruct); + klass->varargs = MFSVarargs; + klass->init = MFSInit; + klass->alloc = MFSAlloc; + klass->free = MFSFree; + klass->totalSize = MFSTotalSize; + klass->freeSize = MFSFreeSize; + AVERT(PoolClass, klass); +} + + +PoolClass PoolClassMFS(void) +{ + return CLASS(MFSPool); +} + + +mps_pool_class_t mps_class_mfs(void) +{ + return (mps_pool_class_t)PoolClassMFS(); +} + + +Bool MFSCheck(MFS mfs) +{ + Arena arena; + + 
CHECKS(MFS, mfs); + CHECKC(MFSPool, mfs); + CHECKD(Pool, MFSPool(mfs)); + CHECKC(MFSPool, mfs); + CHECKL(mfs->unitSize >= UNIT_MIN); + CHECKL(mfs->extendBy >= UNIT_MIN); + CHECKL(BoolCheck(mfs->extendSelf)); + arena = PoolArena(MFSPool(mfs)); + CHECKL(SizeIsArenaGrains(mfs->extendBy, arena)); + CHECKL(SizeAlignUp(mfs->unroundedUnitSize, PoolAlignment(MFSPool(mfs))) == + mfs->unitSize); + CHECKD_NOSIG(Ring, &mfs->extentRing); + CHECKL(mfs->free <= mfs->total); + CHECKL((mfs->total - mfs->free) % mfs->unitSize == 0); + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/poolmfs.h b/mps/code/poolmfs.h new file mode 100644 index 00000000000..78fe319edc9 --- /dev/null +++ b/mps/code/poolmfs.h @@ -0,0 +1,85 @@ +/* poolmfs.h: MANUAL FIXED SMALL UNIT POOL + * + * $Id$ + * + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * The MFS pool is used to manage small fixed-size chunks of memory. It + * stores control structures in the memory it manages, rather than to one + * side. It therefore achieves better locality for small objects, but + * wastes memory for large objects. It should not be used unless you are + * packing a reasonable number of objects into an arena grain. + * + * Create and Init take the following arguments: + * + * Size extendBy + * + * extendBy is the default number of bytes reserved by the pool at a time. + * A large size will make allocation cheaper but have a higher resource + * overhead. A typical value might be 65536. See note 2. + * + * Size unitSize + * + * unitSize is the size in bytes of the objects you with to allocate. It + * must be larger than the minimum unit size returned by GetInfo, and not + * larger than extendBy. + */ + +#ifndef poolmfs_h +#define poolmfs_h + +#include "mpm.h" +#include "mpscmfs.h" + +typedef struct MFSStruct *MFS; +typedef MFS MFSPool; +DECLARE_CLASS(Pool, MFSPool, AbstractPool); + +#define MFSPool(mfs) (&(mfs)->poolStruct) + +extern PoolClass PoolClassMFS(void); + +extern Bool MFSCheck(MFS mfs); + +extern const struct mps_key_s _mps_key_MFSExtendSelf; +#define MFSExtendSelf (&_mps_key_MFSExtendSelf) +#define MFSExtendSelf_FIELD b + +extern void MFSExtend(Pool pool, Addr base, Addr limit); + +typedef void MFSExtentVisitor(Pool pool, Addr base, Size size, + void *closure); +extern void MFSFinishExtents(Pool pool, MFSExtentVisitor visitor, + void *closure); + +#endif /* poolmfs_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolmrg.c b/mps/code/poolmrg.c new file mode 100644 index 00000000000..a898634f7d5 --- /dev/null +++ b/mps/code/poolmrg.c @@ -0,0 +1,892 @@ +/* poolmrg.c: MANUAL RANK GUARDIAN POOL + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * + * DESIGN + * + * .design: . + * + * NOTES + * + * .improve.rank: At the moment, the pool is a guardian for the final + * rank. It could be generalized to be a guardian for an arbitrary + * rank (a guardian for RankEXACT would tell you if the object was + * ambiguously referenced, for example). 
The code that would need to be + * modified bears this tag. + * + * TRANSGRESSIONS + * + * .addr.void-star: Breaks all over the place, + * accessing the segments acquired from SegAlloc with C pointers. It + * would not be practical to use ArenaPeek/Poke everywhere. Blocks + * acquired from ControlAlloc must be directly accessible from C, or else + * none of the pools would work. Therefore, if we implement a variant + * where Addr != void*, we just use the same magic for the control pool + * and MRG pools, whatever that might be. + */ + +#include "ring.h" +#include "mpm.h" +#include "poolmrg.h" + +SRCID(poolmrg, "$Id$"); + + +/* Types */ + +/* enumerate the states of a guardian */ +enum { + MRGGuardianFREE = 1, + MRGGuardianPREFINAL, + MRGGuardianFINAL +}; + + +/* Link -- Unprotectable part of guardian */ + +typedef struct LinkStruct *Link; +typedef struct LinkStruct { + int state; /* Free, Prefinal, Final */ + union LinkStructUnion { + MessageStruct messageStruct; /* state = Final */ + RingStruct linkRing; /* state one of {Free, Prefinal} */ + } the; +} LinkStruct; + +#define linkOfMessage(message) \ + PARENT(LinkStruct, the, PARENT(union LinkStructUnion, messageStruct, (message))) + +#define linkOfRing(ring) \ + PARENT(LinkStruct, the, PARENT(union LinkStructUnion, linkRing, (ring))) + + +/* RefPart -- Protectable part of guardian + * + * This is trivial, but provides a useful abstraction + * at no performance cost. + */ +typedef struct RefPartStruct *RefPart; +typedef struct RefPartStruct { + Ref ref; +} RefPartStruct; + + +/* MRGRefPartRef,MRGRefPartSetRef -- read and write the reference + * using the software barrier + * + * Might be more efficient to take a seg, rather than calculate it + * every time. + * + * See also .ref.direct which accesses it directly. 
+ */ +static Ref MRGRefPartRef(Arena arena, RefPart refPart) +{ + AVER(refPart != NULL); + + return ArenaRead(arena, &refPart->ref); +} + +static void MRGRefPartSetRef(Arena arena, RefPart refPart, Ref ref) +{ + AVER(refPart != NULL); + + ArenaWrite(arena, &refPart->ref, ref); +} + + +/* MRGStruct -- MRG pool structure */ + +#define MRGSig ((Sig)0x519369B0) /* SIGnature MRG POol */ + +typedef struct MRGStruct { + PoolStruct poolStruct; /* generic pool structure */ + RingStruct entryRing; /* */ + RingStruct freeRing; /* */ + RingStruct refRing; /* */ + Size extendBy; /* */ + Sig sig; /* design.mps.sig.field.end.outer */ +} MRGStruct; + +typedef MRG MRGPool; +#define MRGPoolCheck MRGCheck +DECLARE_CLASS(Pool, MRGPool, AbstractPool); + + +/* MRGCheck -- check an MRG pool */ + + +ATTRIBUTE_UNUSED +static Bool MRGCheck(MRG mrg) +{ + Pool pool = CouldBeA(AbstractPool, mrg); + CHECKS(MRG, mrg); + CHECKC(MRGPool, mrg); + CHECKD(Pool, pool); + CHECKC(MRGPool, mrg); + CHECKD_NOSIG(Ring, &mrg->entryRing); + CHECKD_NOSIG(Ring, &mrg->freeRing); + CHECKD_NOSIG(Ring, &mrg->refRing); + CHECKL(mrg->extendBy == ArenaGrainSize(PoolArena(pool))); + return TRUE; +} + + +#define MRGRefSegSig ((Sig)0x51936965) /* SIGnature MRG Ref Seg */ +#define MRGLinkSegSig ((Sig)0x51936915) /* SIGnature MRG Link Seg */ + +typedef struct MRGLinkSegStruct *MRGLinkSeg; +typedef struct MRGRefSegStruct *MRGRefSeg; + +typedef struct MRGLinkSegStruct { + SegStruct segStruct; /* superclass fields must come first */ + MRGRefSeg refSeg; /* */ + Sig sig; /* design.mps.sig.field.end.outer */ +} MRGLinkSegStruct; + +typedef struct MRGRefSegStruct { + GCSegStruct gcSegStruct; /* superclass fields must come first */ + RingStruct mrgRing; /* */ + MRGLinkSeg linkSeg; /* */ + Sig sig; /* design.mps.sig.field.end.outer */ +} MRGRefSegStruct; + + +/* forward declarations */ + +DECLARE_CLASS(Seg, MRGLinkSeg, Seg); +DECLARE_CLASS(Seg, MRGRefSeg, GCSeg); +static Res mrgRefSegScan(Bool *totalReturn, Seg seg, ScanState ss); 
+ + +/* MRGLinkSegCheck -- check a link segment + * + * .link.nullref: During initialization of a link segment the refSeg + * field will be NULL. This will be initialized when the reference + * segment is initialized. . + */ + +ATTRIBUTE_UNUSED +static Bool MRGLinkSegCheck(MRGLinkSeg linkseg) +{ + Seg seg = CouldBeA(Seg, linkseg); + + CHECKS(MRGLinkSeg, linkseg); + CHECKD(Seg, seg); + if (NULL != linkseg->refSeg) { /* see .link.nullref */ + CHECKU(MRGRefSeg, linkseg->refSeg); + CHECKL(SegPool(seg) == SegPool(CouldBeA(Seg, linkseg->refSeg))); + CHECKL(linkseg->refSeg->linkSeg == linkseg); + } + return TRUE; +} + +ATTRIBUTE_UNUSED +static Bool MRGRefSegCheck(MRGRefSeg refseg) +{ + GCSeg gcseg = CouldBeA(GCSeg, refseg); + Seg seg = CouldBeA(Seg, gcseg); + + CHECKS(MRGRefSeg, refseg); + CHECKD(GCSeg, gcseg); + CHECKL(SegPool(seg) == SegPool(CouldBeA(Seg, refseg->linkSeg))); + CHECKD_NOSIG(Ring, &refseg->mrgRing); + CHECKD(MRGLinkSeg, refseg->linkSeg); + CHECKL(refseg->linkSeg->refSeg == refseg); + return TRUE; +} + + +/* MRGLinkSegInit -- initialise a link segment */ + +static Res MRGLinkSegInit(Seg seg, Pool pool, Addr base, Size size, + ArgList args) +{ + MRGLinkSeg linkseg; + Res res; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, MRGLinkSeg, init)(seg, pool, base, size, args); + if (res != ResOK) + return res; + linkseg = CouldBeA(MRGLinkSeg, seg); + + /* no useful checks for base and size */ + + linkseg->refSeg = NULL; /* .link.nullref */ + + SetClassOfPoly(seg, CLASS(MRGLinkSeg)); + linkseg->sig = MRGLinkSegSig; + AVERC(MRGLinkSeg, linkseg); + + return ResOK; +} + + +/* MRGLinkSegFinish -- finish a link segment */ + +static void mrgLinkSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + MRGLinkSeg linkseg = MustBeA(MRGLinkSeg, seg); + + linkseg->sig = SigInvalid; + + /* finish the superclass fields last */ + NextMethod(Inst, MRGLinkSeg, finish)(inst); +} + + +/* MRGRefSegInit -- initialise a ref segment */ + 
+ARG_DEFINE_KEY(mrg_seg_link_seg, Pointer); +#define mrgKeyLinkSeg (&_mps_key_mrg_seg_link_seg) + +static Res MRGRefSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + MRGLinkSeg linkseg; + MRGRefSeg refseg; + MRG mrg = MustBeA(MRGPool, pool); + Res res; + ArgStruct arg; + + /* .ref.initarg: The paired link segment is passed as a keyword + argument when creating the ref segment. Initially the + refSeg field of the link segment is NULL (see .link.nullref). + It's initialized here to the newly initialized ref segment. */ + ArgRequire(&arg, args, mrgKeyLinkSeg); + linkseg = arg.val.p; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, MRGRefSeg, init)(seg, pool, base, size, args); + if (res != ResOK) + return res; + refseg = CouldBeA(MRGRefSeg, seg); + + /* no useful checks for base and size */ + AVERT(MRGLinkSeg, linkseg); + + /* , .improve.rank */ + SegSetRankSet(seg, RankSetSingle(RankFINAL)); + + RingInit(&refseg->mrgRing); + RingAppend(&mrg->refRing, &refseg->mrgRing); + refseg->linkSeg = linkseg; + AVER(NULL == linkseg->refSeg); /* .link.nullref */ + + SetClassOfPoly(seg, CLASS(MRGRefSeg)); + refseg->sig = MRGRefSegSig; + linkseg->refSeg = refseg; /* .ref.initarg */ + + AVERC(MRGRefSeg, refseg); + AVERT(MRGLinkSeg, linkseg); + + return ResOK; +} + + +/* MRGRefSegFinish -- finish a ref segment */ + +static void mrgRefSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + MRGRefSeg refseg = MustBeA(MRGRefSeg, seg); + + refseg->sig = SigInvalid; + + /* finish the superclass fields last */ + NextMethod(Inst, MRGRefSeg, finish)(inst); +} + + +/* MRGLinkSegClass -- Class definition */ + +DEFINE_CLASS(Seg, MRGLinkSeg, klass) +{ + INHERIT_CLASS(klass, MRGLinkSeg, Seg); + SegClassMixInNoSplitMerge(klass); /* no support for this */ + klass->instClassStruct.finish = mrgLinkSegFinish; + klass->size = sizeof(MRGLinkSegStruct); + klass->init = MRGLinkSegInit; + AVERT(SegClass, klass); +} + + +/* MRGRefSegClass -- 
Class definition */ + +DEFINE_CLASS(Seg, MRGRefSeg, klass) +{ + INHERIT_CLASS(klass, MRGRefSeg, GCSeg); + SegClassMixInNoSplitMerge(klass); /* no support for this */ + klass->instClassStruct.finish = mrgRefSegFinish; + klass->size = sizeof(MRGRefSegStruct); + klass->init = MRGRefSegInit; + klass->scan = mrgRefSegScan; + AVERT(SegClass, klass); +} + + +static Count MRGGuardiansPerSeg(MRG mrg) +{ + Count nGuardians; + AVERT(MRG, mrg); + + nGuardians = mrg->extendBy / sizeof(Ref); + AVER(nGuardians > 0); + + return nGuardians; +} + + +/* */ + + +#define refPartOfIndex(refseg, index) \ + ((RefPart)SegBase(MustBeA(Seg, refseg)) + (index)) + + +static RefPart MRGRefPartOfLink(Link link, Arena arena) +{ + Seg seg = NULL; /* suppress "may be used uninitialized" */ + Bool b; + Link linkBase; + Index indx; + MRGLinkSeg linkseg; + + AVER(link != NULL); /* Better checks done by SegOfAddr */ + + b = SegOfAddr(&seg, arena, (Addr)link); + AVER(b); + AVERC(MRGPool, SegPool(seg)); + linkseg = MustBeA(MRGLinkSeg, seg); + linkBase = (Link)SegBase(seg); + AVER(link >= linkBase); + indx = (Index)(link - linkBase); + AVER(indx < MRGGuardiansPerSeg(MustBeA(MRGPool, SegPool(seg)))); + + return refPartOfIndex(linkseg->refSeg, indx); +} + + +#define linkOfIndex(linkseg, index) \ + ((Link)SegBase(MustBeA(Seg, linkseg)) + (index)) + + +#if 0 +static Link MRGLinkOfRefPart(RefPart refPart, Arena arena) +{ + Seg seg; + Bool b; + RefPart refPartBase; + Index indx; + MRGRefSeg refseg; + + AVER(refPart != NULL); /* Better checks done by SegOfAddr */ + + b = SegOfAddr(&seg, arena, (Addr)refPart); + AVER(b); + AVER(SegPool(seg)->klass == PoolClassMRG()); + refseg = Seg2RefSeg(seg); + AVERT(MRGRefSeg, refseg); + refPartBase = (RefPart)SegBase(seg); + AVER(refPart >= refPartBase); + indx = refPart - refPartBase; + AVER(indx < MRGGuardiansPerSeg(PoolMRG(SegPool(seg)))); + + return linkOfIndex(refseg->linkSeg, indx); +} +#endif + + +/* MRGGuardianInit -- Initialises both parts of a guardian */ + +static 
void MRGGuardianInit(MRG mrg, Link link, RefPart refPart) +{ + AVERT(MRG, mrg); + AVER(link != NULL); + AVER(refPart != NULL); + + RingInit(&link->the.linkRing); + link->state = MRGGuardianFREE; + RingAppend(&mrg->freeRing, &link->the.linkRing); + /* */ + MRGRefPartSetRef(PoolArena(MustBeA(AbstractPool, mrg)), refPart, 0); +} + + +/* MRGMessage* -- Implementation of MRG's MessageClass */ + + +/* MRGMessageDelete -- deletes the message (frees up the guardian) */ + +static void MRGMessageDelete(Message message) +{ + Pool pool = NULL; /* suppress "may be used uninitialized" */ + Arena arena; + Link link; + Bool b; + + AVERT(Message, message); + + arena = MessageArena(message); + b = PoolOfAddr(&pool, arena, (Addr)message); + AVER(b); + AVERC(MRGPool, pool); + + link = linkOfMessage(message); + AVER(link->state == MRGGuardianFINAL); + MessageFinish(message); + MRGGuardianInit(MustBeA(MRGPool, pool), link, MRGRefPartOfLink(link, arena)); +} + + +/* MRGMessageFinalizationRef -- extract the finalized reference from the msg */ + +static void MRGMessageFinalizationRef(Ref *refReturn, + Arena arena, Message message) +{ + Link link; + Ref ref; + RefPart refPart; + + AVER(refReturn != NULL); + AVERT(Arena, arena); + AVERT(Message, message); + + AVER(MessageGetType(message) == MessageTypeFINALIZATION); + + link = linkOfMessage(message); + AVER(link->state == MRGGuardianFINAL); + refPart = MRGRefPartOfLink(link, arena); + + ref = MRGRefPartRef(arena, refPart); + AVER(ref != 0); + *refReturn = ref; +} + + +static MessageClassStruct MRGMessageClassStruct = { + MessageClassSig, /* sig */ + "MRGFinal", /* name */ + MessageTypeFINALIZATION, /* Message Type */ + MRGMessageDelete, /* Delete */ + MRGMessageFinalizationRef, /* FinalizationRef */ + MessageNoGCLiveSize, /* GCLiveSize */ + MessageNoGCCondemnedSize, /* GCCondemnedSize */ + MessageNoGCNotCondemnedSize, /* GCNotCondemnedSize */ + MessageNoGCStartWhy, /* GCStartWhy */ + MessageClassSig /* */ +}; + + +/* MRGSegPairDestroy --- 
Destroys a pair of segments (link & ref) + * + * .segpair.destroy: We don't worry about the effect that destroying + * these segs has on any of the pool rings. + */ +static void MRGSegPairDestroy(MRGRefSeg refseg) +{ + RingRemove(&refseg->mrgRing); + RingFinish(&refseg->mrgRing); + SegFree(MustBeA(Seg, refseg->linkSeg)); + SegFree(MustBeA(Seg, refseg)); +} + + +/* MRGSegPairCreate -- create a pair of segments (link & ref) */ + +static Res MRGSegPairCreate(MRGRefSeg *refSegReturn, MRG mrg) +{ + Pool pool = MustBeA(AbstractPool, mrg); + Arena arena = PoolArena(pool); + RefPart refPartBase; + Count nGuardians; /* guardians per seg */ + Index i; + Link linkBase; + Res res; + Seg segLink, segRefPart; + MRGLinkSeg linkseg; + MRGRefSeg refseg; + Size linkSegSize; + + AVER(refSegReturn != NULL); + + nGuardians = MRGGuardiansPerSeg(mrg); + linkSegSize = nGuardians * sizeof(LinkStruct); + linkSegSize = SizeArenaGrains(linkSegSize, arena); + + res = SegAlloc(&segLink, CLASS(MRGLinkSeg), + LocusPrefDefault(), linkSegSize, pool, + argsNone); + if (res != ResOK) + goto failLinkSegAlloc; + linkseg = MustBeA(MRGLinkSeg, segLink); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD_FIELD(args, mrgKeyLinkSeg, p, linkseg); /* .ref.initarg */ + res = SegAlloc(&segRefPart, CLASS(MRGRefSeg), + LocusPrefDefault(), mrg->extendBy, pool, + args); + } MPS_ARGS_END(args); + if (res != ResOK) + goto failRefPartSegAlloc; + refseg = MustBeA(MRGRefSeg, segRefPart); + + linkBase = (Link)SegBase(segLink); + refPartBase = (RefPart)SegBase(segRefPart); + + for(i = 0; i < nGuardians; ++i) + MRGGuardianInit(mrg, linkBase + i, refPartBase + i); + AVER((Addr)(&linkBase[i]) <= SegLimit(segLink)); + AVER((Addr)(&refPartBase[i]) <= SegLimit(segRefPart)); + + *refSegReturn = refseg; + + return ResOK; + +failRefPartSegAlloc: + SegFree(segLink); +failLinkSegAlloc: + return res; +} + + +/* MRGFinalize -- finalize the indexth guardian in the segment */ + +static void MRGFinalize(Arena arena, MRGLinkSeg linkseg, Index 
indx) +{ + Link link; + Message message; + + AVER(indx < MRGGuardiansPerSeg(MustBeA(MRGPool, SegPool(MustBeA(Seg, linkseg))))); + + link = linkOfIndex(linkseg, indx); + + /* only finalize it if it hasn't been finalized already */ + if (link->state != MRGGuardianFINAL) { + AVER(link->state == MRGGuardianPREFINAL); + RingRemove(&link->the.linkRing); + RingFinish(&link->the.linkRing); + link->state = MRGGuardianFINAL; + message = &link->the.messageStruct; + MessageInit(arena, message, &MRGMessageClassStruct, MessageTypeFINALIZATION); + MessagePost(arena, message); + } +} + + +static Res mrgRefSegScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + MRGRefSeg refseg = MustBeA(MRGRefSeg, seg); + Pool pool = SegPool(seg); + MRG mrg = MustBeA(MRGPool, pool); + + Res res; + Arena arena; + MRGLinkSeg linkseg; + RefPart refPart; + Index i; + Count nGuardians; + + AVERT(ScanState, ss); + + arena = PoolArena(pool); + linkseg = refseg->linkSeg; + + nGuardians = MRGGuardiansPerSeg(mrg); + AVER(nGuardians > 0); + TRACE_SCAN_BEGIN(ss) { + for(i=0; i < nGuardians; ++i) { + refPart = refPartOfIndex(refseg, i); + + /* free guardians are not scanned */ + if (linkOfIndex(linkseg, i)->state != MRGGuardianFREE) { + ss->wasMarked = TRUE; + /* .ref.direct: We can access the reference directly */ + /* because we are in a scan and the shield is exposed. 
*/ + if (TRACE_FIX1(ss, refPart->ref)) { + res = TRACE_FIX2(ss, &(refPart->ref)); + if (res != ResOK) { + *totalReturn = FALSE; + return res; + } + + if (ss->rank == RankFINAL && !ss->wasMarked) { /* .improve.rank */ + MRGFinalize(arena, linkseg, i); + } + } + ss->scannedSize += sizeof *refPart; + } + } + } TRACE_SCAN_END(ss); + + *totalReturn = TRUE; + return ResOK; +} + + +/* MRGInit -- init method for MRG */ + +static Res MRGInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + MRG mrg; + Res res; + + AVER(pool != NULL); + AVERT(ArgList, args); + UNUSED(args); + UNUSED(klass); /* used for debug pools only */ + + res = NextMethod(Pool, MRGPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + mrg = CouldBeA(MRGPool, pool); + + RingInit(&mrg->entryRing); + RingInit(&mrg->freeRing); + RingInit(&mrg->refRing); + mrg->extendBy = ArenaGrainSize(PoolArena(pool)); + + SetClassOfPoly(pool, CLASS(MRGPool)); + mrg->sig = MRGSig; + AVERC(MRGPool, mrg); + + return ResOK; + +failNextInit: + AVER(res != ResOK); + return res; +} + + +/* MRGFinish -- finish a MRG pool */ + +static void MRGFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + MRG mrg = MustBeA(MRGPool, pool); + Ring node, nextNode; + + /* .finish.ring: Before destroying the segments, we isolate the */ + /* rings in the pool structure. The problem we are avoiding here */ + /* is when the rings point to memory that has been unmapped by one */ + /* segPairDestroy and a subsequent segPairDestroy calls MRGCheck which */ + /* checks the rings which causes the program to fault because */ + /* RingCheck will access unmapped memory. */ + + /* We call RingRemove on the master node for the rings, thereby */ + /* effectively emptying them, but leaving the rest of the ring */ + /* "dangling". This is okay as we are about to destroy all the */ + /* segments so the contents of the rings will disappear soon. 
*/ + + /* .finish.no-final: Note that this relies on the fact that no */ + /* Guardians are in the FINAL state and hence on the Arena Message */ + /* Queue. We are guaranteed this because MRGFinish is only called */ + /* from ArenaDestroy, and the message queue has been emptied prior */ + /* to the call. See */ + + if (!RingIsSingle(&mrg->entryRing)) { + RingRemove(&mrg->entryRing); + } + if (!RingIsSingle(&mrg->freeRing)) { + RingRemove(&mrg->freeRing); + } + + RING_FOR(node, &mrg->refRing, nextNode) { + MRGRefSeg refseg = RING_ELT(MRGRefSeg, mrgRing, node); + MRGSegPairDestroy(refseg); + } + + mrg->sig = SigInvalid; + RingFinish(&mrg->refRing); + /* */ + + NextMethod(Inst, MRGPool, finish)(inst); +} + + +/* MRGRegister -- register an object for finalization */ + +Res MRGRegister(Pool pool, Ref ref) +{ + MRG mrg = MustBeA(MRGPool, pool); + Arena arena = PoolArena(pool); + Ring freeNode; + Link link; + RefPart refPart; + Res res; + MRGRefSeg junk; /* unused */ + + AVER(ref != 0); + + /* */ + if (RingIsSingle(&mrg->freeRing)) { + res = MRGSegPairCreate(&junk, mrg); + if (res != ResOK) + return res; + } + AVER(!RingIsSingle(&mrg->freeRing)); + freeNode = RingNext(&mrg->freeRing); + + link = linkOfRing(freeNode); + AVER(link->state == MRGGuardianFREE); + /* */ + RingRemove(freeNode); + link->state = MRGGuardianPREFINAL; + RingAppend(&mrg->entryRing, freeNode); + + /* */ + refPart = MRGRefPartOfLink(link, arena); + MRGRefPartSetRef(arena, refPart, ref); + + return ResOK; +} + + +/* MRGDeregister -- deregister (once) an object for finalization + * + * TODO: Definalization loops over all finalizable objects in the heap, + * and so using it could accidentally be disastrous for performance. + * See job003953 and back out changelist 187123 if this is fixed. 
+ */ + +Res MRGDeregister(Pool pool, Ref obj) +{ + MRG mrg = MustBeA(MRGPool, pool); + Arena arena = PoolArena(pool); + Ring node, nextNode; + Count nGuardians; /* guardians per seg */ + + /* Can't check obj */ + + nGuardians = MRGGuardiansPerSeg(mrg); + + /* map over the segments */ + RING_FOR(node, &mrg->refRing, nextNode) { + MRGRefSeg refSeg = RING_ELT(MRGRefSeg, mrgRing, node); + MRGLinkSeg linkSeg; + Count i; + Link linkBase; + RefPart refPartBase; + + AVERT(MRGRefSeg, refSeg); + linkSeg = refSeg->linkSeg; + linkBase = (Link)SegBase(MustBeA(Seg, linkSeg)); + refPartBase = (RefPart)SegBase(MustBeA(Seg, refSeg)); + /* map over each guardian in the segment */ + for (i = 0; i < nGuardians; ++i) { + Link link = linkBase + i; + RefPart refPart = refPartBase + i; + /* check if it's allocated and points to obj */ + if (link->state == MRGGuardianPREFINAL + && MRGRefPartRef(arena, refPart) == obj) { + RingRemove(&link->the.linkRing); + RingFinish(&link->the.linkRing); + MRGGuardianInit(mrg, link, refPart); + return ResOK; + } + } + } + return ResFAIL; +} + + +/* MRGDescribe -- describe an MRG pool + * + * This could be improved by implementing MRGSegDescribe + * and having MRGDescribe iterate over all the pool's segments. 
+ */ + +static Res MRGDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + MRG mrg = CouldBeA(MRGPool, pool); + Arena arena; + Ring node, nextNode; + RefPart refPart; + Res res; + + if (!TESTC(MRGPool, mrg)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, MRGPool, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, "extendBy $W\n", (WriteFW)mrg->extendBy, NULL); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, "Entry queue:\n", NULL); + if (res != ResOK) + return res; + arena = PoolArena(pool); + RING_FOR(node, &mrg->entryRing, nextNode) { + Bool outsideShield = !ArenaShield(arena)->inside; + refPart = MRGRefPartOfLink(linkOfRing(node), arena); + if (outsideShield) { + ShieldEnter(arena); + } + res = WriteF(stream, depth + 2, "at $A Ref $A\n", + (WriteFA)refPart, (WriteFA)MRGRefPartRef(arena, refPart), + NULL); + if (outsideShield) { + ShieldLeave(arena); + } + if (res != ResOK) + return res; + } + + return ResOK; +} + + +DEFINE_CLASS(Pool, MRGPool, klass) +{ + INHERIT_CLASS(klass, MRGPool, AbstractPool); + klass->instClassStruct.describe = MRGDescribe; + klass->instClassStruct.finish = MRGFinish; + klass->size = sizeof(MRGStruct); + klass->init = MRGInit; + AVERT(PoolClass, klass); +} + + +PoolClass PoolClassMRG(void) +{ + return CLASS(MRGPool); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolmrg.h b/mps/code/poolmrg.h new file mode 100644 index 00000000000..7c9f48df285 --- /dev/null +++ b/mps/code/poolmrg.h @@ -0,0 +1,49 @@ +/* poolmrg.h: MANUAL RANK GUARDIAN POOL CLASS INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + */ + +#ifndef poolmrg_h +#define poolmrg_h + +#include "mpmtypes.h" + +typedef struct MRGStruct *MRG; + +extern PoolClass PoolClassMRG(void); +extern Res MRGRegister(Pool, Ref); +extern Res MRGDeregister(Pool, Ref); + +#endif /* poolmrg_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolmv2.c b/mps/code/poolmv2.c new file mode 100644 index 00000000000..13a4af7cccf --- /dev/null +++ b/mps/code/poolmv2.c @@ -0,0 +1,1386 @@ +/* poolmv2.c: MANUAL VARIABLE-SIZED TEMPORAL POOL + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: A manual-variable pool designed to take advantage of + * placement according to predicted deathtime. + * + * .design: . 
+ */ + +#include "mpm.h" +#include "poolmv2.h" +#include "mpscmvt.h" +#include "abq.h" +#include "cbs.h" +#include "failover.h" +#include "freelist.h" +#include "meter.h" +#include "range.h" + +SRCID(poolmv2, "$Id$"); + + +/* Signatures */ + +#define MVTSig ((Sig)0x5193F299) /* SIGnature MVT */ + + +/* Private prototypes */ + +typedef struct MVTStruct *MVT; +static void MVTVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs); +static Res MVTInit(Pool pool, Arena arena, PoolClass klass, ArgList arg); +static Bool MVTCheck(MVT mvt); +static void MVTFinish(Inst inst); +static Res MVTBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size minSize); +static void MVTBufferEmpty(Pool pool, Buffer buffer); +static void MVTFree(Pool pool, Addr base, Size size); +static Res MVTDescribe(Inst inst, mps_lib_FILE *stream, Count depth); +static Size MVTTotalSize(Pool pool); +static Size MVTFreeSize(Pool pool); +static Res MVTSegAlloc(Seg *segReturn, MVT mvt, Size size); + +static void MVTSegFree(MVT mvt, Seg seg); +static Bool MVTReturnSegs(MVT mvt, Range range, Arena arena); +static Res MVTInsert(MVT mvt, Addr base, Addr limit); +static Res MVTDelete(MVT mvt, Addr base, Addr limit); +static void MVTRefillABQIfEmpty(MVT mvt, Size size); +static Res MVTContingencySearch(Addr *baseReturn, Addr *limitReturn, + MVT mvt, Size min); +static Bool MVTCheckFit(Addr base, Addr limit, Size min, Arena arena); +static ABQ MVTABQ(MVT mvt); +static Land MVTFreePrimary(MVT mvt); +static Land MVTFreeSecondary(MVT mvt); +static Land MVTFreeLand(MVT mvt); + +typedef MVT MVTPool; +DECLARE_CLASS(Pool, MVTPool, AbstractBufferPool); + + +/* Types */ + +typedef struct MVTStruct +{ + PoolStruct poolStruct; + CBSStruct cbsStruct; /* The coalescing block structure */ + FreelistStruct flStruct; /* The emergency free list structure */ + FailoverStruct foStruct; /* The fail-over mechanism */ + ABQStruct abqStruct; /* The available block queue */ + /* */ + Size minSize; /* Pool 
parameter */ + Size meanSize; /* Pool parameter */ + Size maxSize; /* Pool parameter */ + Count fragLimit; /* Pool parameter */ + /* */ + Size reuseSize; /* Size at which blocks are recycled */ + /* */ + Size fillSize; /* Size of pool segments */ + /* */ + Size availLimit; /* Limit on available */ + /* */ + Bool abqOverflow; /* ABQ dropped some candidates */ + /* */ + Bool splinter; /* Saved splinter */ + Addr splinterBase; /* Saved splinter base */ + Addr splinterLimit; /* Saved splinter size */ + + /* pool accounting --- one of these first four is redundant, but + size and available are used to implement fragmentation policy */ + Size size; /* size of segs in pool */ + Size allocated; /* bytes allocated to mutator */ + Size available; /* bytes available for allocation */ + Size unavailable; /* bytes lost to fragmentation */ + + /* pool meters*/ + METER_DECL(segAllocs) + METER_DECL(segFrees) + METER_DECL(bufferFills) + METER_DECL(bufferEmpties) + METER_DECL(poolFrees) + METER_DECL(poolSize) + METER_DECL(poolAllocated) + METER_DECL(poolAvailable) + METER_DECL(poolUnavailable) + METER_DECL(poolUtilization) + /* abq meters */ + METER_DECL(finds) + METER_DECL(overflows) + METER_DECL(underflows) + METER_DECL(refills) + METER_DECL(refillPushes) + METER_DECL(returns) + /* fragmentation meters */ + METER_DECL(perfectFits) + METER_DECL(firstFits) + METER_DECL(secondFits) + METER_DECL(failures) + /* contingency meters */ + METER_DECL(emergencyContingencies) + METER_DECL(fragLimitContingencies) + METER_DECL(contingencySearches) + METER_DECL(contingencyHardSearches) + /* splinter meters */ + METER_DECL(splinters) + METER_DECL(splintersUsed) + METER_DECL(splintersDropped) + METER_DECL(sawdust) + /* exception meters */ + METER_DECL(exceptions) + METER_DECL(exceptionSplinters) + METER_DECL(exceptionReturns) + + Sig sig; /* design.mps.sig.field.end.outer */ +} MVTStruct; + + +DEFINE_CLASS(Pool, MVTPool, klass) +{ + INHERIT_CLASS(klass, MVTPool, AbstractBufferPool); + 
klass->instClassStruct.describe = MVTDescribe; + klass->instClassStruct.finish = MVTFinish; + klass->size = sizeof(MVTStruct); + klass->varargs = MVTVarargs; + klass->init = MVTInit; + klass->free = MVTFree; + klass->bufferFill = MVTBufferFill; + klass->bufferEmpty = MVTBufferEmpty; + klass->totalSize = MVTTotalSize; + klass->freeSize = MVTFreeSize; + AVERT(PoolClass, klass); +} + +/* Macros */ + +#define PoolMVT(pool) PARENT(MVTStruct, poolStruct, pool) +#define MVTPool(mvt) (&(mvt)->poolStruct) + + +/* Accessors */ + + +static ABQ MVTABQ(MVT mvt) +{ + return &mvt->abqStruct; +} + + +static Land MVTFreePrimary(MVT mvt) +{ + return CBSLand(&mvt->cbsStruct); +} + + +static Land MVTFreeSecondary(MVT mvt) +{ + return FreelistLand(&mvt->flStruct); +} + + +static Land MVTFreeLand(MVT mvt) +{ + return FailoverLand(&mvt->foStruct); +} + + +/* Methods */ + + +/* MVTVarargs -- decode obsolete varargs */ + +static void MVTVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_MIN_SIZE; + args[0].val.size = va_arg(varargs, Size); + args[1].key = MPS_KEY_MEAN_SIZE; + args[1].val.size = va_arg(varargs, Size); + args[2].key = MPS_KEY_MAX_SIZE; + args[2].val.size = va_arg(varargs, Size); + args[3].key = MPS_KEY_MVT_RESERVE_DEPTH; + args[3].val.count = va_arg(varargs, Count); + /* Divide the old "percentage" argument by 100, fixing job003319. 
*/ + args[4].key = MPS_KEY_MVT_FRAG_LIMIT; + args[4].val.d = (double)va_arg(varargs, Count) / 100.0; + args[5].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + +/* MVTInit -- initialize an MVT pool + * + * Parameters are: + * minSize, meanSize, maxSize, reserveDepth, fragLimit + */ + +ARG_DEFINE_KEY(MVT_MIN_SIZE, Size); +ARG_DEFINE_KEY(MVT_MEAN_SIZE, Size); +ARG_DEFINE_KEY(MVT_MAX_SIZE, Size); +ARG_DEFINE_KEY(MVT_RESERVE_DEPTH, Count); +ARG_DEFINE_KEY(MVT_FRAG_LIMIT, double); + +static Res MVTInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + Size align = MVT_ALIGN_DEFAULT; + Size minSize = MVT_MIN_SIZE_DEFAULT; + Size meanSize = MVT_MEAN_SIZE_DEFAULT; + Size maxSize = MVT_MAX_SIZE_DEFAULT; + Count reserveDepth = MVT_RESERVE_DEPTH_DEFAULT; + Count fragLimit = MVT_FRAG_LIMIT_DEFAULT; + Size reuseSize, fillSize; + Count abqDepth; + MVT mvt; + Res res; + ArgStruct arg; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + UNUSED(klass); /* used for debug pools only */ + + if (ArgPick(&arg, args, MPS_KEY_ALIGN)) + align = arg.val.align; + if (ArgPick(&arg, args, MPS_KEY_MIN_SIZE)) + minSize = arg.val.size; + if (ArgPick(&arg, args, MPS_KEY_MEAN_SIZE)) + meanSize = arg.val.size; + if (ArgPick(&arg, args, MPS_KEY_MAX_SIZE)) + maxSize = arg.val.size; + if (ArgPick(&arg, args, MPS_KEY_MVT_RESERVE_DEPTH)) + reserveDepth = arg.val.count; + if (ArgPick(&arg, args, MPS_KEY_MVT_FRAG_LIMIT)) { + /* pending complete fix for job003319 */ + AVER(0 <= arg.val.d); + AVER(arg.val.d <= 1); + fragLimit = (Count)(arg.val.d * 100); + } + + AVERT(Align, align); + /* This restriction on the alignment is necessary because of the use + of a Freelist to store the free address ranges in low-memory + situations. . 
*/ + AVER(AlignIsAligned(align, FreelistMinimumAlignment)); + AVER(align <= ArenaGrainSize(arena)); + AVER(0 < minSize); + AVER(minSize <= meanSize); + AVER(meanSize <= maxSize); + AVER(reserveDepth > 0); + AVER(fragLimit <= 100); + /* TODO: More parameter checks possible? */ + + /* see */ + fillSize = SizeArenaGrains(maxSize, arena); + /* see */ + reuseSize = 2 * fillSize; + abqDepth = (reserveDepth * meanSize + reuseSize - 1) / reuseSize; + /* keep the abq from being useless */ + if (abqDepth < 3) + abqDepth = 3; + + res = NextMethod(Pool, MVTPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + mvt = CouldBeA(MVTPool, pool); + + res = LandInit(MVTFreePrimary(mvt), CLASS(CBSFast), arena, align, mvt, + mps_args_none); + if (res != ResOK) + goto failFreePrimaryInit; + + res = LandInit(MVTFreeSecondary(mvt), CLASS(Freelist), arena, align, + mvt, mps_args_none); + if (res != ResOK) + goto failFreeSecondaryInit; + + MPS_ARGS_BEGIN(foArgs) { + MPS_ARGS_ADD(foArgs, FailoverPrimary, MVTFreePrimary(mvt)); + MPS_ARGS_ADD(foArgs, FailoverSecondary, MVTFreeSecondary(mvt)); + res = LandInit(MVTFreeLand(mvt), CLASS(Failover), arena, align, mvt, + foArgs); + } MPS_ARGS_END(foArgs); + if (res != ResOK) + goto failFreeLandInit; + + res = ABQInit(arena, MVTABQ(mvt), (void *)mvt, abqDepth, sizeof(RangeStruct)); + if (res != ResOK) + goto failABQInit; + + pool->alignment = align; + pool->alignShift = SizeLog2(pool->alignment); + mvt->reuseSize = reuseSize; + mvt->fillSize = fillSize; + mvt->abqOverflow = FALSE; + mvt->minSize = minSize; + mvt->meanSize = meanSize; + mvt->maxSize = maxSize; + mvt->fragLimit = fragLimit; + mvt->splinter = FALSE; + mvt->splinterBase = (Addr)0; + mvt->splinterLimit = (Addr)0; + + /* accounting */ + mvt->size = 0; + mvt->allocated = 0; + mvt->available = 0; + mvt->availLimit = 0; + mvt->unavailable = 0; + + /* meters*/ + METER_INIT(mvt->segAllocs, "segment allocations", (void *)mvt); + METER_INIT(mvt->segFrees, "segment 
frees", (void *)mvt); + METER_INIT(mvt->bufferFills, "buffer fills", (void *)mvt); + METER_INIT(mvt->bufferEmpties, "buffer empties", (void *)mvt); + METER_INIT(mvt->poolFrees, "pool frees", (void *)mvt); + METER_INIT(mvt->poolSize, "pool size", (void *)mvt); + METER_INIT(mvt->poolAllocated, "pool allocated", (void *)mvt); + METER_INIT(mvt->poolAvailable, "pool available", (void *)mvt); + METER_INIT(mvt->poolUnavailable, "pool unavailable", (void *)mvt); + METER_INIT(mvt->poolUtilization, "pool utilization", (void *)mvt); + METER_INIT(mvt->finds, "ABQ finds", (void *)mvt); + METER_INIT(mvt->overflows, "ABQ overflows", (void *)mvt); + METER_INIT(mvt->underflows, "ABQ underflows", (void *)mvt); + METER_INIT(mvt->refills, "ABQ refills", (void *)mvt); + METER_INIT(mvt->refillPushes, "ABQ refill pushes", (void *)mvt); + METER_INIT(mvt->returns, "ABQ returns", (void *)mvt); + METER_INIT(mvt->perfectFits, "perfect fits", (void *)mvt); + METER_INIT(mvt->firstFits, "first fits", (void *)mvt); + METER_INIT(mvt->secondFits, "second fits", (void *)mvt); + METER_INIT(mvt->failures, "failures", (void *)mvt); + METER_INIT(mvt->emergencyContingencies, "emergency contingencies", + (void *)mvt); + METER_INIT(mvt->fragLimitContingencies, + "fragmentation limit contingencies", (void *)mvt); + METER_INIT(mvt->contingencySearches, "contingency searches", (void *)mvt); + METER_INIT(mvt->contingencyHardSearches, + "contingency hard searches", (void *)mvt); + METER_INIT(mvt->splinters, "splinters", (void *)mvt); + METER_INIT(mvt->splintersUsed, "splinters used", (void *)mvt); + METER_INIT(mvt->splintersDropped, "splinters dropped", (void *)mvt); + METER_INIT(mvt->sawdust, "sawdust", (void *)mvt); + METER_INIT(mvt->exceptions, "exceptions", (void *)mvt); + METER_INIT(mvt->exceptionSplinters, "exception splinters", (void *)mvt); + METER_INIT(mvt->exceptionReturns, "exception returns", (void *)mvt); + + SetClassOfPoly(pool, CLASS(MVTPool)); + mvt->sig = MVTSig; + AVERC(MVT, mvt); + + 
EVENT6(PoolInitMVT, pool, minSize, meanSize, maxSize, + reserveDepth, fragLimit); + + return ResOK; + +failABQInit: + LandFinish(MVTFreeLand(mvt)); +failFreeLandInit: + LandFinish(MVTFreeSecondary(mvt)); +failFreeSecondaryInit: + LandFinish(MVTFreePrimary(mvt)); +failFreePrimaryInit: + NextMethod(Inst, MVTPool, finish)(MustBeA(Inst, pool)); +failNextInit: + AVER(res != ResOK); + return res; +} + + +/* MVTCheck -- validate an MVT Pool */ + +ATTRIBUTE_UNUSED +static Bool MVTCheck(MVT mvt) +{ + CHECKS(MVT, mvt); + CHECKC(MVTPool, mvt); + CHECKD(Pool, MVTPool(mvt)); + CHECKC(MVTPool, mvt); + CHECKD(CBS, &mvt->cbsStruct); + CHECKD(ABQ, &mvt->abqStruct); + CHECKD(Freelist, &mvt->flStruct); + CHECKD(Failover, &mvt->foStruct); + CHECKL(mvt->reuseSize >= 2 * mvt->fillSize); + CHECKL(mvt->fillSize >= mvt->maxSize); + CHECKL(mvt->maxSize >= mvt->meanSize); + CHECKL(mvt->meanSize >= mvt->minSize); + CHECKL(mvt->minSize > 0); + CHECKL(mvt->fragLimit <= 100); + CHECKL(mvt->availLimit == mvt->size * mvt->fragLimit / 100); + CHECKL(BoolCheck(mvt->abqOverflow)); + CHECKL(BoolCheck(mvt->splinter)); + if (mvt->splinter) { + CHECKL(AddrOffset(mvt->splinterBase, mvt->splinterLimit) >= + mvt->minSize); + CHECKL(mvt->splinterBase < mvt->splinterLimit); + } + CHECKL(mvt->size == mvt->allocated + mvt->available + + mvt->unavailable); + /* --- could check that sum of segment sizes == mvt->size */ + /* --- check meters? 
*/ + + return TRUE; +} + + +/* MVTFinish -- finish an MVT pool + */ +static void MVTFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + MVT mvt = MustBeA(MVTPool, pool); + Arena arena = PoolArena(pool); + Ring ring; + Ring node, nextNode; + + AVERT(MVT, mvt); + + mvt->sig = SigInvalid; + + /* Free the segments in the pool */ + ring = PoolSegRing(pool); + RING_FOR(node, ring, nextNode) { + /* We mustn't call MVTSegFree, because we don't know whether or not + * there was any fragmented (unavailable) space in this segment, + * and so we can't keep the accounting correct. */ + SegFree(SegOfPoolRing(node)); + } + + /* Finish the ABQ, Failover, Freelist and CBS structures */ + ABQFinish(arena, MVTABQ(mvt)); + LandFinish(MVTFreeLand(mvt)); + LandFinish(MVTFreeSecondary(mvt)); + LandFinish(MVTFreePrimary(mvt)); + + NextMethod(Inst, MVTPool, finish)(inst); +} + + +/* SURELY(expr) -- evaluate expr and AVER that the result is true */ + +#define SURELY(expr) \ + BEGIN \ + Bool _b = (expr); \ + AVER(_b); \ + UNUSED(_b); \ + END + + +/* MUST(expr) -- evaluate expr and AVER that the result is ResOK */ + +#define MUST(expr) \ + BEGIN \ + Res _res = (expr); \ + AVER(_res == ResOK); \ + UNUSED(_res); \ + END + + +/* MVTNoteFill -- record that a buffer fill has occurred */ + +static void MVTNoteFill(MVT mvt, Addr base, Addr limit, Size minSize) +{ + mvt->available -= AddrOffset(base, limit); + mvt->allocated += AddrOffset(base, limit); + AVER(mvt->size == mvt->allocated + mvt->available + mvt->unavailable); + METER_ACC(mvt->poolUtilization, mvt->allocated * 100 / mvt->size); + METER_ACC(mvt->poolUnavailable, mvt->unavailable); + METER_ACC(mvt->poolAvailable, mvt->available); + METER_ACC(mvt->poolAllocated, mvt->allocated); + METER_ACC(mvt->poolSize, mvt->size); + METER_ACC(mvt->bufferFills, AddrOffset(base, limit)); + AVER(AddrOffset(base, limit) >= minSize); +} + + +/* MVTOversizeFill -- try to fill a request for a large object + * + * When a request exceeds 
mvt->fillSize, we allocate it on a segment of + * its own. + */ +static Res MVTOversizeFill(Addr *baseReturn, + Addr *limitReturn, + MVT mvt, + Size minSize) +{ + Res res; + Seg seg; + Addr base, limit; + Size alignedSize; + + alignedSize = SizeArenaGrains(minSize, PoolArena(MVTPool(mvt))); + + res = MVTSegAlloc(&seg, mvt, alignedSize); + if (res != ResOK) + return res; + + /* Just exactly fill the buffer so that only this allocation comes from + the segment. */ + base = SegBase(seg); + limit = AddrAdd(SegBase(seg), minSize); + + /* The rest of the segment was lost to fragmentation, so transfer it + * to the unavailable total. (We deliberately lose these fragments + * now so as to avoid the more severe fragmentation that we believe + * would result if we used these for allocation. See + * and + * .) + */ + mvt->available -= alignedSize - minSize; + mvt->unavailable += alignedSize - minSize; + + METER_ACC(mvt->exceptions, minSize); + METER_ACC(mvt->exceptionSplinters, alignedSize - minSize); + + MVTNoteFill(mvt, base, limit, minSize); + *baseReturn = base; + *limitReturn = limit; + return ResOK; +} + + +/* MVTSplinterFill -- try to fill a request from the splinter */ + +static Bool MVTSplinterFill(Addr *baseReturn, Addr *limitReturn, + MVT mvt, Size minSize) +{ + Addr base, limit; + + if (!mvt->splinter || + AddrOffset(mvt->splinterBase, mvt->splinterLimit) < minSize) + return FALSE; + + base = mvt->splinterBase; + limit = mvt->splinterLimit; + mvt->splinter = FALSE; + + METER_ACC(mvt->splintersUsed, AddrOffset(base, limit)); + + MVTNoteFill(mvt, base, limit, minSize); + *baseReturn = base; + *limitReturn = limit; + return TRUE; +} + + +/* MVTOneSegOnly -- restrict a buffer fill to a single segment + * + * After a block has been found, this is applied so that the block + * used to fill the buffer does not span multiple segments. 
(This + * makes it more likely that when we free the objects that were + * allocated from the block, that this will free the whole segment, + * and so we'll be able to return the segment to the arena. A block + * that spanned two segments would keep both segments allocated, + * possibly unnecessarily.) + */ +static void MVTOneSegOnly(Addr *baseIO, Addr *limitIO, MVT mvt, Size minSize) +{ + Addr base, limit, segLimit; + Seg seg = NULL; /* suppress "may be used uninitialized" */ + Arena arena; + + base = *baseIO; + limit = *limitIO; + + arena = PoolArena(MVTPool(mvt)); + + SURELY(SegOfAddr(&seg, arena, base)); + segLimit = SegLimit(seg); + if (limit <= segLimit) { + /* perfect fit */ + METER_ACC(mvt->perfectFits, AddrOffset(base, limit)); + } else if (AddrOffset(base, segLimit) >= minSize) { + /* fit in 1st segment */ + limit = segLimit; + METER_ACC(mvt->firstFits, AddrOffset(base, limit)); + } else { + /* fit in 2nd segment */ + base = segLimit; + SURELY(SegOfAddr(&seg, arena, base)); + segLimit = SegLimit(seg); + if (limit > segLimit) + limit = segLimit; + METER_ACC(mvt->secondFits, AddrOffset(base, limit)); + } + + *baseIO = base; + *limitIO = limit; +} + + +/* MVTABQFill -- try to fill a request from the available block queue */ + +static Bool MVTABQFill(Addr *baseReturn, Addr *limitReturn, + MVT mvt, Size minSize) +{ + Addr base, limit; + RangeStruct range; + Res res; + + MVTRefillABQIfEmpty(mvt, minSize); + + if (!ABQPeek(MVTABQ(mvt), &range)) + return FALSE; + /* Check that the range was stored and retrieved correctly by the ABQ. 
*/ + AVERT(Range, &range); + + base = RangeBase(&range); + limit = RangeLimit(&range); + MVTOneSegOnly(&base, &limit, mvt, minSize); + + METER_ACC(mvt->finds, minSize); + + res = MVTDelete(mvt, base, limit); + if (res != ResOK) { + return FALSE; + } + + MVTNoteFill(mvt, base, limit, minSize); + *baseReturn = base; + *limitReturn = limit; + return TRUE; +} + + +/* MVTContingencyFill -- try to fill a request from the free lists */ +static Bool MVTContingencyFill(Addr *baseReturn, Addr *limitReturn, + MVT mvt, Size minSize) +{ + Res res; + Addr base, limit; + + if (!MVTContingencySearch(&base, &limit, mvt, minSize)) + return FALSE; + + MVTOneSegOnly(&base, &limit, mvt, minSize); + + res = MVTDelete(mvt, base, limit); + if (res != ResOK) + return FALSE; + + MVTNoteFill(mvt, base, limit, minSize); + *baseReturn = base; + *limitReturn = limit; + return TRUE; +} + + +/* MVTSegFill -- try to fill a request with a new segment */ + +static Res MVTSegFill(Addr *baseReturn, Addr *limitReturn, + MVT mvt, Size fillSize, + Size minSize) +{ + Res res; + Seg seg; + Addr base, limit; + + res = MVTSegAlloc(&seg, mvt, fillSize); + if (res != ResOK) + return res; + + base = SegBase(seg); + limit = SegLimit(seg); + + MVTNoteFill(mvt, base, limit, minSize); + *baseReturn = base; + *limitReturn = limit; + return ResOK; +} + + +/* MVTBufferFill -- refill an allocation buffer from an MVT pool + * + * + */ +static Res MVTBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size minSize) +{ + MVT mvt; + Res res; + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Pool, pool); + mvt = PoolMVT(pool); + AVERT(MVT, mvt); + AVERT(Buffer, buffer); + AVER(BufferIsReset(buffer)); + AVER(minSize > 0); + AVER(SizeIsAligned(minSize, pool->alignment)); + + /* Allocate oversize blocks exactly, directly from the arena. + */ + if (minSize > mvt->fillSize) { + return MVTOversizeFill(baseReturn, limitReturn, mvt, + minSize); + } + + /* Use any splinter, if available. 
+ */ + if (MVTSplinterFill(baseReturn, limitReturn, mvt, minSize)) + return ResOK; + + /* Attempt to retrieve a free block from the ABQ. */ + if (MVTABQFill(baseReturn, limitReturn, mvt, minSize)) + return ResOK; + + METER_ACC(mvt->underflows, minSize); + + /* If fragmentation is acceptable, attempt to find a free block from + the free lists. */ + if (mvt->available >= mvt->availLimit) { + METER_ACC(mvt->fragLimitContingencies, minSize); + if (MVTContingencyFill(baseReturn, limitReturn, mvt, minSize)) + return ResOK; + } + + /* Attempt to request a block from the arena. + */ + res = MVTSegFill(baseReturn, limitReturn, + mvt, mvt->fillSize, minSize); + if (res == ResOK) + return ResOK; + + /* Things are looking pretty desperate. Try the contingencies again, + disregarding fragmentation limits. */ + if (ResIsAllocFailure(res)) { + METER_ACC(mvt->emergencyContingencies, minSize); + if (MVTContingencyFill(baseReturn, limitReturn, mvt, minSize)) + return ResOK; + } + + METER_ACC(mvt->failures, minSize); + AVER(res != ResOK); + return res; +} + + +/* MVTDeleteOverlapping -- ABQIterate callback used by MVTInsert and + * MVTDelete. It receives a Range in its closure argument, and sets + * *deleteReturn to TRUE for ranges in the ABQ that overlap with it, + * and FALSE for ranges that do not. + */ +static Bool MVTDeleteOverlapping(Bool *deleteReturn, void *element, + void *closure) +{ + Range oldRange, newRange; + + AVER(deleteReturn != NULL); + AVER(element != NULL); + AVER(closure != NULL); + + oldRange = element; + newRange = closure; + + *deleteReturn = RangesOverlap(oldRange, newRange); + return TRUE; +} + + +/* MVTReserve -- add a range to the available range queue, and if the + * queue is full, return segments to the arena. Return TRUE if it + * succeeded in adding the range to the queue, FALSE if the queue + * overflowed. 
 */
static Bool MVTReserve(MVT mvt, Range range)
{
  AVERT(MVT, mvt);
  AVERT(Range, range);
  /* Only reuse-sized (i.e. recyclable) blocks belong on the ABQ. */
  AVER(RangeSize(range) >= mvt->reuseSize);

  /* NOTE(review): design cross-reference lost in extraction here --
   * presumably design.mps.poolmvt; confirm against the repository. */
  /* */
  if (!ABQPush(MVTABQ(mvt), range)) {
    Arena arena = PoolArena(MVTPool(mvt));
    RangeStruct oldRange;
    /* We just failed to push, so the ABQ must be full, and so surely
     * the peek will succeed. */
    SURELY(ABQPeek(MVTABQ(mvt), &oldRange));
    AVERT(Range, &oldRange);
    /* Evict the oldest queued range: return its whole segments to the
     * arena, then retry the push of the new range. */
    if (!MVTReturnSegs(mvt, &oldRange, arena))
      goto overflow;
    METER_ACC(mvt->returns, RangeSize(&oldRange));
    if (!ABQPush(MVTABQ(mvt), range))
      goto overflow;
  }

  return TRUE;

overflow:
  /* Record the overflow so MVTRefillABQIfEmpty knows the free lists
   * may hold reuse-sized blocks worth re-queueing later. */
  mvt->abqOverflow = TRUE;
  METER_ACC(mvt->overflows, RangeSize(range));
  return FALSE;
}


/* MVTInsert -- insert an address range into the free lists and update
 * the ABQ accordingly.
 */
static Res MVTInsert(MVT mvt, Addr base, Addr limit)
{
  Res res;
  RangeStruct range, newRange;

  AVERT(MVT, mvt);
  AVER(base < limit);

  /* newRange is the (possibly coalesced) range actually present in
   * the free land after insertion. */
  RangeInit(&range, base, limit);
  res = LandInsert(&newRange, MVTFreeLand(mvt), &range);
  if (res != ResOK)
    return res;

  if (RangeSize(&newRange) >= mvt->reuseSize) {
    /* The new range is big enough that it might have been coalesced
     * with ranges on the ABQ, so ensure that the corresponding ranges
     * are coalesced on the ABQ.
     */
    ABQIterate(MVTABQ(mvt), MVTDeleteOverlapping, &newRange);
    /* Overflow is tolerable here: the range stays in the free lists
     * and MVTRefillABQIfEmpty can recover it later. */
    (void)MVTReserve(mvt, &newRange);
  }

  return ResOK;
}


/* MVTDelete -- delete an address range from the free lists, and
 * update the ABQ accordingly.
 */
static Res MVTDelete(MVT mvt, Addr base, Addr limit)
{
  RangeStruct range, rangeOld, rangeLeft, rangeRight;
  Res res;

  AVERT(MVT, mvt);
  AVER(base < limit);

  /* rangeOld is the containing free block the deletion carved from. */
  RangeInit(&range, base, limit);
  res = LandDelete(&rangeOld, MVTFreeLand(mvt), &range);
  if (res != ResOK)
    return res;
  AVER(RangesNest(&rangeOld, &range));

  /* If the old address range was larger than the reuse size, then it
   * might be on the ABQ, so ensure it is removed.
+ */ + if (RangeSize(&rangeOld) >= mvt->reuseSize) + ABQIterate(MVTABQ(mvt), MVTDeleteOverlapping, &rangeOld); + + /* There might be fragments at the left or the right of the deleted + * range, and either might be big enough to go back on the ABQ. + */ + RangeInit(&rangeLeft, RangeBase(&rangeOld), base); + if (RangeSize(&rangeLeft) >= mvt->reuseSize) + (void)MVTReserve(mvt, &rangeLeft); + + RangeInit(&rangeRight, limit, RangeLimit(&rangeOld)); + if (RangeSize(&rangeRight) >= mvt->reuseSize) + (void)MVTReserve(mvt, &rangeRight); + + return ResOK; +} + + +/* MVTBufferEmpty -- return an unusable portion of a buffer to the MVT + * pool + * + * + */ +static void MVTBufferEmpty(Pool pool, Buffer buffer) +{ + MVT mvt; + Size size; + Res res; + Addr base, limit; + + AVERT(Pool, pool); + mvt = PoolMVT(pool); + AVERT(MVT, mvt); + AVERT(Buffer, buffer); + AVER(BufferIsReady(buffer)); + base = BufferGetInit(buffer); + limit = BufferLimit(buffer); + AVER(base <= limit); + + size = AddrOffset(base, limit); + if (size == 0) + return; + + mvt->available += size; + mvt->allocated -= size; + AVER(mvt->size == mvt->allocated + mvt->available + + mvt->unavailable); + METER_ACC(mvt->poolUtilization, mvt->allocated * 100 / mvt->size); + METER_ACC(mvt->poolUnavailable, mvt->unavailable); + METER_ACC(mvt->poolAvailable, mvt->available); + METER_ACC(mvt->poolAllocated, mvt->allocated); + METER_ACC(mvt->poolSize, mvt->size); + METER_ACC(mvt->bufferEmpties, size); + + /* */ + if (size < mvt->minSize) { + res = MVTInsert(mvt, base, limit); + AVER(res == ResOK); + METER_ACC(mvt->sawdust, size); + return; + } + + METER_ACC(mvt->splinters, size); + /* */ + if (mvt->splinter) { + Size oldSize = AddrOffset(mvt->splinterBase, mvt->splinterLimit); + + /* Old better, drop new */ + if (size < oldSize) { + res = MVTInsert(mvt, base, limit); + AVER(res == ResOK); + METER_ACC(mvt->splintersDropped, size); + return; + } else { + /* New better, drop old */ + res = MVTInsert(mvt, mvt->splinterBase, 
mvt->splinterLimit); + AVER(res == ResOK); + METER_ACC(mvt->splintersDropped, oldSize); + } + } + + mvt->splinter = TRUE; + mvt->splinterBase = base; + mvt->splinterLimit = limit; +} + + +/* MVTFree -- free a block (previously allocated from a buffer) that + * is no longer in use + * + * see + */ +static void MVTFree(Pool pool, Addr base, Size size) +{ + MVT mvt; + Addr limit; + + AVERT(Pool, pool); + mvt = PoolMVT(pool); + AVERT(MVT, mvt); + AVER(base != (Addr)0); + AVER(size > 0); + + /* We know the buffer observes pool->alignment */ + size = SizeAlignUp(size, pool->alignment); + limit = AddrAdd(base, size); + METER_ACC(mvt->poolFrees, size); + mvt->available += size; + mvt->allocated -= size; + AVER(mvt->size == mvt->allocated + mvt->available + mvt->unavailable); + METER_ACC(mvt->poolUtilization, mvt->allocated * 100 / mvt->size); + METER_ACC(mvt->poolUnavailable, mvt->unavailable); + METER_ACC(mvt->poolAvailable, mvt->available); + METER_ACC(mvt->poolAllocated, mvt->allocated); + METER_ACC(mvt->poolSize, mvt->size); + + /* */ + /* Return exceptional blocks directly to arena */ + if (size > mvt->fillSize) { + Seg seg = NULL; /* suppress "may be used uninitialized" */ + SURELY(SegOfAddr(&seg, PoolArena(pool), base)); + AVER(base == SegBase(seg)); + AVER(limit <= SegLimit(seg)); + mvt->available += SegSize(seg) - size; + mvt->unavailable -= SegSize(seg) - size; + AVER(mvt->size == mvt->allocated + mvt->available + + mvt->unavailable); + METER_ACC(mvt->exceptionReturns, SegSize(seg)); + MVTSegFree(mvt, seg); + return; + } + + MUST(MVTInsert(mvt, base, limit)); +} + + +/* MVTTotalSize -- total memory allocated from the arena */ + +static Size MVTTotalSize(Pool pool) +{ + MVT mvt; + + AVERT(Pool, pool); + mvt = PoolMVT(pool); + AVERT(MVT, mvt); + + return mvt->size; +} + + +/* MVTFreeSize -- free memory (unused by client program) */ + +static Size MVTFreeSize(Pool pool) +{ + MVT mvt; + + AVERT(Pool, pool); + mvt = PoolMVT(pool); + AVERT(MVT, mvt); + + return 
mvt->available + mvt->unavailable; +} + + +/* MVTDescribe -- describe an MVT pool */ + +static Res MVTDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + MVT mvt = CouldBeA(MVTPool, pool); + Res res; + + if (!TESTC(MVTPool, mvt)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, MVTPool, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "minSize: $U\n", (WriteFU)mvt->minSize, + "meanSize: $U\n", (WriteFU)mvt->meanSize, + "maxSize: $U\n", (WriteFU)mvt->maxSize, + "fragLimit: $U\n", (WriteFU)mvt->fragLimit, + "reuseSize: $U\n", (WriteFU)mvt->reuseSize, + "fillSize: $U\n", (WriteFU)mvt->fillSize, + "availLimit: $U\n", (WriteFU)mvt->availLimit, + "abqOverflow: $S\n", WriteFYesNo(mvt->abqOverflow), + "splinter: $S\n", WriteFYesNo(mvt->splinter), + "splinterBase: $A\n", (WriteFA)mvt->splinterBase, + "splinterLimit: $A\n", (WriteFU)mvt->splinterLimit, + "size: $U\n", (WriteFU)mvt->size, + "allocated: $U\n", (WriteFU)mvt->allocated, + "available: $U\n", (WriteFU)mvt->available, + "unavailable: $U\n", (WriteFU)mvt->unavailable, + NULL); + if (res != ResOK) + return res; + + res = LandDescribe(MVTFreePrimary(mvt), stream, depth + 2); + if (res != ResOK) + return res; + res = LandDescribe(MVTFreeSecondary(mvt), stream, depth + 2); + if (res != ResOK) + return res; + res = LandDescribe(MVTFreeLand(mvt), stream, depth + 2); + if (res != ResOK) + return res; + res = ABQDescribe(MVTABQ(mvt), (ABQDescribeElement)RangeDescribe, stream, + depth + 2); + if (res != ResOK) + return res; + + METER_WRITE(mvt->segAllocs, stream, depth + 2); + METER_WRITE(mvt->segFrees, stream, depth + 2); + METER_WRITE(mvt->bufferFills, stream, depth + 2); + METER_WRITE(mvt->bufferEmpties, stream, depth + 2); + METER_WRITE(mvt->poolFrees, stream, depth + 2); + METER_WRITE(mvt->poolSize, stream, depth + 2); + METER_WRITE(mvt->poolAllocated, stream, depth + 2); + 
METER_WRITE(mvt->poolAvailable, stream, depth + 2); + METER_WRITE(mvt->poolUnavailable, stream, depth + 2); + METER_WRITE(mvt->poolUtilization, stream, depth + 2); + METER_WRITE(mvt->finds, stream, depth + 2); + METER_WRITE(mvt->overflows, stream, depth + 2); + METER_WRITE(mvt->underflows, stream, depth + 2); + METER_WRITE(mvt->refills, stream, depth + 2); + METER_WRITE(mvt->refillPushes, stream, depth + 2); + METER_WRITE(mvt->returns, stream, depth + 2); + METER_WRITE(mvt->perfectFits, stream, depth + 2); + METER_WRITE(mvt->firstFits, stream, depth + 2); + METER_WRITE(mvt->secondFits, stream, depth + 2); + METER_WRITE(mvt->failures, stream, depth + 2); + METER_WRITE(mvt->emergencyContingencies, stream, depth + 2); + METER_WRITE(mvt->fragLimitContingencies, stream, depth + 2); + METER_WRITE(mvt->contingencySearches, stream, depth + 2); + METER_WRITE(mvt->contingencyHardSearches, stream, depth + 2); + METER_WRITE(mvt->splinters, stream, depth + 2); + METER_WRITE(mvt->splintersUsed, stream, depth + 2); + METER_WRITE(mvt->splintersDropped, stream, depth + 2); + METER_WRITE(mvt->sawdust, stream, depth + 2); + METER_WRITE(mvt->exceptions, stream, depth + 2); + METER_WRITE(mvt->exceptionSplinters, stream, depth + 2); + METER_WRITE(mvt->exceptionReturns, stream, depth + 2); + + return ResOK; +} + + +/* Pool Interface */ + + +/* PoolClassMVT -- the Pool (sub-)Class for an MVT pool */ + +PoolClass PoolClassMVT(void) +{ + return CLASS(MVTPool); +} + + +/* MPS Interface */ + + +/* mps_class_mvt -- the class of an mvt pool */ + +mps_pool_class_t mps_class_mvt(void) +{ + return (mps_pool_class_t)(PoolClassMVT()); +} + + +/* Internal methods */ + + +/* MVTSegAlloc -- encapsulates SegAlloc with associated accounting and + * metering + */ +static Res MVTSegAlloc(Seg *segReturn, MVT mvt, Size size) +{ + Res res = SegAlloc(segReturn, CLASS(Seg), LocusPrefDefault(), size, + MVTPool(mvt), argsNone); + + if (res == ResOK) { + Size segSize = SegSize(*segReturn); + + /* see */ + 
AVER(segSize >= mvt->fillSize); + mvt->size += segSize; + mvt->available += segSize; + mvt->availLimit = mvt->size * mvt->fragLimit / 100; + AVER(mvt->size == mvt->allocated + mvt->available + mvt->unavailable); + METER_ACC(mvt->segAllocs, segSize); + } + return res; +} + + +/* MVTSegFree -- encapsulates SegFree with associated accounting and + * metering + */ +static void MVTSegFree(MVT mvt, Seg seg) +{ + Size size; + + size = SegSize(seg); + AVER(mvt->available >= size); + + mvt->available -= size; + mvt->size -= size; + mvt->availLimit = mvt->size * mvt->fragLimit / 100; + AVER(mvt->size == mvt->allocated + mvt->available + mvt->unavailable); + + SegFree(seg); + METER_ACC(mvt->segFrees, size); +} + + +/* MVTReturnSegs -- return (interior) segments of a range to the arena */ + +static Bool MVTReturnSegs(MVT mvt, Range range, Arena arena) +{ + Addr base, limit; + Bool success = FALSE; + + base = RangeBase(range); + limit = RangeLimit(range); + + while (base < limit) { + Seg seg = NULL; /* suppress "may be used uninitialized" */ + Addr segBase, segLimit; + + SURELY(SegOfAddr(&seg, arena, base)); + segBase = SegBase(seg); + segLimit = SegLimit(seg); + if (base <= segBase && limit >= segLimit) { + MUST(MVTDelete(mvt, segBase, segLimit)); + MVTSegFree(mvt, seg); + success = TRUE; + } + base = segLimit; + } + + return success; +} + + +/* MVTRefillABQIfEmpty -- refill the ABQ from the free lists if it is + * empty. 
+ */ + +static Bool MVTRefillVisitor(Land land, Range range, + void *closure) +{ + MVT mvt; + + AVERT(Land, land); + mvt = closure; + AVERT(MVT, mvt); + + if (RangeSize(range) < mvt->reuseSize) + return TRUE; + + METER_ACC(mvt->refillPushes, ABQDepth(MVTABQ(mvt))); + return MVTReserve(mvt, range); +} + +static void MVTRefillABQIfEmpty(MVT mvt, Size size) +{ + AVERT(MVT, mvt); + AVER(size > 0); + + /* If there have never been any overflows from the ABQ back to the + * free lists, then there cannot be any blocks in the free lists + * that are worth adding to the ABQ. So as an optimization, we don't + * bother to look. + */ + if (mvt->abqOverflow && ABQIsEmpty(MVTABQ(mvt))) { + mvt->abqOverflow = FALSE; + METER_ACC(mvt->refills, size); + /* The iteration stops if the ABQ overflows, so may finish or not. */ + (void)LandIterate(MVTFreeLand(mvt), MVTRefillVisitor, mvt); + } +} + + +/* MVTContingencySearch -- search free lists for a block of a given size */ + +typedef struct MVTContigencyClosureStruct +{ + MVT mvt; + RangeStruct range; + Arena arena; + Size min; + /* meters */ + Count steps; + Count hardSteps; +} MVTContigencyClosureStruct, *MVTContigencyClosure; + +static Bool MVTContingencyVisitor(Land land, Range range, + void *closure) +{ + MVT mvt; + Size size; + Addr base, limit; + MVTContigencyClosure cl; + + AVERT(Land, land); + AVERT(Range, range); + AVER(closure != NULL); + cl = closure; + mvt = cl->mvt; + AVERT(MVT, mvt); + + base = RangeBase(range); + limit = RangeLimit(range); + size = RangeSize(range); + + cl->steps++; + if (size < cl->min) + return TRUE; + + /* verify that min will fit when seg-aligned */ + if (size >= 2 * cl->min) { + RangeInit(&cl->range, base, limit); + return FALSE; + } + + /* do it the hard way */ + cl->hardSteps++; + if (MVTCheckFit(base, limit, cl->min, cl->arena)) { + RangeInit(&cl->range, base, limit); + return FALSE; + } + + /* keep looking */ + return TRUE; +} + +static Bool MVTContingencySearch(Addr *baseReturn, Addr 
*limitReturn, + MVT mvt, Size min) +{ + MVTContigencyClosureStruct cls; + + cls.mvt = mvt; + cls.arena = PoolArena(MVTPool(mvt)); + cls.min = min; + cls.steps = 0; + cls.hardSteps = 0; + + if (LandIterate(MVTFreeLand(mvt), MVTContingencyVisitor, &cls)) + return FALSE; + + AVER(RangeSize(&cls.range) >= min); + METER_ACC(mvt->contingencySearches, cls.steps); + if (cls.hardSteps) { + METER_ACC(mvt->contingencyHardSearches, cls.hardSteps); + } + *baseReturn = RangeBase(&cls.range); + *limitReturn = RangeLimit(&cls.range); + return TRUE; +} + + +/* MVTCheckFit -- verify that segment-aligned block of size min can + * fit in a candidate address range. + */ + +static Bool MVTCheckFit(Addr base, Addr limit, Size min, Arena arena) +{ + Seg seg = NULL; /* suppress "may be used uninitialized" */ + Addr segLimit; + + SURELY(SegOfAddr(&seg, arena, base)); + segLimit = SegLimit(seg); + + if (limit <= segLimit) { + if (AddrOffset(base, limit) >= min) + return TRUE; + } + + if (AddrOffset(base, segLimit) >= min) + return TRUE; + + base = segLimit; + SURELY(SegOfAddr(&seg, arena, base)); + segLimit = SegLimit(seg); + + if (AddrOffset(base, limit < segLimit ? limit : segLimit) >= min) + return TRUE; + + return FALSE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolmv2.h b/mps/code/poolmv2.h new file mode 100644 index 00000000000..e62bb350722 --- /dev/null +++ b/mps/code/poolmv2.h @@ -0,0 +1,48 @@ +/* poolmv2.h: MANUAL VARIABLE-SIZED TEMPORAL POOL + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: The implementation of the new manual-variable pool class + * + * .design: + */ + +#ifndef poolmv2_h +#define poolmv2_h + +#include "mpm.h" + +extern PoolClass PoolClassMVT(void); + +#endif /* poolmv2_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolmvff.c b/mps/code/poolmvff.c new file mode 100644 index 00000000000..7cbd4de96c9 --- /dev/null +++ b/mps/code/poolmvff.c @@ -0,0 +1,787 @@ +/* poolmvff.c: First Fit Manual Variable Pool + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * **** RESTRICTION: This pool may not allocate from the arena control + * pool, since it is used to implement that pool. + * + * .purpose: This is a pool class for manually managed objects of + * variable size where address-ordered first (or last) fit is an + * appropriate policy. + * + * .design: + * + * .critical: In manual-allocation-bound programs using MVFF, many of + * these functions are on the critical paths via mps_alloc (and then + * PoolAlloc, MVFFAlloc) and mps_free (and then PoolFree, MVFFFree). 
+ */ + +#include "cbs.h" +#include "dbgpool.h" +#include "failover.h" +#include "freelist.h" +#include "mpm.h" +#include "mpscmvff.h" +#include "poolmvff.h" +#include "mpscmfs.h" +#include "poolmfs.h" + +SRCID(poolmvff, "$Id$"); + + +/* Note: MVFFStruct is declared in mpmst.h rather than here because it + is the control pool and is inlined in the arena globals. */ + +typedef MVFF MVFFPool; +#define MVFFPoolCheck MVFFCheck +DECLARE_CLASS(Pool, MVFFPool, AbstractBufferPool); +DECLARE_CLASS(Pool, MVFFDebugPool, MVFFPool); + + +#define PoolMVFF(pool) PARENT(MVFFStruct, poolStruct, pool) +#define MVFFTotalLand(mvff) (&(mvff)->totalCBSStruct.landStruct) +#define MVFFFreePrimary(mvff) (&(mvff)->freeCBSStruct.landStruct) +#define MVFFFreeSecondary(mvff) FreelistLand(&(mvff)->flStruct) +#define MVFFFreeLand(mvff) FailoverLand(&(mvff)->foStruct) +#define MVFFLocusPref(mvff) (&(mvff)->locusPrefStruct) +#define MVFFBlockPool(mvff) MFSPool(&(mvff)->cbsBlockPoolStruct) + + +/* MVFFDebug -- MVFFDebug class */ + +typedef struct MVFFDebugStruct { + MVFFStruct mvffStruct; /* MVFF structure */ + PoolDebugMixinStruct debug; /* debug mixin */ +} MVFFDebugStruct; + +typedef MVFFDebugStruct *MVFFDebug; + + +#define MVFF2MVFFDebug(mvff) PARENT(MVFFDebugStruct, mvffStruct, mvff) +#define MVFFDebug2MVFF(mvffd) (&((mvffd)->mvffStruct)) + + +/* MVFFReduce -- return memory to the arena + * + * This is usually called immediately after inserting a range into the + * MVFFFreeLand. (But not in all cases: see MVFFExtend.) + */ +static void MVFFReduce(MVFF mvff) +{ + Arena arena; + Size freeSize, freeLimit, targetFree; + RangeStruct freeRange, oldFreeRange; + Align grainSize; + Land totalLand, freeLand; + + AVERT_CRITICAL(MVFF, mvff); + arena = PoolArena(MVFFPool(mvff)); + + /* Try to return memory when the amount of free memory exceeds a + threshold fraction of the total memory. 
*/ + + totalLand = MVFFTotalLand(mvff); + freeLimit = (Size)((double)LandSize(totalLand) * mvff->spare); + freeLand = MVFFFreeLand(mvff); + freeSize = LandSize(freeLand); + if (freeSize < freeLimit) + return; + + /* NOTE: Memory is returned to the arena in the smallest units + possible (arena grains). There's a possibility that this could + lead to fragmentation in the arena (because allocation is in + multiples of mvff->extendBy). If so, try setting grainSize = + mvff->extendBy here. */ + + grainSize = ArenaGrainSize(arena); + + /* For hysteresis, return only a proportion of the free memory. */ + + targetFree = freeLimit / 2; + + /* Each time around this loop we either break, or we free at least + one grain back to the arena, thus ensuring that eventually the + loop will terminate */ + + /* NOTE: If this code becomes very hot, then the test of whether there's + a large free block in the CBS could be inlined, since it's a property + stored at the root node. */ + + while (freeSize > targetFree + && LandFindLargest(&freeRange, &oldFreeRange, freeLand, + grainSize, FindDeleteNONE)) + { + RangeStruct grainRange, oldRange; + Size size; + Res res; + Addr base, limit; + + AVER(RangesEqual(&freeRange, &oldFreeRange)); + + base = AddrAlignUp(RangeBase(&freeRange), grainSize); + limit = AddrAlignDown(RangeLimit(&freeRange), grainSize); + + /* Give up if this block doesn't contain a whole aligned grain, + even though smaller better-aligned blocks might, because + LandFindLargest won't be able to find those anyway. */ + if (base >= limit) + break; + + size = AddrOffset(base, limit); + + /* Don't return (much) more than we need to. */ + if (size > freeSize - targetFree) + size = SizeAlignUp(freeSize - targetFree, grainSize); + + /* Calculate the range of grains we can return to the arena near the + top end of the free memory (because we're first fit). 
*/ + RangeInit(&grainRange, AddrSub(limit, size), limit); + AVER(!RangeIsEmpty(&grainRange)); + AVER(RangesNest(&freeRange, &grainRange)); + AVER(RangeIsAligned(&grainRange, grainSize)); + + /* Delete the range from the free list before attempting to delete + it from the total allocated memory, so that we don't have + dangling blocks in the free list, even for a moment. If we fail + to delete from the TotalCBS we add back to the free list, which + can't fail. */ + + res = LandDelete(&oldRange, freeLand, &grainRange); + if (res != ResOK) + break; + freeSize -= RangeSize(&grainRange); + AVER(freeSize == LandSize(freeLand)); + + res = LandDelete(&oldRange, totalLand, &grainRange); + if (res != ResOK) { + RangeStruct coalescedRange; + res = LandInsert(&coalescedRange, freeLand, &grainRange); + AVER(res == ResOK); + break; + } + + ArenaFree(RangeBase(&grainRange), RangeSize(&grainRange), MVFFPool(mvff)); + } +} + + +/* MVFFExtend -- allocate a new range from the arena + * + * Allocate a new range from the arena of at least the specified + * size. The specified size should be pool-aligned. Add it to the + * allocated and free lists. + */ +static Res MVFFExtend(Range rangeReturn, MVFF mvff, Size size) +{ + Pool pool; + Arena arena; + Size allocSize; + RangeStruct range, coalescedRange; + Addr base; + Res res; + Land totalLand, freeLand; + + AVERT(MVFF, mvff); + AVER(size > 0); + + pool = MVFFPool(mvff); + arena = PoolArena(pool); + + AVER(SizeIsAligned(size, PoolAlignment(pool))); + + /* Use extendBy unless it's too small */ + /* . 
*/ + if (size <= mvff->extendBy) + allocSize = mvff->extendBy; + else + allocSize = size; + + allocSize = SizeArenaGrains(allocSize, arena); + + res = ArenaAlloc(&base, MVFFLocusPref(mvff), allocSize, pool); + if (res != ResOK) { + /* try again with a range just large enough for object */ + /* see */ + allocSize = SizeArenaGrains(size, arena); + res = ArenaAlloc(&base, MVFFLocusPref(mvff), allocSize, pool); + if (res != ResOK) + return res; + } + + RangeInitSize(&range, base, allocSize); + totalLand = MVFFTotalLand(mvff); + res = LandInsert(&coalescedRange, totalLand, &range); + if (res != ResOK) { + /* Can't record this memory, so return it to the arena and fail. */ + ArenaFree(base, allocSize, pool); + return res; + } + + DebugPoolFreeSplat(pool, RangeBase(&range), RangeLimit(&range)); + freeLand = MVFFFreeLand(mvff); + res = LandInsert(rangeReturn, freeLand, &range); + /* Insertion must succeed because it fails over to a Freelist. */ + AVER(res == ResOK); + + /* Don't call MVFFReduce; that would be silly. */ + + return ResOK; +} + + +/* mvffFindFree -- find a suitable free block or add one + * + * Finds a free block of the given (pool aligned) size, using the + * policy (first fit, last fit, or worst fit) specified by findMethod + * and findDelete. + * + * If there is no suitable free block, try extending the pool. 
+ */ +static Res mvffFindFree(Range rangeReturn, MVFF mvff, Size size, + LandFindMethod findMethod, FindDelete findDelete) +{ + Bool found; + RangeStruct oldRange; + Land land; + + AVER_CRITICAL(rangeReturn != NULL); + AVERT_CRITICAL(MVFF, mvff); + AVER_CRITICAL(size > 0); + AVER_CRITICAL(SizeIsAligned(size, PoolAlignment(MVFFPool(mvff)))); + AVER_CRITICAL(FUNCHECK(findMethod)); + AVERT_CRITICAL(FindDelete, findDelete); + + land = MVFFFreeLand(mvff); + found = (*findMethod)(rangeReturn, &oldRange, land, size, findDelete); + if (!found) { + RangeStruct newRange; + Res res; + res = MVFFExtend(&newRange, mvff, size); + if (res != ResOK) + return res; + found = (*findMethod)(rangeReturn, &oldRange, land, size, findDelete); + + /* We know that the found range must intersect the newly added + * range. But it doesn't necessarily lie entirely within it. */ + AVER_CRITICAL(found); + AVER_CRITICAL(RangesOverlap(rangeReturn, &newRange)); + } + AVER_CRITICAL(found); + + return ResOK; +} + + +/* MVFFAlloc -- Allocate a block */ + +static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size) +{ + Res res; + MVFF mvff; + RangeStruct range; + LandFindMethod findMethod; + FindDelete findDelete; + + AVER_CRITICAL(aReturn != NULL); + AVERT_CRITICAL(Pool, pool); + mvff = PoolMVFF(pool); + AVERT_CRITICAL(MVFF, mvff); + AVER_CRITICAL(size > 0); + + size = SizeAlignUp(size, PoolAlignment(pool)); + findMethod = mvff->firstFit ? LandFindFirst : LandFindLast; + findDelete = mvff->slotHigh ? 
FindDeleteHIGH : FindDeleteLOW; + + res = mvffFindFree(&range, mvff, size, findMethod, findDelete); + if (res != ResOK) + return res; + + AVER_CRITICAL(RangeSize(&range) == size); + *aReturn = RangeBase(&range); + return ResOK; +} + + +/* MVFFFree -- free the given block */ + +static void MVFFFree(Pool pool, Addr old, Size size) +{ + Res res; + RangeStruct range, coalescedRange; + MVFF mvff; + Land freeLand; + + AVERT_CRITICAL(Pool, pool); + mvff = PoolMVFF(pool); + AVERT_CRITICAL(MVFF, mvff); + + AVER_CRITICAL(old != (Addr)0); + AVER_CRITICAL(AddrIsAligned(old, PoolAlignment(pool))); + AVER_CRITICAL(size > 0); + + RangeInitSize(&range, old, SizeAlignUp(size, PoolAlignment(pool))); + freeLand = MVFFFreeLand(mvff); + res = LandInsert(&coalescedRange, freeLand, &range); + /* Insertion must succeed because it fails over to a Freelist. */ + AVER_CRITICAL(res == ResOK); + MVFFReduce(mvff); +} + + +/* MVFFBufferFill -- Fill the buffer + * + * Fill it with the largest block we can find. This is worst-fit + * allocation policy; see . 
+ */ +static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + Res res; + MVFF mvff; + RangeStruct range; + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Pool, pool); + mvff = PoolMVFF(pool); + AVERT(MVFF, mvff); + AVERT(Buffer, buffer); + AVER(size > 0); + AVER(SizeIsAligned(size, PoolAlignment(pool))); + + res = mvffFindFree(&range, mvff, size, LandFindLargest, FindDeleteENTIRE); + if (res != ResOK) + return res; + AVER(RangeSize(&range) >= size); + + *baseReturn = RangeBase(&range); + *limitReturn = RangeLimit(&range); + return ResOK; +} + + +/* MVFFVarargs -- decode obsolete varargs */ + +static void MVFFVarargs(ArgStruct args[MPS_ARGS_MAX - 1], va_list varargs) +{ + args[0].key = MPS_KEY_EXTEND_BY; + args[0].val.size = va_arg(varargs, Size); + args[1].key = MPS_KEY_MEAN_SIZE; + args[1].val.size = va_arg(varargs, Size); + args[2].key = MPS_KEY_ALIGN; + args[2].val.align = va_arg(varargs, Size); /* promoted type */ + args[3].key = MPS_KEY_MVFF_SLOT_HIGH; + args[3].val.b = va_arg(varargs, Bool); + args[4].key = MPS_KEY_MVFF_ARENA_HIGH; + args[4].val.b = va_arg(varargs, Bool); + args[5].key = MPS_KEY_MVFF_FIRST_FIT; + args[5].val.b = va_arg(varargs, Bool); + args[6].key = MPS_KEY_ARGS_END; + AVER(MPS_ARGS_MAX - 1 > 6); + AVERT(ArgList, args); +} + +static void MVFFDebugVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_POOL_DEBUG_OPTIONS; + args[0].val.pool_debug_options = va_arg(varargs, mps_pool_debug_option_s *); + MVFFVarargs(args + 1, varargs); +} + + +/* MVFFInit -- initialize method for MVFF */ + +ARG_DEFINE_KEY(MVFF_SLOT_HIGH, Bool); +ARG_DEFINE_KEY(MVFF_ARENA_HIGH, Bool); +ARG_DEFINE_KEY(MVFF_FIRST_FIT, Bool); + +static Res MVFFInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + Size extendBy = MVFF_EXTEND_BY_DEFAULT; + Size avgSize = MVFF_AVG_SIZE_DEFAULT; + Align align = MVFF_ALIGN_DEFAULT; + Bool slotHigh = MVFF_SLOT_HIGH_DEFAULT; + Bool 
arenaHigh = MVFF_ARENA_HIGH_DEFAULT; + Bool firstFit = MVFF_FIRST_FIT_DEFAULT; + double spare = MVFF_SPARE_DEFAULT; + MVFF mvff; + Res res; + ArgStruct arg; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + AVERC(PoolClass, klass); + + /* .arg: class-specific additional arguments; see */ + /* */ + /* .arg.check: we do the same checks here and in MVFFCheck */ + /* except for arenaHigh, which is stored only in the locusPref. */ + + if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY)) + extendBy = arg.val.size; + + if (ArgPick(&arg, args, MPS_KEY_MEAN_SIZE)) + avgSize = arg.val.size; + + if (ArgPick(&arg, args, MPS_KEY_ALIGN)) + align = arg.val.align; + + if (ArgPick(&arg, args, MPS_KEY_SPARE)) + spare = arg.val.d; + + if (ArgPick(&arg, args, MPS_KEY_MVFF_SLOT_HIGH)) + slotHigh = arg.val.b; + + if (ArgPick(&arg, args, MPS_KEY_MVFF_ARENA_HIGH)) + arenaHigh = arg.val.b; + + if (ArgPick(&arg, args, MPS_KEY_MVFF_FIRST_FIT)) + firstFit = arg.val.b; + + AVER(extendBy > 0); /* .arg.check */ + AVER(avgSize > 0); /* .arg.check */ + AVER(avgSize <= extendBy); /* .arg.check */ + AVER(spare >= 0.0); /* .arg.check */ + AVER(spare <= 1.0); /* .arg.check */ + AVERT(Align, align); + /* This restriction on the alignment is necessary because of the use + of a Freelist to store the free address ranges in low-memory + situations. . 
*/ + AVER(AlignIsAligned(align, FreelistMinimumAlignment)); + AVER(align <= ArenaGrainSize(arena)); + AVERT(Bool, slotHigh); + AVERT(Bool, arenaHigh); + AVERT(Bool, firstFit); + + res = NextMethod(Pool, MVFFPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + mvff = CouldBeA(MVFFPool, pool); + + mvff->extendBy = extendBy; + if (extendBy < ArenaGrainSize(arena)) + mvff->extendBy = ArenaGrainSize(arena); + mvff->avgSize = avgSize; + pool->alignment = align; + pool->alignShift = SizeLog2(pool->alignment); + mvff->slotHigh = slotHigh; + mvff->firstFit = firstFit; + mvff->spare = spare; + + LocusPrefInit(MVFFLocusPref(mvff)); + LocusPrefExpress(MVFFLocusPref(mvff), + arenaHigh ? LocusPrefHIGH : LocusPrefLOW, NULL); + + /* An MFS pool is explicitly initialised for the two CBSs partly to + * share space, but mostly to avoid a call to PoolCreate, so that + * MVFF can be used during arena bootstrap as the control pool. */ + + MPS_ARGS_BEGIN(piArgs) { + MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(CBSFastBlockStruct)); + res = PoolInit(MVFFBlockPool(mvff), arena, PoolClassMFS(), piArgs); + } MPS_ARGS_END(piArgs); + if (res != ResOK) + goto failBlockPoolInit; + + MPS_ARGS_BEGIN(liArgs) { + MPS_ARGS_ADD(liArgs, CBSBlockPool, MVFFBlockPool(mvff)); + res = LandInit(MVFFTotalLand(mvff), CLASS(CBSFast), arena, align, + mvff, liArgs); + } MPS_ARGS_END(liArgs); + if (res != ResOK) + goto failTotalLandInit; + + MPS_ARGS_BEGIN(liArgs) { + MPS_ARGS_ADD(liArgs, CBSBlockPool, MVFFBlockPool(mvff)); + res = LandInit(MVFFFreePrimary(mvff), CLASS(CBSFast), arena, align, + mvff, liArgs); + } MPS_ARGS_END(liArgs); + if (res != ResOK) + goto failFreePrimaryInit; + + res = LandInit(MVFFFreeSecondary(mvff), CLASS(Freelist), arena, align, + mvff, mps_args_none); + if (res != ResOK) + goto failFreeSecondaryInit; + + MPS_ARGS_BEGIN(foArgs) { + MPS_ARGS_ADD(foArgs, FailoverPrimary, MVFFFreePrimary(mvff)); + MPS_ARGS_ADD(foArgs, FailoverSecondary, 
MVFFFreeSecondary(mvff)); + res = LandInit(MVFFFreeLand(mvff), CLASS(Failover), arena, align, + mvff, foArgs); + } MPS_ARGS_END(foArgs); + if (res != ResOK) + goto failFreeLandInit; + + SetClassOfPoly(pool, CLASS(MVFFPool)); + mvff->sig = MVFFSig; + AVERC(MVFFPool, mvff); + + EVENT7(PoolInitMVFF, pool, extendBy, avgSize, align, + BOOLOF(slotHigh), BOOLOF(arenaHigh), BOOLOF(firstFit)); + + return ResOK; + +failFreeLandInit: + LandFinish(MVFFFreeSecondary(mvff)); +failFreeSecondaryInit: + LandFinish(MVFFFreePrimary(mvff)); +failFreePrimaryInit: + LandFinish(MVFFTotalLand(mvff)); +failTotalLandInit: + PoolFinish(MVFFBlockPool(mvff)); +failBlockPoolInit: + NextMethod(Inst, MVFFPool, finish)(MustBeA(Inst, pool)); +failNextInit: + AVER(res != ResOK); + return res; +} + + +/* MVFFFinish -- finish method for MVFF */ + +static Bool mvffFinishVisitor(Bool *deleteReturn, Land land, Range range, + void *closure) +{ + Pool pool; + + AVER(deleteReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + AVER(closure != NULL); + pool = closure; + AVERT(Pool, pool); + + ArenaFree(RangeBase(range), RangeSize(range), pool); + *deleteReturn = TRUE; + return TRUE; +} + +static void MVFFFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + MVFF mvff = MustBeA(MVFFPool, pool); + Bool b; + Land totalLand; + + AVERT(MVFF, mvff); + mvff->sig = SigInvalid; + + totalLand = MVFFTotalLand(mvff); + b = LandIterateAndDelete(totalLand, mvffFinishVisitor, pool); + AVER(b); + AVER(LandSize(totalLand) == 0); + + LandFinish(MVFFFreeLand(mvff)); + LandFinish(MVFFFreeSecondary(mvff)); + LandFinish(MVFFFreePrimary(mvff)); + LandFinish(totalLand); + PoolFinish(MVFFBlockPool(mvff)); + NextMethod(Inst, MVFFPool, finish)(inst); +} + + +/* MVFFDebugMixin - find debug mixin in class MVFFDebug */ + +static PoolDebugMixin MVFFDebugMixin(Pool pool) +{ + MVFF mvff; + + AVERT(Pool, pool); + mvff = PoolMVFF(pool); + AVERT(MVFF, mvff); + /* Can't check MVFFDebug, because this is called during init */ 
+ return &(MVFF2MVFFDebug(mvff)->debug); +} + + +/* MVFFTotalSize -- total memory allocated from the arena */ + +static Size MVFFTotalSize(Pool pool) +{ + MVFF mvff; + Land totalLand; + + AVERT(Pool, pool); + mvff = PoolMVFF(pool); + AVERT(MVFF, mvff); + + totalLand = MVFFTotalLand(mvff); + return LandSize(totalLand); +} + + +/* MVFFFreeSize -- free memory (unused by client program) */ + +static Size MVFFFreeSize(Pool pool) +{ + MVFF mvff; + Land freeLand; + + AVERT(Pool, pool); + mvff = PoolMVFF(pool); + AVERT(MVFF, mvff); + + freeLand = MVFFFreeLand(mvff); + return LandSize(freeLand); +} + + +/* MVFFDescribe -- describe an MVFF pool */ + +static Res MVFFDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + MVFF mvff = CouldBeA(MVFFPool, pool); + Res res; + + if (!TESTC(MVFFPool, mvff)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, MVFFPool, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "extendBy $W\n", (WriteFW)mvff->extendBy, + "avgSize $W\n", (WriteFW)mvff->avgSize, + "firstFit $U\n", (WriteFU)mvff->firstFit, + "slotHigh $U\n", (WriteFU)mvff->slotHigh, + "spare $D\n", (WriteFD)mvff->spare, + NULL); + if (res != ResOK) + return res; + + res = LocusPrefDescribe(MVFFLocusPref(mvff), stream, depth + 2); + if (res != ResOK) + return res; + + /* Don't describe MVFFBlockPool(mvff) otherwise it'll appear twice + * in the output of GlobalDescribe. 
*/ + + res = LandDescribe(MVFFTotalLand(mvff), stream, depth + 2); + if (res != ResOK) + return res; + + res = LandDescribe(MVFFFreePrimary(mvff), stream, depth + 2); + if (res != ResOK) + return res; + + res = LandDescribe(MVFFFreeSecondary(mvff), stream, depth + 2); + if (res != ResOK) + return res; + + return ResOK; +} + + +DEFINE_CLASS(Pool, MVFFPool, klass) +{ + INHERIT_CLASS(klass, MVFFPool, AbstractBufferPool); + klass->instClassStruct.describe = MVFFDescribe; + klass->instClassStruct.finish = MVFFFinish; + klass->size = sizeof(MVFFStruct); + klass->varargs = MVFFVarargs; + klass->init = MVFFInit; + klass->alloc = MVFFAlloc; + klass->free = MVFFFree; + klass->bufferFill = MVFFBufferFill; + klass->totalSize = MVFFTotalSize; + klass->freeSize = MVFFFreeSize; + AVERT(PoolClass, klass); +} + + +PoolClass PoolClassMVFF(void) +{ + return CLASS(MVFFPool); +} + + +/* Pool class MVFFDebug */ + +DEFINE_CLASS(Pool, MVFFDebugPool, klass) +{ + INHERIT_CLASS(klass, MVFFDebugPool, MVFFPool); + PoolClassMixInDebug(klass); + klass->size = sizeof(MVFFDebugStruct); + klass->varargs = MVFFDebugVarargs; + klass->debugMixin = MVFFDebugMixin; + AVERT(PoolClass, klass); +} + + + +/* MPS Interface Extensions. 
*/ + +mps_pool_class_t mps_class_mvff(void) +{ + return (mps_pool_class_t)(CLASS(MVFFPool)); +} + +mps_pool_class_t mps_class_mvff_debug(void) +{ + return (mps_pool_class_t)(CLASS(MVFFDebugPool)); +} + + +/* MVFFCheck -- check the consistency of an MVFF structure */ + +Bool MVFFCheck(MVFF mvff) +{ + CHECKS(MVFF, mvff); + CHECKC(MVFFPool, mvff); + CHECKD(Pool, MVFFPool(mvff)); + CHECKD(LocusPref, MVFFLocusPref(mvff)); + CHECKL(mvff->extendBy >= ArenaGrainSize(PoolArena(MVFFPool(mvff)))); + CHECKL(mvff->avgSize > 0); /* see .arg.check */ + CHECKL(mvff->avgSize <= mvff->extendBy); /* see .arg.check */ + CHECKL(mvff->spare >= 0.0); /* see .arg.check */ + CHECKL(mvff->spare <= 1.0); /* see .arg.check */ + CHECKD(MFS, &mvff->cbsBlockPoolStruct); + CHECKD(CBS, &mvff->totalCBSStruct); + CHECKD(CBS, &mvff->freeCBSStruct); + CHECKD(Freelist, &mvff->flStruct); + CHECKD(Failover, &mvff->foStruct); + CHECKL((LandSize)(MVFFTotalLand(mvff)) >= (LandSize)(MVFFFreeLand(mvff))); + CHECKL(SizeIsAligned((LandSize)(MVFFFreeLand(mvff)), PoolAlignment(MVFFPool(mvff)))); + CHECKL(SizeIsArenaGrains((LandSize)(MVFFTotalLand(mvff)), PoolArena(MVFFPool(mvff)))); + CHECKL(BoolCheck(mvff->slotHigh)); + CHECKL(BoolCheck(mvff->firstFit)); + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolmvff.h b/mps/code/poolmvff.h new file mode 100644 index 00000000000..bbc63c2b4c1 --- /dev/null +++ b/mps/code/poolmvff.h @@ -0,0 +1,60 @@ +/* poolmvff.h: First Fit Manual Variable Pool + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .purpose: This is a pool class for manually managed objects of + * variable size where address-ordered first (or last) fit is an + * appropriate policy. + * + * .design: + */ + +#ifndef poolmvff_h +#define poolmvff_h + + +#include "mpmtypes.h" +#include "mpscmvff.h" + +typedef struct MVFFStruct *MVFF; + +extern PoolClass PoolClassMVFF(void); + +extern Bool MVFFCheck(MVFF mvff); + +#define MVFFPool(mvff) (&(mvff)->poolStruct) + + +#endif /* poolmvff_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/pooln.c b/mps/code/pooln.c new file mode 100644 index 00000000000..a4382f65f10 --- /dev/null +++ b/mps/code/pooln.c @@ -0,0 +1,222 @@ +/* pooln.c: NULL POOL CLASS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "pooln.h" +#include "mpm.h" + +SRCID(pooln, "$Id$"); + + +/* PoolNStruct -- the pool structure */ + +typedef struct PoolNStruct { + PoolStruct poolStruct; /* generic pool structure */ + /* and that's it */ +} PoolNStruct; + + +typedef PoolN NPool; +DECLARE_CLASS(Pool, NPool, AbstractPool); + + +/* PoolPoolN -- get the PoolN structure from generic Pool */ + +#define PoolPoolN(pool) PARENT(PoolNStruct, poolStruct, pool) + + +/* PoolPoolN -- get the generic pool structure from a PoolN */ + +#define PoolNPool(pooln) (&(poolN)->poolStruct) + + +/* NInit -- init method for class N */ + +static Res NInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + PoolN poolN; + Res res; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + UNUSED(klass); /* used for debug pools only */ + + res = NextMethod(Pool, NPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + poolN = CouldBeA(NPool, pool); + + /* Initialize pool-specific structures. */ + + SetClassOfPoly(pool, CLASS(NPool)); + AVERC(PoolN, poolN); + + return ResOK; + +failNextInit: + AVER(res != ResOK); + return res; +} + + +/* NFinish -- finish method for class N */ + +static void NFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + PoolN poolN = MustBeA(NPool, pool); + + /* Finish pool-specific structures. 
*/ + UNUSED(poolN); + + NextMethod(Inst, NPool, finish)(inst); +} + + +/* NAlloc -- alloc method for class N */ + +static Res NAlloc(Addr *pReturn, Pool pool, Size size) +{ + PoolN poolN = MustBeA(NPool, pool); + + AVER(pReturn != NULL); + AVER(size > 0); + UNUSED(poolN); + + return ResLIMIT; /* limit of nil blocks exceeded */ +} + + +/* NFree -- free method for class N */ + +static void NFree(Pool pool, Addr old, Size size) +{ + PoolN poolN = MustBeA(NPool, pool); + + AVER(old != (Addr)0); + AVER(size > 0); + UNUSED(poolN); + + NOTREACHED; /* can't allocate, should never free */ +} + + +/* NBufferFill -- buffer fill method for class N */ + +static Res NBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + PoolN poolN = MustBeA(NPool, pool); + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Buffer, buffer); + AVER(BufferIsReset(buffer)); + AVER(size > 0); + UNUSED(poolN); + + NOTREACHED; /* can't create buffers, so shouldn't fill them */ + return ResUNIMPL; +} + + +/* NBufferEmpty -- buffer empty method for class N */ + +static void NBufferEmpty(Pool pool, Buffer buffer) +{ + AVERT(Pool, pool); + AVERT(Buffer, buffer); + AVER(BufferIsReady(buffer)); + NOTREACHED; /* can't create buffers, so they shouldn't trip */ +} + + +/* NDescribe -- describe method for class N */ + +static Res NDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Pool pool = CouldBeA(AbstractPool, inst); + PoolN poolN = CouldBeA(NPool, pool); + Res res; + + res = NextMethod(Inst, NPool, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + /* This is where you'd output some information about pool fields. 
*/ + UNUSED(poolN); + + return ResOK; +} + + +/* NPoolClass -- pool class definition for N */ + +DEFINE_CLASS(Pool, NPool, klass) +{ + INHERIT_CLASS(klass, NPool, AbstractPool); + klass->instClassStruct.describe = NDescribe; + klass->instClassStruct.finish = NFinish; + klass->size = sizeof(PoolNStruct); + klass->attr |= AttrGC; + klass->init = NInit; + klass->alloc = NAlloc; + klass->free = NFree; + klass->bufferFill = NBufferFill; + klass->bufferEmpty = NBufferEmpty; + AVERT(PoolClass, klass); +} + + +/* PoolClassN -- returns the PoolClass for the null pool class */ + +PoolClass PoolClassN(void) +{ + return CLASS(NPool); +} + + +/* PoolNCheck -- check a pool of class N */ + +Bool PoolNCheck(PoolN poolN) +{ + CHECKL(poolN != NULL); + CHECKD(Pool, PoolNPool(poolN)); + CHECKC(NPool, poolN); + UNUSED(poolN); /* */ + + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/pooln.h b/mps/code/pooln.h new file mode 100644 index 00000000000..bd1df144c2d --- /dev/null +++ b/mps/code/pooln.h @@ -0,0 +1,70 @@ +/* pooln.h: NULL POOL + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: The null pool class is here for pedagogical purposes. It + * is a skeleton of a pool class. The class exhibits all the generic + * pool functions; none of them have non-trivial implementations. + * + * .create: The generic create method for this class takes no extra + * parameters. + */ + + +#ifndef pooln_h +#define pooln_h + +#include "mpmtypes.h" + + +/* PoolN -- instance type */ + +typedef struct PoolNStruct *PoolN; + + +/* PoolClassN -- returns the PoolClass for the null pool class */ + +extern PoolClass PoolClassN(void); + + +/* PoolNCheck -- check a pool of class N + * + * Validates a PoolN object. This function conforms to the validation + * protocol defined in . + */ + +extern Bool PoolNCheck(PoolN poolN); + + +#endif /* pooln_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolncv.c b/mps/code/poolncv.c new file mode 100644 index 00000000000..d0eb43b468b --- /dev/null +++ b/mps/code/poolncv.c @@ -0,0 +1,76 @@ +/* poolncv.c: NULL POOL COVERAGE TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "mpm.h" +#include "mpsavm.h" +#include "mpslib.h" +#include "pooln.h" +#include "testlib.h" + +#include <stdio.h> /* printf */ + + +static void testit(ArenaClass klass, ArgList args) +{ + Arena arena; + Pool pool; + Res res; + Addr p; + + die(ArenaCreate(&arena, klass, args), "ArenaCreate"); + + die(PoolCreate(&pool, arena, PoolClassN(), argsNone), "PoolNCreate"); + res = PoolAlloc(&p, pool, 1); + if (res == ResOK) { + error("Error: Unexpectedly succeeded in " + "allocating block from PoolN\n"); + } + PoolDescribe(pool, mps_lib_get_stdout(), 0); + PoolDestroy(pool); + ArenaDestroy(arena); +} + + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 600000); + testit((ArenaClass)mps_arena_class_vm(), args); + } MPS_ARGS_END(args); + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/poolsnc.c b/mps/code/poolsnc.c new file mode 100644 index 00000000000..6b9d87f21e8 --- /dev/null +++ b/mps/code/poolsnc.c @@ -0,0 +1,752 @@ +/* poolsnc.c: STACK NO CHECKING POOL CLASS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * DESIGN + * + * .design: + * + * LIGHTWEIGHT FRAMES + * + * .lw-frame-state: The pool uses lightweight frames as its only + * type of allocation frame. The lightweight frame state is set to + * Valid whenever a buffer has a segment and Disabled otherwise. + * . + * + * .lw-frame-null: The frame marker NULL is used as a special value + * to indicate bottom of stack. + */ + +#include "mpscsnc.h" +#include "mpm.h" + +SRCID(poolsnc, "$Id$"); + + +/* SNCStruct -- structure for an SNC pool + * + * . 
+ */ + +#define SNCSig ((Sig)0x519b754c) /* SIGPooLSNC */ + +typedef struct SNCStruct { + PoolStruct poolStruct; + Seg freeSegs; + Sig sig; /* design.mps.sig.field.end.outer */ +} SNCStruct, *SNC; + +#define PoolSNC(pool) PARENT(SNCStruct, poolStruct, (pool)) +#define SNCPool(snc) (&(snc)->poolStruct) + + +/* Forward declarations */ + +typedef SNC SNCPool; +#define SNCPoolCheck SNCCheck +DECLARE_CLASS(Pool, SNCPool, AbstractSegBufPool); + +DECLARE_CLASS(Seg, SNCSeg, MutatorSeg); +DECLARE_CLASS(Buffer, SNCBuf, RankBuf); +static Bool SNCCheck(SNC snc); +static void sncPopPartialSegChain(SNC snc, Buffer buf, Seg upTo); +static void sncSegBufferEmpty(Seg seg, Buffer buffer); +static Res sncSegScan(Bool *totalReturn, Seg seg, ScanState ss); +static void sncSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s); + + +/* Management of segment chains + * + * Each buffer has an associated segment chain in stack order + * (top of stack first). We subclass the buffer to maintain the + * head of the chain. Segments are chained using the SegP field. + */ + + + +/* SNCBufStruct -- SNC Buffer subclass + * + * This subclass of RankBuf holds a segment chain. 
+ */ + +#define SNCBufSig ((Sig)0x51954CBF) /* SIGnature SNC BuFfer */ + +typedef struct SNCBufStruct *SNCBuf; + +typedef struct SNCBufStruct { + SegBufStruct segBufStruct; /* superclass fields must come first */ + Seg topseg; /* The segment chain head -- may be NULL */ + Sig sig; /* design.mps.sig.field.end.outer */ +} SNCBufStruct; + + +/* SNCBufCheck -- check consistency of an SNCBuf */ + +ATTRIBUTE_UNUSED +static Bool SNCBufCheck(SNCBuf sncbuf) +{ + SegBuf segbuf = MustBeA(SegBuf, sncbuf); + CHECKS(SNCBuf, sncbuf); + CHECKD(SegBuf, segbuf); + if (sncbuf->topseg != NULL) { + CHECKD(Seg, sncbuf->topseg); + } + return TRUE; +} + + +/* sncBufferTopSeg -- return the head of segment chain from an SNCBuf */ + +static Seg sncBufferTopSeg(Buffer buffer) +{ + SNCBuf sncbuf = MustBeA(SNCBuf, buffer); + return sncbuf->topseg; +} + + +/* sncBufferSetTopSeg -- set the head of segment chain from an SNCBuf */ + +static void sncBufferSetTopSeg(Buffer buffer, Seg seg) +{ + SNCBuf sncbuf = MustBeA(SNCBuf, buffer); + if (NULL != seg) + AVERT(Seg, seg); + sncbuf->topseg = seg; +} + + +/* SNCBufInit -- Initialize an SNCBuf */ + +static Res SNCBufInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args) +{ + SNCBuf sncbuf; + Res res; + + /* call next method */ + res = NextMethod(Buffer, SNCBuf, init)(buffer, pool, isMutator, args); + if (res != ResOK) + return res; + sncbuf = CouldBeA(SNCBuf, buffer); + + sncbuf->topseg = NULL; + + SetClassOfPoly(buffer, CLASS(SNCBuf)); + sncbuf->sig = SNCBufSig; + AVERC(SNCBuf, sncbuf); + + return ResOK; +} + + +/* SNCBufFinish -- Finish an SNCBuf */ + +static void SNCBufFinish(Inst inst) +{ + Buffer buffer = MustBeA(Buffer, inst); + SNCBuf sncbuf = MustBeA(SNCBuf, buffer); + SNC snc = MustBeA(SNCPool, BufferPool(buffer)); + + /* Put any segments which haven't been popped onto the free list */ + sncPopPartialSegChain(snc, buffer, NULL); + + sncbuf->sig = SigInvalid; + + NextMethod(Inst, SNCBuf, finish)(inst); +} + + +/* SNCBufClass -- The class 
definition */ + +DEFINE_CLASS(Buffer, SNCBuf, klass) +{ + INHERIT_CLASS(klass, SNCBuf, RankBuf); + klass->instClassStruct.finish = SNCBufFinish; + klass->size = sizeof(SNCBufStruct); + klass->init = SNCBufInit; + AVERT(BufferClass, klass); +} + + + +/* SNCSegStruct -- SNC segment subclass + * + * This subclass of MutatorSeg links segments in chains. + */ + +#define SNCSegSig ((Sig)0x51954C59) /* SIGSNCSeG */ + +typedef struct SNCSegStruct *SNCSeg; + +typedef struct SNCSegStruct { + GCSegStruct gcSegStruct; /* superclass fields must come first */ + SNCSeg next; /* Next segment in chain, or NULL */ + Sig sig; /* design.mps.sig.field.end.outer */ +} SNCSegStruct; + +#define SegSNCSeg(seg) ((SNCSeg)(seg)) +#define SNCSegSeg(sncseg) ((Seg)(sncseg)) + +#define sncSegNext(seg) RVALUE(SNCSegSeg(SegSNCSeg(seg)->next)) +#define sncSegSetNext(seg, nextseg) \ + ((void)(SegSNCSeg(seg)->next = SegSNCSeg(nextseg))) + +ATTRIBUTE_UNUSED +static Bool SNCSegCheck(SNCSeg sncseg) +{ + CHECKS(SNCSeg, sncseg); + CHECKD(GCSeg, &sncseg->gcSegStruct); + if (NULL != sncseg->next) { + CHECKS(SNCSeg, sncseg->next); + } + return TRUE; +} + + +/* sncSegInit -- Init method for SNC segments */ + +static Res sncSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + SNCSeg sncseg; + Res res; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, SNCSeg, init)(seg, pool, base, size, args); + if (res != ResOK) + return res; + sncseg = CouldBeA(SNCSeg, seg); + + AVERT(Pool, pool); + /* no useful checks for base and size */ + + sncseg->next = NULL; + + SetClassOfPoly(seg, CLASS(SNCSeg)); + sncseg->sig = SNCSegSig; + AVERC(SNCSeg, sncseg); + + return ResOK; +} + + +/* sncSegFinish -- finish an SNC segment */ + +static void sncSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + SNCSeg sncseg = MustBeA(SNCSeg, seg); + + sncseg->sig = SigInvalid; + + /* finish the superclass fields last */ + NextMethod(Inst, SNCSeg, finish)(inst); +} + + +/* 
SNCSegClass -- Class definition for SNC segments */ + +DEFINE_CLASS(Seg, SNCSeg, klass) +{ + INHERIT_CLASS(klass, SNCSeg, MutatorSeg); + SegClassMixInNoSplitMerge(klass); /* no support for this (yet) */ + klass->instClassStruct.finish = sncSegFinish; + klass->size = sizeof(SNCSegStruct); + klass->init = sncSegInit; + klass->bufferEmpty = sncSegBufferEmpty; + klass->scan = sncSegScan; + klass->walk = sncSegWalk; + AVERT(SegClass, klass); +} + + +/* sncRecordAllocatedSeg - stores a segment on the buffer chain */ + +static void sncRecordAllocatedSeg(Buffer buffer, Seg seg) +{ + AVERT(Buffer, buffer); + AVERT(Seg, seg); + AVER(sncSegNext(seg) == NULL); + + sncSegSetNext(seg, sncBufferTopSeg(buffer)); + sncBufferSetTopSeg(buffer, seg); +} + + +/* sncRecordFreeSeg - stores a segment on the freelist */ + +static void sncRecordFreeSeg(Arena arena, SNC snc, Seg seg) +{ + AVERT(SNC, snc); + AVERT(Seg, seg); + AVER(sncSegNext(seg) == NULL); + + /* Make sure it's not grey, and set to RankSetEMPTY */ + /* This means it won't be scanned */ + SegSetGrey(seg, TraceSetEMPTY); + SegSetRankAndSummary(seg, RankSetEMPTY, RefSetEMPTY); + + /* Pad the whole segment so we don't try to walk it. 
*/ + ShieldExpose(arena, seg); + (*SNCPool(snc)->format->pad)(SegBase(seg), SegSize(seg)); + ShieldCover(arena, seg); + + sncSegSetNext(seg, snc->freeSegs); + snc->freeSegs = seg; +} + + +/* sncPopPartialSegChain + * + * Pops segments from the buffer chain up to a specified limit + */ + +static void sncPopPartialSegChain(SNC snc, Buffer buf, Seg upTo) +{ + Seg free; + AVERT(SNC, snc); + AVERT(Buffer, buf); + if (upTo != NULL) { + AVERT(Seg, upTo); + } + + /* Iterate the buffer chain of segments freeing all until upTo */ + free = sncBufferTopSeg(buf); + while (free != upTo) { + Seg next; + AVER(free != NULL); + next = sncSegNext(free); + sncSegSetNext(free, NULL); + sncRecordFreeSeg(BufferArena(buf), snc, free); + free = next; + } + /* Make upTo the head of the buffer chain */ + sncBufferSetTopSeg(buf, upTo); +} + + +/* sncFindFreeSeg + * + * attempts to find and detach a large enough segment from the + * freelist. returns TRUE on success. + */ +static Bool sncFindFreeSeg(Seg *segReturn, SNC snc, Size size) +{ + Seg free = snc->freeSegs; + Seg last = NULL; + + AVER(size > 0); + + /* iterate over the free list returning anything big enough */ + while (free != NULL) { + AVERT(Seg, free); + if (SegSize(free) >= size) { + /* This segment is big enough. 
Detach & return it */ + if (last == NULL) { + snc->freeSegs = sncSegNext(free); + } else { + sncSegSetNext(last, sncSegNext(free)); + } + sncSegSetNext(free, NULL); + *segReturn = free; + return TRUE; + } + last = free; + free = sncSegNext(free); + } + + return FALSE; +} + + +/* SNCVarargs -- decode obsolete varargs */ + +static void SNCVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) +{ + args[0].key = MPS_KEY_FORMAT; + args[0].val.format = va_arg(varargs, Format); + args[1].key = MPS_KEY_ARGS_END; + AVERT(ArgList, args); +} + + +/* SNCInit -- initialize an SNC pool */ + +static Res SNCInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + SNC snc; + Res res; + + AVER(pool != NULL); + AVERT(Arena, arena); + AVERT(ArgList, args); + UNUSED(klass); /* used for debug pools only */ + + res = NextMethod(Pool, SNCPool, init)(pool, arena, klass, args); + if (res != ResOK) + goto failNextInit; + snc = CouldBeA(SNCPool, pool); + + /* Ensure a format was supplied in the argument list. 
*/ + AVER(pool->format != NULL); + + pool->alignment = pool->format->alignment; + pool->alignShift = SizeLog2(pool->alignment); + snc->freeSegs = NULL; + + SetClassOfPoly(pool, CLASS(SNCPool)); + snc->sig = SNCSig; + AVERC(SNCPool, snc); + + EVENT2(PoolInitSNC, pool, pool->format); + + return ResOK; + +failNextInit: + AVER(res != ResOK); + return res; +} + + +/* SNCFinish -- finish an SNC pool */ + +static void SNCFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + SNC snc = MustBeA(SNCPool, pool); + Ring ring, node, nextNode; + + AVERT(SNC, snc); + + ring = &pool->segRing; + RING_FOR(node, ring, nextNode) { + Seg seg = SegOfPoolRing(node); + AVERT(Seg, seg); + SegFree(seg); + } + + NextMethod(Inst, SNCPool, finish)(inst); +} + + +static Res SNCBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + SNC snc; + Arena arena; + Res res; + Seg seg; + Size asize; /* aligned size */ + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Pool, pool); + AVERT(Buffer, buffer); + AVER(size > 0); + AVER(BufferIsReset(buffer)); + + snc = PoolSNC(pool); + AVERT(SNC, snc); + + /* Try to find a free segment with enough space already */ + if (sncFindFreeSeg(&seg, snc, size)) { + goto found; + } + + /* No free seg, so create a new one */ + arena = PoolArena(pool); + asize = SizeArenaGrains(size, arena); + res = SegAlloc(&seg, CLASS(SNCSeg), LocusPrefDefault(), + asize, pool, argsNone); + if (res != ResOK) + return res; + +found: + /* */ + if (BufferRankSet(buffer) == RankSetEMPTY) + SegSetRankAndSummary(seg, BufferRankSet(buffer), RefSetEMPTY); + else + SegSetRankAndSummary(seg, BufferRankSet(buffer), RefSetUNIV); + + AVERT(Seg, seg); + /* put the segment on the buffer chain */ + sncRecordAllocatedSeg(buffer, seg); + *baseReturn = SegBase(seg); + *limitReturn = SegLimit(seg); + return ResOK; +} + + +static void sncSegBufferEmpty(Seg seg, Buffer buffer) +{ + Arena arena; + Pool pool; + Addr base, init, limit; + + 
AVERT(Seg, seg); + AVERT(Buffer, buffer); + base = BufferBase(buffer); + init = BufferGetInit(buffer); + limit = BufferLimit(buffer); + AVER(SegBase(seg) <= base); + AVER(base <= init); + AVER(init <= limit); + AVER(limit <= SegLimit(seg)); + + pool = SegPool(seg); + arena = PoolArena(pool); + + /* Pad the unused space at the end of the segment */ + if (init < limit) { + ShieldExpose(arena, seg); + (*pool->format->pad)(init, AddrOffset(init, limit)); + ShieldCover(arena, seg); + } +} + + +static Res sncSegScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + Addr base, limit; + Res res; + + AVER(totalReturn != NULL); + AVERT(ScanState, ss); + AVERT(Seg, seg); + + base = SegBase(seg); + limit = SegBufferScanLimit(seg); + + if (base < limit) { + res = TraceScanFormat(ss, base, limit); + if (res != ResOK) { + *totalReturn = FALSE; + return res; + } + } else { + AVER(base == limit); + } + + *totalReturn = TRUE; + return ResOK; +} + + + +static Res SNCFramePush(AllocFrame *frameReturn, Pool pool, Buffer buf) +{ + AVER(frameReturn != NULL); + AVERT(Pool, pool); + AVERT(Buffer, buf); + + if (BufferIsReset(buf)) { + AVER(sncBufferTopSeg(buf) == NULL); /* The stack must be empty */ + /* Use NULL to indicate an empty stack. .lw-frame-null */ + *frameReturn = NULL; + } else if (BufferGetInit(buf) < SegLimit(BufferSeg(buf))) { + /* Frame pointer is limit of initialized objects in buffer. */ + *frameReturn = (AllocFrame)BufferGetInit(buf); + } else { + /* Can't use the limit of initialized objects as the frame pointer + * because it's not in the segment (see job003882). Instead, refill + * the buffer and put the frame pointer at the beginning. 
*/ + Res res; + Addr base, limit; + BufferDetach(buf, pool); + res = SNCBufferFill(&base, &limit, pool, buf, PoolAlignment(pool)); + if (res != ResOK) + return res; + BufferAttach(buf, base, limit, base, 0); + AVER(BufferGetInit(buf) < SegLimit(BufferSeg(buf))); + *frameReturn = (AllocFrame)BufferGetInit(buf); + } + return ResOK; +} + + +static Res SNCFramePop(Pool pool, Buffer buf, AllocFrame frame) +{ + Addr addr; + SNC snc; + AVERT(Pool, pool); + AVERT(Buffer, buf); + /* frame is an Addr and can't be directly checked */ + snc = PoolSNC(pool); + AVERT(SNC, snc); + + if (frame == NULL) { + /* corresponds to a pop to bottom of stack. .lw-frame-null */ + BufferDetach(buf, pool); + sncPopPartialSegChain(snc, buf, NULL); + + } else { + Arena arena; + Seg seg = NULL; /* suppress "may be used uninitialized" */ + Bool foundSeg; + Buffer segBuf; + + arena = PoolArena(pool); + addr = (Addr)frame; + foundSeg = SegOfAddr(&seg, arena, addr); + AVER(foundSeg); /* */ + AVER(SegPool(seg) == pool); + + if (SegBuffer(&segBuf, seg) && segBuf == buf) { + /* don't need to change the segment - just the alloc pointers */ + AVER(addr <= BufferScanLimit(buf)); /* check direction of pop */ + BufferSetAllocAddr(buf, addr); + } else { + /* need to change segment */ + BufferDetach(buf, pool); + sncPopPartialSegChain(snc, buf, seg); + BufferAttach(buf, SegBase(seg), SegLimit(seg), addr, (Size)0); + } + } + + return ResOK; +} + + +static void sncSegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s) +{ + AVERT(Seg, seg); + AVERT(Format, format); + AVER(FUNCHECK(f)); + /* p and s are arbitrary closures and can't be checked */ + + /* Avoid applying the function to grey objects. */ + /* They may have pointers to old-space. 
*/ + if (SegGrey(seg) == TraceSetEMPTY) { + Addr object = SegBase(seg); + Addr nextObject; + Addr limit; + Pool pool = SegPool(seg); + + limit = SegBufferScanLimit(seg); + + while(object < limit) { + (*f)(object, format, pool, p, s); + nextObject = (*format->skip)(object); + AVER(nextObject > object); + object = nextObject; + } + AVER(object == limit); + } +} + + +/* SNCTotalSize -- total memory allocated from the arena */ + +static Size SNCTotalSize(Pool pool) +{ + SNC snc; + Ring ring, node, nextNode; + Size total = 0; + + AVERT(Pool, pool); + snc = PoolSNC(pool); + AVERT(SNC, snc); + + ring = &pool->segRing; + RING_FOR(node, ring, nextNode) { + Seg seg = SegOfPoolRing(node); + AVERT(Seg, seg); + total += SegSize(seg); + } + + return total; +} + + +/* SNCFreeSize -- free memory (unused by client program) */ + +static Size SNCFreeSize(Pool pool) +{ + SNC snc; + Seg seg; + Size free = 0; + + AVERT(Pool, pool); + snc = PoolSNC(pool); + AVERT(SNC, snc); + + seg = snc->freeSegs; + while (seg != NULL) { + AVERT(Seg, seg); + free += SegSize(seg); + seg = sncSegNext(seg); + } + + return free; +} + + +/* SNCPoolClass -- the class definition */ + +DEFINE_CLASS(Pool, SNCPool, klass) +{ + INHERIT_CLASS(klass, SNCPool, AbstractSegBufPool); + klass->instClassStruct.finish = SNCFinish; + klass->size = sizeof(SNCStruct); + klass->varargs = SNCVarargs; + klass->init = SNCInit; + klass->bufferFill = SNCBufferFill; + klass->framePush = SNCFramePush; + klass->framePop = SNCFramePop; + klass->bufferClass = SNCBufClassGet; + klass->totalSize = SNCTotalSize; + klass->freeSize = SNCFreeSize; + AVERT(PoolClass, klass); +} + + +mps_pool_class_t mps_class_snc(void) +{ + return (mps_pool_class_t)CLASS(SNCPool); +} + + +/* SNCCheck -- Check an SNC pool */ + +ATTRIBUTE_UNUSED +static Bool SNCCheck(SNC snc) +{ + CHECKS(SNC, snc); + CHECKC(SNCPool, snc); + CHECKD(Pool, SNCPool(snc)); + if (snc->freeSegs != NULL) { + CHECKD(Seg, snc->freeSegs); + } + return TRUE; +} + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmc.h b/mps/code/prmc.h new file mode 100644 index 00000000000..986ed350855 --- /dev/null +++ b/mps/code/prmc.h @@ -0,0 +1,66 @@ +/* prmc.h: MUTATOR CONTEXT INTERFACE + * + * $Id$ + * Copyright (c) 2016-2020 Ravenbrook Limited. See end of file for license. + * + * See for the design of the generic interface including + * the contracts for these functions. + * + * This interface has several different implementations, typically one + * per platform, see for the various implementations. 
+ */ + +#ifndef prmc_h +#define prmc_h + +#include "mpmtypes.h" + +#define MutatorContextSig ((Sig)0x519302C0) /* SIGnature MUTator COntext */ + +enum { + MutatorContextFAULT, /* Context of thread stopped by protection fault. */ + MutatorContextTHREAD, /* Context of thread stopped by thread manager. */ + MutatorContextLIMIT +}; + +typedef unsigned MutatorContextVar; + +extern Bool MutatorContextCheck(MutatorContext context); +extern Bool MutatorContextCanStepInstruction(MutatorContext context); +extern Res MutatorContextStepInstruction(MutatorContext context); +extern Addr MutatorContextSP(MutatorContext context); +extern Res MutatorContextScan(ScanState ss, MutatorContext context, + mps_area_scan_t scan, void *closure); + + +#endif /* prmc_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcan.c b/mps/code/prmcan.c new file mode 100644 index 00000000000..bf2a4ee170c --- /dev/null +++ b/mps/code/prmcan.c @@ -0,0 +1,51 @@ +/* prmcan.c: MUTATOR CONTEXT (GENERIC OPERATING SYSTEM) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * In this version, for a generic operating system, none of the + * functions have a useful implementation. + */ + +#include "mpm.h" + +SRCID(prmcan, "$Id$"); + + +Bool MutatorContextCheck(MutatorContext context) +{ + UNUSED(context); + + return TRUE; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcanan.c b/mps/code/prmcanan.c new file mode 100644 index 00000000000..cade39ed009 --- /dev/null +++ b/mps/code/prmcanan.c @@ -0,0 +1,59 @@ +/* prmcanan.c: MUTATOR CONTEXT (GENERIC PROCESSOR ARCHITECTURE) + * + * $Id$ + * Copyright (c) 2016-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * In this version for a generic processor architecture, none of the + * functions have a useful implementation. + */ + +#include "mpm.h" + +SRCID(prmcanan, "$Id$"); + + +Bool MutatorContextCanStepInstruction(MutatorContext context) +{ + UNUSED(context); + + return FALSE; +} + + +Res MutatorContextStepInstruction(MutatorContext context) +{ + UNUSED(context); + + return ResUNIMPL; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcfri3.c b/mps/code/prmcfri3.c new file mode 100644 index 00000000000..4d526e71b2a --- /dev/null +++ b/mps/code/prmcfri3.c @@ -0,0 +1,64 @@ +/* prmcfri3.c: MUTATOR CONTEXT INTEL 386 (FREEBSD) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * SOURCES + * + * .source.i486: Intel486 Microprocessor Family Programmer's + * Reference Manual + * + * + * ASSUMPTIONS + * + * .sp: The stack pointer in the context is ESP. + */ + +#include "prmcix.h" +#include "prmci3.h" + +SRCID(prmcfri3, "$Id$"); + +#if !defined(MPS_OS_FR) || !defined(MPS_ARCH_I3) +#error "prmcfri3.c is specific to MPS_OS_FR and MPS_ARCH_I3" +#endif + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + return (Addr)context->ucontext->uc_mcontext.mc_esp; /* .sp */ +} + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcfri6.c b/mps/code/prmcfri6.c new file mode 100644 index 00000000000..8a14204b089 --- /dev/null +++ b/mps/code/prmcfri6.c @@ -0,0 +1,58 @@ +/* prmcfri6.c: MUTATOR CONTEXT x64 (FREEBSD) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * ASSUMPTIONS + * + * .sp: The stack pointer in the context is RSP. 
+ */ + +#include "prmcix.h" +#include "prmci6.h" + +SRCID(prmcfri6, "$Id$"); + +#if !defined(MPS_OS_FR) || !defined(MPS_ARCH_I6) +#error "prmcfri6.c is specific to MPS_OS_FR and MPS_ARCH_I6" +#endif + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + return (Addr)context->ucontext->uc_mcontext.mc_rsp; /* .sp */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/prmci3.c b/mps/code/prmci3.c new file mode 100644 index 00000000000..73a082b47cd --- /dev/null +++ b/mps/code/prmci3.c @@ -0,0 +1,278 @@ +/* prmci3.c: MUTATOR CONTEXT (INTEL 386) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .design: See for the generic design of the interface + * which is implemented in this module, including the contracts for the + * functions. + * + * .purpose: Implement the mutator context module. . + * + * .requirements: Current requirements are for limited support only, for + * stepping the sorts of instructions that the Dylan compiler might + * generate for table vector access - i.e., a restricted subset of MOV + * addressing modes. This avoids the need to scan entire weak tables at + * an inappropriate rank when a page fault occurs. + * + * + * SOURCES + * + * .source.i486: Intel486 Microprocessor Family Programmer's + * Reference Manual + * + * .source.dylan: Dylan table code implementation. Especially the + * following HOPE units: + * D-lib-dylan!table.dylan (class , slot entry-element) + * D-dfmc-harp-cg!harp-primitives.dylan (method op--repeated-slot-element) + * D-harp-pentium-harp!moves.dylan (pentium-template ld-index) + * + * + * ASSUMPTIONS + * + * .assume.null: It's always safe for MutatorContextCanStepInstruction + * to return FALSE. A null implementation of this module would be + * overly conservative but otherwise correct. + * + * .assume.want: The Dylan implementation is likely to access a + * weak table vector using either MOV r/m32,r32 or MOV r32,r/m32 + * instructions, where the r/m32 operand will be of one of the forms + * disp8[reg], disp8[reg1][reg2], disp8[reg1][reg2*4] (see .source.dylan + * and .source.i486) + * + * .assume.i3: Assume the following about the i386 environment: + * Steppable instructions (.assume.want) use the CS, DS & SS + * segment registers only (see .source.i486 Table 2-3). + * The processor runs in 32 bit mode. 
+ * The CS, DS and SS segment registers all describe identical 32- + * bit flat address spaces. + */ + +#include "mpm.h" +#include "prmci3.h" + +SRCID(prmci3, "$Id$"); + +#if !defined(MPS_ARCH_I3) +#error "prmci3.c is specific to MPS_ARCH_I3" +#endif + + +/* DecodeCB -- Decode an Intel x86 control byte into Hi, Medium & Low fields */ + +static void DecodeCB(unsigned int *hReturn, + unsigned int *mReturn, + unsigned int *lReturn, + Byte op) +{ + /* see .source.i486 Figure 26-2 */ + unsigned int uop = (unsigned int)op; + *lReturn = uop & 7; + uop = uop >> 3; + *mReturn = uop & 7; + uop = uop >> 3; + *hReturn = uop & 3; +} + + +/* DecodeSIB -- Decode a Scale Index Base byte for an Intel x86 instruction */ + +static void DecodeSIB(unsigned int *sReturn, + unsigned int *iReturn, + unsigned int *bReturn, + Byte op) +{ + DecodeCB(sReturn, iReturn, bReturn, op); +} + + +/* DecodeModRM -- Decode a ModR/M byte for an Intel x86 instruction */ + +static void DecodeModRM(unsigned int *modReturn, + unsigned int *rReturn, + unsigned int *mReturn, + Byte op) +{ + DecodeCB(modReturn, rReturn, mReturn, op); +} + + +/* RegValue -- Return the value of a machine register from a context */ + +static Word RegValue(MutatorContext context, unsigned int regnum) +{ + MRef addr; + + addr = Prmci3AddressHoldingReg(context, regnum); + return *addr; +} + + +/* Return a byte element of an instruction vector as a + * Word value, with sign extension + */ +static Word SignedInsElt(Byte insvec[], Count i) +{ + signed char eltb; + + eltb = ((signed char*)insvec)[i]; + return (Word)eltb; +} + + +/* If a MOV instruction is a sufficiently simple example of a + * move between a register and memory (in either direction), + * then find the register, the effective address and the size + * of the instruction. The instruction is considered sufficiently + * simple if it uses a single byte displacement, a base register, + * and either no index or a (possibly scaled) register. 
+ */ +static Bool DecodeSimpleMov(unsigned int *regnumReturn, + MRef *memReturn, + Size *inslenReturn, + MutatorContext context, + Byte insvec[]) +{ + unsigned int mod; + unsigned int r; + unsigned int m; + + DecodeModRM(&mod, &r, &m, insvec[1]); /* .source.i486 Table 26-3 */ + if(1 == mod) { + /* Only know about single byte displacements, .assume.want */ + Word base; + Word idx; /* can't shadow index(3) */ + Word disp; + + if(4 == m) { + /* There is an index. */ + unsigned int s; + unsigned int i; + unsigned int b; + + DecodeSIB(&s, &i, &b, insvec[2]); /* .source.i486 Table 26-3 */ + if(4 == i) { + return FALSE; /* degenerate SIB form - unused by Dylan compiler */ + } + disp = SignedInsElt(insvec, 3); + base = RegValue(context, b); + idx = RegValue(context, i) << s; + *inslenReturn = 4; + } else { + /* MOV with reg1 & [reg2+byte] parameters */ + disp = SignedInsElt(insvec, 2); + base = RegValue(context, m); + idx = 0; + *inslenReturn = 3; + } + *regnumReturn = r; + *memReturn = (MRef)(base + idx + disp); /* .assume.i3 */ + return TRUE; + } + + return FALSE; +} + + +static Bool IsSimpleMov(Size *inslenReturn, + MRef *srcReturn, + MRef *destReturn, + MutatorContext context) +{ + Byte *insvec; + unsigned int regnum; + MRef mem; + MRef faultmem; + + Prmci3DecodeFaultContext(&faultmem, &insvec, context); + + /* .assume.want */ + /* .source.i486 Page 26-210 */ + if((Byte)0x8b == insvec[0]) { + /* This is an instruction of type MOV reg, r/m32 */ + if(DecodeSimpleMov(®num, &mem, inslenReturn, context, insvec)) { + AVER(faultmem == mem); /* Ensure computed address matches exception */ + *srcReturn = mem; + *destReturn = Prmci3AddressHoldingReg(context, regnum); + return TRUE; + } + } else if((Byte)0x89 == insvec[0]) { + /* This is an instruction of type MOV r/m32, reg */ + if(DecodeSimpleMov(®num, &mem, inslenReturn, context, insvec)) { + AVER(faultmem == mem); /* Ensure computed address matches exception */ + *destReturn = mem; + *srcReturn = 
Prmci3AddressHoldingReg(context, regnum); + return TRUE; + } + } + + return FALSE; +} + + +Bool MutatorContextCanStepInstruction(MutatorContext context) +{ + Size inslen; + MRef src; + MRef dest; + + AVERT(MutatorContext, context); + + /* .assume.null */ + /* .assume.want */ + if(IsSimpleMov(&inslen, &src, &dest, context)) { + return TRUE; + } + + return FALSE; +} + + +Res MutatorContextStepInstruction(MutatorContext context) +{ + Size inslen; + MRef src; + MRef dest; + + AVERT(MutatorContext, context); + + /* .assume.null */ + /* .assume.want */ + if(IsSimpleMov(&inslen, &src, &dest, context)) { + *dest = *src; + Prmci3StepOverIns(context, inslen); + return ResOK; + } + + return ResUNIMPL; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmci3.h b/mps/code/prmci3.h new file mode 100644 index 00000000000..b6b2a72fbb9 --- /dev/null +++ b/mps/code/prmci3.h @@ -0,0 +1,53 @@ +/* prmci3.h: MUTATOR CONTEXT (Intel 386) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers. + */ + +#ifndef prmci3_h +#define prmci3_h + + +#include "mpm.h" + +typedef Word *MRef; /* pointer to a machine word */ + +MRef Prmci3AddressHoldingReg(MutatorContext, unsigned int); + +void Prmci3DecodeFaultContext(MRef *, Byte **, MutatorContext); + +void Prmci3StepOverIns(MutatorContext, Size); + +#endif /* prmci3_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmci6.c b/mps/code/prmci6.c new file mode 100644 index 00000000000..31887f7cae0 --- /dev/null +++ b/mps/code/prmci6.c @@ -0,0 +1,119 @@ +/* prmci6.c: MUTATOR CONTEXT (x64) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .design: See for the generic design of the interface + * which is implemented in this module, including the contracts for the + * functions. + * + * .purpose: Implement the mutator context module. . + * + * + * SOURCES + * + * .source.amd64: AMD64 Architecture Programmer's Manual Volume 3: + * General-Purpose and System Instructions + * + * + * + * ASSUMPTIONS + * + * .assume.null: It's always safe for MutatorContextCanStepInstruction + * to return FALSE. A null implementation of this module would be + * overly conservative but otherwise correct. 
+ * + */ + +#include "mpm.h" +#include "prmci6.h" + +SRCID(prmci6, "$Id$"); + +#if !defined(MPS_ARCH_I6) +#error "prmci6.c is specific to MPS_ARCH_I6" +#endif + + +static Bool IsSimpleMov(Size *inslenReturn, + MRef *srcReturn, + MRef *destReturn, + MutatorContext context) +{ + Byte *insvec; + MRef faultmem; + + Prmci6DecodeFaultContext(&faultmem, &insvec, context); + /* Unimplemented */ + UNUSED(inslenReturn); + UNUSED(srcReturn); + UNUSED(destReturn); + + return FALSE; +} + + +Bool MutatorContextCanStepInstruction(MutatorContext context) +{ + Size inslen; + MRef src; + MRef dest; + + AVERT(MutatorContext, context); + + /* .assume.null */ + if(IsSimpleMov(&inslen, &src, &dest, context)) { + return TRUE; + } + + return FALSE; +} + + +Res MutatorContextStepInstruction(MutatorContext context) +{ + Size inslen; + MRef src; + MRef dest; + + AVERT(MutatorContext, context); + + /* .assume.null */ + if(IsSimpleMov(&inslen, &src, &dest, context)) { + *dest = *src; + Prmci6StepOverIns(context, inslen); + return ResOK; + } + + return ResUNIMPL; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmci6.h b/mps/code/prmci6.h new file mode 100644 index 00000000000..3e453a6f5b6 --- /dev/null +++ b/mps/code/prmci6.h @@ -0,0 +1,53 @@ +/* prmci6.h: MUTATOR CONTEXT (x64) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers. + */ + +#ifndef prmci6_h +#define prmci6_h + + +#include "mpm.h" + +typedef Word *MRef; /* pointer to a machine word */ + +MRef Prmci6AddressHoldingReg(MutatorContext, unsigned int); + +void Prmci6DecodeFaultContext(MRef *, Byte **, MutatorContext); + +void Prmci6StepOverIns(MutatorContext, Size); + +#endif /* prmci6_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcix.c b/mps/code/prmcix.c new file mode 100644 index 00000000000..8529405ae9c --- /dev/null +++ b/mps/code/prmcix.c @@ -0,0 +1,113 @@ +/* prmcix.c: MUTATOR CONTEXT (POSIX) + * + * $Id$ + * Copyright (c) 2016-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * ASSUMPTIONS + * + * .context.regroots: The root registers are assumed to be recorded in + * the context at pointer-aligned boundaries. 
+ */ + +#include "mpm.h" + +#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) +#error "prmcix.c is specific to MPS_OS_FR or MPS_OS_LI" +#endif + +#include "prmcix.h" + +SRCID(prmcix, "$Id$"); + + +Bool MutatorContextCheck(MutatorContext context) +{ + CHECKS(MutatorContext, context); + CHECKL(NONNEGATIVE(context->var)); + CHECKL(context->var < MutatorContextLIMIT); + CHECKL((context->var == MutatorContextTHREAD) == (context->info == NULL)); + CHECKL(context->ucontext != NULL); + return TRUE; +} + + +void MutatorContextInitFault(MutatorContext context, siginfo_t *info, + ucontext_t *ucontext) +{ + AVER(context != NULL); + AVER(info != NULL); + AVER(ucontext != NULL); + + context->var = MutatorContextFAULT; + context->info = info; + context->ucontext = ucontext; + context->sig = MutatorContextSig; + + AVERT(MutatorContext, context); +} + + +void MutatorContextInitThread(MutatorContext context, ucontext_t *ucontext) +{ + AVER(context != NULL); + AVER(ucontext != NULL); + + context->var = MutatorContextTHREAD; + context->info = NULL; + context->ucontext = ucontext; + context->sig = MutatorContextSig; + + AVERT(MutatorContext, context); +} + + +Res MutatorContextScan(ScanState ss, MutatorContext context, + mps_area_scan_t scan_area, void *closure) +{ + mcontext_t *mc; + Res res; + + /* This scans the root registers (.context.regroots). It also + unnecessarily scans the rest of the context. The optimisation + to scan only relevant parts would be machine dependent. */ + mc = &context->ucontext->uc_mcontext; + res = TraceScanArea(ss, + (Word *)mc, + (Word *)((char *)mc + sizeof(*mc)), + scan_area, closure); + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + diff --git a/mps/code/prmcix.h b/mps/code/prmcix.h new file mode 100644 index 00000000000..6abf292ca56 --- /dev/null +++ b/mps/code/prmcix.h @@ -0,0 +1,58 @@ +/* prmcix.h: MUTATOR CONTEXT (UNIX) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers. + */ + +#ifndef prmcix_h +#define prmcix_h + +#include "mpm.h" + +#include /* siginfo_t -- see .feature.li in config.h */ +#include /* ucontext_t */ + +typedef struct MutatorContextStruct { + Sig sig; /* design.mps.sig.field */ + MutatorContextVar var; /* Discriminator. */ + siginfo_t *info; /* Signal info, if stopped by protection + * fault; NULL if stopped by thread manager. 
*/ + ucontext_t *ucontext; +} MutatorContextStruct; + +extern void MutatorContextInitFault(MutatorContext context, siginfo_t *info, ucontext_t *ucontext); +extern void MutatorContextInitThread(MutatorContext context, ucontext_t *ucontext); + +#endif /* prmcix_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmclia6.c b/mps/code/prmclia6.c new file mode 100644 index 00000000000..b79bb9db48c --- /dev/null +++ b/mps/code/prmclia6.c @@ -0,0 +1,53 @@ +/* prmclia6.c: MUTATOR CONTEXT ARM64 (LINUX) + * + * $Id$ + * Copyright (c) 2001-2021 Ravenbrook Limited. See end of file for license. 
+ * + * .purpose: Implement the mutator context module. . + */ + +#include "prmcix.h" + +SRCID(prmclia6, "$Id$"); + +#if !defined(MPS_OS_LI) || !defined(MPS_ARCH_A6) +#error "prmclia6.c is specific to MPS_OS_LI and MPS_ARCH_A6" +#endif + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + + return (Addr)context->ucontext->uc_mcontext.sp; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2021 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/prmclii3.c b/mps/code/prmclii3.c new file mode 100644 index 00000000000..e40581d09e1 --- /dev/null +++ b/mps/code/prmclii3.c @@ -0,0 +1,135 @@ +/* prmclii3.c: MUTATOR CONTEXT INTEL 386 (LINUX) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * SOURCES + * + * .source.i486: Intel486 Microprocessor Family Programmer's + * Reference Manual + * + * .source.linux.kernel: Linux kernel source files. + * + * + * ASSUMPTIONS + * + * .sp: The stack pointer in the context is ESP. + * + * .assume.regref: The registers in the context can be modified by + * storing into an MRef pointer. + */ + +#include "prmcix.h" +#include "prmci3.h" + +SRCID(prmclii3, "$Id$"); + +#if !defined(MPS_OS_LI) || !defined(MPS_ARCH_I3) +#error "prmclii3.c is specific to MPS_OS_LI and MPS_ARCH_I3" +#endif + + +/* Prmci3AddressHoldingReg -- return an address of a register in a context */ + +MRef Prmci3AddressHoldingReg(MutatorContext context, unsigned int regnum) +{ + MRef gregs; + + AVERT(MutatorContext, context); + AVER(NONNEGATIVE(regnum)); + AVER(regnum <= 7); + + /* TODO: The current arrangement of the fix operation (taking a Ref *) + forces us to pun these registers (actually `int` on LII3GC). We can + suppress the warning by casting through `void *` and this might make + it safe, but does it really? RB 2012-09-10 */ + AVER(sizeof(void *) == sizeof(*context->ucontext->uc_mcontext.gregs)); + gregs = (void *)context->ucontext->uc_mcontext.gregs; + + /* .source.i486 */ + /* .assume.regref */ + /* The register numbers (REG_EAX etc.) are defined in + but only if _GNU_SOURCE is defined: see .feature.li in + config.h. 
*/ + switch (regnum) { + case 0: return &gregs[REG_EAX]; + case 1: return &gregs[REG_ECX]; + case 2: return &gregs[REG_EDX]; + case 3: return &gregs[REG_EBX]; + case 4: return &gregs[REG_ESP]; + case 5: return &gregs[REG_EBP]; + case 6: return &gregs[REG_ESI]; + case 7: return &gregs[REG_EDI]; + default: + NOTREACHED; + return NULL; /* Avoids compiler warning. */ + } +} + + +/* Prmci3DecodeFaultContext -- decode fault to find faulting address and IP */ + +void Prmci3DecodeFaultContext(MRef *faultmemReturn, + Byte **insvecReturn, + MutatorContext context) +{ + AVER(faultmemReturn != NULL); + AVER(insvecReturn != NULL); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + /* .source.linux.kernel (linux/arch/i386/mm/fault.c). */ + *faultmemReturn = (MRef)context->info->si_addr; + *insvecReturn = (Byte*)context->ucontext->uc_mcontext.gregs[REG_EIP]; +} + + +/* Prmci3StepOverIns -- modify context to step over instruction */ + +void Prmci3StepOverIns(MutatorContext context, Size inslen) +{ + AVERT(MutatorContext, context); + + context->ucontext->uc_mcontext.gregs[REG_EIP] += (unsigned long)inslen; +} + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + + return (Addr)context->ucontext->uc_mcontext.gregs[REG_ESP]; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmclii6.c b/mps/code/prmclii6.c new file mode 100644 index 00000000000..bcde9681091 --- /dev/null +++ b/mps/code/prmclii6.c @@ -0,0 +1,139 @@ +/* prmclii6.c: MUTATOR CONTEXT x64 (LINUX) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * SOURCES + * + * .source.linux.kernel: Linux kernel source files. + * + * + * ASSUMPTIONS + * + * .sp: The stack pointer in the context is RSP. + * + * .assume.regref: The registers in the context can be modified by + * storing into an MRef pointer. 
+ */ + +#include "prmcix.h" +#include "prmci6.h" + +SRCID(prmclii6, "$Id$"); + +#if !defined(MPS_OS_LI) || !defined(MPS_ARCH_I6) +#error "prmclii6.c is specific to MPS_OS_LI and MPS_ARCH_I6" +#endif + + +/* Prmci6AddressHoldingReg -- return an address of a register in a context */ + +MRef Prmci6AddressHoldingReg(MutatorContext context, unsigned int regnum) +{ + MRef gregs; + + AVERT(MutatorContext, context); + AVER(NONNEGATIVE(regnum)); + AVER(regnum <= 15); + + /* TODO: The current arrangement of the fix operation (taking a Ref *) + forces us to pun these registers (actually `int` on LII6GC). We can + suppress the warning by casting through `void *` and this might make + it safe, but does it really? RB 2012-09-10 */ + AVER(sizeof(void *) == sizeof(*context->ucontext->uc_mcontext.gregs)); + gregs = (void *)context->ucontext->uc_mcontext.gregs; + + /* .assume.regref */ + /* The register numbers (REG_RAX etc.) are defined in + but only if _GNU_SOURCE is defined: see .feature.li in + config.h. */ + switch (regnum) { + case 0: return &gregs[REG_RAX]; + case 1: return &gregs[REG_RCX]; + case 2: return &gregs[REG_RDX]; + case 3: return &gregs[REG_RBX]; + case 4: return &gregs[REG_RSP]; + case 5: return &gregs[REG_RBP]; + case 6: return &gregs[REG_RSI]; + case 7: return &gregs[REG_RDI]; + case 8: return &gregs[REG_R8]; + case 9: return &gregs[REG_R9]; + case 10: return &gregs[REG_R10]; + case 11: return &gregs[REG_R11]; + case 12: return &gregs[REG_R12]; + case 13: return &gregs[REG_R13]; + case 14: return &gregs[REG_R14]; + case 15: return &gregs[REG_R15]; + default: + NOTREACHED; + return NULL; /* Avoids compiler warning. 
*/ + } +} + + +/* Prmci6DecodeFaultContext -- decode fault to find faulting address and IP */ + +void Prmci6DecodeFaultContext(MRef *faultmemReturn, + Byte **insvecReturn, + MutatorContext context) +{ + AVER(faultmemReturn != NULL); + AVER(insvecReturn != NULL); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + /* .source.linux.kernel (linux/arch/x86/mm/fault.c). */ + *faultmemReturn = (MRef)context->info->si_addr; + *insvecReturn = (Byte*)context->ucontext->uc_mcontext.gregs[REG_RIP]; +} + + +/* Prmci6StepOverIns -- modify context to step over instruction */ + +void Prmci6StepOverIns(MutatorContext context, Size inslen) +{ + AVERT(MutatorContext, context); + + context->ucontext->uc_mcontext.gregs[REG_RIP] += (Word)inslen; +} + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + + return (Addr)context->ucontext->uc_mcontext.gregs[REG_RSP]; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcw3.c b/mps/code/prmcw3.c new file mode 100644 index 00000000000..f1d49c7a993 --- /dev/null +++ b/mps/code/prmcw3.c @@ -0,0 +1,125 @@ +/* prmcw3.c: MUTATOR CONTEXT FOR WIN32 + * + * $Id$ + * Copyright (c) 2016-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * ASSUMPTIONS + * + * .context.regroots: The root registers are assumed to be recorded in + * the context at word-aligned boundaries. + * + * .context.flags: The ContextFlags field in the CONTEXT structure + * determines what is recorded by GetThreadContext. This must include: + * + * .context.sp: CONTEXT_CONTROL, so that the stack pointer (Esp on + * IA-32; Rsp on x86-64) is recorded. + * + * .context.regroots: CONTEXT_INTEGER, so that the root registers + * (Edi, Esi, Ebx, Edx, Ecx, Eax on IA-32; Rdi, Rsi, Rbx, Rbp, Rdx, + * Rcx, Rax, R8, ..., R15 on x86-64) are recorded. + * + * See the header WinNT.h for documentation of CONTEXT and + * ContextFlags. 
+ */ + +#include "prmcw3.h" + +SRCID(prmcw3, "$Id$"); + +#if !defined(MPS_OS_W3) +#error "prmcw3.c is specific to MPS_OS_W3" +#endif + + +Bool MutatorContextCheck(MutatorContext context) +{ + CHECKS(MutatorContext, context); + CHECKL(NONNEGATIVE(context->var)); + CHECKL(context->var < MutatorContextLIMIT); + return TRUE; +} + + +Res MutatorContextInitThread(MutatorContext context, HANDLE thread) +{ + BOOL success; + + AVER(context != NULL); + + context->var = MutatorContextTHREAD; + /* This dumps the relevant registers into the context */ + /* .context.flags */ + context->the.context.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER; + success = GetThreadContext(thread, &context->the.context); + if (!success) + return ResFAIL; + context->sig = MutatorContextSig; + + AVERT(MutatorContext, context); + return ResOK; +} + + +void MutatorContextInitFault(MutatorContext context, + LPEXCEPTION_POINTERS ep) +{ + AVER(context != NULL); + AVER(ep != NULL); + + context->var = MutatorContextFAULT; + context->the.ep = ep; + context->sig = MutatorContextSig; + + AVERT(MutatorContext, context); +} + + +Res MutatorContextScan(ScanState ss, MutatorContext context, + mps_area_scan_t scan_area, void *closure) +{ + CONTEXT *cx; + Res res; + + AVERT(ScanState, ss); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextTHREAD); + + cx = &context->the.context; + res = TraceScanArea(ss, (Word *)cx, (Word *)((char *)cx + sizeof *cx), + scan_area, closure); /* .context.regroots */ + + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcw3.h b/mps/code/prmcw3.h new file mode 100644 index 00000000000..d744e54531f --- /dev/null +++ b/mps/code/prmcw3.h @@ -0,0 +1,57 @@ +/* prmcw3.h: MUTATOR CONTEXT FOR WIN32 + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers. + */ + +#ifndef prmcw3_h +#define prmcw3_h + +#include "mpm.h" +#include "mpswin.h" + +typedef struct MutatorContextStruct { + Sig sig; /* design.mps.sig.field */ + MutatorContextVar var; /* Union discriminator */ + union { + LPEXCEPTION_POINTERS ep; /* Windows Exception Pointers */ + CONTEXT context; /* Thread context */ + } the; +} MutatorContextStruct; + +extern Res MutatorContextInitThread(MutatorContext context, HANDLE thread); +extern void MutatorContextInitFault(MutatorContext context, LPEXCEPTION_POINTERS ep); + +#endif /* prmcw3_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcw3i3.c b/mps/code/prmcw3i3.c new file mode 100644 index 00000000000..757cc6e3712 --- /dev/null +++ b/mps/code/prmcw3i3.c @@ -0,0 +1,135 @@ +/* prmcw3i3.c: MUTATOR CONTEXT INTEL 386 (Windows) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * PURPOSE + * + * .purpose: Implement the mutator context module. . + * + * SOURCES + * + * .source.i486: Intel486 Microprocessor Family Programmer's + * Reference Manual (book.intel92). + * + * ASSUMPTIONS + * + * .assume.regref: The registers in the context can be modified by + * storing into an MRef pointer. 
+ * + * .assume.sp: The stack pointer is stored in CONTEXT.Esp. This + * requires CONTEXT_CONTROL to be set in ContextFlags when + * GetThreadContext is called . + */ + +#include "prmcw3.h" +#include "prmci3.h" +#include "mpm.h" + +SRCID(prmcw3i3, "$Id$"); + +#if !defined(MPS_OS_W3) || !defined(MPS_ARCH_I3) +#error "prmcw3i3.c is specific to MPS_OS_W3 and MPS_ARCH_I3" +#endif + + +/* Prmci3AddressHoldingReg -- Return an address for a given machine register */ + +MRef Prmci3AddressHoldingReg(MutatorContext context, unsigned int regnum) +{ + PCONTEXT wincont; + + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + AVER(NONNEGATIVE(regnum)); + AVER(regnum <= 7); + + wincont = context->the.ep->ContextRecord; + + switch (regnum) { + case 0: return (MRef)&wincont->Eax; + case 1: return (MRef)&wincont->Ecx; + case 2: return (MRef)&wincont->Edx; + case 3: return (MRef)&wincont->Ebx; + case 4: return (MRef)&wincont->Esp; + case 5: return (MRef)&wincont->Ebp; + case 6: return (MRef)&wincont->Esi; + case 7: return (MRef)&wincont->Edi; + default: + NOTREACHED; + return NULL; /* suppress warning */ + } +} + + +/* Prmci3DecodeFaultContext -- decode fault context */ + +void Prmci3DecodeFaultContext(MRef *faultmemReturn, Byte **insvecReturn, + MutatorContext context) +{ + LPEXCEPTION_RECORD er; + + AVER(faultmemReturn != NULL); + AVER(insvecReturn != NULL); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + er = context->the.ep->ExceptionRecord; + + /* Assert that this is an access violation. The computation of */ + /* faultmemReturn depends on this. 
*/ + AVER(er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION); + + *faultmemReturn = (MRef)er->ExceptionInformation[1]; + *insvecReturn = (Byte*)context->the.ep->ContextRecord->Eip; +} + + +/* Prmci3StepOverIns -- skip an instruction by changing the context */ + +void Prmci3StepOverIns(MutatorContext context, Size inslen) +{ + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + context->the.ep->ContextRecord->Eip += (DWORD)inslen; +} + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextTHREAD); + + return (Addr)context->the.context.Esp; /* .assume.sp */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcw3i6.c b/mps/code/prmcw3i6.c new file mode 100644 index 00000000000..4531bf998d9 --- /dev/null +++ b/mps/code/prmcw3i6.c @@ -0,0 +1,141 @@ +/* prmcw3i6.c: MUTATOR CONTEXT INTEL x64 (Windows) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * PURPOSE + * + * .purpose: Implement the mutator context module. . + * + * SOURCES + * + * + * ASSUMPTIONS + * + * .assume.regref: The registers in the context can be modified by + * storing into an MRef pointer. + * + * .assume.sp: The stack pointer is stored in CONTEXT.Rsp. This + * requires CONTEXT_CONTROL to be set in ContextFlags when + * GetThreadContext is called. 
+ */ + +#include "prmcw3.h" +#include "prmci6.h" +#include "mpm.h" + +SRCID(prmcw3i6, "$Id$"); + +#if !defined(MPS_OS_W3) || !defined(MPS_ARCH_I6) +#error "prmcw3i6.c is specific to MPS_OS_W3 and MPS_ARCH_I6" +#endif + + +/* Prmci6AddressHoldingReg -- Return an address for a given machine register */ + +MRef Prmci6AddressHoldingReg(MutatorContext context, unsigned int regnum) +{ + PCONTEXT wincont; + + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + AVER(NONNEGATIVE(regnum)); + AVER(regnum <= 16); + + wincont = context->the.ep->ContextRecord; + + switch (regnum) { + case 0: return (MRef)&wincont->Rax; + case 1: return (MRef)&wincont->Rcx; + case 2: return (MRef)&wincont->Rdx; + case 3: return (MRef)&wincont->Rbx; + case 4: return (MRef)&wincont->Rsp; + case 5: return (MRef)&wincont->Rbp; + case 6: return (MRef)&wincont->Rsi; + case 7: return (MRef)&wincont->Rdi; + case 8: return (MRef)&wincont->R8; + case 9: return (MRef)&wincont->R9; + case 10: return (MRef)&wincont->R10; + case 11: return (MRef)&wincont->R11; + case 12: return (MRef)&wincont->R12; + case 13: return (MRef)&wincont->R13; + case 14: return (MRef)&wincont->R14; + case 15: return (MRef)&wincont->R15; + default: + NOTREACHED; + return NULL; /* suppress warning */ + } +} + + +/* Prmci6DecodeFaultContext -- decode fault context */ + +void Prmci6DecodeFaultContext(MRef *faultmemReturn, Byte **insvecReturn, + MutatorContext context) +{ + LPEXCEPTION_RECORD er; + + AVER(faultmemReturn != NULL); + AVER(insvecReturn != NULL); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + er = context->the.ep->ExceptionRecord; + + /* Assert that this is an access violation. The computation of */ + /* faultmem depends on this. 
*/ + AVER(er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION); + + *faultmemReturn = (MRef)er->ExceptionInformation[1]; + *insvecReturn = (Byte*)context->the.ep->ContextRecord->Rip; +} + + +/* Prmci6StepOverIns -- skip an instruction by changing the context */ + +void Prmci6StepOverIns(MutatorContext context, Size inslen) +{ + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + context->the.ep->ContextRecord->Rip += (DWORD64)inslen; +} + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextTHREAD); + + return (Addr)context->the.context.Rsp; /* .assume.sp */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcxc.c b/mps/code/prmcxc.c new file mode 100644 index 00000000000..fe58bacb939 --- /dev/null +++ b/mps/code/prmcxc.c @@ -0,0 +1,116 @@ +/* prmcxc.c: MUTATOR CONTEXT (macOS) + * + * $Id$ + * Copyright (c) 2016-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * ASSUMPTIONS + * + * .context.regroots: The root registers are assumed to be recorded in + * the context at pointer-aligned boundaries. 
+ */ + +#include "prmcxc.h" + +SRCID(prmcxc, "$Id$"); + +#if !defined(MPS_OS_XC) +#error "prmcxc.c is specific to MPS_OS_XC" +#endif + + +Bool MutatorContextCheck(MutatorContext context) +{ + CHECKS(MutatorContext, context); + CHECKL(sizeof *context->threadState == sizeof(THREAD_STATE_S)); + CHECKL(NONNEGATIVE(context->var)); + CHECKL(context->var < MutatorContextLIMIT); + CHECKL((context->var == MutatorContextTHREAD) == (context->address == NULL)); + CHECKL(context->threadState != NULL); + return TRUE; +} + + +void MutatorContextInitFault(MutatorContext context, Addr address, + THREAD_STATE_S *threadState) +{ + AVER(context != NULL); + AVER(address != NULL); + AVER(threadState != NULL); + + context->var = MutatorContextFAULT; + context->address = address; + context->threadState = threadState; + context->sig = MutatorContextSig; + + AVERT(MutatorContext, context); +} + + +void MutatorContextInitThread(MutatorContext context, + THREAD_STATE_S *threadState) +{ + AVER(context != NULL); + AVER(threadState != NULL); + + context->var = MutatorContextTHREAD; + context->address = NULL; + context->threadState = threadState; + context->sig = MutatorContextSig; + + AVERT(MutatorContext, context); +} + + +Res MutatorContextScan(ScanState ss, MutatorContext context, + mps_area_scan_t scan_area, void *closure) +{ + THREAD_STATE_S *mc; + Res res; + + AVERT(ScanState, ss); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextTHREAD); + + /* This scans the root registers (.context.regroots). It also + unnecessarily scans the rest of the context. The optimisation + to scan only relevant parts would be architecture dependent. */ + mc = context->threadState; + res = TraceScanArea(ss, + (Word *)mc, + (Word *)((char *)mc + sizeof(*mc)), + scan_area, closure); + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcxc.h b/mps/code/prmcxc.h new file mode 100644 index 00000000000..fa6ea9ec88c --- /dev/null +++ b/mps/code/prmcxc.h @@ -0,0 +1,66 @@ +/* prmcxc.h: MUTATOR CONTEXT (macOS) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MPS developers. 
+ */
+
+#ifndef prmcxc_h
+#define prmcxc_h
+
+#include "mpm.h"
+
+#include <mach/thread_status.h>
+#if defined(MPS_ARCH_A6)
+#include <mach/arm/thread_status.h>
+#elif defined(MPS_ARCH_I3) || defined(MPS_ARCH_I6)
+#include <mach/i386/thread_status.h>
+#else
+#error "Unknown macOS architecture"
+#endif
+
+typedef struct MutatorContextStruct {
+  Sig sig;                      /* design.mps.sig.field */
+  MutatorContextVar var;        /* Discriminator. */
+  Addr address;                 /* Fault address, if stopped by protection
+                                 * fault; NULL if stopped by thread manager. */
+  THREAD_STATE_S *threadState;
+  /* TODO: Consider getting the floats in case the compiler stashes
+     intermediate values in them. Never observed. */
+} MutatorContextStruct;
+
+extern void MutatorContextInitFault(MutatorContext context, Addr address, THREAD_STATE_S *threadState);
+extern void MutatorContextInitThread(MutatorContext context, THREAD_STATE_S *threadState);
+
+#endif /* prmcxc_h */
+
+
+/* C. COPYRIGHT AND LICENSE
+ *
+ * Copyright (C) 2001-2020 Ravenbrook Limited <https://www.ravenbrook.com/>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcxca6.c b/mps/code/prmcxca6.c new file mode 100644 index 00000000000..ce1de415a32 --- /dev/null +++ b/mps/code/prmcxca6.c @@ -0,0 +1,57 @@ +/* prmcxca6.c: MUTATOR CONTEXT ARM64 (macOS) + * + * $Id$ + * Copyright (c) 2001-2021 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * ASSUMPTIONS + * + * .sp: The stack pointer in the context is SP. + */ + +#include "prmcxc.h" + +SRCID(prmcxca6, "$Id$"); + +#if !defined(MPS_OS_XC) || !defined(MPS_ARCH_A6) +#error "prmcxca6.c is specific to MPS_OS_XC and MPS_ARCH_A6" +#endif + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + return (Addr)arm_thread_state64_get_sp(*(context->threadState)); /* .sp */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2021 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcxci3.c b/mps/code/prmcxci3.c new file mode 100644 index 00000000000..ebc038518d9 --- /dev/null +++ b/mps/code/prmcxci3.c @@ -0,0 +1,130 @@ +/* prmcxci3.c: MUTATOR CONTEXT (macOS, IA-32) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * SOURCES + * + * .source.i486: Intel486 Microprocessor Family Programmer's + * Reference Manual + * + * + * ASSUMPTIONS + * + * .assume.regref: The registers in the context can be modified by + * storing into an MRef pointer. 
+ */ + +#include "prmcxc.h" +#include "prmci3.h" + +SRCID(prmcxci3, "$Id$"); + +#if !defined(MPS_OS_XC) || !defined(MPS_ARCH_I3) +#error "prmcxci3.c is specific to MPS_OS_XC and MPS_ARCH_I3" +#endif + + +/* Prmci3AddressHoldingReg -- return an address of a register in a context */ + +MRef Prmci3AddressHoldingReg(MutatorContext context, unsigned int regnum) +{ + THREAD_STATE_S *threadState; + + AVERT(MutatorContext, context); + AVER(NONNEGATIVE(regnum)); + AVER(regnum <= 7); + + threadState = context->threadState; + + /* .source.i486 */ + /* .assume.regref */ + /* The register numbers (REG_EAX etc.) are defined in + but only if _GNU_SOURCE is defined: see .feature.li in + config.h. */ + /* TODO: The current arrangement of the fix operation (taking a Ref *) + forces us to pun these registers (actually `int` on LII3GC). We can + suppress the warning by casting through `void *` and this might make + it safe, but does it really? RB 2012-09-10 */ + switch (regnum) { + case 0: return (void *)&threadState->__eax; + case 1: return (void *)&threadState->__ecx; + case 2: return (void *)&threadState->__edx; + case 3: return (void *)&threadState->__ebx; + case 4: return (void *)&threadState->__esp; + case 5: return (void *)&threadState->__ebp; + case 6: return (void *)&threadState->__esi; + case 7: return (void *)&threadState->__edi; + default: + NOTREACHED; + return NULL; /* Avoids compiler warning. 
*/ + } +} + + +/* Prmci3DecodeFaultContext -- decode fault to find faulting address and IP */ + +void Prmci3DecodeFaultContext(MRef *faultmemReturn, + Byte **insvecReturn, + MutatorContext context) +{ + AVER(faultmemReturn != NULL); + AVER(insvecReturn != NULL); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + *faultmemReturn = (MRef)context->address; + *insvecReturn = (Byte*)context->threadState->__eip; +} + + +/* Prmci3StepOverIns -- modify context to step over instruction */ + +void Prmci3StepOverIns(MutatorContext context, Size inslen) +{ + AVERT(MutatorContext, context); + AVER(0 < inslen); + + context->threadState->__eip += (Word)inslen; +} + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + + return (Addr)context->threadState->__esp; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prmcxci6.c b/mps/code/prmcxci6.c new file mode 100644 index 00000000000..b17b82695a8 --- /dev/null +++ b/mps/code/prmcxci6.c @@ -0,0 +1,135 @@ +/* prmcxci6.c: MUTATOR CONTEXT (macOS, x86-64) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement the mutator context module. . + * + * + * SOURCES + * + * + * ASSUMPTIONS + * + * .sp: The stack pointer in the context is RSP. + * + * .assume.regref: The registers in the context can be modified by + * storing into an MRef pointer. + */ + +#include "prmcxc.h" +#include "prmci6.h" + +SRCID(prmcxci6, "$Id$"); + +#if !defined(MPS_OS_XC) || !defined(MPS_ARCH_I6) +#error "prmcxci6.c is specific to MPS_OS_XC and MPS_ARCH_I6" +#endif + + +/* Prmci6AddressHoldingReg -- return an address of a register in a context */ + +MRef Prmci6AddressHoldingReg(MutatorContext context, unsigned int regnum) +{ + THREAD_STATE_S *threadState; + + AVERT(MutatorContext, context); + AVER(NONNEGATIVE(regnum)); + AVER(regnum <= 15); + + threadState = context->threadState; + + /* .assume.regref */ + /* The register numbers (REG_RAX etc.) are defined in + but only if _XOPEN_SOURCE is defined: see .feature.xc in + config.h. */ + /* MRef (a Word *) is not compatible with pointers to the register + types (actually a __uint64_t). To avoid aliasing optimization + problems, the registers are cast through (void *). 
*/ + switch (regnum) { + case 0: return (void *)&threadState->__rax; + case 1: return (void *)&threadState->__rcx; + case 2: return (void *)&threadState->__rdx; + case 3: return (void *)&threadState->__rbx; + case 4: return (void *)&threadState->__rsp; + case 5: return (void *)&threadState->__rbp; + case 6: return (void *)&threadState->__rsi; + case 7: return (void *)&threadState->__rdi; + case 8: return (void *)&threadState->__r8; + case 9: return (void *)&threadState->__r9; + case 10: return (void *)&threadState->__r10; + case 11: return (void *)&threadState->__r11; + case 12: return (void *)&threadState->__r12; + case 13: return (void *)&threadState->__r13; + case 14: return (void *)&threadState->__r14; + case 15: return (void *)&threadState->__r15; + default: + NOTREACHED; + return NULL; /* Avoids compiler warning. */ + } +} + + +/* Prmci6DecodeFaultContext -- decode fault to find faulting address and IP */ + +void Prmci6DecodeFaultContext(MRef *faultmemReturn, + Byte **insvecReturn, + MutatorContext context) +{ + AVER(faultmemReturn != NULL); + AVER(insvecReturn != NULL); + AVERT(MutatorContext, context); + AVER(context->var == MutatorContextFAULT); + + *faultmemReturn = (MRef)context->address; + *insvecReturn = (Byte*)context->threadState->__rip; +} + + +/* Prmci6StepOverIns -- modify context to step over instruction */ + +void Prmci6StepOverIns(MutatorContext context, Size inslen) +{ + AVERT(MutatorContext, context); + AVER(0 < inslen); + + context->threadState->__rip += (Word)inslen; +} + + +Addr MutatorContextSP(MutatorContext context) +{ + AVERT(MutatorContext, context); + + return (Addr)context->threadState->__rsp; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/prot.h b/mps/code/prot.h new file mode 100644 index 00000000000..149a3b97044 --- /dev/null +++ b/mps/code/prot.h @@ -0,0 +1,58 @@ +/* prot.h: MEMORY PROTECTION INTERFACE + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * See for the design of the generic interface including + * the contracts for these functions. + * + * This interface has several different implementations, typically one + * per platform, see for the various implementations, + * and for the corresponding designs. 
+ */ + +#ifndef prot_h +#define prot_h + +#include "mpmtypes.h" + + +/* Protection Interface */ + +extern void ProtSetup(void); +extern Size ProtGranularity(void); +extern void ProtSet(Addr base, Addr limit, AccessSet mode); +extern void ProtSync(Arena arena); + + +#endif /* prot_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protan.c b/mps/code/protan.c new file mode 100644 index 00000000000..c83a25197ce --- /dev/null +++ b/mps/code/protan.c @@ -0,0 +1,101 @@ +/* protan.c: ANSI MEMORY PROTECTION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ * + * + * DESIGN + * + * + */ + +#include "mpm.h" + +SRCID(protan, "$Id$"); + + +/* ProtSetup -- global protection setup */ + +void ProtSetup(void) +{ + NOOP; +} + + +/* ProtGranularity -- return the granularity of protection */ + +Size ProtGranularity(void) +{ + /* Any range of addresses can be "protected" since ProtSet does nothing. */ + return (Size)MPS_PF_ALIGN; +} + + +/* ProtSet -- set the protection for a page */ + +void ProtSet(Addr base, Addr limit, AccessSet pm) +{ + AVER(base < limit); + AVERT(AccessSet, pm); + UNUSED(pm); + NOOP; +} + + +/* ProtSync -- synchronize protection settings with hardware + * + * . + */ + +void ProtSync(Arena arena) +{ + Bool synced; + + AVERT(Arena, arena); + + do { + Seg seg; + + synced = TRUE; + if (SegFirst(&seg, arena)) { + do { + if (SegPM(seg) != AccessSetEMPTY) { /* */ + ShieldEnter(arena); + TraceSegAccess(arena, seg, SegPM(seg)); + ShieldLeave(arena); + synced = FALSE; + } + } while(SegNext(&seg, arena, seg)); + } + } while(!synced); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protix.c b/mps/code/protix.c new file mode 100644 index 00000000000..fa6541913b7 --- /dev/null +++ b/mps/code/protix.c @@ -0,0 +1,169 @@ +/* protix.c: PROTECTION FOR UNIX + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * Somewhat generic across different Unix systems. Shared between + * macOS, FreeBSD, and Linux. + * + * + * SOURCES + * + * [SUSV2MPROTECT] Single UNIX Specification, Version 2, mprotect + * + * + * ASSUMPTIONS + * + * .assume.mprotect.base: We assume that the first argument to mprotect can + * be safely passed as a void *. Single UNIX Specification Version 2 (aka + * X/OPEN XSH5) says that the parameter is a void *. Some Unix-likes may + * declare this parameter as a caddr_t. FreeBSD used to do this (on the now + * very obsolete FreeBSD 2.2.x series), as did macOS, but both now implement + * it correctly as void *. caddr_t is usually char *. + * + * .assume.write-only: More of an anti-assumption really. We + * assume that asking the OS for a write-only page (that is, flags = + * PROT_WRITE) does not work. What actually happens on all the + * Unix-like OSes that we've seen is that asking for write-permission + * (flags = PROT_WRITE) grants read-permission as well. That is why + * when the MPS requires that a page be read-protected (mode == + * AccessREAD) we must ensure that writes are also not allowed. 
+ * The portable guarantees of mprotect (see [SUSV2MPROTECT]) are that + * writes are not permitted where PROT_WRITE is not used and no access + * is permitted when PROT_NONE alone is used. + */ + +#include "mpm.h" + +#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC) +#error "protix.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC" +#endif + +#include "vm.h" + +#include +#include +#include /* sig_atomic_t */ +#include +#include +#include + +SRCID(protix, "$Id$"); + + +/* Value for memory protection corresponding to AccessSetEMPTY. + * See .convert.access for an explanation of the conversion. + * We use a global variable and not a constant so that we can clear + * the executable flag from future requests if Apple Hardened Runtime + * is detected. See for details. */ + +static sig_atomic_t prot_all = PROT_READ | PROT_WRITE | PROT_EXEC; + + +/* ProtSet -- set protection + * + * This is just a thin veneer on top of mprotect(2). + */ + +void ProtSet(Addr base, Addr limit, AccessSet mode) +{ + int flags, result; + + AVER(sizeof(size_t) == sizeof(Addr)); + AVER(base < limit); + AVER(base != 0); + AVER(AddrOffset(base, limit) <= INT_MAX); /* should be redundant */ + AVERT(AccessSet, mode); + + /* .convert.access: Convert between MPS AccessSet and UNIX PROT thingies. + In this function, AccessREAD means protect against read accesses + (disallow them). PROT_READ means allow read accesses. Notice that + this follows a difference in contract as well as style. AccessREAD + means that no reads should be permitted (all reads should go via + the signal handler), possibly other operations (write) also go via + the signal handler; PROT_WRITE means that all writes should be + allowed, possibly that means other operations (read) are also + allowed. 
+ */ + switch(mode) { + case AccessWRITE | AccessREAD: + case AccessREAD: /* forbids writes as well, see .assume.write-only */ + flags = PROT_NONE; + break; + case AccessWRITE: + flags = PROT_READ | PROT_EXEC; + break; + case AccessSetEMPTY: + flags = (int)prot_all; /* potential narrowing cast, but safe */ + break; + default: + NOTREACHED; + flags = PROT_NONE; + } + + /* .assume.mprotect.base */ + result = mprotect((void *)base, (size_t)AddrOffset(base, limit), flags); + if (MAYBE_HARDENED_RUNTIME && result != 0 && errno == EACCES + && (flags & PROT_WRITE) && (flags & PROT_EXEC)) + { + /* Apple Hardened Runtime is enabled, so that we cannot have + * memory that is simultaneously writable and executable. Handle + * this by dropping the executable part of the request. See + * for details. */ + prot_all = PROT_READ | PROT_WRITE; + result = mprotect((void *)base, (size_t)AddrOffset(base, limit), flags & prot_all); + } + if (result != 0) + NOTREACHED; +} + + +/* ProtSync -- synchronize protection settings with hardware + * + * This does nothing under Posix. See protan.c. + */ + +void ProtSync(Arena arena) +{ + UNUSED(arena); + NOOP; +} + + +/* ProtGranularity -- return the granularity of protection */ + +Size ProtGranularity(void) +{ + /* Individual pages can be protected. */ + return PageSize(); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protocol.c b/mps/code/protocol.c new file mode 100644 index 00000000000..11cd0e27a05 --- /dev/null +++ b/mps/code/protocol.c @@ -0,0 +1,190 @@ +/* The class definition for the root of the hierarchy */ + +/* protocol.c: PROTOCOL IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * . + */ + +#include "mpm.h" +#include "protocol.h" + +SRCID(protocol, "$Id$"); + + +/* The class definitions for the root of the hierarchy */ + +static void InstClassInitInternal(InstClass klass); + +DEFINE_CLASS(Inst, Inst, klass) +{ + InstClassInitInternal(klass); + klass->instStruct.klass = CLASS(InstClass); + AVERT(InstClass, klass); +} + +DEFINE_CLASS(Inst, InstClass, klass) +{ + /* Can't use INHERIT_CLASS(klass, InstClass, Inst) here because it + causes infinite regression, so we have to set this one up by + hand. 
*/ + InstClassInitInternal(klass); + klass->superclass = &CLASS_STATIC(Inst); + klass->name = "InstClass"; + klass->level = ClassLevelInstClass; + klass->display[ClassLevelInstClass] = CLASS_ID(InstClass); + AVERT(InstClass, klass); +} + +static void InstClassInitInternal(InstClass klass) +{ + ClassLevel i; + + klass->name = "Inst"; + klass->superclass = NULL; + for (i = 0; i < ClassDEPTH; ++i) + klass->display[i] = NULL; + klass->level = 0; + klass->display[klass->level] = CLASS_ID(Inst); + + /* Generic methods */ + klass->describe = InstDescribe; + klass->finish = InstFinish; + klass->init = InstInit; + + /* We can't call CLASS(InstClass) here because it causes a loop back + to here, so we have to tie this knot specially. */ + klass->instStruct.klass = &CLASS_STATIC(InstClass); + + klass->sig = InstClassSig; + AVERT(InstClass, klass); +} + + +/* InstClassCheck -- check a protocol class */ + +Bool InstClassCheck(InstClass klass) +{ + ClassLevel i; + CHECKS(InstClass, klass); + CHECKL(klass->name != NULL); + CHECKL(klass->level < ClassDEPTH); + for (i = 0; i <= klass->level; ++i) { + CHECKL(klass->display[i] != NULL); + } + for (i = klass->level + 1; i < ClassDEPTH; ++i) { + CHECKL(klass->display[i] == NULL); + } + CHECKL(FUNCHECK(klass->describe)); + CHECKL(FUNCHECK(klass->finish)); + CHECKL(FUNCHECK(klass->init)); + return TRUE; +} + + +/* InstInit -- initialize a protocol instance + * + * Initialisation makes the instance valid, so that it will pass + * InstCheck, and the instance can be specialized to be a member of a + * subclass. + */ + +void InstInit(Inst inst) +{ + AVER(inst != NULL); + inst->klass = CLASS(Inst); + AVERC(Inst, inst); +} + + +/* InstFinish -- finish a protocol instance + * + * Finishing makes the instance invalid, so that it will fail + * InstCheck and can't be used. 
+ */ + +static InstClassStruct invalidClassStruct = { + /* .instStruct = */ {&invalidClassStruct}, + /* .sig = */ SigInvalid, + /* .name = */ "Invalid", + /* .superclass = */ &invalidClassStruct, + /* .level = */ 0, + /* .display = */ {(ClassId)&invalidClassStruct}, + /* .describe = */ NULL, + /* .finish = */ NULL, + /* .init = */ NULL, +}; + +void InstFinish(Inst inst) +{ + AVERC(Inst, inst); + inst->klass = &invalidClassStruct; +} + + +/* InstCheck -- check a protocol instance */ + +Bool InstCheck(Inst inst) +{ + CHECKD(InstClass, inst->klass); + return TRUE; +} + + +void ClassRegister(InstClass klass) +{ + Word classId; + + /* label the pool class with its name */ + EventInit(); + classId = EventInternString(ClassName(klass)); + EventLabelPointer(klass, classId); +} + + +Res InstDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + InstClass klass; + + if (!TESTC(Inst, inst)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + klass = ClassOfPoly(Inst, inst); + return WriteF(stream, depth, + "$S $P\n", (WriteFS)ClassName(klass), inst, + NULL); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protocol.h b/mps/code/protocol.h new file mode 100644 index 00000000000..c13bafcd9c7 --- /dev/null +++ b/mps/code/protocol.h @@ -0,0 +1,360 @@ +/* protocol.h: PROTOCOL INHERITANCE DEFINITIONS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * . + */ + +#ifndef protocol_h +#define protocol_h + +#include "config.h" +#include "mpmtypes.h" + + +/* Identifier derivation macros. + * + * These turn the base identifier of a class (e.g. "Inst") into other + * identifiers (e.g. "InstClassStruct"). These are not intended to be + * used outside of this file. These macros implement + * and + * . + * + * INST_TYPE derives the type of an instance of the class, + * e.g. "Land", which will be a pointer to an INST_STRUCT. + * + * INST_STRUCT derives the type of a structure of an instance, + * e.g. "LandStruct". + * + * INST_CHECK derives the name of the checking function for the + * instance, e.g. "LandCheck". + * + * CLASS_TYPE derives the type of the class, e.g. "LandClass", which + * will be a pointer to a CLASS_STRUCT. + * + * CLASS_STRUCT derives the type of the structure of the class, + * e.g. "LandClassStruct". + * + * CLASS_ENSURE derives the name of the ensure function that returns + * the canonical class object, e.g. "LandClassGet". + * + * CLASS_INIT derives the name of the init function that initializes a + * CLASS_STRUCT, e.g. "LandClassInit". 
+ * + * CLASS_CHECK derives the name of the checking function for the + * class, e.g. "LandClassCheck". + * + * CLASS_GUARDIAN derives the name of a boolean that indicates whether + * the canonical class object has been initialized yet, + * e.g. "ClassGuardianLand". + * + * CLASS_STATIC derives the name of a static global variable that + * contains the canonical class object, e.g. "ClassStaticLand". + * + * KIND_CLASS derives the class name of a kind, which is used in + * contexts like CLASS_TYPE(KIND_CLASS(kind)), so that the kind "Land" + * is implemented by the canonical "LandClassClass". + */ + +#define INST_TYPE(klass) klass +#define INST_STRUCT(klass) klass ## Struct +#define INST_CHECK(klass) klass ## Check +#define CLASS_TYPE(klass) klass ## Class +#define CLASS_STRUCT(klass) klass ## ClassStruct +#define CLASS_ENSURE(klass) klass ## ClassGet +#define CLASS_INIT(klass) klass ## ClassInit +#define CLASS_CHECK(klass) klass ## ClassCheck +#define CLASS_GUARDIAN(klass) ClassGuardian ## klass +#define CLASS_STATIC(klass) ClassStatic ## klass +#define KIND_CLASS(klass) klass ## Class + + +/* ClassId -- static identity of a class + * + * We use the address of the static storage for the canonical class + * object as the class id, suitable for fast comparison. This is not + * intended to be dereferenced. We would like to define it as a + * pointer to an incomplete structure, but GCC 4.7 buggily complains + * about punning if we do that, so use void *, even though that's a + * bit more error prone. + */ + +typedef void *ClassId; +#define CLASS_ID(klass) ((ClassId)&CLASS_STATIC(klass)) + + +/* DECLARE_CLASS -- declare the existence of a protocol class + * + * Declares a prototype for the class ensure function, which ensures + * that the class is initialized once and return it. See + * . 
+ */ + +#define DECLARE_CLASS(kind, klass, super) \ + extern CLASS_TYPE(kind) CLASS_ENSURE(klass)(void); \ + extern void CLASS_INIT(klass)(CLASS_TYPE(kind) var); \ + extern CLASS_STRUCT(kind) CLASS_STATIC(klass); \ + enum { ClassLevel ## klass = ClassLevel ## super + 1 } + + +/* DEFINE_CLASS -- define a protocol class + * + * Defines the static storage and functions for the canonical class + * object for a class. Takes care to avoid initializing the class + * twice, even when called asynchronously from multiple threads, since + * this code can be reached without first entering an arena. See + * . + */ + +#define DEFINE_CLASS(kind, className, var) \ + static Bool CLASS_GUARDIAN(className) = FALSE; \ + CLASS_STRUCT(kind) CLASS_STATIC(className); \ + CLASS_TYPE(kind) CLASS_ENSURE(className)(void) \ + { \ + CLASS_TYPE(kind) klass = &CLASS_STATIC(className); \ + if (CLASS_GUARDIAN(className) == FALSE) { \ + LockClaimGlobalRecursive(); \ + if (CLASS_GUARDIAN(className) == FALSE) { \ + CLASS_INIT(className)(klass); \ + /* Prevent infinite regress. */ \ + if (CLASS_ID(className) != CLASS_ID(InstClass) && \ + CLASS_ID(className) != CLASS_ID(Inst)) \ + SetClassOfPoly(klass, CLASS(KIND_CLASS(kind))); \ + AVER(CLASS_CHECK(kind)(klass)); \ + CLASS_GUARDIAN(className) = TRUE; \ + ClassRegister(MustBeA(InstClass, klass)); \ + } \ + LockReleaseGlobalRecursive(); \ + } \ + return klass; \ + } \ + void CLASS_INIT(className)(CLASS_TYPE(kind) var) + + +/* CLASS -- expression for getting a class + * + * Use this to get a class, rather than calling anything defined by + * DEFINE_CLASS directly. . + */ + +#define CLASS(klass) (CLASS_ENSURE(klass)()) + + +/* INHERIT_CLASS -- inheriting from a superclass + * + * This macro is used at the start of a class definition to inherit + * the superclass and override the fields essential to the workings of + * the protocol. . 
+ */ + +#define INHERIT_CLASS(this, _class, super) \ + BEGIN \ + InstClass instClass = (InstClass)(this); \ + CLASS_INIT(super)(this); \ + instClass->superclass = (InstClass)CLASS(super); \ + instClass->name = #_class; \ + instClass->level = instClass->superclass->level + 1; \ + AVER(instClass->level < ClassDEPTH); \ + instClass->display[instClass->level] = CLASS_ID(_class); \ + END + + +/* Inst -- the base class of the hierarchy + * + * An InstStruct named instStruct must be the first field of any + * instance structure using the protocol + * . + */ + +typedef struct InstStruct *Inst; +typedef struct InstClassStruct *InstClass; + +typedef struct InstStruct { + InstClass klass; + /* Do not add permanent fields here. Introduce a subclass. */ +} InstStruct; + +typedef const char *ClassName; +typedef unsigned char ClassLevel; +typedef Res (*DescribeMethod)(Inst inst, mps_lib_FILE *stream, Count depth); +typedef void (*InstInitMethod)(Inst inst); +typedef void (*FinishMethod)(Inst inst); +#define ClassDEPTH 8 /* maximum depth of class hierarchy */ + +#define InstClassSig ((Sig)0x519B1452) /* SIGnature Protocol INST */ + +typedef struct InstClassStruct { + InstStruct instStruct; /* classes are instances of kinds */ + Sig sig; /* design.mps.sig.field */ + ClassName name; /* human readable name such as "Land" */ + InstClass superclass; /* pointer to direct superclass */ + ClassLevel level; /* distance from root of class hierarchy */ + ClassId display[ClassDEPTH]; /* classes at this level and above */ + DescribeMethod describe; /* write a debugging description */ + FinishMethod finish; /* finish instance */ + InstInitMethod init; /* base init method */ +} InstClassStruct; + +enum {ClassLevelNoSuper = -1}; +DECLARE_CLASS(Inst, Inst, NoSuper); +DECLARE_CLASS(Inst, InstClass, Inst); + +extern Bool InstClassCheck(InstClass klass); +extern Bool InstCheck(Inst inst); +extern void InstInit(Inst inst); +extern void InstFinish(Inst inst); +extern Res InstDescribe(Inst inst, 
mps_lib_FILE *stream, Count depth); + + +/* ClassRegister -- class registration + * + * This is called once for each class initialised by DEFINE_CLASS and + * is not intended for use outside this file. + */ + +extern void ClassRegister(InstClass klass); + + +/* IsSubclass, IsA -- fast subclass test + * + * The InstClassStruct is arranged to make these tests fast and + * simple, so that it can be used as a consistency check in the MPS. + * . + */ + +#define IsSubclass(sub, super) \ + (((InstClass)(sub))->display[ClassLevel ## super] == CLASS_ID(super)) + +#define IsA(_class, inst) \ + IsSubclass(CouldBeA(Inst, inst)->klass, _class) + +#define IsNonNullAndA(_class, inst) \ + ((inst) != NULL && \ + CouldBeA(Inst, inst)->klass != NULL && \ + IsA(_class, inst)) + + +/* CouldBeA, MustBeA -- coerce instances + * + * CouldBeA converts an instance to another class without checking, + * like C++ ``static_cast``. . + * + * MustBeA converts an instance to another class, but checks that the + * object is a subclass, causing an assertion if not (depending on + * build variety). . It is like + * C++ "dynamic_cast" with an assert. + * + * MustBeA_CRITICAL is like MustBeA for use on the critical path, + * where it does no checking at all in production builds. See + * . + */ + +#define CouldBeA(klass, inst) ((INST_TYPE(klass))(inst)) + +#define MustBeA(_class, inst) \ + CouldBeA(_class, \ + AVERPC(IsNonNullAndA(_class, inst), \ + "MustBeA " #_class ": " #inst, \ + inst)) + +#define MustBeA_CRITICAL(_class, inst) \ + CouldBeA(_class, \ + AVERPC_CRITICAL(IsNonNullAndA(_class, inst), \ + "MustBeA " #_class ": " #inst, \ + inst)) + + +/* Protocol introspection interface + * + * The following are macros because of the need to cast subtypes of + * InstClass. Nevertheless they are named as functions. See + * . 
+ */ + +#define SuperclassPoly(kind, klass) \ + MustBeA(KIND_CLASS(kind), MustBeA(InstClass, klass)->superclass) + +#define ClassOfPoly(kind, inst) \ + MustBeA(KIND_CLASS(kind), MustBeA(Inst, inst)->klass) + + +/* ClassName -- get the human readable name of a class + * + * ClassName is used in describe methods and other unsafe places, so + * we don't use MustBeA. + */ + +#define ClassName(klass) RVALUE(CouldBeA(InstClass, klass)->name) + + +/* SetClassOfPoly -- set the class of an object + * + * This should only be used when specialising an instance to be a + * member of a subclass, once the instance has been initialized. See + * . + */ + +#define SetClassOfPoly(inst, _class) \ + BEGIN MustBeA(Inst, inst)->klass = MustBeA(InstClass, _class); END + + +/* Method -- method call + * + * Use this macro to call a method on a class, rather than accessing + * the class directly. . For + * example: + * + * res = Method(Land, land, insert)(land, range); + */ + +#define Method(kind, inst, meth) (ClassOfPoly(kind, inst)->meth) + + +/* NextMethod -- call a method in the superclass + * + * . + * + * TODO: All uses of NextMethod are statically known, but several + * experiments with statically generating some kind of SUPERCLASS + * lookup have failed because the names of types, classes, and the + * hierarchy are inconsistent. Revisit this later. + */ + +#define SUPERCLASS(kind, klass) \ + MustBeA(KIND_CLASS(kind), CouldBeA(InstClass, CLASS(klass))->superclass) + +#define NextMethod(kind, klass, meth) (SUPERCLASS(kind, klass)->meth) + + +#endif /* protocol_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protsgix.c b/mps/code/protsgix.c new file mode 100644 index 00000000000..966569c9293 --- /dev/null +++ b/mps/code/protsgix.c @@ -0,0 +1,182 @@ +/* protsgix.c: PROTECTION (SIGNAL HANDLER) FOR POSIX + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This implements protection exception handling using POSIX signals. + * It is designed to run on any POSIX-compliant Unix. + * + * + * SOURCES + * + * .source.posix: POSIX specifications for signal.h and sigaction + * + * + * + * .source.man: sigaction(2): FreeBSD System Calls Manual. + * + * .source.merge: A blend from primarily the FreeBSD version (protfri3.c) + * and the OSF/1 (DIGITAL UNIX) version (proto1.c); influenced by other + * Unix versions. 
+ */ + +#include "mpm.h" + +#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) +#error "protsgix.c is specific to MPS_OS_FR or MPS_OS_LI" +#endif + +#include "prmcix.h" + +#include /* for errno */ +#include /* for many functions */ +#include /* for ucontext_t */ +#include /* for getpid */ + +SRCID(protsgix, "$Id$"); + + +/* The previously-installed signal action, as returned by */ +/* sigaction(2). See ProtSetup. */ + +static struct sigaction sigNext; + + +/* sigHandle -- protection signal handler + * + * This is the signal handler installed by ProtSetup to deal with + * protection faults. It is installed on the signal given by the + * PROT_SIGNAL macro (that is, SIGSEGV). It constructs a mutator + * context based on the signal context, and passes it to ArenaAccess, + * which attempts to handle the fault and remove its cause. If the + * fault is handled, then the handler returns and execution resumes. + * If it isn't handled, then sigHandle does its best to pass the + * signal on to the previously installed signal handler (sigNext); + * which it does by signalling itself using kill(2). + * + * .sigh.args: We set the SA_SIGINFO flag in the sa_flags field of the + * sigaction structure, and so the signal handler in the sa_sigaction + * field receives three arguments: signal number, pointer to signal + * info structure, pointer to signal context structure. + * + * .sigh.check: We check that info->si_code is SEGV_ACCERR (meaning + * "Invalid permissions for mapped object"). + * + * .sigh.addr: If so, we assume info->si_addr is the fault address. + * + * .sigh.mode: The fault type (read/write) does not appear to be + * available to the signal handler (see mail archive). + */ + +#define PROT_SIGNAL SIGSEGV + +static void sigHandle(int sig, siginfo_t *info, void *uap) /* .sigh.args */ +{ + ERRNO_SAVE { + int e; + /* sigset renamed to asigset due to clash with global on Darwin. 
*/ + sigset_t asigset, oldset; + struct sigaction sa; + + AVER(sig == PROT_SIGNAL); + + if (info->si_code == SEGV_ACCERR) { /* .sigh.check */ + AccessSet mode; + Addr base; + MutatorContextStruct context; + + MutatorContextInitFault(&context, info, (ucontext_t *)uap); + + mode = AccessREAD | AccessWRITE; /* .sigh.mode */ + + /* We assume that the access is for one word at the address. */ + base = (Addr)info->si_addr; /* .sigh.addr */ + + /* Offer each protection structure the opportunity to handle the */ + /* exception. If it succeeds, then allow the mutator to continue. */ + if (ArenaAccess(base, mode, &context)) + goto done; + } + + /* The exception was not handled by any known protection structure, */ + /* so throw it to the previously installed handler. That handler won't */ + /* get an accurate context (the MPS would fail if it were the second in */ + /* line) but it's the best we can do. */ + + e = sigaction(PROT_SIGNAL, &sigNext, &sa); + AVER(e == 0); + e = sigemptyset(&asigset); + AVER(e == 0); + e = sigaddset(&asigset, PROT_SIGNAL); + AVER(e == 0); + e = sigprocmask(SIG_UNBLOCK, &asigset, &oldset); + AVER(e == 0); + e = kill(getpid(), PROT_SIGNAL); + AVER(e == 0); + e = sigprocmask(SIG_SETMASK, &oldset, NULL); + AVER(e == 0); + e = sigaction(PROT_SIGNAL, &sa, NULL); + AVER(e == 0); + + done: + ; + } ERRNO_RESTORE; +} + + +/* ProtSetup -- global protection setup + * + * Under Unix, the global setup involves installing a signal handler + * on PROT_SIGNAL to catch and handle page faults (see sigHandle). + * The previous handler is recorded so that it can be reached from + * sigHandle if it fails to handle the fault. + * + * NOTE: There are problems with this approach: + * 1. we can't honor the sa_flags for the previous handler, + * 2. what if this thread is suspended just after calling signal(3)? + * The sigNext variable will never be initialized! 
+ */ + +void ProtSetup(void) +{ + struct sigaction sa; + int result; + + sa.sa_sigaction = sigHandle; + result = sigemptyset(&sa.sa_mask); + AVER(result == 0); + sa.sa_flags = SA_SIGINFO | SA_RESTART; + + result = sigaction(PROT_SIGNAL, &sa, &sigNext); + AVER(result == 0); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protw3.c b/mps/code/protw3.c new file mode 100644 index 00000000000..6c57731ae45 --- /dev/null +++ b/mps/code/protw3.c @@ -0,0 +1,176 @@ +/* protw3.c: PROTECTION FOR WIN32 + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "prmcw3.h" + +#if !defined(MPS_OS_W3) +#error "protw3.c is specific to MPS_OS_W3" +#endif + +#include "mpswin.h" +#include "vm.h" /* PageSize */ + +SRCID(protw3, "$Id$"); + + +void ProtSet(Addr base, Addr limit, AccessSet mode) +{ + DWORD newProtect; + DWORD oldProtect; + + AVER(base < limit); + AVER(base != 0); + AVERT(AccessSet, mode); + + newProtect = PAGE_EXECUTE_READWRITE; + if((mode & AccessWRITE) != 0) + newProtect = PAGE_EXECUTE_READ; + if((mode & AccessREAD) != 0) + newProtect = PAGE_NOACCESS; + + if(VirtualProtect((LPVOID)base, (SIZE_T)AddrOffset(base, limit), + newProtect, &oldProtect) == 0) + NOTREACHED; +} + + +LONG WINAPI ProtSEHfilter(LPEXCEPTION_POINTERS info) +{ + LPEXCEPTION_RECORD er; + ULONG_PTR address; + AccessSet mode; + Addr base, limit; + LONG action; + DWORD lastError; + MutatorContextStruct context; + + er = info->ExceptionRecord; + + if(er->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) + return EXCEPTION_CONTINUE_SEARCH; + + /* This is the first point where we call a Windows API function that + * might change the last error. There are also no early returns from + * this point onwards. 
+ */ + lastError = GetLastError(); + + MutatorContextInitFault(&context, info); + + /* assert that the exception is continuable */ + /* Note that Microsoft say that this field should be 0 or */ + /* EXCEPTION_NONCONTINUABLE, but this is not true */ + AVER((er->ExceptionFlags & EXCEPTION_NONCONTINUABLE) == 0); + + /* er->ExceptionRecord is pointer to next exception in chain */ + /* er->ExceptionAddress is where exception occurred */ + + AVER(er->NumberParameters >= 2); + + switch (er->ExceptionInformation[0]) { + case 0: /* read */ + case 8: /* execute */ + mode = AccessREAD; + break; + case 1: /* write */ + /* Pages cannot be made write-only, so an attempt to write must + also cause a read-access if necessary */ + mode = AccessREAD | AccessWRITE; + break; + default: + /* */ + NOTREACHED; + mode = AccessREAD | AccessWRITE; + break; + } + + address = er->ExceptionInformation[1]; + + base = (Addr)address; + limit = AddrAdd(address, sizeof(Addr)); + + if(base < limit) { + if(ArenaAccess(base, mode, &context)) + action = EXCEPTION_CONTINUE_EXECUTION; + else + action = EXCEPTION_CONTINUE_SEARCH; + } else { + /* Access on last sizeof(Addr) (ie 4 on this platform) bytes */ + /* in memory. We assume we can't get this page anyway */ + /* so it can't be our fault. */ + action = EXCEPTION_CONTINUE_SEARCH; + } + + /* Restore the last error value before returning. */ + SetLastError(lastError); + + return action; +} + + +/* ProtSetup -- set up the protection system */ + +void ProtSetup(void) +{ + void *handler; + /* See "AddVectoredExceptionHandler function (Windows)" + */ + /* ProtSetup is called only once per process, not once per arena, so + this exception handler is only installed once. */ + handler = AddVectoredExceptionHandler(1uL, ProtSEHfilter); + AVER(handler != NULL); +} + + +/* ProtGranularity -- return the granularity of protection */ + +Size ProtGranularity(void) +{ + /* Individual pages can be protected. 
*/ + return PageSize(); +} + + +/* ProtSync -- synchronize protection settings with hardware + * + * This does nothing under Win32. See protan.c. + */ + +void ProtSync(Arena arena) +{ + UNUSED(arena); + NOOP; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protxc.c b/mps/code/protxc.c new file mode 100644 index 00000000000..172908b5cd6 --- /dev/null +++ b/mps/code/protxc.c @@ -0,0 +1,442 @@ +/* protxc.c: PROTECTION EXCEPTION HANDLER (macOS) + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. 
+ * + * This is the protection exception handling code for macOS using the + * Mach interface (not pthreads). + * + * In Mach, a thread that hits protected memory is suspended, and a message + * is sent to a separate handler thread. + * + * The handler thread can fix things up and continue the suspended thread by + * sending back a "success" reply. It can forward the message to another + * handler of the same kind, or it can forward the message to another handler + * at the next level out (the levels are thread, task, host) by sending a + * "fail" reply. + * + * In macOS, pthreads are implemented by Mach threads. (The implementation is + * part of the XNU source code at opensource.apple.com. [copy to import?]) So + * we can use some pthread interfaces (pthread_create, pthread_once) for + * convenience in setting up threads. + * + * This module sets up an exception handling thread for the EXC_BAD_ACCESS + * exceptions that will be caused by the MPS shield (read/write barriers). + * That thread calls the MPS to resolve the condition and allow the mutator + * thread to progress. + * + * + * REFERENCES + * + * [Fuller_2013] "Mach Exception Handlers"; Landon Fuller; + * . + * + * [XNU] "xnu-2050.22.13" source code; + * Apple Computer; + * . + * + * [Mach_man] Mach man pages within XNU; + * Apple Computer; + * . + * + * [Libc] "Libc-825.26" source code; + * Apple Computer; + * . + * + * + * TRANSGRESSIONS + * + * .trans.stdlib: It's OK to use the C library from here because we know + * we're on macOS and not freestanding. In particular, we use memcpy. + * + * .trans.must: Various OS calls are asserted to succeed, since there isn't + * really a dynamic reason they should fail, so it must be a static error. + * In a RASH build, they don't stop the program, just in case it's able to + * limp along. 
+ */ + +#include "mpm.h" + +#if !defined(MPS_OS_XC) +#error "protxc.c is specific to MPS_OS_XC" +#endif + +#include "prmcxc.h" +#include "protxc.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* see .trans.stdlib */ +#include /* see .trans.stdlib */ + +SRCID(protxc, "$Id$"); + + +/* Exception request message structure + * + * The following declaration is extracted by running the Mach Interface + * Generator like this: + * + * mig /usr/include/mach/mach_exc.defs + * + * then copying it from the resulting mach_exc.h file. This gets the + * structure with 64-bit code fields, corresponding to the exception + * behaviour EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES. Only the + * 32-bit structures are available in /usr/include/mach. Note that these + * 32- and 64-bit message structures are independent of the architecture + * word width, so we choose to use 64-bit on both I3 and I6. + */ + +#ifdef __MigPackStructs +#pragma pack(4) +#endif + typedef struct { + mach_msg_header_t Head; + /* start of the kernel processed data */ + mach_msg_body_t msgh_body; + mach_msg_port_descriptor_t thread; + mach_msg_port_descriptor_t task; + /* end of the kernel processed data */ + NDR_record_t NDR; + exception_type_t exception; + mach_msg_type_number_t codeCnt; + int64_t code[2]; + int flavor; + mach_msg_type_number_t old_stateCnt; + natural_t old_state[224]; + } __Request__mach_exception_raise_state_identity_t; +#ifdef __MigPackStructs +#pragma pack() +#endif + + +/* Local short names, for convenience. */ +typedef __Request__mach_exception_raise_state_identity_t protRequestStruct; +typedef __Reply__exception_raise_state_identity_t protReplyStruct; + + +/* protExcPort -- exception message receiving Mach port + * + * This will be the port that will receive messages for our exception + * handler, initialized by protSetupInner. 
+ */ + +static mach_port_name_t protExcPort = MACH_PORT_NULL; + + +/* protBuildReply -- build a reply message based on a request. */ + +static void protBuildReply(protReplyStruct *reply, + protRequestStruct *request, + kern_return_t ret_code) +{ + mach_msg_size_t state_size; + reply->Head.msgh_bits = + MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request->Head.msgh_bits), 0); + reply->Head.msgh_remote_port = request->Head.msgh_remote_port; + reply->Head.msgh_local_port = MACH_PORT_NULL; + reply->Head.msgh_reserved = 0; + reply->Head.msgh_id = request->Head.msgh_id + 100; + reply->NDR = request->NDR; + reply->RetCode = ret_code; + reply->flavor = request->flavor; + reply->new_stateCnt = request->old_stateCnt; + state_size = reply->new_stateCnt * sizeof(natural_t); + AVER(sizeof(reply->new_state) >= state_size); + memcpy(reply->new_state, request->old_state, state_size); + /* If you use sizeof(reply) for reply->Head.msgh_size then the state + gets ignored. */ + reply->Head.msgh_size = offsetof(protReplyStruct, new_state) + state_size; +} + + +/* protMustSend -- send a Mach message without fail, probably */ + +static void protMustSend(mach_msg_header_t *head) +{ + kern_return_t kr; + kr = mach_msg(head, + MACH_SEND_MSG, + head->msgh_size, + /* recv_size */ 0, + MACH_PORT_NULL, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + AVER(kr == KERN_SUCCESS); + if (kr != KERN_SUCCESS) + mach_error("ERROR: MPS mach_msg send", kr); /* .trans.must */ +} + + +/* protCatchOne -- catch one EXC_BAD_ACCESS exception message. + * + * macOS provides a function exc_server (in + * /usr/lib/system/libsystem_kernel.dylib) that's documented in the XNU + * sources + * and generated by the Mach Interface Generator (mig). It unpacks + * an exception message structure and calls one of several handler functions. + * We can't use it because: + * + * 1. It's hard-wired to call certain functions by name. The MPS can't + * steal those names in case the client program is using them too. + * + * 2. 
It fails anyway in Xcode's default "Release" build with hidden + * symbols, because it uses dlsym to find those handler functions, and + * dlsym can't find them. + * + * So instead this function duplicates the work of exc_server, and is shorter + * because it's specialised for protection exceptions of a single behaviour + * and flavour. It is also more flexible and can dispatch to any function + * we want. The downside is that it depends on various unpublished stuff + * like the code numbers for certain messages. + */ + +static void protCatchOne(void) +{ + protRequestStruct request; + mach_msg_return_t mr; + protReplyStruct reply; + + AVER(MACH_PORT_VALID(protExcPort)); + mr = mach_msg(&request.Head, + /* option */ MACH_RCV_MSG, + /* send_size */ 0, + /* receive_limit */ sizeof(request), + /* receive_name */ protExcPort, + /* timeout */ MACH_MSG_TIMEOUT_NONE, + /* notify */ MACH_PORT_NULL); + AVER(mr == MACH_MSG_SUCCESS); + if (mr != MACH_MSG_SUCCESS) + mach_error("ERROR: MPS mach_msg recv\n", mr); /* .trans.must */ + + /* 2407 is the id for the 64-bit exception requests we asked for in + ProtThreadRegister, with state and identity + information, determined by experimentation and confirmed by + running mig on /usr/include/mach/mach_exc.defs */ + AVER(request.Head.msgh_id == 2407); + AVER(request.Head.msgh_local_port == protExcPort); + AVER(request.task.name == mach_task_self()); + AVER(request.exception == EXC_BAD_ACCESS); + AVER(request.codeCnt == 2); + AVER(request.old_stateCnt == THREAD_STATE_COUNT); + AVER(request.flavor == THREAD_STATE_FLAVOR); + + /* TODO: This could dispatch to separate worker threads, in order to + spread scanning work across several cores once the MPS can be + re-entered. */ + + if (request.code[0] == KERN_PROTECTION_FAILURE) { + MutatorContextStruct context; + + /* The cast via Word suppresses "cast to pointer from integer of + different size" warnings in GCC, for the XCI3GC build. 
*/ + MutatorContextInitFault(&context, (Addr)(Word)request.code[1], + (void *)request.old_state); + + if (ArenaAccess(context.address, + AccessREAD | AccessWRITE, + &context)) { + /* Send a reply that will cause the thread to continue. + Note that ArenaAccess may have updated request.old_state + via context.thread_state, and that will get copied to the + reply and affect the state the thread resumes in. */ + protBuildReply(&reply, &request, KERN_SUCCESS); + protMustSend(&reply.Head); + return; + } + } + + /* We didn't handle the exception -- it wasn't one of ours. */ + + /* .assume.only-port: We assume that there was no previously installed + exception port. This is checked in ProtThreadRegister, and we don't + check it again here to avoid the extra system call. If there + were, we must arrange to forward the exception message to the + previous port. This module used to do that because it installed a + task-wide exception handler, but the code is pretty hairy and not + necessary as long as the MPS is registering threads individually. + If we ever need to reinstate that code, look at + https://info.ravenbrook.com/project/mps/prototype/2013-06-24/machtest */ + + protBuildReply(&reply, &request, KERN_FAILURE); + protMustSend(&reply.Head); +} + + +/* protCatchThread -- exception handler thread loop. + * + * Note that this thread does *not* have a thread-specific exception handler + * installed. This means that an exception-causing bug in the exception + * handler won't cause a deadlock. 
+ */ + +ATTRIBUTE_NORETURN +static void *protCatchThread(void *p) +{ + UNUSED(p); + for (;;) + protCatchOne(); +} + + +/* ProtThreadRegister -- register a thread for protection exception handling */ + +void ProtThreadRegister(void) +{ + kern_return_t kr; + mach_msg_type_number_t old_exception_count = 1; + exception_mask_t old_exception_masks; + exception_behavior_t behavior; + mach_port_t old_exception_ports; + exception_behavior_t old_behaviors; + thread_state_flavor_t old_flavors; + mach_port_t self; + + self = mach_thread_self(); + AVER(MACH_PORT_VALID(self)); + + /* Ask to receive EXC_BAD_ACCESS exceptions on our port, complete + with thread state and identity information in the message. + The MACH_EXCEPTION_CODES flag causes the code fields to be + passed 64-bits wide, matching protRequestStruct [Fuller_2013]. */ + behavior = (exception_behavior_t)(EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES); + AVER(MACH_PORT_VALID(protExcPort)); + kr = thread_swap_exception_ports(self, + EXC_MASK_BAD_ACCESS, + protExcPort, + behavior, + THREAD_STATE_FLAVOR, + &old_exception_masks, + &old_exception_count, + &old_exception_ports, + &old_behaviors, + &old_flavors); + AVER(kr == KERN_SUCCESS); + if (kr != KERN_SUCCESS) + mach_error("ERROR: MPS thread_swap_exception_ports", kr); /* .trans.must */ + AVER(old_exception_masks == EXC_MASK_BAD_ACCESS); + AVER(old_exception_count == 1); + AVER(old_exception_ports == MACH_PORT_NULL + || old_exception_ports == protExcPort); /* .assume.only-port */ +} + + +/* protExcThreadStart -- create exception port, register the current + * thread with that port, and create a thread to handle exception + * messages. + */ + +static void protExcThreadStart(void) +{ + kern_return_t kr; + mach_port_t self; + pthread_t excThread; + int pr; + + /* Create a port to send and receive exceptions. 
*/ + self = mach_task_self(); + AVER(MACH_PORT_VALID(self)); + kr = mach_port_allocate(self, + MACH_PORT_RIGHT_RECEIVE, + &protExcPort); + AVER(kr == KERN_SUCCESS); + if (kr != KERN_SUCCESS) + mach_error("ERROR: MPS mach_port_allocate", kr); /* .trans.must */ + AVER(MACH_PORT_VALID(protExcPort)); + + /* Allow me to send exceptions on this port. */ + /* TODO: Find out why this is necessary. */ + self = mach_task_self(); + AVER(MACH_PORT_VALID(self)); + kr = mach_port_insert_right(self, + protExcPort, protExcPort, + MACH_MSG_TYPE_MAKE_SEND); + AVER(kr == KERN_SUCCESS); + if (kr != KERN_SUCCESS) + mach_error("ERROR: MPS mach_port_insert_right", kr); /* .trans.must */ + + /* We don't require the mutator to register the sole thread in a + * single-threaded program, so register it automatically now. */ + ProtThreadRegister(); + + /* Launch the exception handling thread. We use pthread_create + * because it's much simpler than setting up a thread from scratch + * using Mach, and that's basically what it does. See [Libc] + * */ + pr = pthread_create(&excThread, NULL, protCatchThread, NULL); + AVER(pr == 0); + if (pr != 0) + fprintf(stderr, "ERROR: MPS pthread_create: %d\n", pr); /* .trans.must */ +} + + +/* protAtForkChild -- support for fork() + * + */ + +static void protAtForkChild(void) +{ + /* Restart the exception handling thread + . */ + protExcThreadStart(); +} + + +/* ProtSetup -- set up protection exception handling */ + +static void protSetupInner(void) +{ + protExcThreadStart(); + + /* Install fork handlers . */ + pthread_atfork(NULL, NULL, protAtForkChild); +} + +void ProtSetup(void) +{ + int pr; + static pthread_once_t prot_setup_once = PTHREAD_ONCE_INIT; + + /* ProtSetup may be called several times if the client creates more than + one arena, but we still only want one exception handling thread. 
*/ + pr = pthread_once(&prot_setup_once, protSetupInner); + AVER(pr == 0); + if (pr != 0) + fprintf(stderr, "ERROR: MPS pthread_once: %d\n", pr); /* .trans.must */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/protxc.h b/mps/code/protxc.h new file mode 100644 index 00000000000..55b74980327 --- /dev/null +++ b/mps/code/protxc.h @@ -0,0 +1,41 @@ +/* protxc.h: PROTECTION EXCEPTION HANDLER (macOS) + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#ifndef protxc_h +#define protxc_h + +extern void ProtThreadRegister(void); + +#endif /* protxc_h */ + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/pthrdext.c b/mps/code/pthrdext.c new file mode 100644 index 00000000000..7619468a187 --- /dev/null +++ b/mps/code/pthrdext.c @@ -0,0 +1,395 @@ +/* pthrdext.c: POSIX THREAD EXTENSIONS + * + * $Id$ + * Copyright (c) 2001-2023 Ravenbrook Limited. See end of file for license. 
+ * + * .purpose: Provides extension to Pthreads to permit thread suspend + * and resume, so that the MPS can gain exclusive access to memory via + * the Shield (design.mps.shield). + * + * .design: see + * + * .acknowledgements: This was derived from code posted to + * comp.programming.threads by Dave Butenhof and Raymond Lau + * (, ). + */ + +#include "mpm.h" + +#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) +#error "pthrdext.c is specific to MPS_OS_FR or MPS_OS_LI" +#endif + +#include "pthrdext.h" + +#include +#include +#include +#include +#include /* see .feature.li in config.h */ +#include +#include + +SRCID(pthreadext, "$Id$"); + + +/* Static data initialized on first use of the module + * + */ + +/* mutex */ +static pthread_mutex_t pthreadextMut = PTHREAD_MUTEX_INITIALIZER; + +/* semaphore */ +static sem_t pthreadextSem; + +/* initialization support */ +static pthread_once_t pthreadextOnce = PTHREAD_ONCE_INIT; +static Bool pthreadextModuleInitialized = FALSE; + + +/* Global variables protected by the mutex + * + */ + +static PThreadext suspendingVictim = NULL; /* current victim */ +static RingStruct suspendedRing; /* PThreadext suspend ring */ + + +/* suspendSignalHandler -- signal handler called when suspending a thread + * + * + * + * Handle PTHREADEXT_SIGSUSPEND in the target thread, to suspend it until + * receiving PTHREADEXT_SIGRESUME (resume). Note that this is run with both + * PTHREADEXT_SIGSUSPEND and PTHREADEXT_SIGRESUME blocked. Having + * PTHREADEXT_SIGRESUME blocked prevents a resume before we can finish the + * suspend protocol. + */ + +#include "prmcix.h" + +static void suspendSignalHandler(int sig, + siginfo_t *info, + void *uap) +{ + ERRNO_SAVE { + sigset_t signal_set; + ucontext_t ucontext; + MutatorContextStruct context; + int status; + + AVER(sig == PTHREADEXT_SIGSUSPEND); + UNUSED(sig); + UNUSED(info); + + AVER(suspendingVictim != NULL); + /* copy the ucontext structure so we definitely have it on our stack, + * not (e.g.) 
shared with other threads. */ + ucontext = *(ucontext_t *)uap; + MutatorContextInitThread(&context, &ucontext); + suspendingVictim->context = &context; + /* Block all signals except PTHREADEXT_SIGRESUME while suspended. */ + status = sigfillset(&signal_set); + AVER(status == 0); + status = sigdelset(&signal_set, PTHREADEXT_SIGRESUME); + AVER(status == 0); + status = sem_post(&pthreadextSem); + AVER(status == 0); + status = sigsuspend(&signal_set); + AVER(status == -1); + + /* Once here, the resume signal handler has run to completion. */ + } ERRNO_RESTORE; +} + + +/* resumeSignalHandler -- signal handler called when resuming a thread + * + * + */ + +static void resumeSignalHandler(int sig) +{ + ERRNO_SAVE { + AVER(sig == PTHREADEXT_SIGRESUME); + UNUSED(sig); + } ERRNO_RESTORE; +} + +/* PThreadextModuleInit -- Initialize the PThreadext module + * + * + * + * Dynamically initialize all state when first used + * (called by pthread_once). + */ + +static void PThreadextModuleInit(void) +{ + int status; + struct sigaction pthreadext_sigsuspend, pthreadext_sigresume; + + AVER(pthreadextModuleInitialized == FALSE); + + /* Initialize the ring of suspended threads */ + RingInit(&suspendedRing); + + /* Initialize the semaphore */ + status = sem_init(&pthreadextSem, 0, 0); + AVER(status != -1); + + /* Install the signal handlers for suspend/resume. */ + /* We add PTHREADEXT_SIGRESUME to the sa_mask field for the */ + /* PTHREADEXT_SIGSUSPEND handler. That avoids a race if one thread */ + /* suspends the target while another resumes that same target. (The */ + /* PTHREADEXT_SIGRESUME signal cannot be delivered before the */ + /* target thread calls sigsuspend.) 
*/ + + status = sigemptyset(&pthreadext_sigsuspend.sa_mask); + AVER(status == 0); + status = sigaddset(&pthreadext_sigsuspend.sa_mask, PTHREADEXT_SIGRESUME); + AVER(status == 0); + + pthreadext_sigsuspend.sa_flags = SA_SIGINFO | SA_RESTART; + pthreadext_sigsuspend.sa_sigaction = suspendSignalHandler; + pthreadext_sigresume.sa_flags = SA_RESTART; + pthreadext_sigresume.sa_handler = resumeSignalHandler; + status = sigemptyset(&pthreadext_sigresume.sa_mask); + AVER(status == 0); + + status = sigaction(PTHREADEXT_SIGSUSPEND, &pthreadext_sigsuspend, NULL); + AVER(status == 0); + + status = sigaction(PTHREADEXT_SIGRESUME, &pthreadext_sigresume, NULL); + AVER(status == 0); + + pthreadextModuleInitialized = TRUE; +} + + +/* PThreadextCheck -- check the consistency of a PThreadext structure */ + +Bool PThreadextCheck(PThreadext pthreadext) +{ + int status; + + status = pthread_mutex_lock(&pthreadextMut); + AVER(status == 0); + + CHECKS(PThreadext, pthreadext); + /* can't check ID */ + CHECKD_NOSIG(Ring, &pthreadext->threadRing); + CHECKD_NOSIG(Ring, &pthreadext->idRing); + if (pthreadext->context == NULL) { + /* not suspended */ + CHECKL(RingIsSingle(&pthreadext->threadRing)); + CHECKL(RingIsSingle(&pthreadext->idRing)); + } else { + /* suspended */ + Ring node, next; + CHECKL(!RingIsSingle(&pthreadext->threadRing)); + RING_FOR(node, &pthreadext->idRing, next) { + PThreadext pt = RING_ELT(PThreadext, idRing, node); + CHECKL(pt->id == pthreadext->id); + CHECKL(pt->context == pthreadext->context); + } + } + status = pthread_mutex_unlock(&pthreadextMut); + AVER(status == 0); + + return TRUE; +} + + +/* PThreadextInit -- Initialize a pthreadext */ + +void PThreadextInit(PThreadext pthreadext, pthread_t id) +{ + int status; + + /* The first call to init will initialize the package. 
*/ + status = pthread_once(&pthreadextOnce, PThreadextModuleInit); + AVER(status == 0); + + pthreadext->id = id; + pthreadext->context = NULL; + RingInit(&pthreadext->threadRing); + RingInit(&pthreadext->idRing); + pthreadext->sig = PThreadextSig; + AVERT(PThreadext, pthreadext); +} + + +/* PThreadextFinish -- Finish a pthreadext + * + * + */ + +void PThreadextFinish(PThreadext pthreadext) +{ + int status; + + AVERT(PThreadext, pthreadext); + + status = pthread_mutex_lock(&pthreadextMut); + AVER(status == 0); + + if(pthreadext->context == NULL) { + AVER(RingIsSingle(&pthreadext->threadRing)); + AVER(RingIsSingle(&pthreadext->idRing)); + } else { + /* In suspended state: remove from rings. */ + AVER(!RingIsSingle(&pthreadext->threadRing)); + RingRemove(&pthreadext->threadRing); + if(!RingIsSingle(&pthreadext->idRing)) + RingRemove(&pthreadext->idRing); + } + + status = pthread_mutex_unlock(&pthreadextMut); + AVER(status == 0); + + RingFinish(&pthreadext->threadRing); + RingFinish(&pthreadext->idRing); + pthreadext->sig = SigInvalid; +} + + +/* PThreadextSuspend -- suspend a thread + * + * + */ + +Res PThreadextSuspend(PThreadext target, MutatorContext *contextReturn) +{ + Ring node, next; + Res res; + int status; + + AVERT(PThreadext, target); + AVER(contextReturn != NULL); + AVER(target->context == NULL); /* multiple suspends illegal */ + + /* Serialize access to suspend, makes life easier */ + status = pthread_mutex_lock(&pthreadextMut); + AVER(status == 0); + AVER(suspendingVictim == NULL); + + /* Threads are added to the suspended ring on suspension */ + /* If the same thread Id has already been suspended, then */ + /* don't signal the thread, just add the target onto the id ring */ + RING_FOR(node, &suspendedRing, next) { + PThreadext alreadySusp = RING_ELT(PThreadext, threadRing, node); + if (alreadySusp->id == target->id) { + RingAppend(&alreadySusp->idRing, &target->idRing); + target->context = alreadySusp->context; + goto noteSuspended; + } + } + + /* Ok, 
we really need to suspend this thread. */ + suspendingVictim = target; + status = pthread_kill(target->id, PTHREADEXT_SIGSUSPEND); + if (status != 0) { + res = ResFAIL; + goto unlock; + } + + /* Wait for the victim to acknowledge suspension. */ + while (sem_wait(&pthreadextSem) != 0) { + if (errno != EINTR) { + res = ResFAIL; + goto unlock; + } + } + +noteSuspended: + AVER(target->context != NULL); + RingAppend(&suspendedRing, &target->threadRing); + *contextReturn = target->context; + res = ResOK; + +unlock: + suspendingVictim = NULL; + status = pthread_mutex_unlock(&pthreadextMut); + AVER(status == 0); + return res; +} + + +/* PThreadextResume -- resume a suspended thread + * + * + */ + +Res PThreadextResume(PThreadext target) +{ + Res res; + int status; + + AVERT(PThreadext, target); + AVER(pthreadextModuleInitialized); /* must have been a prior suspend */ + AVER(target->context != NULL); + + /* Serialize access to suspend, makes life easier. */ + status = pthread_mutex_lock(&pthreadextMut); + AVER(status == 0); + + if (RingIsSingle(&target->idRing)) { + /* Really want to resume the thread. Signal it to continue. */ + status = pthread_kill(target->id, PTHREADEXT_SIGRESUME); + if (status == 0) { + goto noteResumed; + } else { + res = ResFAIL; + goto unlock; + } + + } else { + /* Leave thread suspended on behalf of another PThreadext. */ + /* Remove it from the id ring */ + RingRemove(&target->idRing); + goto noteResumed; + } + +noteResumed: + /* Remove the thread from the suspended ring */ + RingRemove(&target->threadRing); + target->context = NULL; + res = ResOK; + +unlock: + status = pthread_mutex_unlock(&pthreadextMut); + AVER(status == 0); + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2023 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/pthrdext.h b/mps/code/pthrdext.h new file mode 100644 index 00000000000..13beb6d1698 --- /dev/null +++ b/mps/code/pthrdext.h @@ -0,0 +1,98 @@ +/* pthreadext.h: POSIX THREAD EXTENSIONS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .readership: MM developers. + * + * .purpose: Provides extension to Pthreads. 
+ */ + +#ifndef pthreadext_h +#define pthreadext_h + +#include + +#include "mpm.h" + + +#define PThreadextSig ((Sig)0x519B286E) /* SIGnature PTHReadExt */ + + +/* PThreadext -- extension datatype */ + +typedef struct PThreadextStruct *PThreadext; + + +/* PThreadextStruct -- structure definition + * + * Should be embedded in a client structure + */ + +typedef struct PThreadextStruct { + Sig sig; /* design.mps.sig.field */ + pthread_t id; /* Thread ID */ + MutatorContext context; /* context if suspended */ + RingStruct threadRing; /* ring of suspended threads */ + RingStruct idRing; /* duplicate suspensions for id */ +} PThreadextStruct; + + + +/* PThreadextCheck -- Check a pthreadext */ + +extern Bool PThreadextCheck(PThreadext pthreadext); + + +/* PThreadextInit -- Initialize a pthreadext */ + +extern void PThreadextInit(PThreadext pthreadext, pthread_t id); + + +/* PThreadextFinish -- Finish a pthreadext */ + +extern void PThreadextFinish(PThreadext pthreadext); + + +/* PThreadextSuspend -- Suspend a pthreadext and return its context. */ + +extern Res PThreadextSuspend(PThreadext pthreadext, + MutatorContext *contextReturn); + + +/* PThreadextResume -- Resume a suspended pthreadext */ + +extern Res PThreadextResume(PThreadext pthreadext); + + +#endif /* pthreadext_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/qs.c b/mps/code/qs.c new file mode 100644 index 00000000000..aebe9a2c824 --- /dev/null +++ b/mps/code/qs.c @@ -0,0 +1,557 @@ +/* qs.c: QUICKSORT + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * The purpose of this program is to act as a "real" client of the MM. + * It is a test, but (hopefully) less contrived than some of the other + * tests. + * + * C stack will contain the continuations (list of PCs). The + * activation stack will parallel the C stack and contain the program's + * variables. This is all slightly bizarre. + * And qs cheats a tiny bit by using the C stack to save leaf objects + * (integers). + * + * nil, the end of list, is represented by a NULL pointer. + * + * list length 1000 makes 40404 conses (by experiment). + * + * Some registers are not nulled out when they could be. + * + * TODO: There should be fewer casts and more unions. 
+ */ + +#include "testlib.h" +#include "mpslib.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpscamc.h" +#include "mpscmvff.h" +#include "mpstd.h" + +#include /* printf */ +#include /* qsort */ + + +#define testArenaSIZE ((size_t)1000*1024) +#define genCOUNT 2 + +/* testChain -- generation parameters for the test */ + +static mps_gen_param_s testChain[genCOUNT] = { + { 150, 0.85 }, { 170, 0.45 } }; + + +static mps_res_t scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit); +static mps_addr_t skip(mps_addr_t object); +static void move(mps_addr_t object, mps_addr_t to); +static mps_addr_t isMoved(mps_addr_t object); +static void copy(mps_addr_t object, mps_addr_t to); +static void pad(mps_addr_t base, size_t size); + +static struct mps_fmt_A_s fmt_A_s = + { + (mps_align_t)4, + scan, skip, copy, + move, isMoved, + pad + }; + + + +/* Tags used by object format */ +enum {QSInt, QSRef, QSEvac, QSPadOne, QSPadMany}; + +typedef struct QSCellStruct *QSCell; +typedef struct QSCellStruct { + mps_word_t tag; + mps_addr_t value; + QSCell tail; +} QSCellStruct; + + +static mps_arena_t arena; +static mps_pool_t pool; /* automatic pool */ +static mps_ap_t ap; /* AP for above */ +static mps_pool_t mpool; /* manual pool */ +static mps_root_t regroot; +static mps_root_t actroot; + + +/* list holds an array that we qsort(), listl is its length */ +static mps_word_t *list; +static mps_word_t listl; + + +/* Machine State + * + * The machine consists of a stack and 3 registers. + */ + +static QSCell activationStack; +#define NREGS 3 +static mps_addr_t reg[NREGS]; +static mps_word_t regtag[NREGS]; + + +/* Machine Instructions + * + * The machine can perform the following operations: + * cons + * append + * swap + */ + +/* should cons return in reg[0] or should it return via C? 
*/ +static void cons(mps_word_t tag0, mps_addr_t value0, QSCell tail) +{ + mps_addr_t p; + QSCell new; + + do { + die(mps_reserve(&p, ap, sizeof(QSCellStruct)), + "cons"); + new = (QSCell)p; + new->tag = tag0; + new->value = value0; + new->tail = tail; + } while(!mps_commit(ap, p, sizeof(QSCellStruct))); + + reg[0] = (mps_addr_t)new; + regtag[0] = QSRef; +} + + +/* Appends reg[1] to reg[0] */ +/* append nil, y = y + * append x::xs, y = x::append xs, y + * append x,y = (if (null x) y (cons (car x) (append (cdr x) y))) + */ +static void append(void) +{ + cdie(regtag[0] == QSRef, "append 0"); + cdie(regtag[1] == QSRef, "append 1"); + + if(reg[0] == (mps_word_t)0) { + reg[0] = reg[1]; + regtag[0] = regtag[1]; + goto ret; + } + + cons(regtag[0], reg[0], activationStack); + activationStack = (QSCell)reg[0]; + cons(regtag[1], reg[1], activationStack); + activationStack = (QSCell)reg[0]; + + reg[0] = activationStack->tail->value; + regtag[0] = activationStack->tail->tag; + cdie(regtag[0] == QSRef, "append tail"); + reg[0] = (mps_addr_t)((QSCell)reg[0])->tail; /* (cdr x) */ + regtag[0] = QSRef; + append(); + reg[1] = reg[0]; + regtag[1] = regtag[0]; + reg[0] = activationStack->tail->value; + regtag[0] = activationStack->tail->tag; + cdie(regtag[0] == QSRef, "append sec"); + regtag[0] = ((QSCell)reg[0])->tag; + reg[0] = ((QSCell)reg[0])->value; /* (car x) */ + cons(regtag[0], reg[0], (QSCell)reg[1]); + activationStack = activationStack->tail->tail; + + ret: + /* null out reg[1] */ + regtag[1] = QSRef; + reg[1] = (mps_addr_t)0; +} + + +/* swaps reg[0] with reg[1], destroys reg[2] */ +static void swap(void) +{ + regtag[2]=regtag[0]; + reg[2]=reg[0]; + regtag[0]=regtag[1]; + reg[0]=reg[1]; + regtag[1]=regtag[2]; + reg[1]=reg[2]; + regtag[2]=QSRef; + reg[2]=(mps_addr_t)0; +} + + +static void makerndlist(unsigned l) +{ + size_t i; + mps_word_t r; + mps_addr_t addr; + + cdie(l > 0, "list len"); + if(list != NULL) { + mps_free(mpool, (mps_addr_t)list, (listl * sizeof(mps_word_t))); 
+ list = NULL; + } + listl = l; + addr = list; + die(mps_alloc(&addr, mpool, (l * sizeof(mps_word_t))), + "Alloc List"); + list = addr; + reg[0] = (mps_addr_t)0; + regtag[0] = QSRef; + for(i = 0; i < l; ++i) { + r = rnd(); + cons(QSInt, + (mps_addr_t)r, /* TODO: dirty cast */ + (QSCell)reg[0]); + list[i] = r; + } +} + + +/* reg[0] is split into two lists: those elements less than p, and + * those elements >= p. The two lists are returned in reg[0] and reg[1] + */ +static void part(mps_word_t p) +{ + regtag[2]=regtag[0]; + reg[2]=reg[0]; + cdie(regtag[2] == QSRef, "part 0"); + regtag[0]=QSRef; + reg[0]=(mps_addr_t)0; + regtag[1]=QSRef; + reg[1]=(mps_addr_t)0; + + while(reg[2] != (mps_word_t)0) { + cdie(((QSCell)reg[2])->tag == QSInt, "part int"); + if((mps_word_t)((QSCell)reg[2])->value < p) { + /* cons onto reg[0] */ + cons(QSInt, ((QSCell)reg[2])->value, (QSCell)reg[0]); + } else { + /* cons onto reg[1] */ + cons(QSRef, reg[0], activationStack); /* save reg0 */ + activationStack = (QSCell)reg[0]; + cons(QSInt, ((QSCell)reg[2])->value, (QSCell)reg[1]); + reg[1]=reg[0]; + reg[0]=activationStack->value; + activationStack = activationStack->tail; + } + reg[2]=(mps_addr_t)((QSCell)reg[2])->tail; + } +} + + +/* applies the quicksort algorithm to sort reg[0] */ +static void qs(void) +{ + mps_word_t pivot; + + cdie(regtag[0] == QSRef, "qs 0"); + + /* base case */ + if(reg[0] == (mps_word_t)0) { + return; + } + + /* check that we have an int list */ + cdie(((QSCell)reg[0])->tag == QSInt, "qs int"); + + pivot = (mps_word_t)((QSCell)reg[0])->value; + reg[0] = (mps_addr_t)((QSCell)reg[0])->tail; + part(pivot); + + cons(QSRef, reg[0], activationStack); + activationStack = (QSCell)reg[0]; + cons(QSRef, reg[1], activationStack); + activationStack = (QSCell)reg[0]; + + reg[0] = reg[1]; + regtag[0] = regtag[1]; + cdie(regtag[0] == QSRef, "qs 1"); + qs(); + cons(QSInt, (mps_addr_t)pivot, (QSCell)reg[0]); + activationStack = activationStack->tail; + cons(QSRef, reg[0], 
activationStack); + activationStack = (QSCell)reg[0]; + reg[0] = activationStack->tail->value; + regtag[0] = activationStack->tail->tag; + cdie(regtag[0] == QSRef, "qs tail"); + qs(); + reg[1] = activationStack->value; + regtag[1] = activationStack->tag; + activationStack = activationStack->tail->tail; + append(); +} + + +/* Compare + * + * Used as an argument to qsort() + */ +static int compare(const void *a, const void *b) +{ + mps_word_t aa, bb; + + aa = *(const mps_word_t *)a; + bb = *(const mps_word_t *)b; + if(aa < bb) { + return -1; + } else if(aa == bb) { + return 0; + } else { + return 1; + } +} + + +/* compares the qsort'ed list with our quicksorted list */ +static void validate(void) +{ + mps_word_t i; + + cdie(regtag[0] == QSRef, "validate 0"); + regtag[1] = regtag[0]; + reg[1] = reg[0]; + for(i = 0; i < listl; ++i) { + cdie(((QSCell)reg[1])->tag == QSInt, "validate int"); + if((mps_word_t)((QSCell)reg[1])->value != list[i]) { + printf("mps_res_t: Element %"PRIuLONGEST" of the " + "two lists do not match.\n", (ulongest_t)i); + return; + } + reg[1] = (mps_addr_t)((QSCell)reg[1])->tail; + } + cdie(reg[1] == (mps_word_t)0, "validate end"); + printf("Note: Lists compare equal.\n"); +} + + +static void test(void) +{ + mps_fmt_t format; + mps_chain_t chain; + mps_addr_t base; + + die(mps_pool_create_k(&mpool, arena, mps_class_mvff(), mps_args_none), + "pool create"); + + die(mps_fmt_create_A(&format, arena, &fmt_A_s), "FormatCreate"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + die(mps_pool_create(&pool, arena, mps_class_amc(), format, chain), + "AMCCreate"); + die(mps_ap_create(&ap, pool, mps_rank_exact()), "APCreate"); + die(mps_root_create_table(®root, + arena, + mps_rank_ambig(), + 0, + reg, + NREGS), + "RootCreateTable"); + + base = &activationStack; + die(mps_root_create_table(&actroot, arena, mps_rank_ambig(), 0, base, 1), + "RootCreateTable"); + + /* makes a random list */ + makerndlist(1000); + + part(0); + swap(); 
+ qs(); + qsort(list, listl, sizeof(mps_word_t), &compare); + validate(); + + mps_arena_park(arena); + mps_root_destroy(regroot); + mps_root_destroy(actroot); + mps_ap_destroy(ap); + mps_pool_destroy(pool); + mps_pool_destroy(mpool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); + mps_arena_release(arena); +} + + +/* Machine Object Format */ + +static void pad(mps_addr_t base, size_t size) +{ + mps_word_t *object = base; + cdie(size >= sizeof(mps_word_t), "pad size"); + if(size == sizeof(mps_word_t)) { + object[0] = QSPadOne; + return; + } + cdie(size >= 2*sizeof(mps_word_t), "pad size 2"); + object[0] = QSPadMany; + object[1] = size; +} + + +static mps_res_t scan1(mps_ss_t ss, mps_addr_t *objectIO) +{ + QSCell cell; + mps_res_t res; + mps_addr_t addr; + + cdie(objectIO != NULL, "objectIO"); + + MPS_SCAN_BEGIN(ss) { + cell = (QSCell)*objectIO; + + switch(cell->tag) { + case QSRef: + addr = cell->value; + if(!MPS_FIX1(ss, addr)) + goto fixTail; + res = MPS_FIX2(ss, &addr); + if(res != MPS_RES_OK) + return res; + cell->value = addr; + /* fall through */ + + case QSInt: + fixTail: + addr = cell->tail; + if(!MPS_FIX1(ss, addr)) + break; + res = MPS_FIX2(ss, &addr); + if(res != MPS_RES_OK) + return res; + cell->tail = addr; + break; + + case QSEvac: + /* skip */ + break; + + case QSPadOne: + *objectIO = (mps_addr_t)((mps_word_t *)cell+1); + return MPS_RES_OK; + + case QSPadMany: + *objectIO = (mps_addr_t)((mps_word_t)cell+((mps_word_t *)cell)[1]); + return MPS_RES_OK; + + default: + cdie(0, "unknown tag"); + return MPS_RES_OK; + } + } MPS_SCAN_END(ss); + + *objectIO = (mps_addr_t)(cell+1); + + return MPS_RES_OK; +} + + +static mps_res_t scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) +{ + Insist(mps_arena_busy(arena)); + while(base < limit) { + mps_res_t res; + + res = scan1(ss, &base); + if(res != MPS_RES_OK) { + return res; + } + } + + cdie(base == limit, "scan limit"); + return MPS_RES_OK; +} + + +static mps_addr_t skip(mps_addr_t object) +{ + QSCell 
cell = (QSCell)object; + switch(cell->tag) + { + case QSPadOne: + return (mps_addr_t)((mps_word_t *)cell+1); + case QSPadMany: + return (mps_addr_t)((mps_word_t)cell+((mps_word_t *)cell)[1]); + default: + return (mps_addr_t)((QSCell)object + 1); + } +} + + +static void move(mps_addr_t object, mps_addr_t to) +{ + QSCell cell; + + cell = (QSCell)object; + + cell->tag = QSEvac; + cell->value = to; +} + + +static mps_addr_t isMoved(mps_addr_t object) +{ + QSCell cell; + + cell = (QSCell)object; + + if(cell->tag == QSEvac) { + return (mps_addr_t)cell->value; + } + return (mps_addr_t)0; +} + + +static void copy(mps_addr_t object, mps_addr_t to) +{ + QSCell cells, celld; + + cells = (QSCell)object; + celld = (QSCell)to; + + *celld = *cells; +} + + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + test(); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/range.c b/mps/code/range.c new file mode 100644 index 00000000000..4b5e47b43e8 --- /dev/null +++ b/mps/code/range.c @@ -0,0 +1,165 @@ +/* range.c: ADDRESS RANGE IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * .design: + */ + +#include "check.h" +#include "mpm.h" +#include "range.h" + +SRCID(range, "$Id$"); + + +Bool RangeCheck(Range range) +{ + CHECKL(range->base <= range->limit); + + return TRUE; +} + +void RangeInit(Range range, Addr base, Addr limit) +{ + AVER(range != NULL); + AVER(base <= limit); + + range->base = base; + range->limit = limit; + + AVERT(Range, range); +} + +void RangeInitSize(Range range, Addr base, Size size) +{ + RangeInit(range, base, AddrAdd(base, size)); +} + +void RangeFinish(Range range) +{ + AVERT(Range, range); + /* Make range invalid and recognisably so, since Range doesn't have + a signature. 
*/ + range->limit = (Addr)0; + range->base = (Addr)(Word)0xF191583D; /* FINISHED */ +} + +Res RangeDescribe(Range range, mps_lib_FILE *stream, Count depth) +{ + Res res; + + AVERT(Range, range); + AVER(stream != NULL); + + res = WriteF(stream, depth, + "Range $P {\n", (WriteFP)range, + " base: $P\n", (WriteFP)RangeBase(range), + " limit: $P\n", (WriteFP)RangeLimit(range), + " size: $U\n", (WriteFU)RangeSize(range), + "} Range $P\n", (WriteFP)range, NULL); + if (res != ResOK) { + return res; + } + + return ResOK; +} + +Bool RangesOverlap(Range range1, Range range2) +{ + AVERT(Range, range1); + AVERT(Range, range2); + return RangeBase(range1) < RangeLimit(range2) + && RangeBase(range2) < RangeLimit(range1); +} + +Bool RangesNest(Range outer, Range inner) +{ + AVERT(Range, outer); + AVERT(Range, inner); + return RangeBase(outer) <= RangeBase(inner) + && RangeLimit(inner) <= RangeLimit(outer); +} + +Bool RangesEqual(Range range1, Range range2) +{ + AVERT(Range, range1); + AVERT(Range, range2); + return RangeBase(range1) == RangeBase(range2) + && RangeLimit(range1) == RangeLimit(range2); +} + +Bool RangeIsAligned(Range range, Align alignment) +{ + AVERT(Range, range); + return AddrIsAligned(RangeBase(range), alignment) + && AddrIsAligned(RangeLimit(range), alignment); +} + +Addr (RangeBase)(Range range) +{ + AVERT(Range, range); + return RangeBase(range); +} + +Addr (RangeLimit)(Range range) +{ + AVERT(Range, range); + return RangeLimit(range); +} + +void (RangeSetBase)(Range range, Addr addr) +{ + AVERT(Range, range); + AVER(addr >= RangeBase(range)); + RangeSetBase(range, addr); +} + +void (RangeSetLimit)(Range range, Addr addr) +{ + AVERT(Range, range); + AVER(addr <= RangeLimit(range)); + RangeSetLimit(range, addr); +} + +Size (RangeSize)(Range range) +{ + AVERT(Range, range); + return RangeSize(range); +} + +void RangeCopy(Range to, Range from) +{ + AVERT(Range, from); + RangeInit(to, RangeBase(from), RangeLimit(from)); +} + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/range.h b/mps/code/range.h new file mode 100644 index 00000000000..8646516756c --- /dev/null +++ b/mps/code/range.h @@ -0,0 +1,80 @@ +/* range.h: ADDRESS RANGE INTERFACE + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Representation of address ranges. 
+ * + * .design: + */ + +#ifndef range_h +#define range_h + +#include "mpmtypes.h" + + +/* Prototypes */ + +#define RangeBase(range) ((range)->base) +#define RangeLimit(range) ((range)->limit) +#define RangeSetBase(range, addr) BEGIN ((range)->base = (addr)); END +#define RangeSetLimit(range, addr) BEGIN ((range)->limit = (addr)); END +#define RangeSize(range) (AddrOffset(RangeBase(range), RangeLimit(range))) +#define RangeContains(range, addr) ((range)->base <= (addr) && (addr) < (range)->limit) +#define RangeIsEmpty(range) (RangeSize(range) == 0) + +extern void RangeInit(Range range, Addr base, Addr limit); +extern void RangeInitSize(Range range, Addr base, Size size); +extern void RangeFinish(Range range); +extern Res RangeDescribe(Range range, mps_lib_FILE *stream, Count depth); +extern Bool RangeCheck(Range range); +extern Bool RangeIsAligned(Range range, Align align); +extern Bool RangesOverlap(Range range1, Range range2); +extern Bool RangesNest(Range outer, Range inner); +extern Bool RangesEqual(Range range1, Range range2); +extern Addr (RangeBase)(Range range); +extern Addr (RangeLimit)(Range range); +extern void (RangeSetBase)(Range range, Addr addr); +extern void (RangeSetLimit)(Range range, Addr addr); +extern Size (RangeSize)(Range range); +extern void RangeCopy(Range to, Range from); + +/* RangeStruct -- address range */ + +typedef struct RangeStruct { + Addr base; + Addr limit; +} RangeStruct; + +#endif /* range_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/rangetree.c b/mps/code/rangetree.c new file mode 100644 index 00000000000..781ce5a93db --- /dev/null +++ b/mps/code/rangetree.c @@ -0,0 +1,99 @@ +/* rangetree.c -- binary trees of address ranges + * + * $Id$ + * Copyright (C) 2016-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "rangetree.h" +#include "tree.h" +#include "range.h" +#include "mpm.h" + + +void RangeTreeInit(RangeTree rangeTree, Range range) +{ + AVER(rangeTree != NULL); + TreeInit(RangeTreeTree(rangeTree)); + RangeCopy(RangeTreeRange(rangeTree), range); + AVERT(RangeTree, rangeTree); +} + + +Bool RangeTreeCheck(RangeTree rangeTree) +{ + CHECKL(rangeTree != NULL); + CHECKD_NOSIG(Tree, RangeTreeTree(rangeTree)); + CHECKD_NOSIG(Range, RangeTreeRange(rangeTree)); + return TRUE; +} + + +void RangeTreeFinish(RangeTree rangeTree) +{ + AVERT(RangeTree, rangeTree); + TreeFinish(RangeTreeTree(rangeTree)); + RangeFinish(RangeTreeRange(rangeTree)); +} + + +/* RangeTreeCompare -- Compare key to [base,limit) + * + * + */ + +Compare RangeTreeCompare(Tree tree, TreeKey key) +{ + Addr base1, base2, limit2; + RangeTree block; + + AVERT_CRITICAL(Tree, tree); + AVER_CRITICAL(tree != TreeEMPTY); + AVER_CRITICAL(key != NULL); + + base1 = RangeTreeBaseOfKey(key); + block = RangeTreeOfTree(tree); + base2 = RangeTreeBase(block); + limit2 = RangeTreeLimit(block); + + if (base1 < base2) + return CompareLESS; + else if (base1 >= limit2) + return CompareGREATER; + else + return CompareEQUAL; +} + +TreeKey RangeTreeKey(Tree tree) +{ + return RangeTreeKeyOfBaseVar(RangeTreeBase(RangeTreeOfTree(tree))); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
 + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/rangetree.h b/mps/code/rangetree.h new file mode 100644 index 00000000000..589634e908e --- /dev/null +++ b/mps/code/rangetree.h @@ -0,0 +1,81 @@ +/* rangetree.h -- binary trees of address ranges + * + * $Id$ + * Copyright (C) 2016-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#ifndef rangetree_h +#define rangetree_h + +#include "mpmtypes.h" +#include "range.h" +#include "tree.h" + +#define RangeTreeTree(rangeTree) (&(rangeTree)->treeStruct) +#define RangeTreeRange(rangeTree) (&(rangeTree)->rangeStruct) +#define RangeTreeOfTree(tree) PARENT(RangeTreeStruct, treeStruct, tree) +#define RangeTreeOfRange(range) PARENT(RangeTreeStruct, rangeStruct, range) + +#define RangeTreeBase(block) RangeBase(RangeTreeRange(block)) +#define RangeTreeLimit(block) RangeLimit(RangeTreeRange(block)) +#define RangeTreeSetBase(block, addr) RangeSetBase(RangeTreeRange(block), addr) +#define RangeTreeSetLimit(block, addr) RangeSetLimit(RangeTreeRange(block), addr) +#define RangeTreeSize(block) RangeSize(RangeTreeRange(block)) + +extern void RangeTreeInit(RangeTree rangeTree, Range range); +extern Bool RangeTreeCheck(RangeTree rangeTree); +extern void RangeTreeFinish(RangeTree rangeTree); + + +/* Compare and key functions for use with TreeFind, TreeInsert, etc. + * + * We pass the rangeTree base directly as a TreeKey (void *) assuming + * that Addr can be encoded, possibly breaking . + * On an exotic platform where this isn't true, pass the address of + * base: that is, add an &. + */ + +#define RangeTreeKeyOfBaseVar(baseVar) ((TreeKey)(baseVar)) +#define RangeTreeBaseOfKey(key) ((Addr)(key)) + +extern Compare RangeTreeCompare(Tree tree, TreeKey key); +extern TreeKey RangeTreeKey(Tree tree); + + +/* RangeTreeStruct -- address range in a tree */ + +typedef struct RangeTreeStruct { + TreeStruct treeStruct; + RangeStruct rangeStruct; +} RangeTreeStruct; + +#endif /* rangetree_h */ + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2016-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ref.c b/mps/code/ref.c new file mode 100644 index 00000000000..e0b81a91057 --- /dev/null +++ b/mps/code/ref.c @@ -0,0 +1,322 @@ +/* ref.c: REFERENCES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Implement operations on Ref, RefSet, ZoneSet, and Rank. + * + * .design: and . 
+ */ + +#include "mpm.h" + +SRCID(ref, "$Id$"); + + +/* RankCheck -- check a rank value */ + +Bool RankCheck(Rank rank) +{ + CHECKL(rank < RankLIMIT); + UNUSED(rank); /* */ + return TRUE; +} + + +/* RankSetCheck -- check a rank set */ + +Bool RankSetCheck(RankSet rankSet) +{ + CHECKL(rankSet < ((ULongest)1 << RankLIMIT)); + UNUSED(rankSet); /* */ + return TRUE; +} + + +/* ZoneSetOfRange -- calculate the zone set of a range of addresses */ + +ZoneSet ZoneSetOfRange(Arena arena, Addr base, Addr limit) +{ + Word zbase, zlimit; + + AVERT(Arena, arena); + AVER(limit > base); + + /* The base and limit zones of the range are calculated. The limit */ + /* zone is the zone after the last zone of the range, not the zone of */ + /* the limit address. */ + zbase = (Word)base >> arena->zoneShift; + zlimit = (((Word)limit-1) >> arena->zoneShift) + 1; + + + /* If the range is large enough to span all zones, its zone set is */ + /* universal. */ + if (zlimit - zbase >= MPS_WORD_WIDTH) + return ZoneSetUNIV; + + zbase &= MPS_WORD_WIDTH - 1; + zlimit &= MPS_WORD_WIDTH - 1; + + /* If the base zone is less than the limit zone, the zone set looks */ + /* like 000111100, otherwise it looks like 111000011. */ + if (zbase < zlimit) + return ((ZoneSet)1< base || next == (Addr)0); + if (next >= limit || next < base) + next = limit; + return next; +} + +Bool RangeInZoneSetFirst(Addr *baseReturn, Addr *limitReturn, + Addr base, Addr limit, + Arena arena, ZoneSet zoneSet, Size size) +{ + Size zebra; + Addr searchLimit; + + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVER(base < limit); + AVERT(Arena, arena); + AVER(size > 0); + AVER(zoneSet != ZoneSetEMPTY); + + /* TODO: Consider whether this search is better done by bit twiddling + zone sets, e.g. by constructing a mask of zone bits as wide as the + size and rotating the zoneSet. 
 */
+
+  if (AddrOffset(base, limit) < size)
+    return FALSE;
+
+  if (zoneSet == ZoneSetUNIV) {
+    *baseReturn = base;
+    *limitReturn = limit;
+    return TRUE;
+  }
+
+  /* A "zebra" is the size of a complete set of stripes. */
+  zebra = (sizeof(ZoneSet) * CHAR_BIT) << ArenaZoneShift(arena);
+  if (size >= zebra) {
+    AVER(zoneSet != ZoneSetUNIV);
+    return FALSE;
+  }
+
+  /* There's no point searching through the zoneSet more than once. */
+  searchLimit = AddrAdd(AddrAlignUp(base, ArenaStripeSize(arena)), zebra);
+  if (searchLimit > base && limit > searchLimit)
+    limit = searchLimit;
+
+  do {
+    Addr next;
+
+    /* Search for a stripe in the zoneSet and within the block. */
+    /* (Find the first set bit in the zoneSet not below the base zone.) */
+    while (!ZoneSetHasAddr(arena, zoneSet, base)) {
+      base = nextStripe(base, limit, arena);
+      if (base >= limit)
+        return FALSE;
+    }
+
+    /* Search for a run of stripes in the zoneSet and within the block. */
+    /* (Find a run of set bits in the zoneSet.) */
+    next = base;
+    do
+      next = nextStripe(next, limit, arena);
+    while (next < limit && ZoneSetHasAddr(arena, zoneSet, next));
+
+    /* Is the run big enough to satisfy the size? */
+    if (AddrOffset(base, next) >= size) {
+      *baseReturn = base;
+      *limitReturn = next;
+      return TRUE;
+    }
+
+    base = next;
+  } while (base < limit);
+
+  return FALSE;
+}
+
+
+/* RangeInZoneSetLast -- find an area of address space within a zone set
+ *
+ * Given a range of addresses, find the last sub-range of at least size that
+ * is also within a zone set. i.e. ZoneSetOfRange is a subset of the zone set.
+ * Returns FALSE if no range satisfying the conditions could be found.
+ */
+
+static Addr prevStripe(Addr base, Addr limit, Arena arena)
+{
+  Addr prev;
+  AVER(limit != (Addr)0);
+  prev = AddrAlignDown(AddrSub(limit, 1), ArenaStripeSize(arena));
+  AVER(prev < limit);
+  if (prev < base)
+    prev = base;
+  return prev;
+}
+
+Bool RangeInZoneSetLast(Addr *baseReturn, Addr *limitReturn,
+                        Addr base, Addr limit,
+                        Arena arena, ZoneSet zoneSet, Size size)
+{
+  Size zebra;
+  Addr searchBase;
+
+  AVER(baseReturn != NULL);
+  AVER(limitReturn != NULL);
+  AVER(base < limit);
+  AVERT(Arena, arena);
+  AVER(size > 0);
+  AVER(zoneSet != ZoneSetEMPTY);
+
+  /* TODO: Consider whether this search is better done by bit twiddling
+     zone sets, e.g. by constructing a mask of zone bits as wide as the
+     size and rotating the zoneSet. */
+
+  if (AddrOffset(base, limit) < size)
+    return FALSE;
+
+  if (zoneSet == ZoneSetUNIV) {
+    *baseReturn = base;
+    *limitReturn = limit;
+    return TRUE;
+  }
+
+  /* A "zebra" is the size of a complete set of stripes. */
+  zebra = (sizeof(ZoneSet) * CHAR_BIT) << ArenaZoneShift(arena);
+  if (size >= zebra) {
+    AVER(zoneSet != ZoneSetUNIV);
+    return FALSE;
+  }
+
+  /* There's no point searching through the zoneSet more than once. */
+  searchBase = AddrSub(AddrAlignDown(limit, ArenaStripeSize(arena)), zebra);
+  if (searchBase < limit && base < searchBase)
+    base = searchBase;
+
+  do {
+    Addr prev;
+
+    /* Search for a stripe in the zoneSet and within the block. */
+    /* (Find the last set bit in the zoneSet below the limit zone.) */
+    while (!ZoneSetHasAddr(arena, zoneSet, AddrSub(limit, 1))) {
+      limit = prevStripe(base, limit, arena);
+      if (base >= limit)
+        return FALSE;
+    }
+
+    /* Search for a run of stripes in the zoneSet and within the block. */
+    /* (Find a run of set bits in the zoneSet.) */
+    prev = limit;
+    do
+      prev = prevStripe(base, prev, arena);
+    while (prev > base && ZoneSetHasAddr(arena, zoneSet, AddrSub(prev, 1)));
+
+    /* Is the run big enough to satisfy the size?
*/ + if (AddrOffset(prev, limit) >= size) { + *baseReturn = prev; + *limitReturn = limit; + return TRUE; + } + + limit = prev; + } while (base < limit); + + return FALSE; +} + + +/* ZoneSetBlacklist() -- calculate a zone set of likely false positives + * + * We blacklist the zones that could be referenced by values likely to be + * found in ambiguous roots (such as the stack) and misinterpreted as + * references, in order to avoid nailing down objects. This isn't a + * perfect simulation, but it should catch the common cases. + */ + +ZoneSet ZoneSetBlacklist(Arena arena) +{ + ZoneSet blacklist; + union { + mps_word_t word; + mps_addr_t addr; + int i; + long l; + } nono; + + AVERT(Arena, arena); + + blacklist = ZoneSetEMPTY; + nono.word = 0; + nono.i = 1; + blacklist = ZoneSetAddAddr(arena, blacklist, nono.addr); + nono.i = -1; + blacklist = ZoneSetAddAddr(arena, blacklist, nono.addr); + nono.l = 1; + blacklist = ZoneSetAddAddr(arena, blacklist, nono.addr); + nono.l = -1; + blacklist = ZoneSetAddAddr(arena, blacklist, nono.addr); + + return blacklist; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ring.c b/mps/code/ring.c new file mode 100644 index 00000000000..657f7386e59 --- /dev/null +++ b/mps/code/ring.c @@ -0,0 +1,182 @@ +/* ring.c: RING IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .intro: This is a portable implementation of Rings. + * + * .purpose: Rings are used to manage potentially unbounded collections + * of things. + * + * .sources: + */ + +#include "ring.h" +#include "check.h" +#include "misc.h" + +SRCID(ring, "$Id$"); + + +/* RingCheck, RingCheckSingle -- check the validity of a ring node + * + * RingCheck performs a consistency check on the ring node + * . RingCheckSingle performs the same check, but + * also checks that the ring node is a singleton + * . 
+ */ + +Bool RingCheck(Ring ring) +{ + CHECKL(ring != NULL); + CHECKL(ring->next != NULL); + CHECKL(ring->next->prev == ring); + CHECKL(ring->prev != NULL); + CHECKL(ring->prev->next == ring); + UNUSED(ring); /* */ + return TRUE; +} + +Bool RingCheckSingle(Ring ring) +{ + CHECKL(RingCheck(ring)); + CHECKL(ring->next == ring); + CHECKL(ring->prev == ring); + UNUSED(ring); /* */ + return TRUE; +} + + +/* RingIsSingle -- return true if ring is a singleton + * + * + */ + +Bool RingIsSingle(Ring ring) +{ + AVERT(Ring, ring); + return (ring->next == ring); +} + + +/* RingLength -- return the number of nodes in the ring, not including + * this node + * + * + */ + +Count RingLength(Ring ring) +{ + Count length = 0; + Ring node, next; + AVERT(Ring, ring); + RING_FOR(node, ring, next) + ++ length; + return length; +} + + +/* RingInit -- initialize a ring node + */ + +void (RingInit)(Ring ring) +{ + RingInit(ring); /* */ +} + + +/* RingFinish -- finish a ring node + */ + +void (RingFinish)(Ring ring) +{ + RingFinish(ring); /* */ +} + + +/* RingAppend -- add a ring node to the end of a ring + */ + +void (RingAppend)(Ring ring, Ring new) +{ + RingAppend(ring, new); /* */ +} + + +/* RingInsert -- add a ring node to the start of a ring + */ + +void (RingInsert)(Ring ring, Ring new) +{ + RingInsert(ring, new); /* */ +} + + +/* RingRemove -- remove a node from a ring + */ + +void (RingRemove)(Ring old) +{ + RingRemove(old); /* */ +} + + +/* RingNext -- get the next element of a ring + */ + +Ring (RingNext)(Ring ring) +{ + return RingNext(ring); /* */ +} + +/* RingPrev -- get the previous element of a ring + */ + +Ring (RingPrev)(Ring ring) +{ + return RingPrev(ring); /* */ +} + + +/* RING_ELT -- get the ring element structure + * + * RING_ELT has no function (as it does not have function-like + * behaviour), and is defined in . + */ + + +/* RING_FOR -- ring iterator construct + * + * RING_FOR has no function (as it does not have function-like + * behaviour), and is defined in . 
+ */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ring.h b/mps/code/ring.h new file mode 100644 index 00000000000..f4da54f3497 --- /dev/null +++ b/mps/code/ring.h @@ -0,0 +1,144 @@ +/* ring.h: RING INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2001 Global Graphics Software. 
+ */ + + +#ifndef ring_h +#define ring_h + +#include "check.h" +#include "mpmtypes.h" + + +/* RingStruct -- double-ended queue structure + * + * .ring: The ring structure is used as a field in other structures + * in order to link them together into "rings". See . + */ + +typedef struct RingStruct *Ring; +typedef struct RingStruct { /* double-ended queue structure */ + Ring next, prev; /* links to next and prev element */ +} RingStruct; + + +#define RingNONE ((Ring)0) + +extern Bool RingCheck(Ring ring); +extern Bool RingCheckSingle(Ring ring); +extern Bool RingIsSingle(Ring ring); +extern Count RingLength(Ring ring); + +/* .ring.init: */ +extern void (RingInit)(Ring ring); +#define RingInit(ring) \ + BEGIN \ + Ring _ring = (ring); \ + AVER(_ring != NULL); \ + _ring->next = _ring; \ + _ring->prev = _ring; \ + AVER(RingCheck(_ring)); \ + END + +/* .ring.finish: */ +extern void (RingFinish)(Ring ring); +#define RingFinish(ring) \ + BEGIN \ + Ring _ring = (ring); \ + AVER(RingCheckSingle(_ring)); \ + _ring->next = RingNONE; \ + _ring->prev = RingNONE; \ + END + +/* .ring.append: */ +extern void (RingAppend)(Ring ring, Ring new); +#define RingAppend(ring, new) \ + BEGIN \ + Ring _ring = (ring), _new = (new); \ + AVER(RingCheck(_ring)); \ + AVER(RingCheckSingle(_new)); \ + _new->prev = _ring->prev; \ + _new->next = _ring; \ + _ring->prev->next = _new; \ + _ring->prev = _new; \ + END + +/* .ring.insert: */ +extern void (RingInsert)(Ring ring, Ring new); +#define RingInsert(ring, new) \ + BEGIN \ + Ring _ring = (ring), _new = (new); \ + AVER(RingCheck(_ring)); \ + AVER(RingCheckSingle(_new)); \ + _new->prev = _ring; \ + _new->next = _ring->next; \ + _ring->next->prev = _new; \ + _ring->next = _new; \ + END + +/* .ring.remove: */ +extern void (RingRemove)(Ring old); +#define RingRemove(old) \ + BEGIN \ + Ring _old = (old); \ + AVER(RingCheck(_old)); \ + AVER(!RingIsSingle(_old)); \ + _old->next->prev = _old->prev; \ + _old->prev->next = _old->next; \ + _old->next = _old; \ 
+ _old->prev = _old; \ + END + +/* .ring.next: */ +extern Ring (RingNext)(Ring ring); +#define RingNext(ring) ((ring)->next) + +/* .ring.prev: */ +extern Ring (RingPrev)(Ring ring); +#define RingPrev(ring) ((ring)->prev) + +/* .ring.elt: */ +#define RING_ELT(type, field, node) \ + PARENT(type ## Struct, field, node) + +/* .ring.for: */ +#define RING_FOR(node, ring, next) \ + for (ITER_PARALLEL(node = RingNext(ring), next = RingNext(node)); \ + node != (ring); \ + ITER_PARALLEL(node = (next), next = RingNext(node))) + + +#endif /* ring_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/root.c b/mps/code/root.c new file mode 100644 index 00000000000..ebb4a5f6508 --- /dev/null +++ b/mps/code/root.c @@ -0,0 +1,833 @@ +/* root.c: ROOT IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: This is the implementation of the root datatype. + * + * .design: For design, see and + * . */ + +#include "mpm.h" + +SRCID(root, "$Id$"); + + +/* RootStruct -- tracing root structure */ + +#define RootSig ((Sig)0x51960029) /* SIGnature ROOT */ + +typedef union AreaScanUnion { + void *closure; + mps_scan_tag_s tag; /* tag for scanning */ +} AreaScanUnion; + +typedef struct RootStruct { + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from arena->rootSerial */ + Arena arena; /* owning arena */ + RingStruct arenaRing; /* attachment to arena */ + Rank rank; /* rank of references in this root */ + TraceSet grey; /* traces for which root is grey */ + RefSet summary; /* summary of references in root */ + RootMode mode; /* mode */ + Bool protectable; /* Can protect root? 
*/ + Addr protBase; /* base of protectable area */ + Addr protLimit; /* limit of protectable area */ + AccessSet pm; /* Protection Mode */ + RootVar var; /* union discriminator */ + union RootUnion { + struct { + mps_root_scan_t scan; /* the function which does the scanning */ + void *p; /* closure for scan function */ + size_t s; /* closure for scan function */ + } fun; + struct { + Word *base; /* base of area to be scanned */ + Word *limit; /* limit of area to be scanned */ + mps_area_scan_t scan_area;/* area scanning function */ + AreaScanUnion the; + } area; + struct { + Thread thread; /* passed to scan */ + mps_area_scan_t scan_area;/* area scanner for stack and registers */ + AreaScanUnion the; + void *stackCold; /* cold end of stack */ + } thread; + struct { + mps_fmt_scan_t scan; /* format-like scanner */ + Addr base, limit; /* passed to scan */ + } fmt; + } the; +} RootStruct; + + +/* RootVarCheck -- check a Root union discriminator + * + * .rootvarcheck: Synchronize with */ + +Bool RootVarCheck(RootVar rootVar) +{ + CHECKL(rootVar == RootAREA || rootVar == RootAREA_TAGGED + || rootVar == RootFUN || rootVar == RootFMT + || rootVar == RootTHREAD + || rootVar == RootTHREAD_TAGGED); + UNUSED(rootVar); + return TRUE; +} + + +/* RootModeCheck */ + +Bool RootModeCheck(RootMode mode) +{ + CHECKL((mode & (RootModeCONSTANT | RootModePROTECTABLE + | RootModePROTECTABLE_INNER)) + == mode); + /* RootModePROTECTABLE_INNER implies RootModePROTECTABLE */ + CHECKL((mode & RootModePROTECTABLE_INNER) == 0 + || (mode & RootModePROTECTABLE)); + UNUSED(mode); + + return TRUE; +} + + +/* RootCheck -- check the consistency of a root structure + * + * .rootcheck: Keep synchronized with . 
*/ + +Bool RootCheck(Root root) +{ + CHECKS(Root, root); + CHECKU(Arena, root->arena); + CHECKL(root->serial < ArenaGlobals(root->arena)->rootSerial); + CHECKD_NOSIG(Ring, &root->arenaRing); + CHECKL(RankCheck(root->rank)); + CHECKL(TraceSetCheck(root->grey)); + /* Don't need to check var here, because of the switch below */ + switch(root->var) + { + case RootAREA: + CHECKL(root->the.area.base != 0); + CHECKL(root->the.area.base < root->the.area.limit); + CHECKL(FUNCHECK(root->the.area.scan_area)); + /* Can't check anything about closure */ + break; + + case RootAREA_TAGGED: + CHECKL(root->the.area.base != 0); + CHECKL(root->the.area.base < root->the.area.limit); + CHECKL(FUNCHECK(root->the.area.scan_area)); + /* Can't check anything about tag as it could mean anything to + scan_area. */ + break; + + case RootFUN: + CHECKL(root->the.fun.scan != NULL); + /* Can't check anything about closure as it could mean anything to + scan. */ + break; + + case RootTHREAD: + CHECKD_NOSIG(Thread, root->the.thread.thread); /* */ + CHECKL(FUNCHECK(root->the.thread.scan_area)); + /* Can't check anything about closure as it could mean anything to + scan_area. */ + /* Can't check anything about stackCold. */ + break; + + case RootTHREAD_TAGGED: + CHECKD_NOSIG(Thread, root->the.thread.thread); /* */ + CHECKL(FUNCHECK(root->the.thread.scan_area)); + /* Can't check anything about tag as it could mean anything to + scan_area. */ + /* Can't check anything about stackCold. 
*/ + break; + + case RootFMT: + CHECKL(root->the.fmt.scan != NULL); + CHECKL(root->the.fmt.base != 0); + CHECKL(root->the.fmt.base < root->the.fmt.limit); + break; + + default: + NOTREACHED; + } + CHECKL(RootModeCheck(root->mode)); + CHECKL(BoolCheck(root->protectable)); + if (root->protectable) { + CHECKL(root->protBase != (Addr)0); + CHECKL(root->protLimit != (Addr)0); + CHECKL(root->protBase < root->protLimit); + CHECKL(AccessSetCheck(root->pm)); + } else { + CHECKL(root->protBase == (Addr)0); + CHECKL(root->protLimit == (Addr)0); + CHECKL(root->pm == (AccessSet)0); + } + return TRUE; +} + + +/* rootCreate, RootCreateArea, RootCreateThread, RootCreateFmt, RootCreateFun + * + * RootCreate* set up the appropriate union member, and call the generic + * create function to do the actual creation + * + * See for initial value. */ + +static Res rootCreate(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, RootVar type, + union RootUnion *theUnionP) +{ + Root root; + Res res; + void *p; + Globals globals; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + AVERT(Rank, rank); + AVERT(RootMode, mode); + AVERT(RootVar, type); + globals = ArenaGlobals(arena); + + res = ControlAlloc(&p, arena, sizeof(RootStruct)); + if (res != ResOK) + return res; + root = (Root)p; /* Avoid pun */ + + root->arena = arena; + root->rank = rank; + root->var = type; + root->the = *theUnionP; + root->grey = TraceSetEMPTY; + root->summary = RefSetUNIV; + root->mode = mode; + root->pm = AccessSetEMPTY; + root->protectable = FALSE; + root->protBase = (Addr)0; + root->protLimit = (Addr)0; + + /* */ + RingInit(&root->arenaRing); + + root->serial = globals->rootSerial; + ++globals->rootSerial; + root->sig = RootSig; + + AVERT(Root, root); + + RingAppend(&globals->rootRing, &root->arenaRing); + + *rootReturn = root; + return ResOK; +} + +static Res rootCreateProtectable(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, RootVar var, + Addr base, Addr limit, + union RootUnion 
*theUnion) +{ + Res res; + Root root; + Ring node, next; + + res = rootCreate(&root, arena, rank, mode, var, theUnion); + if (res != ResOK) + return res; + if (mode & RootModePROTECTABLE) { + root->protectable = TRUE; + if (mode & RootModePROTECTABLE_INNER) { + root->protBase = AddrArenaGrainUp(base, arena); + root->protLimit = AddrArenaGrainDown(limit, arena); + if (!(root->protBase < root->protLimit)) { + /* root had no inner pages */ + root->protectable = FALSE; + root->mode &=~ (RootModePROTECTABLE|RootModePROTECTABLE_INNER); + } + } else { + root->protBase = AddrArenaGrainDown(base, arena); + root->protLimit = AddrArenaGrainUp(limit, arena); + } + } + + /* Check that this root doesn't intersect with any other root */ + RING_FOR(node, &ArenaGlobals(arena)->rootRing, next) { + Root trial = RING_ELT(Root, arenaRing, node); + if (trial != root) { + /* (trial->protLimit <= root->protBase */ + /* || root->protLimit <= trial->protBase) */ + /* is the "okay" state. The negation of this is: */ + if (root->protBase < trial->protLimit + && trial->protBase < root->protLimit) { + NOTREACHED; + RootDestroy(root); + return ResFAIL; + } + } + } + + AVERT(Root, root); + + *rootReturn = root; + return ResOK; +} + +Res RootCreateArea(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, + Word *base, Word *limit, + mps_area_scan_t scan_area, + void *closure) +{ + Res res; + union RootUnion theUnion; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + AVERT(Rank, rank); + AVERT(RootMode, mode); + AVER(base != 0); + AVER(AddrIsAligned(base, sizeof(Word))); + AVER(base < limit); + AVER(AddrIsAligned(limit, sizeof(Word))); + AVER(FUNCHECK(scan_area)); + /* Can't check anything about closure */ + + theUnion.area.base = base; + theUnion.area.limit = limit; + theUnion.area.scan_area = scan_area; + theUnion.area.the.closure = closure; + + res = rootCreateProtectable(rootReturn, arena, rank, mode, + RootAREA, (Addr)base, (Addr)limit, &theUnion); + return res; +} + +Res 
RootCreateAreaTagged(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, Word *base, Word *limit, + mps_area_scan_t scan_area, Word mask, Word pattern) +{ + union RootUnion theUnion; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + AVERT(Rank, rank); + AVERT(RootMode, mode); + AVER(base != 0); + AVER(base < limit); + /* Can't check anything about mask or pattern, as they could mean + anything to scan_area. */ + + theUnion.area.base = base; + theUnion.area.limit = limit; + theUnion.area.scan_area = scan_area; + theUnion.area.the.tag.mask = mask; + theUnion.area.the.tag.pattern = pattern; + + return rootCreateProtectable(rootReturn, arena, rank, mode, RootAREA_TAGGED, + (Addr)base, (Addr)limit, &theUnion); +} + +Res RootCreateThread(Root *rootReturn, Arena arena, + Rank rank, Thread thread, + mps_area_scan_t scan_area, + void *closure, + Word *stackCold) +{ + union RootUnion theUnion; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + AVERT(Rank, rank); + AVERT(Thread, thread); + AVER(ThreadArena(thread) == arena); + AVER(FUNCHECK(scan_area)); + /* Can't check anything about closure. */ + + theUnion.thread.thread = thread; + theUnion.thread.scan_area = scan_area; + theUnion.thread.the.closure = closure; + theUnion.thread.stackCold = stackCold; + + return rootCreate(rootReturn, arena, rank, (RootMode)0, RootTHREAD, + &theUnion); +} + +Res RootCreateThreadTagged(Root *rootReturn, Arena arena, + Rank rank, Thread thread, + mps_area_scan_t scan_area, + Word mask, Word pattern, + Word *stackCold) +{ + union RootUnion theUnion; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + AVERT(Rank, rank); + AVERT(Thread, thread); + AVER(ThreadArena(thread) == arena); + AVER(FUNCHECK(scan_area)); + /* Can't check anything about mask or pattern, as they could mean + anything to scan_area. 
*/ + + theUnion.thread.thread = thread; + theUnion.thread.scan_area = scan_area; + theUnion.thread.the.tag.mask = mask; + theUnion.thread.the.tag.pattern = pattern; + theUnion.thread.stackCold = stackCold; + + return rootCreate(rootReturn, arena, rank, (RootMode)0, RootTHREAD_TAGGED, + &theUnion); +} + +/* RootCreateFmt -- create root from block of formatted objects + * + * .fmt.no-align-check: Note that we don't check the alignment of base + * and limit. That's because we're only given the scan function, so we + * don't know the format's alignment requirements. + */ + +Res RootCreateFmt(Root *rootReturn, Arena arena, + Rank rank, RootMode mode, mps_fmt_scan_t scan, + Addr base, Addr limit) +{ + union RootUnion theUnion; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + AVERT(Rank, rank); + AVERT(RootMode, mode); + AVER(FUNCHECK(scan)); + AVER(base != 0); + AVER(base < limit); + + theUnion.fmt.scan = scan; + theUnion.fmt.base = base; + theUnion.fmt.limit = limit; + + return rootCreateProtectable(rootReturn, arena, rank, mode, + RootFMT, base, limit, &theUnion); +} + +Res RootCreateFun(Root *rootReturn, Arena arena, Rank rank, + mps_root_scan_t scan, void *p, size_t s) +{ + union RootUnion theUnion; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + AVERT(Rank, rank); + AVER(FUNCHECK(scan)); + + theUnion.fun.scan = scan; + theUnion.fun.p = p; + theUnion.fun.s = s; + + return rootCreate(rootReturn, arena, rank, (RootMode)0, RootFUN, &theUnion); +} + + +/* RootDestroy -- destroy a root */ + +void RootDestroy(Root root) +{ + Arena arena; + + AVERT(Root, root); + + arena = RootArena(root); + + AVERT(Arena, arena); + + RingRemove(&root->arenaRing); + RingFinish(&root->arenaRing); + + root->sig = SigInvalid; + + ControlFree(arena, root, sizeof(RootStruct)); +} + + +/* RootArena -- return the arena of a root + * + * Must be thread-safe. 
*/ + +Arena RootArena(Root root) +{ + AVER(TESTT(Root, root)); + return root->arena; +} + + +/* RootRank -- return the rank of a root */ + +Rank RootRank(Root root) +{ + AVERT(Root, root); + return root->rank; +} + + +/* RootPM -- return the protection mode of a root */ + +AccessSet RootPM(Root root) +{ + AVERT(Root, root); + return root->pm; +} + + +/* RootSummary -- return the summary of a root */ + +RefSet RootSummary(Root root) +{ + AVERT(Root, root); + return root->summary; +} + + +/* RootGrey -- mark root grey */ + +void RootGrey(Root root, Trace trace) +{ + AVERT(Root, root); + AVERT(Trace, trace); + + root->grey = TraceSetAdd(root->grey, trace); +} + + +static void rootSetSummary(Root root, RefSet summary) +{ + AVERT(Root, root); + /* Can't check summary */ + if (root->protectable) { + if (summary == RefSetUNIV) { + root->summary = summary; + root->pm &= ~AccessWRITE; + } else { + root->pm |= AccessWRITE; + root->summary = summary; + } + } else + AVER(root->summary == RefSetUNIV); +} + + +/* RootScan -- scan root */ + +Res RootScan(ScanState ss, Root root) +{ + Res res; + + AVERT(Root, root); + AVERT(ScanState, ss); + AVER(root->rank == ss->rank); + + if (TraceSetInter(root->grey, ss->traces) == TraceSetEMPTY) + return ResOK; + + AVER(ScanStateSummary(ss) == RefSetEMPTY); + + if (root->pm != AccessSetEMPTY) { + ProtSet(root->protBase, root->protLimit, AccessSetEMPTY); + } + + switch(root->var) { + case RootAREA: + res = TraceScanArea(ss, + root->the.area.base, + root->the.area.limit, + root->the.area.scan_area, + root->the.area.the.closure); + if (res != ResOK) + goto failScan; + break; + + case RootAREA_TAGGED: + res = TraceScanArea(ss, + root->the.area.base, + root->the.area.limit, + root->the.area.scan_area, + &root->the.area.the.tag); + if (res != ResOK) + goto failScan; + break; + + case RootFUN: + res = root->the.fun.scan(&ss->ss_s, + root->the.fun.p, + root->the.fun.s); + if (res != ResOK) + goto failScan; + break; + + case RootTHREAD: + res = 
ThreadScan(ss, root->the.thread.thread, + root->the.thread.stackCold, + root->the.thread.scan_area, + root->the.thread.the.closure); + if (res != ResOK) + goto failScan; + break; + + case RootTHREAD_TAGGED: + res = ThreadScan(ss, root->the.thread.thread, + root->the.thread.stackCold, + root->the.thread.scan_area, + &root->the.thread.the.tag); + if (res != ResOK) + goto failScan; + break; + + case RootFMT: + res = (*root->the.fmt.scan)(&ss->ss_s, root->the.fmt.base, root->the.fmt.limit); + ss->scannedSize += AddrOffset(root->the.fmt.base, root->the.fmt.limit); + if (res != ResOK) + goto failScan; + break; + + default: + NOTREACHED; + res = ResUNIMPL; + goto failScan; + } + + AVER(res == ResOK); + root->grey = TraceSetDiff(root->grey, ss->traces); + rootSetSummary(root, ScanStateSummary(ss)); + EVENT3(RootScan, root, ss->traces, ScanStateSummary(ss)); + +failScan: + if (root->pm != AccessSetEMPTY) { + ProtSet(root->protBase, root->protLimit, root->pm); + } + + return res; +} + + +/* RootOfAddr -- return the root at addr + * + * Returns TRUE if the addr is in a root (and returns the root in + * *rootReturn) otherwise returns FALSE. Cf. SegOfAddr. */ + +Bool RootOfAddr(Root *rootReturn, Arena arena, Addr addr) +{ + Ring node, next; + + AVER(rootReturn != NULL); + AVERT(Arena, arena); + /* addr is arbitrary and can't be checked */ + + RING_FOR(node, &ArenaGlobals(arena)->rootRing, next) { + Root root = RING_ELT(Root, arenaRing, node); + + if (root->protectable && root->protBase <= addr && addr < root->protLimit) { + *rootReturn = root; + return TRUE; + } + } + + return FALSE; +} + + +/* RootAccess -- handle barrier hit on root */ + +void RootAccess(Root root, AccessSet mode) +{ + AVERT(Root, root); + AVERT(AccessSet, mode); + AVER((root->pm & mode) != AccessSetEMPTY); + AVER(mode == AccessWRITE); /* only write protection supported */ + + rootSetSummary(root, RefSetUNIV); + + /* Access must now be allowed. 
*/ + AVER((root->pm & mode) == AccessSetEMPTY); + ProtSet(root->protBase, root->protLimit, root->pm); +} + + +/* RootsIterate -- iterate over all the roots in the arena */ + +Res RootsIterate(Globals arena, RootIterateFn f, void *p) +{ + Res res = ResOK; + Ring node, next; + + RING_FOR(node, &arena->rootRing, next) { + Root root = RING_ELT(Root, arenaRing, node); + + res = (*f)(root, p); + if (res != ResOK) + return res; + } + return res; +} + + +/* RootDescribe -- describe a root */ + +Res RootDescribe(Root root, mps_lib_FILE *stream, Count depth) +{ + Res res; + + if (!TESTT(Root, root)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "Root $P ($U) {\n", (WriteFP)root, (WriteFU)root->serial, + " arena $P ($U)\n", (WriteFP)root->arena, + (WriteFU)root->arena->serial, + " rank $U\n", (WriteFU)root->rank, + " grey $B\n", (WriteFB)root->grey, + " summary $B\n", (WriteFB)root->summary, + " mode", + root->mode == 0 ? " NONE" : "", + root->mode & RootModeCONSTANT ? " CONSTANT" : "", + root->mode & RootModePROTECTABLE ? " PROTECTABLE" : "", + root->mode & RootModePROTECTABLE_INNER ? " INNER" : "", + "\n", + " protectable $S", WriteFYesNo(root->protectable), + " protBase $A", (WriteFA)root->protBase, + " protLimit $A", (WriteFA)root->protLimit, + " pm", + root->pm == AccessSetEMPTY ? " EMPTY" : "", + root->pm & AccessREAD ? " READ" : "", + root->pm & AccessWRITE ? 
" WRITE" : "", + NULL); + if (res != ResOK) + return res; + + switch(root->var) { + case RootAREA: + res = WriteF(stream, depth + 2, + "area base $A limit $A scan_area closure $P\n", + (WriteFA)root->the.area.base, + (WriteFA)root->the.area.limit, + (WriteFP)root->the.area.the.closure, + NULL); + if (res != ResOK) + return res; + break; + + case RootAREA_TAGGED: + res = WriteF(stream, depth + 2, + "area base $A limit $A scan_area mask $B pattern $B\n", + (WriteFA)root->the.area.base, + (WriteFA)root->the.area.limit, + (WriteFB)root->the.area.the.tag.mask, + (WriteFB)root->the.area.the.tag.pattern, + NULL); + if (res != ResOK) + return res; + break; + + case RootFUN: + res = WriteF(stream, depth + 2, + "scan function $F\n", (WriteFF)root->the.fun.scan, + "environment p $P s $W\n", + (WriteFP)root->the.fun.p, + (WriteFW)root->the.fun.s, + NULL); + if (res != ResOK) + return res; + break; + + case RootTHREAD: + res = WriteF(stream, depth + 2, + "thread $P\n", (WriteFP)root->the.thread.thread, + "closure $P\n", + (WriteFP)root->the.thread.the.closure, + "stackCold $P\n", (WriteFP)root->the.thread.stackCold, + NULL); + if (res != ResOK) + return res; + break; + + case RootTHREAD_TAGGED: + res = WriteF(stream, depth + 2, + "thread $P\n", (WriteFP)root->the.thread.thread, + "mask $B\n", (WriteFB)root->the.thread.the.tag.mask, + "pattern $B\n", (WriteFB)root->the.thread.the.tag.pattern, + "stackCold $P\n", (WriteFP)root->the.thread.stackCold, + NULL); + if (res != ResOK) + return res; + break; + + case RootFMT: + res = WriteF(stream, depth + 2, + "scan function $F\n", (WriteFF)root->the.fmt.scan, + "format base $A limit $A\n", + (WriteFA)root->the.fmt.base, (WriteFA)root->the.fmt.limit, + NULL); + if (res != ResOK) + return res; + break; + + default: + NOTREACHED; + } + + res = WriteF(stream, depth, + "} Root $P ($U)\n", (WriteFP)root, (WriteFU)root->serial, + NULL); + if (res != ResOK) + return res; + + return ResOK; +} + + +/* RootsDescribe -- describe all roots */ + 
+Res RootsDescribe(Globals arenaGlobals, mps_lib_FILE *stream, Count depth) +{ + Res res = ResOK; + Ring node, next; + + RING_FOR(node, &arenaGlobals->rootRing, next) { + Root root = RING_ELT(Root, arenaRing, node); + res = RootDescribe(root, stream, depth); + if (res != ResOK) + return res; + } + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/sa.c b/mps/code/sa.c new file mode 100644 index 00000000000..a70ecc2696f --- /dev/null +++ b/mps/code/sa.c @@ -0,0 +1,198 @@ +/* sa.c: SPARSE ARRAY IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. 
See end of file for license. + */ + +#include "sa.h" +#include "mpm.h" +#include "bt.h" +#include "vm.h" + +static Index pagesLength(SparseArray sa) +{ + return (sa->length * sa->elementSize + VMPageSize(sa->vm) - 1) >> sa->shift; +} + +void SparseArrayInit(SparseArray sa, + void *base, Size elementSize, Index length, + BT mapped, BT pages, VM vm) +{ + AVER(sa != NULL); + + sa->base = base; + sa->elementSize = elementSize; + sa->length = length; + sa->mapped = mapped; + sa->pages = pages; + sa->vm = vm; + AVER(SizeIsP2(VMPageSize(vm))); + sa->shift = SizeLog2(VMPageSize(vm)); + BTResRange(mapped, 0, length); + BTResRange(pages, 0, pagesLength(sa)); + + sa->sig = SparseArraySig; + AVERT(SparseArray, sa); +} + +void SparseArrayFinish(SparseArray sa) +{ + AVERT(SparseArray, sa); + AVER(BTIsResRange(sa->mapped, 0, sa->length)); + AVER(BTIsResRange(sa->pages, 0, pagesLength(sa))); + sa->sig = SigInvalid; +} + +Bool SparseArrayCheck(SparseArray sa) +{ + CHECKL(sa != NULL); + CHECKS(SparseArray, sa); + CHECKL(sa->base != NULL); + CHECKL(sa->elementSize >= 1); + CHECKD_NOSIG(VM, sa->vm); /* */ + CHECKL(sa->elementSize <= VMPageSize(sa->vm)); + CHECKL(sa->length > 0); + CHECKD_NOSIG(BT, sa->mapped); + CHECKD_NOSIG(BT, sa->pages); + CHECKL(sa->shift == SizeLog2(VMPageSize(sa->vm))); + return TRUE; +} + + +/* SparseArrayMap -- map memory for a range of elements in the array + * + * Ensures that the array elements in the unmapped range [baseEI, limitEI) + * have memory. The array elements may then be accessed, but their contents + * will be undefined. + * + * In the MPS we expect this to be called frequently when allocating in + * the arena, and so it's worth having the pages bit table to make this + * fast. Compare with SparseArrayUnmap. 
+ */ + +Res SparseArrayMap(SparseArray sa, Index baseEI, Index limitEI) +{ + Index baseMI, limitMI; + + AVERT(SparseArray, sa); + AVER(NONNEGATIVE(baseEI)); + AVER(baseEI < limitEI); + AVER(limitEI <= sa->length); + AVER(BTIsResRange(sa->mapped, baseEI, limitEI)); + + /* Calculate the index of the page on which the base element resides. + If that's already mapped (because some other element below baseEI + is defined) bump up to the next page. */ + baseMI = (baseEI * sa->elementSize) >> sa->shift; + if (BTGet(sa->pages, baseMI)) + ++baseMI; + + /* Calculate the index of the page on which the last element resides. + If that's already mapped (because some other element not below + limitEI is defined) bump down to the previous page. */ + limitMI = ((limitEI * sa->elementSize - 1) >> sa->shift) + 1; + if (BTGet(sa->pages, limitMI - 1)) + --limitMI; + + if (baseMI < limitMI) { + Addr base, limit; + Res res; + AVER(BTIsResRange(sa->pages, baseMI, limitMI)); + base = AddrAdd(sa->base, baseMI << sa->shift); + limit = AddrAdd(sa->base, limitMI << sa->shift); + res = VMMap(sa->vm, base, limit); + if (res != ResOK) + return res; + BTSetRange(sa->pages, baseMI, limitMI); + } + + BTSetRange(sa->mapped, baseEI, limitEI); + + return ResOK; +} + + +/* SparseArrayUnmap -- unmap memory for a range of elements in the array + * + * Declare that the array elements in the range [baseEI, limitEI) can be + * unmapped. After this call they may not be accessed. + * + * In the MPS we expect this to be called infrequently when purging large + * numbers of spare pages at once, so scanning a range of bits to determine + * whether we can unmap isn't too bad. + * + * TODO: Consider keeping a count of the number of array elements defined + * on each page, rather than a bit table, then we can unmap pages with + * zero counts rather than scanning. 
+ */ + +void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI) +{ + Index baseMI, limitMI, i; + + AVERT(SparseArray, sa); + AVER(NONNEGATIVE(baseEI)); + AVER(baseEI < limitEI); + AVER(limitEI <= sa->length); + AVER(BTIsSetRange(sa->mapped, baseEI, limitEI)); + + /* Calculate the index of the lowest element that might be occupying + the page on which the base element resides. If any elements between + there and baseMI are defined, we can't unmap that page, so bump up. */ + baseMI = (baseEI * sa->elementSize) >> sa->shift; + i = SizeAlignDown(baseEI * sa->elementSize, VMPageSize(sa->vm)) / sa->elementSize; + if (i < baseEI && !BTIsResRange(sa->mapped, i, baseEI)) + ++baseMI; + + /* Calculate the index of the highest element that might be occupying + the page on which the last element resides. If any elements between + limitMI and there are defined, we can't unmap that page, so bump down. */ + limitMI = ((limitEI * sa->elementSize - 1) >> sa->shift) + 1; + i = (SizeAlignUp(limitEI * sa->elementSize, VMPageSize(sa->vm)) + + sa->elementSize - 1) / sa->elementSize; + if (i > sa->length) + i = sa->length; + if (i > limitEI && !BTIsResRange(sa->mapped, limitEI, i)) + --limitMI; + + if (baseMI < limitMI) { + Addr base, limit; + AVER(BTIsSetRange(sa->pages, baseMI, limitMI)); + base = AddrAdd(sa->base, baseMI << sa->shift); + limit = AddrAdd(sa->base, limitMI << sa->shift); + VMUnmap(sa->vm, base, limit); + BTResRange(sa->pages, baseMI, limitMI); + } + + BTResRange(sa->mapped, baseEI, limitEI); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/sa.h b/mps/code/sa.h new file mode 100644 index 00000000000..9a3529da612 --- /dev/null +++ b/mps/code/sa.h @@ -0,0 +1,77 @@ +/* sa.h: SPARSE ARRAY INTERFACE + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * A sparse array is an array whose storage is partially mapped from a VM. + * Each element in the array is its own "mapped" status, and may only + * be used if it is mapped. + * + * The main use of sparse arrays is partially mapped page tables in the + * VM arena, where they provide a fast lookup from an address within + * a chunk to a page descriptor, while avoiding mapping memory for + * page descriptors for unused areas of address space, such as unused + * zone stripes or gaps between those stripes. 
+ */ + +#ifndef sa_h +#define sa_h + +#include "mpmtypes.h" + +typedef struct SparseArrayStruct *SparseArray; + +#define SparseArraySig ((Sig)0x5195BA66) /* SIGnature SParse ARRay */ + +typedef struct SparseArrayStruct { + Sig sig; /* design.mps.sig.field */ + void *base; /* base of array, page aligned */ + Size elementSize; /* size of array elements, <= page size */ + Index length; /* number of elements in the array */ + BT mapped; /* whether elements exist in the array */ + BT pages; /* whether underlying pages are mapped */ + VM vm; /* where pages are mapped from */ + Shift shift; /* SizeLog2(VMPageSize(vm)) TODO: VMShift(vm) */ +} SparseArrayStruct; + +extern void SparseArrayInit(SparseArray sa, + void *base, Size elementSize, Index length, + BT mapped, BT pages, VM vm); +extern void SparseArrayFinish(SparseArray sa); +extern Bool SparseArrayCheck(SparseArray sa); + +#define SparseArrayIsMapped(sa, i) BTGet((sa)->mapped, i) + +extern Res SparseArrayMap(SparseArray sa, Index baseEI, Index limitEI); +extern void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI); + +#endif /* sa_h */ + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/sac.c b/mps/code/sac.c new file mode 100644 index 00000000000..25913aca24b --- /dev/null +++ b/mps/code/sac.c @@ -0,0 +1,437 @@ +/* sac.c: SEGREGATED ALLOCATION CACHES + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mpm.h" +#include "sac.h" + +SRCID(sac, "$Id$"); + + +typedef _mps_sac_freelist_block_s *SACFreeListBlock; + + +/* SACCheck -- check function for SACs */ + +static Bool sacFreeListBlockCheck(SACFreeListBlock fb) +{ + Count j; + Addr cb; + + /* nothing to check about size */ + CHECKL(fb->_count <= fb->_count_max); + /* check the freelist has the right number of blocks */ + cb = fb->_blocks; + for (j = 0; j < fb->_count; ++j) { + CHECKL(cb != NULL); + /* @@@@ ignoring shields for now */ + cb = *ADDR_PTR(Addr, cb); + } + CHECKL(cb == NULL); + return TRUE; +} + + +/* SAC_LARGE_ITER -- iterate over the large classes (the ones above + * middle), setting the variable j to the index of the class, and i to + * the index of the corresponding free list. + */ +#define SAC_LARGE_ITER(middle, classes, i, j) \ + for (ITER_PARALLEL(j = (middle) + 1, i = 0); \ + j < (classes); \ + ITER_PARALLEL(++j, i += 2)) + + +/* SAC_SMALL_ITER -- iterate over the small classes (middle and + * below), setting the variable j to the index of the class, and i to + * the index of the corresponding free list. 
+ */ +#define SAC_SMALL_ITER(middle, i, j) \ + for (ITER_PARALLEL(j = (middle), i = 1); j > 0; ITER_PARALLEL(--j, i += 2)) + + +ATTRIBUTE_UNUSED +static Bool SACCheck(SAC sac) +{ + Index i, j; + Bool b; + Size prevSize; + mps_sac_t esac; + + CHECKS(SAC, sac); + esac = ExternalSACOfSAC(sac); + CHECKU(Pool, sac->pool); + CHECKL(sac->classesCount > 0); + CHECKL(sac->classesCount > sac->middleIndex); + CHECKL(BoolCheck(esac->_trapped)); + CHECKL(esac->_middle > 0); + /* check classes above middle */ + prevSize = esac->_middle; + SAC_LARGE_ITER(sac->middleIndex, sac->classesCount, i, j) { + CHECKL(prevSize < esac->_freelists[i]._size); + b = sacFreeListBlockCheck(&(esac->_freelists[i])); + if (!b) + return b; + prevSize = esac->_freelists[i]._size; + } + /* check overlarge class */ + CHECKL(prevSize < esac->_freelists[i]._size); + b = sacFreeListBlockCheck(&(esac->_freelists[i])); + if (!b) + return b; + CHECKL(esac->_freelists[i]._size == SizeMAX); + CHECKL(esac->_freelists[i]._count == 0); + CHECKL(esac->_freelists[i]._count_max == 0); + CHECKL(esac->_freelists[i]._blocks == NULL); + /* check classes below middle */ + prevSize = esac->_middle; + SAC_SMALL_ITER(sac->middleIndex, i, j) { + CHECKL(prevSize > esac->_freelists[i]._size); + b = sacFreeListBlockCheck(&(esac->_freelists[i])); + if (!b) + return b; + prevSize = esac->_freelists[i]._size; + } + /* check smallest class */ + CHECKL(prevSize > esac->_freelists[i]._size); + CHECKL(esac->_freelists[i]._size == 0); + b = sacFreeListBlockCheck(&(esac->_freelists[i])); + return b; +} + + +/* sacSize -- calculate size of a SAC structure */ + +static Size sacSize(Index middleIndex, Count classesCount) +{ + Index indexMax; /* max index for the freelist */ + SACStruct dummy; + + if (middleIndex + 1 < classesCount - middleIndex) + indexMax = 2 * (classesCount - middleIndex - 1); + else + indexMax = 1 + 2 * middleIndex; + return PointerOffset(&dummy, &dummy.esac_s._freelists[indexMax+1]); +} + + +/* SACCreate -- create an 
SAC object */ + +Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount, + SACClasses classes) +{ + void *p; + SAC sac; + Res res; + Index i, j; + Index middleIndex; /* index of the size in the middle */ + Size prevSize; + unsigned totalFreq = 0; + mps_sac_t esac; + + AVER(sacReturn != NULL); + AVERT(Pool, pool); + AVER(classesCount > 0); + /* In this cache type, there is no upper limit on classesCount. */ + prevSize = sizeof(Addr) - 1; /* must large enough for freelist link */ + /* @@@@ It would be better to dynamically adjust the smallest class */ + /* to be large enough, but that gets complicated, if you have to */ + /* merge classes because of the adjustment. */ + for (i = 0; i < classesCount; ++i) { + AVER(classes[i].mps_block_size > 0); + AVER(SizeIsAligned(classes[i].mps_block_size, PoolAlignment(pool))); + AVER(prevSize < classes[i].mps_block_size); + prevSize = classes[i].mps_block_size; + /* no restrictions on count */ + /* no restrictions on frequency */ + } + + /* Calculate frequency scale */ + for (i = 0; i < classesCount; ++i) { + unsigned oldFreq = totalFreq; + totalFreq += classes[i].mps_frequency; + AVER(oldFreq <= totalFreq); /* check for overflow */ + UNUSED(oldFreq); /* */ + } + + /* Find middle one */ + totalFreq /= 2; + for (i = 0; i < classesCount; ++i) { + if (totalFreq < classes[i].mps_frequency) + break; + totalFreq -= classes[i].mps_frequency; + } + if (totalFreq <= classes[i].mps_frequency / 2) + middleIndex = i; + else + middleIndex = i + 1; /* there must exist another class at i+1 */ + + /* Allocate SAC */ + res = ControlAlloc(&p, PoolArena(pool), sacSize(middleIndex, classesCount)); + if(res != ResOK) + goto failSACAlloc; + sac = p; + + /* Move classes in place */ + /* It's important this matches SACFind. 
*/ + esac = ExternalSACOfSAC(sac); + SAC_LARGE_ITER(middleIndex, classesCount, i, j) { + esac->_freelists[i]._size = classes[j].mps_block_size; + esac->_freelists[i]._count = 0; + esac->_freelists[i]._count_max = classes[j].mps_cached_count; + esac->_freelists[i]._blocks = NULL; + } + esac->_freelists[i]._size = SizeMAX; + esac->_freelists[i]._count = 0; + esac->_freelists[i]._count_max = 0; + esac->_freelists[i]._blocks = NULL; + SAC_SMALL_ITER(middleIndex, i, j) { + esac->_freelists[i]._size = classes[j-1].mps_block_size; + esac->_freelists[i]._count = 0; + esac->_freelists[i]._count_max = classes[j].mps_cached_count; + esac->_freelists[i]._blocks = NULL; + } + esac->_freelists[i]._size = 0; + esac->_freelists[i]._count = 0; + esac->_freelists[i]._count_max = classes[j].mps_cached_count; + esac->_freelists[i]._blocks = NULL; + + /* finish init */ + esac->_trapped = FALSE; + esac->_middle = classes[middleIndex].mps_block_size; + sac->pool = pool; + sac->classesCount = classesCount; + sac->middleIndex = middleIndex; + sac->sig = SACSig; + AVERT(SAC, sac); + *sacReturn = sac; + return ResOK; + +failSACAlloc: + return res; +} + + +/* SACDestroy -- destroy an SAC object */ + +void SACDestroy(SAC sac) +{ + AVERT(SAC, sac); + SACFlush(sac); + sac->sig = SigInvalid; + ControlFree(PoolArena(sac->pool), sac, + sacSize(sac->middleIndex, sac->classesCount)); +} + + +/* sacFind -- find the index corresponding to size + * + * This function replicates the loop in MPS_SAC_ALLOC_FAST, only with + * added checks. 
+ */ + +static void sacFind(Index *iReturn, Size *blockSizeReturn, + SAC sac, Size size) +{ + Index i, j; + mps_sac_t esac; + + esac = ExternalSACOfSAC(sac); + if (size > esac->_middle) { + i = 0; j = sac->middleIndex + 1; + AVER(j <= sac->classesCount); + while (size > esac->_freelists[i]._size) { + AVER(j < sac->classesCount); + i += 2; ++j; + } + *blockSizeReturn = esac->_freelists[i]._size; + } else { + Size prevSize = esac->_middle; + + i = 1; j = sac->middleIndex; + while (size <= esac->_freelists[i]._size) { + AVER(j > 0); + prevSize = esac->_freelists[i]._size; + i += 2; --j; + } + *blockSizeReturn = prevSize; + } + *iReturn = i; +} + + +/* SACFill -- alloc an object, and perhaps fill the cache */ + +Res SACFill(Addr *p_o, SAC sac, Size size) +{ + Index i; + Count blockCount, j; + Size blockSize; + Addr p, fl; + Res res = ResOK; /* stop compiler complaining */ + mps_sac_t esac; + + AVER(p_o != NULL); + AVERT(SAC, sac); + AVER(size != 0); + esac = ExternalSACOfSAC(sac); + + sacFind(&i, &blockSize, sac, size); + /* Check it's empty (in the future, there will be other cases). */ + AVER(esac->_freelists[i]._count == 0); + + /* Fill 1/3 of the cache for this class. */ + blockCount = esac->_freelists[i]._count_max / 3; + /* Adjust size for the overlarge class. */ + if (blockSize == SizeMAX) + /* .align: align 'cause some classes don't accept unaligned. */ + blockSize = SizeAlignUp(size, PoolAlignment(sac->pool)); + fl = esac->_freelists[i]._blocks; + for (j = 0; j <= blockCount; ++j) { + res = PoolAlloc(&p, sac->pool, blockSize); + if (res != ResOK) + break; + /* @@@@ ignoring shields for now */ + *ADDR_PTR(Addr, p) = fl; fl = p; + } + /* If didn't get any, just return. */ + if (j == 0) { + AVER(res != ResOK); + return res; + } + + /* Take the last one off, and return it. 
*/ + esac->_freelists[i]._count = j - 1; + *p_o = fl; + /* @@@@ ignoring shields for now */ + esac->_freelists[i]._blocks = *ADDR_PTR(Addr, fl); + return ResOK; +} + + +/* sacClassFlush -- discard elements from the cache for a given class + * + * blockCount says how many elements to discard. + */ + +static void sacClassFlush(SAC sac, Index i, Size blockSize, + Count blockCount) +{ + Addr cb, fl; + Count j; + mps_sac_t esac; + + esac = ExternalSACOfSAC(sac); + fl = esac->_freelists[i]._blocks; + for (j = 0; j < blockCount; ++j) { + /* @@@@ ignoring shields for now */ + cb = fl; fl = *ADDR_PTR(Addr, cb); + PoolFree(sac->pool, cb, blockSize); + } + esac->_freelists[i]._count -= blockCount; + esac->_freelists[i]._blocks = fl; +} + + +/* SACEmpty -- free an object, and perhaps empty the cache */ + +void SACEmpty(SAC sac, Addr p, Size size) +{ + Index i; + Size blockSize; + mps_sac_t esac; + + AVERT(SAC, sac); + AVER(p != NULL); + AVER(PoolHasAddr(sac->pool, p)); + AVER(size > 0); + esac = ExternalSACOfSAC(sac); + + sacFind(&i, &blockSize, sac, size); + /* Check it's full (in the future, there will be other cases). */ + AVER(esac->_freelists[i]._count + == esac->_freelists[i]._count_max); + + /* Adjust size for the overlarge class. */ + if (blockSize == SizeMAX) + /* see .align */ + blockSize = SizeAlignUp(size, PoolAlignment(sac->pool)); + if (esac->_freelists[i]._count_max > 0) { + Count blockCount; + + /* Flush 2/3 of the cache for this class. */ + /* Computed as count - count/3, so that the rounding works out right. */ + blockCount = esac->_freelists[i]._count; + blockCount -= esac->_freelists[i]._count / 3; + sacClassFlush(sac, i, blockSize, (blockCount > 0) ? blockCount : 1); + /* Leave the current one in the cache. */ + esac->_freelists[i]._count += 1; + /* @@@@ ignoring shields for now */ + *ADDR_PTR(Addr, p) = esac->_freelists[i]._blocks; + esac->_freelists[i]._blocks = p; + } else { + /* Free even the current one. 
*/ + PoolFree(sac->pool, p, blockSize); + } +} + + +/* SACFlush -- flush the cache, releasing all memory held in it */ + +void SACFlush(SAC sac) +{ + Index i, j; + Size prevSize; + mps_sac_t esac; + + AVERT(SAC, sac); + + esac = ExternalSACOfSAC(sac); + SAC_LARGE_ITER(sac->middleIndex, sac->classesCount, i, j) { + sacClassFlush(sac, i, esac->_freelists[i]._size, + esac->_freelists[i]._count); + AVER(esac->_freelists[i]._blocks == NULL); + } + /* no need to flush overlarge, there's nothing there */ + prevSize = esac->_middle; + SAC_SMALL_ITER(sac->middleIndex, i, j) { + sacClassFlush(sac, i, prevSize, esac->_freelists[i]._count); + AVER(esac->_freelists[i]._blocks == NULL); + prevSize = esac->_freelists[i]._size; + } + /* flush smallest class */ + sacClassFlush(sac, i, prevSize, esac->_freelists[i]._count); + AVER(esac->_freelists[i]._blocks == NULL); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/sac.h b/mps/code/sac.h new file mode 100644 index 00000000000..f52303a869f --- /dev/null +++ b/mps/code/sac.h @@ -0,0 +1,82 @@ +/* sac.h: SEGREGATED ALLOCATION CACHES INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef sac_h +#define sac_h + +#include "mpmtypes.h" +#include "mpm.h" /* for PoolArena */ + + +#define sacClassLIMIT ((Count)8) + + +/* SAC -- the real segregated allocation caches */ + +#define SACSig ((Sig)0x5195AC99) /* SIGnature SAC */ + +typedef struct SACStruct *SAC; + +typedef struct SACStruct { + Sig sig; /* design.mps.sig.field */ + Pool pool; + Count classesCount; /* number of classes */ + Index middleIndex; /* index of the middle */ + _mps_sac_s esac_s; /* variable length, must be last */ +} SACStruct; + +#define SACOfExternalSAC(esac) PARENT(SACStruct, esac_s, esac) + +#define ExternalSACOfSAC(sac) (&((sac)->esac_s)) + +#define SACArena(sac) PoolArena((sac)->pool) + + +/* SACClasses -- structure for specifying classes in the cache */ +/* .sacc: This structure must match . */ + +typedef struct mps_sac_classes_s *SACClasses; + + +extern Res SACCreate(SAC *sac_o, Pool pool, Count classesCount, + SACClasses classes); +extern void SACDestroy(SAC sac); +extern Res SACFill(Addr *p_o, SAC sac, Size size); +extern void SACEmpty(SAC sac, Addr p, Size size); +extern void SACFlush(SAC sac); + + +#endif /* sac_h */ + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/sacss.c b/mps/code/sacss.c new file mode 100644 index 00000000000..2228c756545 --- /dev/null +++ b/mps/code/sacss.c @@ -0,0 +1,261 @@ +/* sacss.c: SAC MANUAL ALLOC STRESS TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. 
+ */ + +#include "mpscmvff.h" +#include "mpscmfs.h" +#include "mpslib.h" +#include "mpsavm.h" +#include "mps.h" + +#include "testlib.h" +#include "mpslib.h" + +#include +#include +#include + + +#define TRUE 1 +#define FALSE 0 + +#define testArenaSIZE ((((size_t)64)<<20) - 4) +#define testSetSIZE 200 +#define testLOOPS 10 + + +/* make -- allocate an object */ + +static mps_res_t make(mps_addr_t *p, mps_sac_t sac, size_t size) +{ + mps_res_t res; + + MPS_SAC_ALLOC(res, *p, sac, size, FALSE); + return res; +} + + +/* stress -- create a pool of the requested type and allocate in it */ + +static mps_res_t stress(mps_arena_t arena, mps_align_t align, + size_t (*size)(size_t i), + const char *name, mps_pool_class_t pool_class, + mps_arg_s *args) +{ + mps_res_t res; + mps_pool_t pool; + mps_sac_t sac; + size_t i, k; + int *ps[testSetSIZE]; + size_t ss[testSetSIZE]; + mps_sac_classes_s classes[4] = { + {1, 1, 1}, + {2, 1, 2}, + {16, 9, 5}, + {100, 9, 4}, + }; + size_t classes_count = sizeof classes / sizeof *classes; + for (i = 0; i < classes_count; ++i) { + classes[i].mps_block_size *= alignUp(align, sizeof(void *)); + } + + printf("%s\n", name); + + res = mps_pool_create_k(&pool, arena, pool_class, args); + if (res != MPS_RES_OK) + return res; + + die(mps_sac_create(&sac, pool, classes_count, classes), + "SACCreate"); + + /* allocate a load of objects */ + for (i = 0; i < testSetSIZE; ++i) { + mps_addr_t obj; + ss[i] = (*size)(i); + res = make(&obj, sac, ss[i]); + if (res != MPS_RES_OK) + return res; + ps[i] = obj; + if (ss[i] >= sizeof(ps[i])) + *ps[i] = 1; /* Write something, so it gets swap. 
*/ + } + + mps_pool_check_fenceposts(pool); + + for (k = 0; k < testLOOPS; ++k) { + /* shuffle all the objects */ + for (i=0; i> (i / 10)), 2) + 1; + return size; +} + + +/* fixedSize -- produce always the same size */ + +static size_t fixedSizeSize = 0; + +static size_t fixedSize(size_t i) +{ + testlib_unused(i); + return fixedSizeSize; +} + + +static mps_pool_debug_option_s debugOptions = { + /* .fence_template = */ "post", + /* .fence_size = */ 4, + /* .free_template = */ "DEAD", + /* .free_size = */ 4 +}; + + +/* testInArena -- test all the pool classes in the given arena */ + +static void testInArena(mps_arena_class_t arena_class, mps_arg_s *arena_args) +{ + mps_arena_t arena; + size_t arena_grain_size = 4096; + + die(mps_arena_create_k(&arena, arena_class, arena_args), + "mps_arena_create"); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = rnd_align(sizeof(void *), arena_grain_size); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + die(stress(arena, align, randomSize, "MVFF", mps_class_mvff(), args), + "stress MVFF"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = rnd_align(sizeof(void *), arena_grain_size); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, &debugOptions); + die(stress(arena, align, randomSize, "MVFF debug", + mps_class_mvff_debug(), args), + "stress MVFF debug"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + fixedSizeSize = MPS_PF_ALIGN * (1 + rnd() % 100); + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, fixedSizeSize); + die(stress(arena, fixedSizeSize, fixedSize, "MFS", mps_class_mfs(), args), + "stress MFS"); + } MPS_ARGS_END(args); + + mps_arena_destroy(arena); 
+} + + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + testInArena(mps_arena_class_vm(), args); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE); + testInArena(mps_arena_class_vm(), args); + } MPS_ARGS_END(args); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/scan.c b/mps/code/scan.c new file mode 100644 index 00000000000..308cffc2c37 --- /dev/null +++ b/mps/code/scan.c @@ -0,0 +1,174 @@ +/* scan.c: SCANNING FUNCTIONS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. + * See end of file for license. + * + * .outside: The code in this file is written as if *outside* the MPS, + * and so is restricted to facilities in the MPS interface. MPS users + * are invited to read this code and use it as a basis for their own + * scanners. See topic "Area Scanners" in the MPS manual. + * + * TODO: Design document. + */ + +#include "mps.h" +#include "mpstd.h" /* for MPS_BUILD_MV */ + + +#ifdef MPS_BUILD_MV +/* MSVC warning 4127 = conditional expression is constant */ +/* Objects to: MPS_SCAN_AREA(1). */ +#pragma warning( disable : 4127 ) +#endif + + +#define MPS_SCAN_AREA(test) \ + MPS_SCAN_BEGIN(ss) { \ + mps_word_t *p = base; \ + while (p < (mps_word_t *)limit) { \ + mps_word_t word = *p; \ + mps_word_t tag_bits = word & mask; \ + if (test) { \ + mps_addr_t ref = (mps_addr_t)(word ^ tag_bits); \ + if (MPS_FIX1(ss, ref)) { \ + mps_res_t res = MPS_FIX2(ss, &ref); \ + if (res != MPS_RES_OK) \ + return res; \ + *p = (mps_word_t)ref | tag_bits; \ + } \ + } \ + ++p; \ + } \ + } MPS_SCAN_END(ss); + + +/* mps_scan_area -- scan contiguous area of references + * + * This is a convenience function for scanning the contiguous area + * [base, limit). I.e., it calls Fix on all words from base up to + * limit, inclusive of base and exclusive of limit. + * + * This scanner is appropriate for use when all words in the area are + * simple untagged references. 
+ */ + +mps_res_t mps_scan_area(mps_ss_t ss, + void *base, void *limit, + void *closure) +{ + mps_word_t mask = 0; + + (void)closure; /* unused */ + + MPS_SCAN_AREA(1); + + return MPS_RES_OK; +} + + +/* mps_scan_area_masked -- scan area masking off tag bits + * + * Like mps_scan_area, but removes tag bits before fixing references, + * and restores them afterwards. + * + * For example, if mask is 7, then this scanner will clear the bottom + * three bits of each word before fixing. + * + * This scanner is useful when all words in the area must be treated + * as references no matter what tag they have. + */ + +mps_res_t mps_scan_area_masked(mps_ss_t ss, + void *base, void *limit, + void *closure) +{ + mps_scan_tag_t tag = closure; + mps_word_t mask = tag->mask; + + MPS_SCAN_AREA(1); + + return MPS_RES_OK; +} + + +/* mps_scan_area_tagged -- scan area selecting by tag + * + * Like mps_scan_area_masked, except only references whose masked bits + * match a particular tag pattern are fixed. + * + * For example, if mask is 7 and pattern is 5, then this scanner will + * only fix words whose low order bits are 0b101. + */ + +mps_res_t mps_scan_area_tagged(mps_ss_t ss, + void *base, void *limit, + void *closure) +{ + mps_scan_tag_t tag = closure; + mps_word_t mask = tag->mask; + mps_word_t pattern = tag->pattern; + + MPS_SCAN_AREA(tag_bits == pattern); + + return MPS_RES_OK; +} + + +/* mps_scan_area_tagged_or_zero -- scan area selecting by tag or zero + * + * Like mps_scan_area_tagged, except references whose masked bits are + * zero are fixed in addition to those that match the pattern. + * + * For example, if mask is 7 and pattern is 3, then this scanner will + * fix words whose low order bits are 0b011 and words whose low order + * bits are 0b000, but not any others. 
+ * + * This scanner is most useful for ambiguously scanning the stack and + * registers when using an optimising C compiler and non-zero tags on + * references, since the compiler is likely to leave untagged + * addresses of objects around which must not be ignored. + */ + +mps_res_t mps_scan_area_tagged_or_zero(mps_ss_t ss, + void *base, void *limit, + void *closure) +{ + mps_scan_tag_t tag = closure; + mps_word_t mask = tag->mask; + mps_word_t pattern = tag->pattern; + + MPS_SCAN_AREA(tag_bits == 0 || tag_bits == pattern); + + return MPS_RES_OK; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/seg.c b/mps/code/seg.c new file mode 100644 index 00000000000..9782e43b3c7 --- /dev/null +++ b/mps/code/seg.c @@ -0,0 +1,2244 @@ +/* seg.c: SEGMENTS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .design: The design for this module is . + * + * PURPOSE + * + * .purpose: This is the implementation of the generic segment + * interface and the segment classes Seg, GCSeg and MutatorSeg. + */ + +#include "tract.h" +#include "mpm.h" + +SRCID(seg, "$Id$"); + + +/* forward declarations */ + +static void SegFinish(Seg seg); + +static Res SegInit(Seg seg, SegClass klass, Pool pool, + Addr base, Size size, ArgList args); + + +/* Generic interface support */ + + +/* SegAlloc -- allocate a segment from the arena */ + +Res SegAlloc(Seg *segReturn, SegClass klass, LocusPref pref, + Size size, Pool pool, ArgList args) +{ + Res res; + Arena arena; + Seg seg; + Addr base; + void *p; + + AVER(segReturn != NULL); + AVERT(SegClass, klass); + AVERT(LocusPref, pref); + AVER(size > (Size)0); + AVERT(Pool, pool); + + arena = PoolArena(pool); + AVERT(Arena, arena); + AVER(SizeIsArenaGrains(size, arena)); + + /* allocate the memory from the arena */ + res = ArenaAlloc(&base, pref, size, pool); + if (res != ResOK) + goto failArena; + + /* allocate the segment object from the control pool */ + res = ControlAlloc(&p, arena, klass->size); + if (res != ResOK) + goto failControl; + seg = p; + + res = SegInit(seg, klass, pool, base, size, args); + if (res != ResOK) + goto failInit; + + EVENT5(SegAlloc, arena, seg, SegBase(seg), size, pool); + *segReturn = seg; + return ResOK; + +failInit: + ControlFree(arena, seg, klass->size); +failControl: + ArenaFree(base, size, pool); +failArena: + EVENT4(SegAllocFail, arena, size, pool, (unsigned)res); + return res; +} + + +/* SegFree -- free a segment to the arena */ + +void SegFree(Seg seg) +{ + Arena arena; + Pool pool; + Addr base; + Size size, structSize; + + AVERT(Seg, seg); + 
pool = SegPool(seg); + AVERT(Pool, pool); + arena = PoolArena(pool); + AVERT(Arena, arena); + base = SegBase(seg); + size = SegSize(seg); + structSize = ClassOfPoly(Seg, seg)->size; + + SegFinish(seg); + ControlFree(arena, seg, structSize); + ArenaFree(base, size, pool); + + EVENT2(SegFree, arena, seg); +} + + +/* SegInit -- initialize a segment */ + +static Res segAbsInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + Arena arena; + Addr addr, limit; + Tract tract; + + AVER(seg != NULL); + AVERT(Pool, pool); + arena = PoolArena(pool); + AVER(AddrIsArenaGrain(base, arena)); + AVER(SizeIsArenaGrains(size, arena)); + AVERT(ArgList, args); + + NextMethod(Inst, Seg, init)(CouldBeA(Inst, seg)); + + limit = AddrAdd(base, size); + seg->limit = limit; + seg->rankSet = RankSetEMPTY; + seg->white = TraceSetEMPTY; + seg->nailed = TraceSetEMPTY; + seg->grey = TraceSetEMPTY; + seg->pm = AccessSetEMPTY; + seg->sm = AccessSetEMPTY; + seg->defer = WB_DEFER_INIT; + seg->depth = 0; + seg->queued = FALSE; + seg->firstTract = NULL; + RingInit(SegPoolRing(seg)); + + TRACT_FOR(tract, addr, arena, base, limit) { + AVERT(Tract, tract); + AVER(!TractHasSeg(tract)); + AVER(TractPool(tract) == pool); + TRACT_SET_SEG(tract, seg); + if (addr == base) { + AVER(seg->firstTract == NULL); + seg->firstTract = tract; + } + AVER(seg->firstTract != NULL); + } + AVER(addr == seg->limit); + + SetClassOfPoly(seg, CLASS(Seg)); + seg->sig = SegSig; + AVERC(Seg, seg); + + RingAppend(&pool->segRing, SegPoolRing(seg)); + + return ResOK; +} + +static Res SegInit(Seg seg, SegClass klass, Pool pool, Addr base, Size size, ArgList args) +{ + Res res; + + AVERT(SegClass, klass); + + /* Klass specific initialization comes last */ + res = klass->init(seg, pool, base, size, args); + if (res != ResOK) + return res; + + AVERT(Seg, seg); + + return ResOK; +} + + +/* SegFinish -- finish a segment */ + +static void segAbsFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + Arena arena; + Addr addr, limit; + 
Tract tract; + + AVERT(Seg, seg); + + RingRemove(SegPoolRing(seg)); + + arena = PoolArena(SegPool(seg)); + + /* TODO: It would be good to avoid deprotecting segments eagerly + when we free them, especially if they're going to be + unmapped. This would require tracking of protection independent + of the existence of a SegStruct. */ + if (seg->sm != AccessSetEMPTY) { + ShieldLower(arena, seg, seg->sm); + } + + seg->rankSet = RankSetEMPTY; + + /* See */ + AVER(seg->depth == 0); + if (seg->queued) + ShieldFlush(PoolArena(SegPool(seg))); + AVER(!seg->queued); + + limit = SegLimit(seg); + + TRACT_TRACT_FOR(tract, addr, arena, seg->firstTract, limit) { + AVERT(Tract, tract); + TRACT_UNSET_SEG(tract); + } + AVER(addr == seg->limit); + + RingFinish(SegPoolRing(seg)); + + /* Check that the segment is not exposed, or in the shield */ + /* cache . */ + AVER(seg->depth == 0); + /* Check not shielded or protected (so that pages in hysteresis */ + /* fund are not protected) */ + AVER(seg->sm == AccessSetEMPTY); + AVER(seg->pm == AccessSetEMPTY); + + seg->sig = SigInvalid; + InstFinish(CouldBeA(Inst, seg)); +} + +static void SegFinish(Seg seg) +{ + AVERC(Seg, seg); + Method(Inst, seg, finish)(MustBeA(Inst, seg)); +} + + +/* SegSetGrey -- change the greyness of a segment + * + * Sets the segment greyness to the trace set grey. + */ + +void SegSetGrey(Seg seg, TraceSet grey) +{ + AVERT(Seg, seg); + AVERT(TraceSet, grey); + AVER(grey == TraceSetEMPTY || SegRankSet(seg) != RankSetEMPTY); + + /* Don't dispatch to the class method if there's no actual change in + greyness, or if the segment doesn't contain any references. 
*/ + if (grey != SegGrey(seg) && SegRankSet(seg) != RankSetEMPTY) + Method(Seg, seg, setGrey)(seg, grey); + + EVENT3(SegSetGrey, PoolArena(SegPool(seg)), seg, grey); +} + + +/* SegFlip -- update barriers for trace that's about to flip */ + +void SegFlip(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + + /* Don't dispatch to the class method unless the segment is grey for + the trace that's about to flip, and contains references. */ + if (TraceSetIsMember(SegGrey(seg), trace) && SegRankSet(seg) != RankSetEMPTY) + Method(Seg, seg, flip)(seg, trace); +} + + +/* SegSetWhite -- change the whiteness of a segment + * + * Sets the segment whiteness to the trace set ts. + */ + +void SegSetWhite(Seg seg, TraceSet white) +{ + AVERT(Seg, seg); + AVERT(TraceSet, white); + Method(Seg, seg, setWhite)(seg, white); +} + + +/* SegSetRankSet -- set the rank set of a segment + * + * The caller must set the summary to empty before setting the rank + * set to empty. The caller must set the rank set to non-empty before + * setting the summary to non-empty. + */ + +void SegSetRankSet(Seg seg, RankSet rankSet) +{ + AVERT(Seg, seg); + AVERT(RankSet, rankSet); + AVER(rankSet != RankSetEMPTY || SegSummary(seg) == RefSetEMPTY); + Method(Seg, seg, setRankSet)(seg, rankSet); +} + + +/* SegSetSummary -- change the summary on a segment */ + +void SegSetSummary(Seg seg, RefSet summary) +{ + AVERT(Seg, seg); + AVER(summary == RefSetEMPTY || SegRankSet(seg) != RankSetEMPTY); + +#if defined(REMEMBERED_SET_NONE) + /* Without protection, we can't maintain the remembered set because + there are writes we don't know about. 
*/ + summary = RefSetUNIV; +#endif + + if (summary != SegSummary(seg)) + Method(Seg, seg, setSummary)(seg, summary); +} + + +/* SegSetRankAndSummary -- set both the rank set and the summary */ + +void SegSetRankAndSummary(Seg seg, RankSet rankSet, RefSet summary) +{ + AVERT(Seg, seg); + AVERT(RankSet, rankSet); + +#if defined(REMEMBERED_SET_NONE) + if (rankSet != RankSetEMPTY) { + summary = RefSetUNIV; + } +#endif + + Method(Seg, seg, setRankSummary)(seg, rankSet, summary); +} + + +/* SegHasBuffer -- segment has a buffer? */ + +Bool SegHasBuffer(Seg seg) +{ + Buffer buffer; + return SegBuffer(&buffer, seg); +} + + +/* SegBuffer -- get the buffer of a segment */ + +Bool SegBuffer(Buffer *bufferReturn, Seg seg) +{ + AVERT_CRITICAL(Seg, seg); /* .seg.critical */ + return Method(Seg, seg, buffer)(bufferReturn, seg); +} + + +/* SegSetBuffer -- change the buffer on a segment */ + +void SegSetBuffer(Seg seg, Buffer buffer) +{ + AVERT(Seg, seg); + AVERT(Buffer, buffer); + Method(Seg, seg, setBuffer)(seg, buffer); +} + + +/* SegUnsetBuffer -- remove the buffer from a segment */ + +void SegUnsetBuffer(Seg seg) +{ + AVERT(Seg, seg); + Method(Seg, seg, unsetBuffer)(seg); +} + + +/* SegBufferScanLimit -- limit of scannable objects in segment */ + +Addr SegBufferScanLimit(Seg seg) +{ + Addr limit; + Buffer buf; + + AVERT(Seg, seg); + + if (!SegBuffer(&buf, seg)) { + /* Segment is unbuffered: entire segment scannable */ + limit = SegLimit(seg); + } else { + /* Segment is buffered: scannable up to limit of initialized objects. 
*/ + limit = BufferScanLimit(buf); + } + return limit; +} + + +/* SegBufferFill -- allocate to a buffer from a segment */ + +Bool SegBufferFill(Addr *baseReturn, Addr *limitReturn, Seg seg, Size size, + RankSet rankSet) +{ + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Seg, seg); + AVER(size > 0); + AVERT(RankSet, rankSet); + return Method(Seg, seg, bufferFill)(baseReturn, limitReturn, + seg, size, rankSet); +} + + +/* SegDescribe -- describe a segment */ + +static Res segAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + Seg seg = CouldBeA(Seg, inst); + Res res; + Pool pool; + + if (!TESTC(Seg, seg)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + res = NextMethod(Inst, Seg, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + pool = SegPool(seg); + + res = WriteF(stream, depth + 2, + "base $A\n", (WriteFA)SegBase(seg), + "limit $A\n", (WriteFA)SegLimit(seg), + "pool $P ($U)\n", (WriteFP)pool, (WriteFU)pool->serial, + "depth $U\n", seg->depth, + "pm", + seg->pm == AccessSetEMPTY ? " EMPTY" : "", + seg->pm & AccessREAD ? " READ" : "", + seg->pm & AccessWRITE ? " WRITE" : "", + "\n", + "sm", + seg->sm == AccessSetEMPTY ? " EMPTY" : "", + seg->sm & AccessREAD ? " READ" : "", + seg->sm & AccessWRITE ? " WRITE" : "", + "\n", + "grey $B\n", (WriteFB)seg->grey, + "white $B\n", (WriteFB)seg->white, + "nailed $B\n", (WriteFB)seg->nailed, + "rankSet", + seg->rankSet == RankSetEMPTY ? " EMPTY" : "", + BS_IS_MEMBER(seg->rankSet, RankAMBIG) ? " AMBIG" : "", + BS_IS_MEMBER(seg->rankSet, RankEXACT) ? " EXACT" : "", + BS_IS_MEMBER(seg->rankSet, RankFINAL) ? " FINAL" : "", + BS_IS_MEMBER(seg->rankSet, RankWEAK) ? " WEAK" : "", + "\n", + NULL); + if (res != ResOK) + return res; + + return ResOK; +} + +Res SegDescribe(Seg seg, mps_lib_FILE *stream, Count depth) +{ + return Method(Inst, seg, describe)(MustBeA(Inst, seg), stream, depth); +} + + +/* .seg.critical: These seg functions are low-level and used + * through-out. 
They are therefore on the + * [critical path](../design/critical-path.txt) and their AVERs are so-marked. + */ + +/* SegBase -- return the base address of a seg */ + +Addr (SegBase)(Seg seg) +{ + AVERT_CRITICAL(Seg, seg); + return SegBase(seg); +} + + +/* SegLimit -- return the limit address of a segment */ + +Addr (SegLimit)(Seg seg) +{ + AVERT_CRITICAL(Seg, seg); + return SegLimit(seg); +} + + +/* SegSize -- return the size of a seg */ + +Size SegSize(Seg seg) +{ + AVERT_CRITICAL(Seg, seg); + return AddrOffset(SegBase(seg), SegLimit(seg)); +} + + +/* SegOfAddr -- return the seg the given address is in, if any */ + +Bool SegOfAddr(Seg *segReturn, Arena arena, Addr addr) +{ + Tract tract; + AVER_CRITICAL(segReturn != NULL); /* .seg.critical */ + AVERT_CRITICAL(Arena, arena); /* .seg.critical */ + if (TractOfAddr(&tract, arena, addr)) { + return TRACT_SEG(segReturn, tract); + } else { + return FALSE; + } +} + + +/* SegFirst -- return the first seg in the arena + * + * This is used to start an iteration over all segs in the arena. + */ + +static Bool PoolFirst(Pool *poolReturn, Arena arena) +{ + AVER(poolReturn != NULL); + AVERT(Arena, arena); + if (RingIsSingle(ArenaPoolRing(arena))) + return FALSE; + *poolReturn = PoolOfArenaRing(RingNext(ArenaPoolRing(arena))); + return TRUE; +} + +static Bool PoolNext(Pool *poolReturn, Arena arena, Pool pool) +{ + /* Was that the last pool? 
*/ + if (RingNext(PoolArenaRing(pool)) == ArenaPoolRing(arena)) + return FALSE; + *poolReturn = PoolOfArenaRing(RingNext(PoolArenaRing(pool))); + return TRUE; +} + +static Bool PoolWithSegs(Pool *poolReturn, Arena arena, Pool pool) +{ + AVER(poolReturn != NULL); + AVERT(Arena, arena); + AVERT(Pool, pool); + + while (RingIsSingle(PoolSegRing(pool))) + if (!PoolNext(&pool, arena, pool)) + return FALSE; + + *poolReturn = pool; + return TRUE; +} + +Bool SegFirst(Seg *segReturn, Arena arena) +{ + Pool pool; + + AVER(segReturn != NULL); + AVERT(Arena, arena); + + if (!PoolFirst(&pool, arena) || /* unlikely, but still... */ + !PoolWithSegs(&pool, arena, pool)) + return FALSE; + + *segReturn = SegOfPoolRing(RingNext(PoolSegRing(pool))); + return TRUE; +} + + +/* SegNext -- return the "next" seg in the arena + * + * This is used as the iteration step when iterating over all + * segs in the arena. + * + * Pool is the pool of the previous segment, and next is the + * RingNext(SegPoolRing(seg)) of the previous segment. This allows for + * segment deletion during iteration. 
+ */ + +Bool SegNextOfRing(Seg *segReturn, Arena arena, Pool pool, Ring next) +{ + AVER_CRITICAL(segReturn != NULL); /* .seg.critical */ + AVERT_CRITICAL(Arena, arena); + AVERT_CRITICAL(Pool, pool); + AVERT_CRITICAL(Ring, next); + + if (next == PoolSegRing(pool)) { + if (!PoolNext(&pool, arena, pool) || + !PoolWithSegs(&pool, arena, pool)) + return FALSE; + *segReturn = SegOfPoolRing(RingNext(PoolSegRing(pool))); + return TRUE; + } + + *segReturn = SegOfPoolRing(next); + return TRUE; +} + +Bool SegNext(Seg *segReturn, Arena arena, Seg seg) +{ + AVERT_CRITICAL(Seg, seg); + return SegNextOfRing(segReturn, arena, + SegPool(seg), RingNext(SegPoolRing(seg))); +} + + +/* SegMerge -- Merge two adjacent segments + * + * + */ + +Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi) +{ + SegClass klass; + Addr base, mid, limit; + Arena arena; + Res res; + + AVER(NULL != mergedSegReturn); + AVERT(Seg, segLo); + AVERT(Seg, segHi); + klass = ClassOfPoly(Seg, segLo); + AVER(ClassOfPoly(Seg, segHi) == klass); + AVER(SegPool(segLo) == SegPool(segHi)); + base = SegBase(segLo); + mid = SegLimit(segLo); + limit = SegLimit(segHi); + AVER(SegBase(segHi) == SegLimit(segLo)); + arena = PoolArena(SegPool(segLo)); + + if (segLo->queued || segHi->queued) + ShieldFlush(arena); /* see */ + + /* Invoke class-specific methods to do the merge */ + res = Method(Seg, segLo, merge)(segLo, segHi, base, mid, limit); + if (ResOK != res) + goto failMerge; + + EVENT2(SegMerge, segLo, segHi); + /* Deallocate segHi object */ + ControlFree(arena, segHi, klass->size); + AVERT(Seg, segLo); + *mergedSegReturn = segLo; + return ResOK; + +failMerge: + AVERT(Seg, segLo); /* check original segs are still valid */ + AVERT(Seg, segHi); + return res; +} + + +/* SegSplit -- Split a segment + * + * The segment is split at the indicated position. 
+ * + */ + +Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at) +{ + Addr base, limit; + SegClass klass; + Seg segNew; + Arena arena; + Res res; + void *p; + Buffer buffer; + + AVER(NULL != segLoReturn); + AVER(NULL != segHiReturn); + AVERT(Seg, seg); + klass = ClassOfPoly(Seg, seg); + arena = PoolArena(SegPool(seg)); + base = SegBase(seg); + limit = SegLimit(seg); + AVERT(Arena, arena); + AVER(AddrIsArenaGrain(at, arena)); + AVER(at > base); + AVER(at < limit); + + /* Can only split a buffered segment if the entire buffer is below + * the split point. */ + AVER(!SegBuffer(&buffer, seg) || BufferLimit(buffer) <= at); + + if (seg->queued) + ShieldFlush(arena); /* see */ + AVER(SegSM(seg) == SegPM(seg)); + + /* Allocate the new segment object from the control pool */ + res = ControlAlloc(&p, arena, klass->size); + if (ResOK != res) + goto failControl; + segNew = p; + + /* Invoke class-specific methods to do the split */ + res = Method(Seg, seg, split)(seg, segNew, base, at, limit); + if (ResOK != res) + goto failSplit; + + EVENT4(SegSplit, seg, segNew, seg, at); + AVERT(Seg, seg); + AVERT(Seg, segNew); + *segLoReturn = seg; + *segHiReturn = segNew; + return ResOK; + +failSplit: + ControlFree(arena, segNew, klass->size); +failControl: + AVERT(Seg, seg); /* check the original seg is still valid */ + return res; +} + + +/* SegAccess -- mutator read/write access to a segment */ + +Res SegAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context) +{ + AVERT(Seg, seg); + AVERT(Arena, arena); + AVER(arena == PoolArena(SegPool(seg))); + AVER(SegBase(seg) <= addr); + AVER(addr < SegLimit(seg)); + AVERT(AccessSet, mode); + AVERT(MutatorContext, context); + + return Method(Seg, seg, access)(seg, arena, addr, mode, context); +} + + +/* SegWhiten -- whiten objects */ + +Res SegWhiten(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(PoolArena(SegPool(seg)) == trace->arena); + return Method(Seg, seg, whiten)(seg, 
trace); +} + + +/* SegGreyen -- greyen non-white objects */ + +void SegGreyen(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(PoolArena(SegPool(seg)) == trace->arena); + Method(Seg, seg, greyen)(seg, trace); +} + + +/* SegBlacken -- blacken grey objects without scanning */ + +void SegBlacken(Seg seg, TraceSet traceSet) +{ + AVERT(Seg, seg); + AVERT(TraceSet, traceSet); + Method(Seg, seg, blacken)(seg, traceSet); +} + + +/* SegScan -- scan a segment */ + +Res SegScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + AVER(totalReturn != NULL); + AVERT(Seg, seg); + AVERT(ScanState, ss); + AVER(PoolArena(SegPool(seg)) == ss->arena); + + /* We check that either ss->rank is in the segment's + * ranks, or that ss->rank is exact. The check is more complicated if + * we actually have multiple ranks in a seg. + * See */ + AVER(ss->rank == RankEXACT || RankSetIsMember(SegRankSet(seg), ss->rank)); + + EVENT5(SegScan, seg, SegPool(seg), ss->arena, ss->traces, ss->rank); + return Method(Seg, seg, scan)(totalReturn, seg, ss); +} + + +/* SegFix* -- fix a reference to an object in this segment + * + * . + */ + +Res SegFix(Seg seg, ScanState ss, Addr *refIO) +{ + AVERT_CRITICAL(Seg, seg); + AVERT_CRITICAL(ScanState, ss); + AVER_CRITICAL(refIO != NULL); + + /* Should only be fixing references to white segments. */ + AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY); + + return Method(Seg, seg, fix)(seg, ss, refIO); +} + +Res SegFixEmergency(Seg seg, ScanState ss, Addr *refIO) +{ + Res res; + + AVERT_CRITICAL(Seg, seg); + AVERT_CRITICAL(ScanState, ss); + AVER_CRITICAL(refIO != NULL); + + /* Should only be fixing references to white segments. 
*/ + AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY); + + res = Method(Seg, seg, fixEmergency)(seg, ss, refIO); + AVER_CRITICAL(res == ResOK); + return res; +} + + +/* SegReclaim -- reclaim a segment */ + +void SegReclaim(Seg seg, Trace trace) +{ + AVERT_CRITICAL(Seg, seg); + AVERT_CRITICAL(Trace, trace); + AVER_CRITICAL(PoolArena(SegPool(seg)) == trace->arena); + + /* There shouldn't be any grey things left for this trace. */ + AVER_CRITICAL(!TraceSetIsMember(SegGrey(seg), trace)); + /* Should only be reclaiming segments which are still white. */ + AVER_CRITICAL(TraceSetIsMember(SegWhite(seg), trace)); + + EVENT4(SegReclaim, trace->arena, SegPool(seg), trace, seg); + Method(Seg, seg, reclaim)(seg, trace); +} + + +/* SegWalk -- walk objects in this segment */ + +void SegWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s) +{ + AVERT(Seg, seg); + AVERT(Format, format); + AVER(FUNCHECK(f)); + /* p and s are arbitrary values, hence can't be checked. */ + + Method(Seg, seg, walk)(seg, format, f, p, s); +} + + +/* Class Seg -- The most basic segment class + * + * .seg.method.check: Many seg methods are lightweight and used + * frequently. Their parameters are checked by the corresponding + * dispatching function, and so the their parameter AVERs are + * marked as critical. 
+ */ + + +/* SegCheck -- check the integrity of a segment */ + +Bool SegCheck(Seg seg) +{ + Arena arena; + Pool pool; + + CHECKS(Seg, seg); + CHECKC(Seg, seg); + CHECKL(TraceSetCheck(seg->white)); + + /* can't assume nailed is subset of white - mightn't be during whiten */ + /* CHECKL(TraceSetSub(seg->nailed, seg->white)); */ + CHECKL(TraceSetCheck(seg->grey)); + CHECKD_NOSIG(Tract, seg->firstTract); + pool = SegPool(seg); + CHECKU(Pool, pool); + arena = PoolArena(pool); + CHECKU(Arena, arena); + CHECKL(AddrIsArenaGrain(TractBase(seg->firstTract), arena)); + CHECKL(AddrIsArenaGrain(seg->limit, arena)); + CHECKL(seg->limit > TractBase(seg->firstTract)); + /* CHECKL(BoolCheck(seq->queued)); */ + + /* Each tract of the segment must agree about the segment and its + * pool. Note that even if the CHECKs are compiled away there is + * still a significant cost in looping over the tracts, hence the + * guard. See job003778. */ +#if defined(AVER_AND_CHECK_ALL) + { + Tract tract; + Addr addr; + TRACT_TRACT_FOR(tract, addr, arena, seg->firstTract, seg->limit) { + Seg trseg = NULL; /* suppress compiler warning */ + + CHECKD_NOSIG(Tract, tract); + CHECKL(TRACT_SEG(&trseg, tract)); + CHECKL(trseg == seg); + CHECKL(TractPool(tract) == pool); + } + CHECKL(addr == seg->limit); + } +#endif /* AVER_AND_CHECK_ALL */ + + /* The segment must belong to some pool, so it should be on a */ + /* pool's segment ring. (Actually, this isn't true just after */ + /* the segment is initialized.) */ + /* CHECKL(RingNext(&seg->poolRing) != &seg->poolRing); */ + + CHECKD_NOSIG(Ring, &seg->poolRing); + + /* Shield invariants -- see . */ + + /* The protection mode is never more than the shield mode + . */ + CHECKL(BS_DIFF(seg->pm, seg->sm) == 0); + + /* All unsynced segments have positive depth or are in the queue + . 
*/ + CHECKL(seg->sm == seg->pm || seg->depth > 0 || seg->queued); + + CHECKL(RankSetCheck(seg->rankSet)); + if (seg->rankSet == RankSetEMPTY) { + /* : If there are no refs */ + /* in the segment then it cannot contain black or grey refs. */ + CHECKL(seg->grey == TraceSetEMPTY); + CHECKL(seg->sm == AccessSetEMPTY); + CHECKL(seg->pm == AccessSetEMPTY); + } else { + /* : The Tracer only permits */ + /* one rank per segment [ref?] so this field is either empty or a */ + /* singleton. */ + CHECKL(RankSetIsSingle(seg->rankSet)); + /* Can't check barrier invariants because SegCheck is called */ + /* when raising or lowering the barrier. */ + /* .check.wb: If summary isn't universal then it must be */ + /* write shielded. */ + /* CHECKL(seg->_summary == RefSetUNIV || (seg->_sm & AccessWRITE)); */ + /* @@@@ What can be checked about the read barrier? */ + /* TODO: Need gcSegCheck? What does RankSet imply about being a gcSeg? */ + } + return TRUE; +} + + +/* segNoSetGrey -- non-method to change the greyness of a segment */ + +static void segNoSetGrey(Seg seg, TraceSet grey) +{ + AVERT(Seg, seg); + AVERT(TraceSet, grey); + AVER(seg->rankSet != RankSetEMPTY); + NOTREACHED; +} + + +/* segTrivFlip -- ignore trace that's about to flip */ + +static void segTrivFlip(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(seg->rankSet != RankSetEMPTY); +} + + +/* segNoSetWhite -- non-method to change the whiteness of a segment */ + +static void segNoSetWhite(Seg seg, TraceSet white) +{ + AVERT(Seg, seg); + AVERT(TraceSet, white); + NOTREACHED; +} + + +/* segNoSetRankSet -- non-method to set the rank set of a segment */ + +static void segNoSetRankSet(Seg seg, RankSet rankSet) +{ + AVERT(Seg, seg); + AVERT(RankSet, rankSet); + NOTREACHED; +} + + +/* segNoSetSummary -- non-method to set the summary of a segment */ + +static void segNoSetSummary(Seg seg, RefSet summary) +{ + AVERT(Seg, seg); + UNUSED(summary); + NOTREACHED; +} + + +/* segNoSetRankSummary -- non-method 
to set the rank set & summary */ + +static void segNoSetRankSummary(Seg seg, RankSet rankSet, RefSet summary) +{ + AVERT(Seg, seg); + AVERT(RankSet, rankSet); + UNUSED(summary); + NOTREACHED; +} + + +/* segNoBuffer -- non-method to return the buffer of a segment */ + +static Bool segNoBuffer(Buffer *bufferReturn, Seg seg) +{ + AVERT(Seg, seg); + AVER(bufferReturn != NULL); + NOTREACHED; + return FALSE; +} + + +/* segNoSetBuffer -- non-method to set the buffer of a segment */ + +static void segNoSetBuffer(Seg seg, Buffer buffer) +{ + AVERT(Seg, seg); + AVERT(Buffer, buffer); + NOTREACHED; +} + + +/* segNoSetBuffer -- non-method to set the buffer of a segment */ + +static void segNoUnsetBuffer(Seg seg) +{ + AVERT(Seg, seg); + NOTREACHED; +} + + +/* segNoBufferFill -- non-method to fill buffer from segment */ + +static Bool segNoBufferFill(Addr *baseReturn, Addr *limitReturn, + Seg seg, Size size, RankSet rankSet) +{ + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + AVERT(Seg, seg); + AVER(size > 0); + AVERT(RankSet, rankSet); + NOTREACHED; + return FALSE; +} + + +/* segNoBufferEmpty -- non-method to empty buffer to segment */ + +static void segNoBufferEmpty(Seg seg, Buffer buffer) +{ + AVERT(Seg, seg); + AVERT(Buffer, buffer); + NOTREACHED; +} + + +/* segNoMerge -- merge method for segs which don't support merge */ + +static Res segNoMerge(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + AVERT(Seg, seg); + AVERT(Seg, segHi); + AVER(SegBase(seg) == base); + AVER(SegLimit(seg) == mid); + AVER(SegBase(segHi) == mid); + AVER(SegLimit(segHi) == limit); + NOTREACHED; + return ResFAIL; +} + + +/* segTrivMerge -- Basic Seg merge method + * + * .similar: Segments must be "sufficiently similar". 
+ * + */ + +static Res segTrivMerge(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + Pool pool; + Arena arena; + Tract tract; + Addr addr; + + AVERT(Seg, seg); + AVERT(Seg, segHi); + pool = SegPool(seg); + arena = PoolArena(pool); + AVER(AddrIsArenaGrain(base, arena)); + AVER(AddrIsArenaGrain(mid, arena)); + AVER(AddrIsArenaGrain(limit, arena)); + AVER(base < mid); + AVER(mid < limit); + AVER(SegBase(seg) == base); + AVER(SegLimit(seg) == mid); + AVER(SegBase(segHi) == mid); + AVER(SegLimit(segHi) == limit); + + /* .similar. */ + AVER(seg->rankSet == segHi->rankSet); + AVER(seg->white == segHi->white); + AVER(seg->nailed == segHi->nailed); + AVER(seg->grey == segHi->grey); + AVER(seg->pm == segHi->pm); + AVER(seg->sm == segHi->sm); + AVER(seg->depth == segHi->depth); + AVER(seg->queued == segHi->queued); + /* Neither segment may be exposed, or in the shield cache */ + /* & */ + AVER(seg->depth == 0); + AVER(!seg->queued); + + /* no need to update fields which match. See .similar */ + + seg->limit = limit; + TRACT_FOR(tract, addr, arena, mid, limit) { + AVERT(Tract, tract); + AVER(segHi == TractSeg(tract)); + AVER(TractPool(tract) == pool); + TRACT_SET_SEG(tract, seg); + } + AVER(addr == seg->limit); + + /* Finish segHi. 
*/ + RingRemove(SegPoolRing(segHi)); + RingFinish(SegPoolRing(segHi)); + segHi->sig = SigInvalid; + + AVERT(Seg, seg); + return ResOK; +} + + +/* segNoSplit -- split method for segs which don't support splitting */ + +static Res segNoSplit(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + AVERT(Seg, seg); + AVER(segHi != NULL); /* can't check fully, it's not initialized */ + AVER(base < mid); + AVER(mid < limit); + AVER(SegBase(seg) == base); + AVER(SegLimit(seg) == limit); + NOTREACHED; + return ResFAIL; +} + + +/* segTrivSplit -- Basic Seg split method */ + +static Res segTrivSplit(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + Pool pool = SegPool(MustBeA(Seg, seg)); + Arena arena = PoolArena(pool); + SegClass klass; + Tract tract; + Addr addr; + + AVER(segHi != NULL); /* can't check fully, it's not initialized */ + AVER(AddrIsArenaGrain(base, arena)); + AVER(AddrIsArenaGrain(mid, arena)); + AVER(AddrIsArenaGrain(limit, arena)); + AVER(base < mid); + AVER(mid < limit); + AVER(SegBase(seg) == base); + AVER(SegLimit(seg) == limit); + + /* Segment may not be exposed, or in the shield queue */ + /* & */ + AVER(seg->depth == 0); + AVER(!seg->queued); + + /* Full initialization for segHi. Just modify seg. 
*/ + seg->limit = mid; + AVERT(Seg, seg); + + InstInit(CouldBeA(Inst, segHi)); + segHi->limit = limit; + segHi->rankSet = seg->rankSet; + segHi->white = seg->white; + segHi->nailed = seg->nailed; + segHi->grey = seg->grey; + segHi->pm = seg->pm; + segHi->sm = seg->sm; + segHi->depth = seg->depth; + segHi->queued = seg->queued; + segHi->firstTract = NULL; + RingInit(SegPoolRing(segHi)); + + TRACT_FOR(tract, addr, arena, mid, limit) { + AVERT(Tract, tract); + AVER(seg == TractSeg(tract)); + AVER(TractPool(tract) == pool); + TRACT_SET_SEG(tract, segHi); + if (addr == mid) { + AVER(segHi->firstTract == NULL); + segHi->firstTract = tract; + } + AVER(segHi->firstTract != NULL); + } + AVER(addr == segHi->limit); + + klass = ClassOfPoly(Seg, seg); + SetClassOfPoly(segHi, klass); + segHi->sig = SegSig; + AVERC(Seg, segHi); + + RingAppend(&pool->segRing, SegPoolRing(segHi)); + + return ResOK; +} + + +/* segNoAccess -- access method for non-GC segs + * + * Should be used (for the access method) by segment classes which do + * not expect to ever have pages which the mutator will fault on. That + * is, no protected pages, or only pages which are inaccessible by the + * mutator are protected. + */ +static Res segNoAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context) +{ + AVERT(Seg, seg); + AVERT(Arena, arena); + AVER(SegBase(seg) <= addr); + AVER(addr < SegLimit(seg)); + AVERT(AccessSet, mode); + AVERT(MutatorContext, context); + UNUSED(mode); + UNUSED(context); + + NOTREACHED; + return ResUNIMPL; +} + + +/* SegWholeAccess + * + * See also SegSingleAccess + * + * Should be used (for the access method) by segment classes which + * intend to handle page faults by scanning the entire segment and + * lowering the barrier. 
+ */ +Res SegWholeAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context) +{ + AVERT(Seg, seg); + AVERT(Arena, arena); + AVER(arena == PoolArena(SegPool(seg))); + AVER(SegBase(seg) <= addr); + AVER(addr < SegLimit(seg)); + AVERT(AccessSet, mode); + AVERT(MutatorContext, context); + + UNUSED(addr); + UNUSED(context); + TraceSegAccess(arena, seg, mode); + return ResOK; +} + + +/* SegSingleAccess + * + * See also ArenaRead, and SegWholeAccess. + * + * Handles page faults by attempting emulation. If the faulting + * instruction cannot be emulated then this function returns ResFAIL. + * + * Due to the assumptions made below, segment classes should only use + * this function if all words in an object are tagged or traceable. + * + * .single-access.assume.ref: It currently assumes that the address + * being faulted on contains a plain reference or a tagged + * non-reference. + * + * .single-access.improve.format: Later this will be abstracted + * through the client object format interface, so that no such + * assumption is necessary. + */ +Res SegSingleAccess(Seg seg, Arena arena, Addr addr, + AccessSet mode, MutatorContext context) +{ + AVERT(Seg, seg); + AVERT(Arena, arena); + AVER(arena == PoolArena(SegPool(seg))); + AVER(SegBase(seg) <= addr); + AVER(addr < SegLimit(seg)); + AVERT(AccessSet, mode); + AVERT(MutatorContext, context); + + if (MutatorContextCanStepInstruction(context)) { + Ref ref; + Res res; + + ShieldExpose(arena, seg); + + if(mode & SegSM(seg) & AccessREAD) { + /* Read access. */ + /* .single-access.assume.ref */ + /* .single-access.improve.format */ + ref = *(Ref *)addr; + /* .tagging: Check that the reference is aligned to a word boundary */ + /* (we assume it is not a reference otherwise). */ + if(WordIsAligned((Word)ref, sizeof(Word))) { + Rank rank; + /* See the note in TraceRankForAccess */ + /* . 
*/ + + rank = TraceRankForAccess(arena, seg); + TraceScanSingleRef(arena->flippedTraces, rank, arena, + seg, (Ref *)addr); + } + } + res = MutatorContextStepInstruction(context); + AVER(res == ResOK); + + /* Update SegSummary according to the possibly changed reference. */ + ref = *(Ref *)addr; + /* .tagging: ought to check the reference for a tag. But + * this is conservative. */ + SegSetSummary(seg, RefSetAdd(arena, SegSummary(seg), ref)); + + ShieldCover(arena, seg); + + return ResOK; + } else { + /* couldn't single-step instruction */ + return ResFAIL; + } +} + + +/* segNoWhiten -- whiten method for non-GC segs */ + +static Res segNoWhiten(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(PoolArena(SegPool(seg)) == trace->arena); + NOTREACHED; + return ResUNIMPL; +} + + +/* segNoGreyen -- greyen method for non-GC segs */ + +static void segNoGreyen(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(PoolArena(SegPool(seg)) == trace->arena); + NOTREACHED; +} + + +/* segNoBlacken -- blacken method for non-GC segs */ + +static void segNoBlacken(Seg seg, TraceSet traceSet) +{ + AVERT(Seg, seg); + AVERT(TraceSet, traceSet); + NOTREACHED; +} + + +/* segNoScan -- scan method for non-GC segs */ + +static Res segNoScan(Bool *totalReturn, Seg seg, ScanState ss) +{ + AVER(totalReturn != NULL); + AVERT(Seg, seg); + AVERT(ScanState, ss); + AVER(PoolArena(SegPool(seg)) == ss->arena); + NOTREACHED; + return ResUNIMPL; +} + + +/* segNoFix -- fix method for non-GC segs */ + +static Res segNoFix(Seg seg, ScanState ss, Ref *refIO) +{ + AVERT(Seg, seg); + AVERT(ScanState, ss); + AVER(refIO != NULL); + NOTREACHED; + return ResUNIMPL; +} + + +/* segNoReclaim -- reclaim method for non-GC segs */ + +static void segNoReclaim(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(PoolArena(SegPool(seg)) == trace->arena); + NOTREACHED; +} + + +/* segTrivWalk -- walk method for non-formatted segs */ + +static void 
segTrivWalk(Seg seg, Format format, FormattedObjectsVisitor f, + void *p, size_t s) +{ + AVERT(Seg, seg); + AVERT(Format, format); + AVER(FUNCHECK(f)); + /* p and s are arbitrary, hence can't be checked */ + UNUSED(p); + UNUSED(s); + NOOP; +} + + +/* Class GCSeg -- collectable segment class */ + +/* GCSegCheck -- check the integrity of a GCSeg */ + +Bool GCSegCheck(GCSeg gcseg) +{ + Seg seg; + CHECKS(GCSeg, gcseg); + seg = &gcseg->segStruct; + CHECKD(Seg, seg); + + if (gcseg->buffer != NULL) { + CHECKU(Buffer, gcseg->buffer); + /* */ + CHECKL(BufferPool(gcseg->buffer) == SegPool(seg)); + CHECKL(BufferRankSet(gcseg->buffer) == SegRankSet(seg)); + } + + /* The segment should be on a grey ring if and only if it is grey. */ + CHECKD_NOSIG(Ring, &gcseg->greyRing); + CHECKL((seg->grey == TraceSetEMPTY) == + RingIsSingle(&gcseg->greyRing)); + + if (seg->rankSet == RankSetEMPTY) { + /* */ + CHECKL(gcseg->summary == RefSetEMPTY); + } + + CHECKD_NOSIG(Ring, &gcseg->genRing); + + return TRUE; +} + + +/* gcSegInit -- method to initialize a GC segment */ + +static Res gcSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + GCSeg gcseg; + Res res; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, GCSeg, init)(seg, pool, base, size, args); + if (ResOK != res) + return res; + gcseg = CouldBeA(GCSeg, seg); + + gcseg->summary = RefSetEMPTY; + gcseg->buffer = NULL; + RingInit(&gcseg->greyRing); + RingInit(&gcseg->genRing); + + SetClassOfPoly(seg, CLASS(GCSeg)); + gcseg->sig = GCSegSig; + AVERC(GCSeg, gcseg); + + return ResOK; +} + + +/* gcSegFinish -- finish a GC segment */ + +static void gcSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + GCSeg gcseg = MustBeA(GCSeg, seg); + + if (SegGrey(seg) != TraceSetEMPTY) { + RingRemove(&gcseg->greyRing); + seg->grey = TraceSetEMPTY; + } + + EVENT5(SegSetSummary, PoolArena(SegPool(seg)), seg, SegSize(seg), + gcseg->summary, RefSetEMPTY); + + gcseg->summary = RefSetEMPTY; + + 
gcseg->sig = SigInvalid; + + /* Don't leave a dangling buffer allocating into hyperspace. */ + AVER(gcseg->buffer == NULL); /* */ + + RingFinish(&gcseg->greyRing); + RingFinish(&gcseg->genRing); + + /* finish the superclass fields last */ + NextMethod(Inst, GCSeg, finish)(inst); +} + + +/* gcSegSetGreyInternal -- change the greyness of a segment + * + * Internal method for updating the greyness of a GCSeg. + * Updates the grey ring and the grey seg count. + * Doesn't affect the shield (so it can be used by split + * & merge methods). + */ + +static void gcSegSetGreyInternal(Seg seg, TraceSet oldGrey, TraceSet grey) +{ + GCSeg gcseg; + Arena arena; + Rank rank; + + /* Internal method. Parameters are checked by caller */ + gcseg = SegGCSeg(seg); + arena = PoolArena(SegPool(seg)); + seg->grey = BS_BITFIELD(Trace, grey); + + /* If the segment is now grey and wasn't before, add it to the */ + /* appropriate grey list so that TraceFindGrey can locate it */ + /* quickly later. If it is no longer grey and was before, */ + /* remove it from the list. */ + if (oldGrey == TraceSetEMPTY) { + if (grey != TraceSetEMPTY) { + AVER(RankSetIsSingle(seg->rankSet)); + for(rank = RankMIN; rank < RankLIMIT; ++rank) + if (RankSetIsMember(seg->rankSet, rank)) { + /* NOTE: We push the segment onto the front of the queue, so that + we preserve some locality of scanning, and so that we tend to + forward objects that are closely linked to the same or nearby + segments. 
*/ + RingInsert(ArenaGreyRing(arena, rank), &gcseg->greyRing); + break; + } + AVER(rank != RankLIMIT); /* there should've been a match */ + } + } else { + if (grey == TraceSetEMPTY) + RingRemove(&gcseg->greyRing); + } + + STATISTIC({ + TraceId ti; Trace trace; + TraceSet diff; + + diff = TraceSetDiff(grey, oldGrey); + TRACE_SET_ITER(ti, trace, diff, arena) + ++trace->greySegCount; + if (trace->greySegCount > trace->greySegMax) + trace->greySegMax = trace->greySegCount; + TRACE_SET_ITER_END(ti, trace, diff, arena); + + diff = TraceSetDiff(oldGrey, grey); + TRACE_SET_ITER(ti, trace, diff, arena) + --trace->greySegCount; + TRACE_SET_ITER_END(ti, trace, diff, arena); + }); +} + + +/* gcSegSetGrey -- GCSeg method to change the greyness of a segment + * + * Sets the segment greyness to the trace set grey. + */ + +static void gcSegSetGrey(Seg seg, TraceSet grey) +{ + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + AVERT_CRITICAL(TraceSet, grey); /* .seg.method.check */ + AVER_CRITICAL(seg->rankSet != RankSetEMPTY); + + gcSegSetGreyInternal(seg, seg->grey, grey); /* do the work */ +} + + +/* mutatorSegSetGrey -- MutatorSeg method to change greyness of segment + * + * As gcSegSetGrey, but also raise or lower the read barrier. + */ + +static void mutatorSegSetGrey(Seg seg, TraceSet grey) +{ + TraceSet oldGrey, flippedTraces; + Arena arena; + + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + + oldGrey = seg->grey; + + NextMethod(Seg, MutatorSeg, setGrey)(seg, grey); + + /* The read barrier is raised when the segment is grey for */ + /* some _flipped_ trace, i.e., is grey for a trace for which */ + /* the mutator is black. 
*/ + arena = PoolArena(SegPool(seg)); + flippedTraces = arena->flippedTraces; + if (TraceSetInter(oldGrey, flippedTraces) == TraceSetEMPTY) { + if (TraceSetInter(grey, flippedTraces) != TraceSetEMPTY) + ShieldRaise(arena, seg, AccessREAD); + } else { + if (TraceSetInter(grey, flippedTraces) == TraceSetEMPTY) + ShieldLower(arena, seg, AccessREAD); + } +} + +/* mutatorSegFlip -- update barriers for a trace that's about to flip */ + +static void mutatorSegFlip(Seg seg, Trace trace) +{ + TraceSet flippedTraces; + Arena arena; + + NextMethod(Seg, MutatorSeg, flip)(seg, trace); + + arena = PoolArena(SegPool(seg)); + flippedTraces = arena->flippedTraces; + AVER(!TraceSetIsMember(flippedTraces, trace)); + + /* Raise the read barrier if the segment was not grey for any + currently flipped trace. */ + if (TraceSetInter(SegGrey(seg), flippedTraces) == TraceSetEMPTY) { + ShieldRaise(arena, seg, AccessREAD); + } else { + /* If the segment is grey for some currently flipped trace then + the read barrier must already have been raised, either in this + method or in mutatorSegSetGrey. */ + AVER(SegSM(seg) & AccessREAD); + } +} + + +/* gcSegSetWhite -- GCSeg method to change whiteness of a segment + * + * Sets the segment whiteness to the trace set ts. + */ + +static void gcSegSetWhite(Seg seg, TraceSet white) +{ + GCSeg gcseg; + + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + AVERT_CRITICAL(TraceSet, white); /* .seg.method.check */ + gcseg = SegGCSeg(seg); + AVERT_CRITICAL(GCSeg, gcseg); + AVER_CRITICAL(&gcseg->segStruct == seg); + + seg->white = BS_BITFIELD(Trace, white); +} + + +/* gcSegSetRankSet -- GCSeg method to set the rank set of a segment + * + * The caller must set the summary to empty before setting the rank + * set to empty. The caller must set the rank set to non-empty before + * setting the summary to non-empty. 
+ */ + +static void gcSegSetRankSet(Seg seg, RankSet rankSet) +{ + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + AVERT_CRITICAL(RankSet, rankSet); /* .seg.method.check */ + AVER_CRITICAL(rankSet == RankSetEMPTY + || RankSetIsSingle(rankSet)); /* .seg.method.check */ + + seg->rankSet = BS_BITFIELD(Rank, rankSet); +} + + +/* mutatorSegSetRankSet -- MutatorSeg method to set rank set of segment + * + * As gcSegSetRankSet, but also sets or clears the write barrier on + * the segment. + * + * If the rank set is made non-empty then the segment's summary is now + * a subset of the mutator's (which is assumed to be RefSetUNIV) so + * the write barrier must be imposed on the segment. If the rank set + * is made empty then there are no longer any references on the + * segment so the barrier is removed. + */ + +static void mutatorSegSetRankSet(Seg seg, RankSet rankSet) +{ + RankSet oldRankSet; + + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + oldRankSet = seg->rankSet; + + NextMethod(Seg, MutatorSeg, setRankSet)(seg, rankSet); + + if (oldRankSet == RankSetEMPTY) { + if (rankSet != RankSetEMPTY) { + AVER_CRITICAL(SegGCSeg(seg)->summary == RefSetEMPTY); + ShieldRaise(PoolArena(SegPool(seg)), seg, AccessWRITE); + } + } else { + if (rankSet == RankSetEMPTY) { + AVER_CRITICAL(SegGCSeg(seg)->summary == RefSetEMPTY); + ShieldLower(PoolArena(SegPool(seg)), seg, AccessWRITE); + } + } +} + + +/* mutatorSegSyncWriteBarrier -- raise or lower write barrier on segment + * + * We only need to raise the write barrier if the segment contains + * references, and its summary is strictly smaller than the summary of + * the unprotectable data (that is, the mutator). We don't maintain + * such a summary, assuming that the mutator can access all + * references, so its summary is RefSetUNIV. + */ + +static void mutatorSegSyncWriteBarrier(Seg seg) +{ + Arena arena = PoolArena(SegPool(seg)); + /* Can't check seg -- this function enforces invariants tested by SegCheck. 
*/ + if (SegSummary(seg) == RefSetUNIV) + ShieldLower(arena, seg, AccessWRITE); + else + ShieldRaise(arena, seg, AccessWRITE); +} + + +/* gcSegSetSummary -- GCSeg method to change the summary on a segment */ + +static void gcSegSetSummary(Seg seg, RefSet summary) +{ + GCSeg gcseg; + + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + gcseg = SegGCSeg(seg); + AVERT_CRITICAL(GCSeg, gcseg); + AVER_CRITICAL(&gcseg->segStruct == seg); + + EVENT5(SegSetSummary, PoolArena(SegPool(seg)), seg, SegSize(seg), + gcseg->summary, summary); + + gcseg->summary = summary; + + AVER_CRITICAL(seg->rankSet != RankSetEMPTY); +} + + +/* mutatorSegSetSummary -- MutatorSeg method to change summary on segment + * + * As gcSegSetSummary, but also raise or lower the write barrier. + */ + +static void mutatorSegSetSummary(Seg seg, RefSet summary) +{ + NextMethod(Seg, MutatorSeg, setSummary)(seg, summary); + mutatorSegSyncWriteBarrier(seg); +} + + + +/* gcSegSetRankSummary -- GCSeg method to set both rank set and summary */ + +static void gcSegSetRankSummary(Seg seg, RankSet rankSet, RefSet summary) +{ + GCSeg gcseg; + + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + AVERT_CRITICAL(RankSet, rankSet); /* .seg.method.check */ + AVER_CRITICAL(rankSet == RankSetEMPTY + || RankSetIsSingle(rankSet)); /* .seg.method.check */ + gcseg = SegGCSeg(seg); + AVERT_CRITICAL(GCSeg, gcseg); + AVER_CRITICAL(&gcseg->segStruct == seg); + + /* rankSet == RankSetEMPTY implies summary == RefSetEMPTY */ + AVER_CRITICAL(rankSet != RankSetEMPTY || summary == RefSetEMPTY); + + seg->rankSet = BS_BITFIELD(Rank, rankSet); + EVENT5(SegSetSummary, PoolArena(SegPool(seg)), seg, SegSize(seg), + gcseg->summary, summary); + gcseg->summary = summary; +} + +static void mutatorSegSetRankSummary(Seg seg, RankSet rankSet, RefSet summary) +{ + NextMethod(Seg, MutatorSeg, setRankSummary)(seg, rankSet, summary); + if (rankSet != RankSetEMPTY) + mutatorSegSyncWriteBarrier(seg); +} + + +/* gcSegBuffer -- GCSeg method to return the 
buffer of a segment */ + +static Bool gcSegBuffer(Buffer *bufferReturn, Seg seg) +{ + GCSeg gcseg; + + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + gcseg = SegGCSeg(seg); + AVERT_CRITICAL(GCSeg, gcseg); /* .seg.method.check */ + AVER_CRITICAL(&gcseg->segStruct == seg); + + if (gcseg->buffer != NULL) { + *bufferReturn = gcseg->buffer; + return TRUE; + } + + return FALSE; +} + + +/* gcSegSetBuffer -- GCSeg method to change the buffer of a segment */ + +static void gcSegSetBuffer(Seg seg, Buffer buffer) +{ + GCSeg gcseg; + + AVERT_CRITICAL(Seg, seg); /* .seg.method.check */ + if (buffer != NULL) + AVERT_CRITICAL(Buffer, buffer); + gcseg = SegGCSeg(seg); + AVERT_CRITICAL(GCSeg, gcseg); + AVER_CRITICAL(&gcseg->segStruct == seg); + + gcseg->buffer = buffer; +} + + +/* gcSegUnsetBuffer -- GCSeg method to remove the buffer from a segment */ + +static void gcSegUnsetBuffer(Seg seg) +{ + GCSeg gcseg = MustBeA_CRITICAL(GCSeg, seg); /* .seg.method.check */ + gcseg->buffer = NULL; +} + + +/* gcSegMerge -- GCSeg merge method + * + * .buffer: Can't merge two segments both with buffers. + * . + */ + +static Res gcSegMerge(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + GCSeg gcseg, gcsegHi; + TraceSet grey; + RefSet summary; + Buffer buf; + Res res; + + AVERT(Seg, seg); + AVERT(Seg, segHi); + gcseg = SegGCSeg(seg); + gcsegHi = SegGCSeg(segHi); + AVERT(GCSeg, gcseg); + AVERT(GCSeg, gcsegHi); + AVER(base < mid); + AVER(mid < limit); + AVER(SegBase(seg) == base); + AVER(SegLimit(seg) == mid); + AVER(SegBase(segHi) == mid); + AVER(SegLimit(segHi) == limit); + + buf = gcsegHi->buffer; /* any buffer on segHi must be reassigned */ + AVER(buf == NULL || gcseg->buffer == NULL); /* See .buffer */ + grey = SegGrey(segHi); /* check greyness */ + AVER(SegGrey(seg) == grey); + + /* Assume that the write barrier shield is being used to implement + the remembered set only, and so we can merge the shield and + protection modes by unioning the segment summaries. See also + . 
*/ + summary = RefSetUnion(gcseg->summary, gcsegHi->summary); + SegSetSummary(seg, summary); + SegSetSummary(segHi, summary); + AVER(SegSM(seg) == SegSM(segHi)); + if (SegPM(seg) != SegPM(segHi)) { + /* This shield won't cope with a partially-protected segment, so + flush the shield queue to bring both halves in sync. See also + . */ + ShieldFlush(PoolArena(SegPool(seg))); + } + + /* Merge the superclass fields via next-method call */ + res = NextMethod(Seg, GCSeg, merge)(seg, segHi, base, mid, limit); + if (res != ResOK) + goto failSuper; + + /* Update fields of gcseg. Finish gcsegHi. */ + gcSegSetGreyInternal(segHi, grey, TraceSetEMPTY); + gcsegHi->summary = RefSetEMPTY; + gcsegHi->sig = SigInvalid; + RingFinish(&gcsegHi->greyRing); + RingRemove(&gcsegHi->genRing); + RingFinish(&gcsegHi->genRing); + + /* Reassign any buffer that was connected to segHi */ + if (NULL != buf) { + AVER(gcseg->buffer == NULL); + gcseg->buffer = buf; + gcsegHi->buffer = NULL; + BufferReassignSeg(buf, seg); + } + + AVERT(GCSeg, gcseg); + return ResOK; + +failSuper: + AVERT(GCSeg, gcseg); + AVERT(GCSeg, gcsegHi); + return res; +} + + +/* gcSegSplit -- GCSeg split method */ + +static Res gcSegSplit(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + GCSeg gcseg, gcsegHi; + Buffer buf; + TraceSet grey; + Res res; + + AVERT(Seg, seg); + AVER(segHi != NULL); /* can't check fully, it's not initialized */ + gcseg = SegGCSeg(seg); + AVERT(GCSeg, gcseg); + AVER(base < mid); + AVER(mid < limit); + AVER(SegBase(seg) == base); + AVER(SegLimit(seg) == limit); + + grey = SegGrey(seg); + buf = gcseg->buffer; /* Look for buffer to reassign to segHi */ + if (buf != NULL) { + if (BufferLimit(buf) > mid) { + /* Existing buffer extends above the split point */ + AVER(BufferBase(buf) > mid); /* check it's all above the split */ + } else { + buf = NULL; /* buffer lies below split and is unaffected */ + } + } + + /* Split the superclass fields via next-method call */ + res = NextMethod(Seg, GCSeg, 
split)(seg, segHi, base, mid, limit); + if (res != ResOK) + goto failSuper; + + /* Full initialization for segHi. */ + gcsegHi = SegGCSeg(segHi); + gcsegHi->summary = gcseg->summary; + gcsegHi->buffer = NULL; + RingInit(&gcsegHi->greyRing); + RingInit(&gcsegHi->genRing); + RingInsert(&gcseg->genRing, &gcsegHi->genRing); + gcsegHi->sig = GCSegSig; + gcSegSetGreyInternal(segHi, TraceSetEMPTY, grey); + + /* Reassign buffer if it's now connected to segHi */ + if (NULL != buf) { + gcsegHi->buffer = buf; + gcseg->buffer = NULL; + BufferReassignSeg(buf, segHi); + } + + AVERT(GCSeg, gcseg); + AVERT(GCSeg, gcsegHi); + return ResOK; + +failSuper: + AVERT(GCSeg, gcseg); + return res; +} + + +/* gcSegWhiten -- GCSeg white method */ + +static Res gcSegWhiten(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(PoolArena(SegPool(seg)) == trace->arena); + + SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + + return ResOK; +} + + +/* gcSegGreyen -- GCSeg greyen method + * + * If we had a (partially) white segment, then other parts of the same + * segment might need to get greyed. In fact, all current pools only + * ever whiten a whole segment, so we never need to greyen any part of + * an already whitened segment. So we exclude white segments. + */ + +static void gcSegGreyen(Seg seg, Trace trace) +{ + AVERT(Seg, seg); + AVERT(Trace, trace); + AVER(PoolArena(SegPool(seg)) == trace->arena); + + if (!TraceSetIsMember(SegWhite(seg), trace)) + SegSetGrey(seg, TraceSetSingle(trace)); +} + + +/* gcSegTrivBlacken -- GCSeg trivial blacken method + * + * For segments which do not keep additional colour information. 
+ */ + +static void gcSegTrivBlacken(Seg seg, TraceSet traceSet) +{ + AVERT(Seg, seg); + AVERT(TraceSet, traceSet); + NOOP; +} + + +/* gcSegDescribe -- GCSeg description method */ + +static Res gcSegDescribe(Inst inst, mps_lib_FILE *stream, Count depth) +{ + GCSeg gcseg = CouldBeA(GCSeg, inst); + Res res; + + if (!TESTC(GCSeg, gcseg)) + return ResPARAM; + if (stream == NULL) + return ResPARAM; + + /* Describe the superclass fields first via next-method call */ + res = NextMethod(Inst, GCSeg, describe)(inst, stream, depth); + if (res != ResOK) + return res; + + res = WriteF(stream, depth + 2, + "summary $W\n", (WriteFW)gcseg->summary, + NULL); + if (res != ResOK) + return res; + + if (gcseg->buffer == NULL) { + res = WriteF(stream, depth + 2, "buffer: NULL\n", NULL); + } else { + res = BufferDescribe(gcseg->buffer, stream, depth + 2); + } + if (res != ResOK) + return res; + + return ResOK; +} + + +/* SegClassCheck -- check a segment class */ + +Bool SegClassCheck(SegClass klass) +{ + CHECKD(InstClass, &klass->instClassStruct); + CHECKL(klass->size >= sizeof(SegStruct)); + CHECKL(FUNCHECK(klass->init)); + CHECKL(FUNCHECK(klass->setSummary)); + CHECKL(FUNCHECK(klass->buffer)); + CHECKL(FUNCHECK(klass->setBuffer)); + CHECKL(FUNCHECK(klass->unsetBuffer)); + CHECKL(FUNCHECK(klass->bufferFill)); + CHECKL(FUNCHECK(klass->bufferEmpty)); + CHECKL(FUNCHECK(klass->setGrey)); + CHECKL(FUNCHECK(klass->setWhite)); + CHECKL(FUNCHECK(klass->setRankSet)); + CHECKL(FUNCHECK(klass->setRankSummary)); + CHECKL(FUNCHECK(klass->merge)); + CHECKL(FUNCHECK(klass->split)); + CHECKL(FUNCHECK(klass->access)); + CHECKL(FUNCHECK(klass->whiten)); + CHECKL(FUNCHECK(klass->greyen)); + CHECKL(FUNCHECK(klass->blacken)); + CHECKL(FUNCHECK(klass->scan)); + CHECKL(FUNCHECK(klass->fix)); + CHECKL(FUNCHECK(klass->fixEmergency)); + CHECKL(FUNCHECK(klass->reclaim)); + CHECKL(FUNCHECK(klass->walk)); + + /* Check that segment classes override sets of related methods. 
*/ + CHECKL((klass->init == segAbsInit) + == (klass->instClassStruct.finish == segAbsFinish)); + CHECKL((klass->init == gcSegInit) + == (klass->instClassStruct.finish == gcSegFinish)); + CHECKL((klass->merge == segTrivMerge) == (klass->split == segTrivSplit)); + CHECKL((klass->fix == segNoFix) == (klass->fixEmergency == segNoFix)); + CHECKL((klass->fix == segNoFix) == (klass->reclaim == segNoReclaim)); + + CHECKS(SegClass, klass); + return TRUE; +} + + +/* SegClass -- the vanilla segment class definition */ + +DEFINE_CLASS(Inst, SegClass, klass) +{ + INHERIT_CLASS(klass, SegClass, InstClass); + AVERT(InstClass, klass); +} + +DEFINE_CLASS(Seg, Seg, klass) +{ + INHERIT_CLASS(&klass->instClassStruct, Seg, Inst); + klass->instClassStruct.describe = segAbsDescribe; + klass->instClassStruct.finish = segAbsFinish; + klass->size = sizeof(SegStruct); + klass->init = segAbsInit; + klass->setSummary = segNoSetSummary; + klass->buffer = segNoBuffer; + klass->setBuffer = segNoSetBuffer; + klass->unsetBuffer = segNoUnsetBuffer; + klass->bufferFill = segNoBufferFill; + klass->bufferEmpty = segNoBufferEmpty; + klass->setGrey = segNoSetGrey; + klass->flip = segTrivFlip; + klass->setWhite = segNoSetWhite; + klass->setRankSet = segNoSetRankSet; + klass->setRankSummary = segNoSetRankSummary; + klass->merge = segTrivMerge; + klass->split = segTrivSplit; + klass->access = segNoAccess; + klass->whiten = segNoWhiten; + klass->greyen = segNoGreyen; + klass->blacken = segNoBlacken; + klass->scan = segNoScan; + klass->fix = segNoFix; + klass->fixEmergency = segNoFix; + klass->reclaim = segNoReclaim; + klass->walk = segTrivWalk; + klass->sig = SegClassSig; + AVERT(SegClass, klass); +} + + +/* GCSegClass -- GC-supporting segment class definition */ + +typedef SegClassStruct GCSegClassStruct; + +DEFINE_CLASS(Seg, GCSeg, klass) +{ + INHERIT_CLASS(klass, GCSeg, Seg); + klass->instClassStruct.describe = gcSegDescribe; + klass->instClassStruct.finish = gcSegFinish; + klass->size = 
sizeof(GCSegStruct); + klass->init = gcSegInit; + klass->setSummary = gcSegSetSummary; + klass->buffer = gcSegBuffer; + klass->setBuffer = gcSegSetBuffer; + klass->unsetBuffer = gcSegUnsetBuffer; + klass->setGrey = gcSegSetGrey; + klass->setWhite = gcSegSetWhite; + klass->setRankSet = gcSegSetRankSet; + klass->setRankSummary = gcSegSetRankSummary; + klass->merge = gcSegMerge; + klass->split = gcSegSplit; + klass->access = SegWholeAccess; + klass->whiten = gcSegWhiten; + klass->greyen = gcSegGreyen; + klass->blacken = gcSegTrivBlacken; + klass->scan = segNoScan; /* no useful default method */ + klass->fix = segNoFix; /* no useful default method */ + klass->fixEmergency = segNoFix; /* no useful default method */ + klass->reclaim = segNoReclaim; /* no useful default method */ + klass->walk = segTrivWalk; + AVERT(SegClass, klass); +} + + +/* MutatorSegClass -- collectable mutator segment class definition */ + +typedef SegClassStruct MutatorSegClassStruct; + +DEFINE_CLASS(Seg, MutatorSeg, klass) +{ + INHERIT_CLASS(klass, MutatorSeg, GCSeg); + klass->setSummary = mutatorSegSetSummary; + klass->setGrey = mutatorSegSetGrey; + klass->flip = mutatorSegFlip; + klass->setRankSet = mutatorSegSetRankSet; + klass->setRankSummary = mutatorSegSetRankSummary; + AVERT(SegClass, klass); +} + + +/* SegClassMixInNoSplitMerge -- Mix-in for unsupported merge + * + * Classes which don't support segment splitting and merging + * may mix this in to ensure that erroneous calls are checked. + */ + +void SegClassMixInNoSplitMerge(SegClass klass) +{ + /* Can't check class because it's not initialized yet */ + klass->merge = segNoMerge; + klass->split = segNoSplit; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/segsmss.c b/mps/code/segsmss.c new file mode 100644 index 00000000000..6bb78c5ba31 --- /dev/null +++ b/mps/code/segsmss.c @@ -0,0 +1,873 @@ +/* segsmss.c: Segment splitting and merging stress test + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (c) 2002 Global Graphics Software. + * + * .design: Adapted from amsss.c (because AMS already supports + * a protocol for subclassing AMS segments). Defines a new pool + * class, AMST. Segments are split and merged during BufferFill + * operations. Buffered segments are also split and merged between + * allocation requests. 
+ */ + +#include "mpm.h" +#include "poolams.h" +#include "fmtdy.h" +#include "fmtdytst.h" +#include "testlib.h" +#include "mpslib.h" +#include "locus.h" +#include "mpscams.h" +#include "mpsavm.h" +#include "mpstd.h" +#include "mps.h" + +#include /* fflush, printf, puts, stdout */ + + +/* Start by defining the AMST pool (AMS Test pool) */ + +#define AMSTSig ((Sig)0x519A3529) /* SIGnature AMST */ + +/* AMSTStruct -- AMST pool instance structure */ + +typedef struct AMSTStruct { + AMSStruct amsStruct; /* generic AMS structure */ + Bool failSegs; /* fail seg splits & merges when true */ + Count splits; /* count of successful segment splits */ + Count merges; /* count of successful segment merges */ + Count badSplits; /* count of unsuccessful segment splits */ + Count badMerges; /* count of unsuccessful segment merges */ + Count bsplits; /* count of buffered segment splits */ + Count bmerges; /* count of buffered segment merges */ + Sig sig; /* design.mps.sig.field.end.outer */ +} AMSTStruct; + +typedef struct AMSTStruct *AMST; + +#define PoolAMST(pool) PARENT(AMSTStruct, amsStruct, PARENT(AMSStruct, poolStruct, (pool))) +#define AMST2AMS(amst) (&(amst)->amsStruct) + + +typedef AMST AMSTPool; +#define AMSTPoolCheck AMSTCheck +DECLARE_CLASS(Pool, AMSTPool, AMSPool); +DECLARE_CLASS(Seg, AMSTSeg, AMSSeg); + + +/* AMSTCheck -- the check method for an AMST */ + +ATTRIBUTE_UNUSED +static Bool AMSTCheck(AMST amst) +{ + CHECKS(AMST, amst); + CHECKD_NOSIG(AMS, AMST2AMS(amst)); /* */ + return TRUE; +} + +/* AMSTFailOperation -- should a split/merge operation fail? + * + * returns TRUE if so. 
+ */ +static Bool AMSTFailOperation(AMST amst) +{ + if (amst->failSegs) { + return rnd() % 2; + } else { + return FALSE; + } +} + +/* AMSTSegStruct: AMST segment instances */ + +#define AMSTSegSig ((Sig)0x519A3525) /* SIGnature AMST Seg */ + +typedef struct AMSTSegStruct *AMSTSeg; + +typedef struct AMSTSegStruct { + AMSSegStruct amsSegStruct; /* superclass fields must come first */ + AMSTSeg next; /* mergeable next segment, or NULL */ + AMSTSeg prev; /* mergeable prev segment, or NULL */ + Sig sig; /* design.mps.sig.field.end.outer */ +} AMSTSegStruct; + + + +/* AMSTSegCheck -- check the AMST segment */ + +ATTRIBUTE_UNUSED +static Bool AMSTSegCheck(AMSTSeg amstseg) +{ + CHECKS(AMSTSeg, amstseg); + CHECKD_NOSIG(AMSSeg, &amstseg->amsSegStruct); /* */ + /* don't bother to do other checks - this is a stress test */ + return TRUE; +} + +#define Seg2AMSTSeg(seg) ((AMSTSeg)(seg)) +#define AMSTSeg2Seg(amstseg) ((Seg)(amstseg)) + + +/* amstSegInit -- initialise an amst segment */ + +static Res amstSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args) +{ + AMSTSeg amstseg; + AMST amst; + Res res; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, AMSTSeg, init)(seg, pool, base, size, args); + if (res != ResOK) + return res; + amstseg = CouldBeA(AMSTSeg, seg); + + AVERT(Pool, pool); + amst = PoolAMST(pool); + AVERT(AMST, amst); + /* no useful checks for base and size */ + + amstseg->next = NULL; + amstseg->prev = NULL; + + SetClassOfPoly(seg, CLASS(AMSTSeg)); + amstseg->sig = AMSTSegSig; + AVERC(AMSTSeg, amstseg); + + return ResOK; +} + + +/* amstSegFinish -- Finish method for AMST segments */ + +static void amstSegFinish(Inst inst) +{ + Seg seg = MustBeA(Seg, inst); + AMSTSeg amstseg = MustBeA(AMSTSeg, seg); + + AVERT(AMSTSeg, amstseg); + + if (amstseg->next != NULL) + amstseg->next->prev = NULL; + if (amstseg->prev != NULL) + amstseg->prev->next = NULL; + + amstseg->sig = SigInvalid; + /* finish the superclass fields last 
*/ + NextMethod(Inst, AMSTSeg, finish)(inst); +} + + + +/* amstSegMerge -- AMSTSeg merge method + * + * .fail: Test proper handling of the most complex failure cases + * by deliberately detecting failure sometimes after calling the + * next method. We handle the error by calling the anti-method. + * This isn't strictly safe . + * But we assume here that we won't run out of memory when calling the + * anti-method. + */ +static Res amstSegMerge(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + AMST amst; + AMSTSeg amstseg, amstsegHi; + Res res; + + AVERT(Seg, seg); + AVERT(Seg, segHi); + amstseg = Seg2AMSTSeg(seg); + amstsegHi = Seg2AMSTSeg(segHi); + AVERT(AMSTSeg, amstseg); + AVERT(AMSTSeg, amstsegHi); + amst = PoolAMST(SegPool(seg)); + + /* Merge the superclass fields via direct next-method call */ + res = NextMethod(Seg, AMSTSeg, merge)(seg, segHi, base, mid, limit); + if (res != ResOK) + goto failSuper; + + if (AMSTFailOperation(amst)) { + amst->badMerges++; + printf("D"); + goto failDeliberate; + } + + amstseg->next = amstsegHi->next; + amstsegHi->sig = SigInvalid; + AVERT(AMSTSeg, amstseg); + amst->merges++; + printf("M"); + return ResOK; + +failDeliberate: + /* Call the anti-method (see .fail) */ + res = NextMethod(Seg, AMSTSeg, split)(seg, segHi, base, mid, limit); + AVER(res == ResOK); + res = ResFAIL; +failSuper: + AVERT(AMSTSeg, amstseg); + AVERT(AMSTSeg, amstsegHi); + return res; +} + + +/* amstSegSplit -- AMSTSeg split method */ + +static Res amstSegSplit(Seg seg, Seg segHi, + Addr base, Addr mid, Addr limit) +{ + AMST amst; + AMSTSeg amstseg, amstsegHi; + Res res; + + AVERT(Seg, seg); + AVER(segHi != NULL); /* can't check fully, it's not initialized */ + amstseg = Seg2AMSTSeg(seg); + amstsegHi = Seg2AMSTSeg(segHi); + AVERT(AMSTSeg, amstseg); + amst = PoolAMST(SegPool(seg)); + + /* Split the superclass fields via direct next-method call */ + res = NextMethod(Seg, AMSTSeg, split)(seg, segHi, base, mid, limit); + if (res != ResOK) + goto 
failSuper; + + if (AMSTFailOperation(amst)) { + amst->badSplits++; + printf("B"); + goto failDeliberate; + } + + /* Full initialization for segHi. */ + amstsegHi->next = amstseg->next; + amstsegHi->prev = amstseg; + amstsegHi->sig = AMSTSegSig; + amstseg->next = amstsegHi; + AVERT(AMSTSeg, amstseg); + AVERT(AMSTSeg, amstsegHi); + amst->splits++; + printf("S"); + return ResOK; + +failDeliberate: + /* Call the anti-method. (see .fail) */ + res = NextMethod(Seg, AMSTSeg, merge)(seg, segHi, base, mid, limit); + AVER(res == ResOK); + res = ResFAIL; +failSuper: + AVERT(AMSTSeg, amstseg); + return res; +} + + +/* AMSTSegClass -- Class definition for AMST segments */ + +DEFINE_CLASS(Seg, AMSTSeg, klass) +{ + INHERIT_CLASS(klass, AMSTSeg, AMSSeg); + klass->instClassStruct.finish = amstSegFinish; + klass->size = sizeof(AMSTSegStruct); + klass->init = amstSegInit; + klass->split = amstSegSplit; + klass->merge = amstSegMerge; + AVERT(SegClass, klass); +} + + +/* AMSTSegSizePolicy + * + * Picks double the default segment size. 
+ */ +static Res AMSTSegSizePolicy(Size *sizeReturn, + Pool pool, Size size, RankSet rankSet) +{ + Arena arena; + Size basic, want; + + AVER(sizeReturn != NULL); + AVERT(Pool, pool); + AVER(size > 0); + AVERT(RankSet, rankSet); + + arena = PoolArena(pool); + + basic = SizeArenaGrains(size, arena); + if (basic == 0) { + /* overflow */ + return ResMEMORY; + } + want = basic + basic; + if (want <= basic) { + /* overflow */ + return ResMEMORY; + } + *sizeReturn = want; + return ResOK; +} + + +/* AMSTInit -- the pool class initialization method */ + +static Res AMSTInit(Pool pool, Arena arena, PoolClass klass, ArgList args) +{ + AMST amst; + AMS ams; + Res res; + + res = NextMethod(Pool, AMSTPool, init)(pool, arena, klass, args); + if (res != ResOK) + return res; + + amst = CouldBeA(AMSTPool, pool); + ams = MustBeA(AMSPool, pool); + + ams->segSize = AMSTSegSizePolicy; + ams->segClass = AMSTSegClassGet; + amst->failSegs = TRUE; + amst->splits = 0; + amst->merges = 0; + amst->badSplits = 0; + amst->badMerges = 0; + amst->bsplits = 0; + amst->bmerges = 0; + + SetClassOfPoly(pool, CLASS(AMSTPool)); + amst->sig = AMSTSig; + AVERC(AMSTPool, amst); + + return ResOK; +} + + +/* AMSTFinish -- the pool class finish method */ + +static void AMSTFinish(Inst inst) +{ + Pool pool = MustBeA(AbstractPool, inst); + AMST amst = MustBeA(AMSTPool, pool); + + AVERT(AMST, amst); + + amst->sig = SigInvalid; + + printf("\nDestroying pool, having performed:\n"); + printf(" %"PRIuLONGEST" splits (S)\n", (ulongest_t)amst->splits); + printf(" %"PRIuLONGEST" merges (M)\n", (ulongest_t)amst->merges); + printf(" %"PRIuLONGEST" aborted splits (B)\n", (ulongest_t)amst->badSplits); + printf(" %"PRIuLONGEST" aborted merges (D)\n", (ulongest_t)amst->badMerges); + printf(" which included:\n"); + printf(" %"PRIuLONGEST" buffered splits (C)\n", (ulongest_t)amst->bsplits); + printf(" %"PRIuLONGEST" buffered merges (J)\n", (ulongest_t)amst->bmerges); + + NextMethod(Inst, AMSTPool, finish)(inst); +} + + +/* 
AMSSegIsFree -- return TRUE if a seg is all unallocated */ + +static Bool AMSSegIsFree(Seg seg) +{ + AMSSeg amsseg; + AVERT(Seg, seg); + amsseg = Seg2AMSSeg(seg); + return amsseg->freeGrains == amsseg->grains; +} + + +/* AMSSegRegionIsFree -- return TRUE if a region is all unallocated */ + +static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit) +{ + AMSSeg amsseg = MustBeA(AMSSeg, seg); + Index baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base); + + if (amsseg->allocTableInUse) { + Index limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit); + return BTIsResRange(amsseg->allocTable, baseIndex, limitIndex); + } else { + return amsseg->firstFree <= baseIndex; + } +} + + +/* AMSUnallocateRange -- set a range to be unallocated + * + * Used as a means of overriding the behaviour of AMSBufferFill. + * The code is similar to amsSegBufferEmpty. + */ +static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit) +{ + AMSSeg amsseg; + Index baseIndex, limitIndex; + Count unallocatedGrains; + /* parameters checked by caller */ + + amsseg = Seg2AMSSeg(seg); + + baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base); + limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit); + + if (amsseg->allocTableInUse) { + /* check that it's allocated */ + AVER(BTIsSetRange(amsseg->allocTable, baseIndex, limitIndex)); + BTResRange(amsseg->allocTable, baseIndex, limitIndex); + } else { + /* check that it's allocated */ + AVER(limitIndex <= amsseg->firstFree); + if (limitIndex == amsseg->firstFree) /* is it at the end? 
*/ { + amsseg->firstFree = baseIndex; + } else { /* start using allocTable */ + amsseg->allocTableInUse = TRUE; + BTSetRange(amsseg->allocTable, 0, amsseg->firstFree); + if (amsseg->firstFree < amsseg->grains) + BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains); + BTResRange(amsseg->allocTable, baseIndex, limitIndex); + } + } + + unallocatedGrains = limitIndex - baseIndex; + AVER(amsseg->bufferedGrains >= unallocatedGrains); + amsseg->freeGrains += unallocatedGrains; + amsseg->bufferedGrains -= unallocatedGrains; + PoolGenAccountForEmpty(ams->pgen, 0, + PoolGrainsSize(AMSPool(ams), unallocatedGrains), + FALSE); +} + + +/* AMSAllocateRange -- set a range to be allocated + * + * Used as a means of overriding the behaviour of AMSBufferFill. + * The code is similar to AMSUnallocateRange. + */ +static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit) +{ + AMSSeg amsseg; + Index baseIndex, limitIndex; + Count allocatedGrains; + /* parameters checked by caller */ + + amsseg = Seg2AMSSeg(seg); + + baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base); + limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit); + + if (amsseg->allocTableInUse) { + /* check that it's not allocated */ + AVER(BTIsResRange(amsseg->allocTable, baseIndex, limitIndex)); + BTSetRange(amsseg->allocTable, baseIndex, limitIndex); + } else { + /* check that it's not allocated */ + AVER(baseIndex >= amsseg->firstFree); + if (baseIndex == amsseg->firstFree) /* is it at the end? 
*/ { + amsseg->firstFree = limitIndex; + } else { /* start using allocTable */ + amsseg->allocTableInUse = TRUE; + BTSetRange(amsseg->allocTable, 0, amsseg->firstFree); + if (amsseg->firstFree < amsseg->grains) + BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains); + BTSetRange(amsseg->allocTable, baseIndex, limitIndex); + } + } + + allocatedGrains = limitIndex - baseIndex; + AVER(amsseg->freeGrains >= allocatedGrains); + amsseg->freeGrains -= allocatedGrains; + amsseg->bufferedGrains += allocatedGrains; + PoolGenAccountForFill(ams->pgen, AddrOffset(base, limit)); +} + + +/* AMSTBufferFill -- the pool class buffer fill method + * + * Calls next method - but possibly splits or merges the chosen + * segment. + * + * .merge: A merge is performed when the next method returns the + * entire segment, this segment had previously been split from the + * segment below, and the segment below is appropriately similar + * (i.e. not already attached to a buffer and similarly coloured) + * + * .split: If we're not merging, a split is performed if the next method + * returns the entire segment, and yet lower half of the segment would + * meet the request. 
+ */ +static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn, + Pool pool, Buffer buffer, Size size) +{ + Addr base, limit; + Arena arena; + AMS ams; + AMST amst; + Bool b; + Seg seg; + AMSTSeg amstseg; + Res res; + + AVERT(Pool, pool); + AVER(baseReturn != NULL); + AVER(limitReturn != NULL); + /* other parameters are checked by next method */ + arena = PoolArena(pool); + ams = PoolAMS(pool); + amst = PoolAMST(pool); + + /* call next method */ + res = NextMethod(Pool, AMSTPool, bufferFill)(&base, &limit, pool, buffer, size); + if (res != ResOK) + return res; + + b = SegOfAddr(&seg, arena, base); + AVER(b); + amstseg = Seg2AMSTSeg(seg); + + if (SegLimit(seg) == limit && SegBase(seg) == base) { + if (amstseg->prev != NULL) { + Seg segLo = AMSTSeg2Seg(amstseg->prev); + if (!SegHasBuffer(segLo) && + SegGrey(segLo) == SegGrey(seg) && + SegWhite(segLo) == SegWhite(seg)) { + /* .merge */ + Seg mergedSeg; + Res mres; + + AMSUnallocateRange(ams, seg, base, limit); + mres = SegMerge(&mergedSeg, segLo, seg); + if (ResOK == mres) { /* successful merge */ + AMSAllocateRange(ams, mergedSeg, base, limit); + /* leave range as-is */ + } else { /* failed to merge */ + AVER(amst->failSegs); /* deliberate fails only */ + AMSAllocateRange(ams, seg, base, limit); + } + } + + } else { + Size half = SegSize(seg) / 2; + if (half >= size && SizeIsArenaGrains(half, arena)) { + /* .split */ + Addr mid = AddrAdd(base, half); + Seg segLo, segHi; + Res sres; + AMSUnallocateRange(ams, seg, mid, limit); + sres = SegSplit(&segLo, &segHi, seg, mid); + if (ResOK == sres) { /* successful split */ + limit = mid; /* range is lower segment */ + } else { /* failed to split */ + AVER(amst->failSegs); /* deliberate fails only */ + AMSAllocateRange(ams, seg, mid, limit); + } + + } + } + } + + *baseReturn = base; + *limitReturn = limit; + return ResOK; +} + + +/* AMSTStressBufferedSeg -- Stress test for a buffered seg + * + * Test splitting or merging a buffered seg. 
+ * + * .bmerge: A merge is performed when the segment had previously + * been split and the segment above meets the constraints (i.e. empty, + * not already attached to a buffer and similar colour) + * + * .bsplit: Whether or not a merge happened, a split is performed if + * the limit of the buffered region is also the limit of an arena + * grain, and yet does not correspond to the segment limit, provided + * that the part of the segment above the buffer is all free. + */ +static void AMSTStressBufferedSeg(Seg seg, Buffer buffer) +{ + AMSTSeg amstseg; + AMST amst; + Arena arena; + Addr limit; + Buffer segBuf; + + AVERT(Seg, seg); + AVERT(Buffer, buffer); + AVER(SegBuffer(&segBuf, seg) && segBuf == buffer); + amstseg = Seg2AMSTSeg(seg); + AVERT(AMSTSeg, amstseg); + limit = BufferLimit(buffer); + arena = PoolArena(SegPool(seg)); + amst = PoolAMST(SegPool(seg)); + AVERT(AMST, amst); + + if (amstseg->next != NULL) { + Seg segHi = AMSTSeg2Seg(amstseg->next); + if (AMSSegIsFree(segHi) && SegGrey(segHi) == SegGrey(seg)) { + /* .bmerge */ + Seg mergedSeg; + Res res; + res = SegMerge(&mergedSeg, seg, segHi); + if (ResOK == res) { + amst->bmerges++; + printf("J"); + } else { + /* deliberate fails only */ + AVER(amst->failSegs); + } + } + } + + if (SegLimit(seg) != limit && + AddrIsArenaGrain(limit, arena) && + AMSSegRegionIsFree(seg, limit, SegLimit(seg))) { + /* .bsplit */ + Seg segLo, segHi; + Res res; + res = SegSplit(&segLo, &segHi, seg, limit); + if (ResOK == res) { + amst->bsplits++; + printf("C"); + } else { + /* deliberate fails only */ + AVER(amst->failSegs); + } + } +} + + + +/* AMSTPoolClass -- the pool class definition */ + +DEFINE_CLASS(Pool, AMSTPool, klass) +{ + INHERIT_CLASS(klass, AMSTPool, AMSPool); + klass->instClassStruct.finish = AMSTFinish; + klass->size = sizeof(AMSTStruct); + klass->init = AMSTInit; + klass->bufferFill = AMSTBufferFill; + AVERT(PoolClass, klass); +} + + +/* mps_amst_ap_stress -- stress an active buffer + * + * Attempt to either split 
or merge a segment attached to an AP + */ +static void mps_amst_ap_stress(mps_ap_t ap) +{ + Buffer buffer; + Seg seg; + + buffer = BufferOfAP(ap); + AVERT(Buffer, buffer); + seg = BufferSeg(buffer); + AMSTStressBufferedSeg(seg, buffer); +} + + +/* mps_class_amst -- return the pool class descriptor to the client */ + +static mps_pool_class_t mps_class_amst(void) +{ + return (mps_pool_class_t)CLASS(AMSTPool); +} + + +/* AMS collection parameters */ + +#define exactRootsCOUNT 50 +#define ambigRootsCOUNT 100 +#define sizeScale 4 +/* This is enough for five GCs. */ +#define totalSizeMAX sizeScale * 800 * (size_t)1024 +#define totalSizeSTEP 200 * (size_t)1024 +/* objNULL needs to be odd so that it's ignored in exactRoots. */ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) +#define testArenaSIZE ((size_t)16<<20) +#define initTestFREQ 6000 +#define stressTestFREQ 40 + + +/* static variables for the test */ + +static mps_pool_t pool; +static mps_ap_t ap; +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; +static size_t totalSize = 0; + + +/* make -- object allocation and init */ + +static mps_addr_t make(void) +{ + size_t length = rnd() % 20, size = (length+2) * sizeof(mps_word_t); + mps_addr_t p; + mps_res_t res; + + do { + MPS_RESERVE_BLOCK(res, p, ap, size); + if (res) + die(res, "MPS_RESERVE_BLOCK"); + res = dylan_init(p, size, exactRoots, exactRootsCOUNT); + if (res) + die(res, "dylan_init"); + } while(!mps_commit(ap, p, size)); + + totalSize += size; + return p; +} + + +/* test -- the actual stress test */ + +static void test(mps_arena_t arena) +{ + mps_fmt_t format; + mps_root_t exactRoot, ambigRoot; + size_t lastStep = 0, i, r; + unsigned long objs; + mps_ap_t busy_ap; + mps_addr_t busy_init; + const char *indent = " "; + mps_chain_t chain; + static mps_gen_param_s genParam = {1024, 0.2}; + + die(mps_fmt_create_A(&format, arena, dylan_fmt_A()), "fmt_create"); + die(mps_chain_create(&chain, arena, 1, &genParam), 
"chain_create"); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, format); + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, chain); + MPS_ARGS_ADD(args, MPS_KEY_GEN, 0); + die(mps_pool_create_k(&pool, arena, mps_class_amst(), args), + "pool_create(amst)"); + } MPS_ARGS_END(args); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + die(mps_ap_create(&busy_ap, pool, mps_rank_exact()), "BufferCreate 2"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = objNULL; + for(i = 0; i < ambigRootsCOUNT; ++i) + ambigRoots[i] = rnd_addr(); + + die(mps_root_create_table_masked(&exactRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + die(mps_root_create_table(&ambigRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], ambigRootsCOUNT), + "root_create_table(ambig)"); + + puts(indent); + + /* create an ap, and leave it busy */ + die(mps_reserve(&busy_init, busy_ap, 64), "mps_reserve busy"); + + objs = 0; + while(totalSize < totalSizeMAX) { + if (totalSize > lastStep + totalSizeSTEP) { + lastStep = totalSize; + printf("\nSize %"PRIuLONGEST" bytes, %"PRIuLONGEST" objects.\n", + (ulongest_t)totalSize, (ulongest_t)objs); + printf("%s", indent); + (void)fflush(stdout); + for(i = 0; i < exactRootsCOUNT; ++i) + cdie(exactRoots[i] == objNULL || dylan_check(exactRoots[i]), + "all roots check"); + } + + r = (size_t)rnd(); + if (r & 1) { + i = (r >> 1) % exactRootsCOUNT; + if (exactRoots[i] != objNULL) + cdie(dylan_check(exactRoots[i]), "dying root check"); + exactRoots[i] = make(); + if (exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + } else { + i = (r >> 1) % ambigRootsCOUNT; + ambigRoots[(ambigRootsCOUNT-1) - i] = make(); + /* Create random interior pointers */ + ambigRoots[i] = (mps_addr_t)((char *)(ambigRoots[i/2]) + 1); + } + + if (rnd() % stressTestFREQ == 0) + 
mps_amst_ap_stress(ap); /* stress active buffer */ + + if (rnd() % initTestFREQ == 0) + *(int*)busy_init = -1; /* check that the buffer is still there */ + + ++objs; + if (objs % 256 == 0) { + printf("."); + (void)fflush(stdout); + } + } + + (void)mps_commit(busy_ap, busy_init, 64); + + mps_arena_park(arena); + mps_ap_destroy(busy_ap); + mps_ap_destroy(ap); + mps_root_destroy(exactRoot); + mps_root_destroy(ambigRoot); + mps_pool_destroy(pool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); +} + + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_thr_t thread; + + testlib_init(argc, argv); + + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "arena_create"); + die(mps_thread_reg(&thread, arena), "thread_reg"); + test(arena); + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/shield.c b/mps/code/shield.c new file mode 100644 index 00000000000..83bdd3e159b --- /dev/null +++ b/mps/code/shield.c @@ -0,0 +1,790 @@ +/* shield.c: SHIELD IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * See: idea.shield, . + * + * IMPORTANT: HERE BE DRAGONS! This code is subtle and + * critical. Ensure you have read and understood + * before you touch it. + */ + +#include "mpm.h" + +SRCID(shield, "$Id$"); + + +void ShieldInit(Shield shield) +{ + shield->inside = FALSE; + shield->suspended = FALSE; + shield->queuePending = FALSE; + shield->queue = NULL; + shield->length = 0; + shield->next = 0; + shield->limit = 0; + shield->depth = 0; + shield->unsynced = 0; + shield->holds = 0; + shield->sig = ShieldSig; +} + + +void ShieldDestroyQueue(Shield shield, Arena arena) +{ + AVER(shield->limit == 0); /* queue must be empty */ + + if (shield->length != 0) { + AVER(shield->queue != NULL); + ControlFree(arena, shield->queue, + shield->length * sizeof shield->queue[0]); + shield->queue = NULL; + shield->length = 0; + } +} + + +void ShieldFinish(Shield shield) +{ + /* The queue should already have been destroyed by + GlobalsPrepareToDestroy calling ShieldDestroyQueue. 
*/ + AVER(shield->length == 0); + AVER(shield->limit == 0); + AVER(shield->queue == NULL); + + AVER(shield->depth == 0); + AVER(shield->unsynced == 0); + AVER(shield->holds == 0); + shield->sig = SigInvalid; +} + + +static Bool SegIsSynced(Seg seg); + +Bool ShieldCheck(Shield shield) +{ + CHECKS(Shield, shield); + /* Can't check Boolean bitfields */ + CHECKL(shield->queue == NULL || shield->length > 0); + CHECKL(shield->limit <= shield->length); + CHECKL(shield->next <= shield->limit); + + /* The mutator is not suspended while outside the shield + . */ + CHECKL(shield->inside || !shield->suspended); + + /* If any segment is not synced, the mutator is suspended + . */ + CHECKL(shield->unsynced == 0 || shield->suspended); + + /* The total depth is zero while outside the shield + . */ + CHECKL(shield->inside || shield->depth == 0); + + /* There are no unsynced segments when we're outside the shield. */ + CHECKL(shield->inside || shield->unsynced == 0); + + /* Every unsynced segment should be on the queue, because we have to + remember to sync it before we return to the mutator. */ + CHECKL(shield->limit + shield->queuePending >= shield->unsynced); + + /* The mutator is suspended if there are any holds. */ + CHECKL(shield->holds == 0 || shield->suspended); + + /* This is too expensive to check all the time since we have an + expanding shield queue that often has 16K elements instead of + 16. */ +#if defined(AVER_AND_CHECK_ALL) + { + Count unsynced = 0; + Index i; + for (i = 0; i < shield->limit; ++i) { + Seg seg = shield->queue[i]; + CHECKD(Seg, seg); + if (!SegIsSynced(seg)) + ++unsynced; + } + CHECKL(unsynced + shield->queuePending == shield->unsynced); + } +#endif + + return TRUE; +} + + +Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth) +{ + Res res; + + res = WriteF(stream, depth, + "Shield $P {\n", (WriteFP)shield, + " ", shield->inside ? 
"inside" : "outside", " shield\n", + " suspended $S\n", WriteFYesNo(shield->suspended), + " depth $U\n", (WriteFU)shield->depth, + " next $U\n", (WriteFU)shield->next, + " length $U\n", (WriteFU)shield->length, + " unsynced $U\n", (WriteFU)shield->unsynced, + " holds $U\n", (WriteFU)shield->holds, + "} Shield $P\n", (WriteFP)shield, + NULL); + if (res != ResOK) + return res; + + return ResOK; +} + + +/* SHIELD_AVER -- transgressive argument checking + * + * .trans.check: A number of shield functions cannot do normal + * argument checking with AVERT because (for example) SegCheck checks + * the shield invariants, and it is these functions that are enforcing + * them. Instead, we AVER(TESTT(Seg, seg)) to check the type + * signature but not the contents. + */ + +#define SHIELD_AVERT(type, exp) AVER(TESTT(type, exp)) +#define SHIELD_AVERT_CRITICAL(type, exp) AVER_CRITICAL(TESTT(type, exp)) + + +/* SegIsSynced -- is a segment synced? + * + * . + */ + +static Bool SegIsSynced(Seg seg) +{ + SHIELD_AVERT_CRITICAL(Seg, seg); + return SegSM(seg) == SegPM(seg); +} + + +/* shieldSetSM -- set shield mode, maintaining sync count */ + +static void shieldSetSM(Shield shield, Seg seg, AccessSet mode) +{ + if (SegSM(seg) != mode) { + if (SegIsSynced(seg)) { + SegSetSM(seg, mode); + ++shield->unsynced; + } else { + SegSetSM(seg, mode); + if (SegIsSynced(seg)) { + AVER(shield->unsynced > 0); + --shield->unsynced; + } + } + } +} + + +/* shieldSetPM -- set protection mode, maintaining sync count */ + +static void shieldSetPM(Shield shield, Seg seg, AccessSet mode) +{ + if (SegPM(seg) != mode) { + if (SegIsSynced(seg)) { + SegSetPM(seg, mode); + ++shield->unsynced; + } else { + SegSetPM(seg, mode); + if (SegIsSynced(seg)) { + AVER(shield->unsynced > 0); + --shield->unsynced; + } + } + } +} + + +/* SegIsExposed -- is a segment exposed? + * + * . 
+ */ + +static Bool SegIsExposed(Seg seg) +{ + SHIELD_AVERT_CRITICAL(Seg, seg); + return seg->depth > 0; +} + + +/* shieldSync -- synchronize a segment's protection + * + * . + */ + +static void shieldSync(Shield shield, Seg seg) +{ + SHIELD_AVERT_CRITICAL(Seg, seg); + + if (!SegIsSynced(seg)) { + shieldSetPM(shield, seg, SegSM(seg)); + ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg)); + } +} + + +/* shieldSuspend -- suspend the mutator + * + * Called from inside when any segment is not synced, in + * order to provide exclusive access to the segment by the MPS. See + * .inv.unsynced.suspended. + */ + +static void shieldSuspend(Arena arena) +{ + Shield shield; + + AVERT(Arena, arena); + shield = ArenaShield(arena); + AVER(shield->inside); + + if (!shield->suspended) { + ThreadRingSuspend(ArenaThreadRing(arena), ArenaDeadRing(arena)); + shield->suspended = TRUE; + } +} + + +/* ShieldHold -- suspend mutator access to the unprotectable + * + * From outside , this is used when we really need to + * lock everything against the mutator -- for example, during flip + * when we must scan all thread registers at once. + */ + +void (ShieldHold)(Arena arena) +{ + AVERT(Arena, arena); + shieldSuspend(arena); + ++ArenaShield(arena)->holds; +} + + +/* ShieldRelease -- declare mutator could be resumed + * + * In practice, we don't resume the mutator until ShieldLeave, but + * this marks the earliest point at which we could resume. + */ + +void (ShieldRelease)(Arena arena) +{ + Shield shield; + + AVERT(Arena, arena); + shield = ArenaShield(arena); + AVER(shield->inside); + AVER(shield->suspended); + + AVER(shield->holds > 0); + --shield->holds; + + /* It is only correct to actually resume the mutator here if + shield->depth is 0, shield->unsycned is 0, and the queue is + empty. */ + /* See for a discussion of when it + might be a good idea to resume the mutator early. 
*/ +} + + +/* shieldProtLower -- reduce protection on a segment + * + * This ensures actual prot mode does not include mode. + */ + +static void shieldProtLower(Shield shield, Seg seg, AccessSet mode) +{ + /* */ + SHIELD_AVERT_CRITICAL(Seg, seg); + AVERT_CRITICAL(AccessSet, mode); + + if (BS_INTER(SegPM(seg), mode) != AccessSetEMPTY) { + shieldSetPM(shield, seg, BS_DIFF(SegPM(seg), mode)); + ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg)); + } +} + + +/* shieldDequeue -- remove a segment from the shield queue */ + +static Seg shieldDequeue(Shield shield, Index i) +{ + Seg seg; + AVER(i < shield->limit); + seg = shield->queue[i]; + AVERT(Seg, seg); + AVER(seg->queued); + shield->queue[i] = NULL; /* to ensure it can't get re-used */ + seg->queued = FALSE; + return seg; +} + + +/* shieldFlushEntry -- flush a single entry from the queue + * + * If the segment is exposed we can simply dequeue it, because later + * there will be a call to ShieldCover that will put it back on the + * queue. If the segment is not exposed, we can sync its protection. + * (And if it does not have the shield raised any more, that will do + * nothing.) 
+ */ + +static void shieldFlushEntry(Shield shield, Index i) +{ + Seg seg = shieldDequeue(shield, i); + + if (!SegIsExposed(seg)) + shieldSync(shield, seg); +} + + +/* shieldQueueReset -- reset shield queue pointers */ + +static void shieldQueueReset(Shield shield) +{ + AVER(shield->depth == 0); /* overkill: implies no segs are queued */ + AVER(shield->unsynced == 0); + shield->next = 0; + shield->limit = 0; +} + + +/* shieldQueueEntryCompare -- comparison for queue sorting */ + +static Compare shieldAddrCompare(Addr left, Addr right) +{ + if (left < right) + return CompareLESS; + else if (left == right) + return CompareEQUAL; + else + return CompareGREATER; +} + +static Compare shieldQueueEntryCompare(void *left, void *right, void *closure) +{ + Seg segA = left, segB = right; + + /* These checks are not critical in a hot build, but slow down cool + builds quite a bit, so just check the signatures. */ + AVER(TESTT(Seg, segA)); + AVER(TESTT(Seg, segB)); + UNUSED(closure); + + return shieldAddrCompare(SegBase(segA), SegBase(segB)); +} + + +/* shieldFlushEntries -- flush queue coalescing protects + * + * Sort the shield queue into address order, then iterate over it + * coalescing protection work, in order to reduce the number of system + * calls to a minimum. This is very important on macOS, where + * protection calls are extremely inefficient, but has no net gain on + * Windows. + * + * TODO: Could we keep extending the outstanding area over memory + * that's *not* in the queue but has the same protection mode? Might + * require . 
+ */ + +static void shieldFlushEntries(Shield shield) +{ + Addr base = NULL, limit; + AccessSet mode; + Index i; + + if (shield->length == 0) { + AVER(shield->queue == NULL); + return; + } + + QuickSort((void *)shield->queue, shield->limit, + shieldQueueEntryCompare, UNUSED_POINTER, + &shield->sortStruct); + + mode = AccessSetEMPTY; + limit = NULL; + for (i = 0; i < shield->limit; ++i) { + Seg seg = shieldDequeue(shield, i); + if (!SegIsSynced(seg)) { + shieldSetPM(shield, seg, SegSM(seg)); + if (SegSM(seg) != mode || SegBase(seg) != limit) { + if (base != NULL) { + AVER(base < limit); + ProtSet(base, limit, mode); + } + base = SegBase(seg); + mode = SegSM(seg); + } + limit = SegLimit(seg); + } + } + if (base != NULL) { + AVER(base < limit); + ProtSet(base, limit, mode); + } + + shieldQueueReset(shield); +} + + +/* shieldQueue -- consider adding a segment to the queue + * + * If the segment is out of sync, either sync it, or ensure it is + * queued and the mutator is suspended. + */ + +static void shieldQueue(Arena arena, Seg seg) +{ + Shield shield; + + /* */ + AVERT_CRITICAL(Arena, arena); + shield = ArenaShield(arena); + SHIELD_AVERT_CRITICAL(Seg, seg); + + if (SegIsSynced(seg) || seg->queued) + return; + + if (SegIsExposed(seg)) { + /* This can occur if the mutator isn't suspended, we expose a + segment, then raise the shield on it. In this case, the + mutator isn't allowed to see the segment, but we don't need to + queue it until its covered. */ + shieldSuspend(arena); + return; + } + + /* Allocate or extend the shield queue if necessary. */ + if (shield->next >= shield->length) { + void *p; + Res res; + Count length; + + AVER(shield->next == shield->length); + + if (shield->length == 0) + length = ShieldQueueLENGTH; + else + length = shield->length * 2; + + res = ControlAlloc(&p, arena, length * sizeof shield->queue[0]); + if (res != ResOK) { + AVER(ResIsAllocFailure(res)); + /* Carry on with the existing queue. 
*/ + } else { + if (shield->length > 0) { + Size oldSize = shield->length * sizeof shield->queue[0]; + AVER(shield->queue != NULL); + mps_lib_memcpy(p, shield->queue, oldSize); + ControlFree(arena, shield->queue, oldSize); + } + shield->queue = p; + shield->length = length; + } + } + + /* Queue unavailable, so synchronize now. Or if the mutator is not + yet suspended and the code raises the shield on a covered + segment, protect it now, because that's probably better than + suspending the mutator. */ + if (shield->length == 0 || !shield->suspended) { + shieldSync(shield, seg); + return; + } + + AVER_CRITICAL(shield->limit <= shield->length); + AVER_CRITICAL(shield->next <= shield->limit); + + /* If we failed to extend the shield queue array, degrade to an LRU + circular buffer. */ + if (shield->next >= shield->length) + shield->next = 0; + AVER_CRITICAL(shield->next < shield->length); + + AVER_CRITICAL(shield->length > 0); + + /* If the limit is less than the length, then the queue array has + yet to be filled, and next is an uninitialized entry. + Otherwise it's the tail end from last time around, and needs to + be flushed. */ + if (shield->limit >= shield->length) { + AVER_CRITICAL(shield->limit == shield->length); + shieldFlushEntry(shield, shield->next); + } + + shield->queue[shield->next] = seg; + ++shield->next; + seg->queued = TRUE; + + if (shield->next >= shield->limit) + shield->limit = shield->next; +} + + +/* ShieldRaise -- declare segment should be protected from mutator + * + * Does not immediately protect the segment, unless the segment is + * covered and the shield queue is unavailable. 
+ */ + +void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode) +{ + Shield shield; + + SHIELD_AVERT(Arena, arena); + SHIELD_AVERT(Seg, seg); + AVERT(AccessSet, mode); + shield = ArenaShield(arena); + AVER(!shield->queuePending); + shield->queuePending = TRUE; + + /* preserved */ + shieldSetSM(ArenaShield(arena), seg, BS_UNION(SegSM(seg), mode)); + + /* Ensure and + */ + shieldQueue(arena, seg); + shield->queuePending = FALSE; + + /* Check queue and segment consistency. */ + AVERT(Arena, arena); + AVERT(Seg, seg); +} + + +/* ShieldLower -- declare segment may be accessed by mutator */ + +void (ShieldLower)(Arena arena, Seg seg, AccessSet mode) +{ + Shield shield; + + AVERT(Arena, arena); + shield = ArenaShield(arena); + SHIELD_AVERT(Seg, seg); + AVERT(AccessSet, mode); + + /* SegIsSynced(seg) is not changed by the following preserving + and + . */ + shieldSetSM(shield, seg, BS_DIFF(SegSM(seg), mode)); + /* TODO: Do we need to promptly call shieldProtLower here? It + loses the opportunity to coalesce the protection call. It would + violate . */ + /* shieldQueue(arena, seg); */ + shieldProtLower(shield, seg, mode); + + /* Check queue and segment consistency. */ + AVERT(Arena, arena); + AVERT(Seg, seg); +} + + +/* ShieldEnter -- enter the shield, allowing exposes */ + +void (ShieldEnter)(Arena arena) +{ + Shield shield; + + AVERT(Arena, arena); + shield = ArenaShield(arena); + AVER(!shield->inside); + AVER(shield->depth == 0); + AVER(!shield->suspended); + + shieldQueueReset(shield); + + shield->inside = TRUE; +} + + +/* shieldDebugCheck -- expensive consistency check + * + * While developing the shield it is very easy to make a consistency + * mistake that causes random corruption of the heap, usually because + * all the attempts to avoid protection and suspension end up failing + * to enforce . In these cases, + * try enabling SHIELD_DEBUG and extending this code as necessary. 
+ * + * The basic idea is to iterate over *all* segments and check + * consistency with the arena and shield queue. + */ + +#if defined(SHIELD_DEBUG) +static void shieldDebugCheck(Arena arena) +{ + Shield shield; + Seg seg; + Count queued = 0; + Count depth = 0; + + AVERT(Arena, arena); + shield = ArenaShield(arena); + AVER(shield->inside || shield->limit == 0); + + if (SegFirst(&seg, arena)) + do { + depth += SegDepth(seg); + if (shield->limit == 0) { + AVER(!seg->queued); + AVER(SegIsSynced(seg)); + /* You can directly set protections here to see if it makes a + difference. */ + /* ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg)); */ + } else { + if (seg->queued) + ++queued; + } + } while(SegNext(&seg, arena, seg)); + + AVER(depth == shield->depth); + AVER(queued == shield->limit); +} +#endif + + +/* ShieldFlush -- empty the shield queue + * + * .shield.flush: Flush empties the shield queue. This needs to be + * called before queued segments are destroyed, to remove them from + * the queue. We flush the whole queue because finding the entry is + * O(n) and we're very likely reclaiming and destroying loads of + * segments. See also . + * + * The memory for the segment may become spare, and not released back + * to the operating system. Since we keep track of protection on + * segments and not grains we have no way of keeping track of the + * protection state of spare grains. We therefore flush the protection + * to get it back into the default state (unprotected). See also + * . 
+ */ + +void (ShieldFlush)(Arena arena) +{ + Shield shield; + + AVERT(Arena, arena); + shield = ArenaShield(arena); +#ifdef SHIELD_DEBUG + shieldDebugCheck(arena); +#endif + shieldFlushEntries(shield); + AVER(shield->unsynced == 0); /* everything back in sync */ +#ifdef SHIELD_DEBUG + shieldDebugCheck(arena); +#endif +} + + +/* ShieldLeave -- leave the shield, protect segs from mutator */ + +void (ShieldLeave)(Arena arena) +{ + Shield shield; + + AVERT(Arena, arena); + shield = ArenaShield(arena); + AVER(shield->inside); + AVER(shield->depth == 0); /* no pending covers */ + AVER(shield->holds == 0); + + ShieldFlush(arena); + + AVER(shield->unsynced == 0); /* everything back in sync */ + + /* Ensuring the mutator is running at this point guarantees + .inv.outside.running */ + if (shield->suspended) { + ThreadRingResume(ArenaThreadRing(arena), ArenaDeadRing(arena)); + shield->suspended = FALSE; + } + + shield->inside = FALSE; +} + + + +/* ShieldExpose -- allow the MPS access to a segment while denying the mutator + * + * The first expose of a shielded segment suspends the mutator to + * ensure the MPS has exclusive access. + */ + +void (ShieldExpose)(Arena arena, Seg seg) +{ + Shield shield; + AccessSet mode = AccessREAD | AccessWRITE; + + /* */ + AVERT_CRITICAL(Arena, arena); + shield = ArenaShield(arena); + AVER_CRITICAL(shield->inside); + + SegSetDepth(seg, SegDepth(seg) + 1); + AVER_CRITICAL(SegDepth(seg) > 0); /* overflow */ + ++shield->depth; + AVER_CRITICAL(shield->depth > 0); /* overflow */ + + if (BS_INTER(SegPM(seg), mode) != AccessSetEMPTY) + shieldSuspend(arena); + + /* Ensure . */ + /* TODO: Mass exposure -- see + . 
*/ + shieldProtLower(shield, seg, mode); +} + + +/* ShieldCover -- declare MPS no longer needs access to seg */ + +void (ShieldCover)(Arena arena, Seg seg) +{ + Shield shield; + + /* */ + AVERT_CRITICAL(Arena, arena); + shield = ArenaShield(arena); + AVERT_CRITICAL(Seg, seg); + AVER_CRITICAL(SegPM(seg) == AccessSetEMPTY); + + AVER_CRITICAL(SegDepth(seg) > 0); + SegSetDepth(seg, SegDepth(seg) - 1); + AVER_CRITICAL(shield->depth > 0); + --shield->depth; + + /* Ensure . */ + shieldQueue(arena, seg); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/mps/code/sncss.c b/mps/code/sncss.c new file mode 100644 index 00000000000..91e1cbbd8a8 --- /dev/null +++ b/mps/code/sncss.c @@ -0,0 +1,243 @@ +/* sncss.c: SNC STRESS TEST + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mpm.h" +#include "mpscmvt.h" +#include "mpscmvff.h" +#include "mpscsnc.h" +#include "mpsavm.h" +#include "mps.h" +#include "testlib.h" + +#include /* printf */ + + +/* Simple format for the SNC pool. */ + +typedef struct obj_s { + size_t size; + int pad; +} obj_s, *obj_t; + +/* make -- allocate one object, and if it's big enough, store the size + * in the first word, for the benefit of the object format */ + +static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size) +{ + mps_addr_t addr; + mps_res_t res; + + do { + obj_t obj; + res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) + return res; + obj = addr; + obj->size = size; + obj->pad = 0; + } while (!mps_commit(ap, addr, size)); + + *p = addr; + return MPS_RES_OK; +} + +static mps_res_t fmtScan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) +{ + testlib_unused(ss); + testlib_unused(base); + testlib_unused(limit); + return MPS_RES_OK; +} + +static mps_addr_t fmtSkip(mps_addr_t addr) +{ + obj_t obj = addr; + return (char *)addr + obj->size; +} + +static void fmtPad(mps_addr_t addr, size_t size) +{ + obj_t obj = addr; + obj->size = size; + obj->pad = 1; +} + +typedef struct env_s { + size_t obj; + size_t pad; +} env_s, *env_t; + +static void fmtVisitor(mps_addr_t object, mps_fmt_t format, + mps_pool_t pool, void *p, size_t s) +{ + env_t env = p; + obj_t obj = object; + testlib_unused(format); + testlib_unused(pool); + testlib_unused(s); + if (obj->pad) + env->pad += obj->size; + else + env->obj += obj->size; +} + + +/* area_scan -- area scanning function for mps_pool_walk */ + +static mps_res_t area_scan(mps_ss_t ss, void *base, void *limit, void *closure) +{ + env_t env = closure; + testlib_unused(ss); + 
while (base < limit) { + mps_addr_t prev = base; + obj_t obj = base; + if (obj->pad) + env->pad += obj->size; + else + env->obj += obj->size; + base = fmtSkip(base); + Insist(prev < base); + } + Insist(base == limit); + return MPS_RES_OK; +} + + +#define AP_MAX 3 /* Number of allocation points */ +#define DEPTH_MAX 20 /* Maximum depth of frame push */ + +typedef struct ap_s { + mps_ap_t ap; /* An allocation point on an ANC pool */ + size_t depth; /* Number of frames pushed */ + size_t alloc[DEPTH_MAX + 1]; /* Total allocation at each depth */ + size_t push[DEPTH_MAX]; /* Total allocation when we pushed */ + mps_frame_t frame[DEPTH_MAX]; /* The frame pointers at each depth */ +} ap_s, *ap_t; + +static void test(mps_pool_class_t pool_class) +{ + size_t i, j; + mps_align_t align; + mps_arena_t arena; + mps_fmt_t fmt; + mps_pool_t pool; + ap_s aps[AP_MAX]; + + align = sizeof(obj_s) << (rnd() % 4); + + die(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none), + "mps_arena_create"); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, fmtScan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SKIP, fmtSkip); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, fmtPad); + die(mps_fmt_create_k(&fmt, arena, args), "fmt_create"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, fmt); + die(mps_pool_create_k(&pool, arena, pool_class, args), "pool_create"); + } MPS_ARGS_END(args); + + for (i = 0; i < NELEMS(aps); ++i) { + ap_t a = &aps[i]; + die(mps_ap_create_k(&a->ap, pool, mps_args_none), "ap_create"); + a->depth = 0; + a->alloc[0] = 0; + } + + for (i = 0; i < 1000000; ++i) { + size_t k = rnd() % NELEMS(aps); + ap_t a = &aps[k]; + if (rnd() % 10 == 0) { + j = rnd() % NELEMS(a->frame); + if (j < a->depth) { + a->depth = j; + mps_ap_frame_pop(a->ap, a->frame[j]); + a->alloc[j] = a->push[j]; + } else { + a->push[a->depth] = a->alloc[a->depth]; + mps_ap_frame_push(&a->frame[a->depth], a->ap); + ++ 
a->depth; + a->alloc[a->depth] = 0; + } + } else { + size_t size = alignUp(1 + rnd() % 128, align); + mps_addr_t p; + make(&p, a->ap, size); + a->alloc[a->depth] += size; + } + } + + mps_arena_park(arena); + { + env_s env1 = {0, 0}, env2 = {0, 0}; + size_t alloc = 0; + + for (i = 0; i < NELEMS(aps); ++i) { + ap_t a = &aps[i]; + for (j = 0; j <= a->depth; ++j) { + alloc += a->alloc[j]; + } + } + + mps_arena_formatted_objects_walk(arena, fmtVisitor, &env1, 0); + Insist(alloc == env1.obj); + + die(mps_pool_walk(pool, area_scan, &env2), "mps_pool_walk"); + Insist(alloc == env2.obj); + Insist(env1.pad == env2.pad); + } + + for (i = 0; i < NELEMS(aps); ++i) { + mps_ap_destroy(aps[i].ap); + } + mps_pool_destroy(pool); + mps_fmt_destroy(fmt); + mps_arena_destroy(arena); +} + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + + test(mps_class_snc()); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/sp.h b/mps/code/sp.h new file mode 100644 index 00000000000..2762599bf00 --- /dev/null +++ b/mps/code/sp.h @@ -0,0 +1,57 @@ +/* sp.h: STACK PROBE INTERFACE + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef sp_h +#define sp_h + +#include "mpmtypes.h" + + +/* StackProbe -- probe above the stack to provoke early stack overflow + * + * This function should check that the stack has at least depth words + * available, and if not, then provoke a stack overflow exception or + * protection fault. The purpose is to ensure that the exception is + * generated before taking the arena lock where it can be handled + * safely, rather than at some later point where the arena lock is + * held and so handling the exception may cause the MPS to be entered + * recursively. + */ + +extern void StackProbe(Size depth); + + +#endif /* sp_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/span.c b/mps/code/span.c new file mode 100644 index 00000000000..6c712d332ed --- /dev/null +++ b/mps/code/span.c @@ -0,0 +1,55 @@ +/* span.c: ANSI STACK PROBE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * PURPOSE + * + * .purpose: The purpose of the ANSI Stack Probe is to provide a + * non-functional implementation of the StackProbe interface. + * StackProbe has a function implementation on platforms where the + * MPS takes some special action to avoid stack overflow. + */ + +#include "mpm.h" + +SRCID(span, "$Id$"); + + +/* StackProbe -- probe above the stack to provoke early stack overflow */ + +void StackProbe(Size depth) +{ + AVER(depth == 0); + NOOP; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/splay.c b/mps/code/splay.c new file mode 100644 index 00000000000..2c30583005a --- /dev/null +++ b/mps/code/splay.c @@ -0,0 +1,1442 @@ +/* splay.c: SPLAY TREE IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Splay trees are used to manage potentially unbounded + * collections of ordered things. In the MPS these are usually + * address-ordered memory blocks. + * + * .source: + * + * .note.stack: It's important that the MPS have a bounded stack size, + * and this is a problem for tree algorithms. Basically, we have to + * avoid recursion. . 
+ * + * .critical: In manual-allocation-bound programs using MVFF, many of + * these functions are on the critical paths via mps_alloc (and then + * PoolAlloc, MVFFAlloc, failoverFind*, cbsFind*, SplayTreeFind*) and + * mps_free (and then MVFFFree, failoverInsert, cbsInsert, + * SplayTreeInsert). + */ + + +#include "splay.h" +#include "mpm.h" + +SRCID(splay, "$Id$"); + + +/* SPLAY_DEBUG -- switch for extra debugging + * + * Define SPLAY_DEBUG to enable extra consistency checking when modifying + * splay tree algorithms, which can be tricky to get right. This will + * check the tree size and ordering frequently. + */ + +/* #define SPLAY_DEBUG */ + +#define SplayTreeSetRoot(splay, tree) BEGIN ((splay)->root = (tree)); END +#define SplayCompare(tree, key, node) (((tree)->compare)(node, key)) +#define SplayHasUpdate(splay) ((splay)->updateNode != SplayTrivUpdate) + + +/* SplayTreeCheck -- check consistency of SplayTree + * + * See guide.impl.c.adt.check and . + */ + +Bool SplayTreeCheck(SplayTree splay) +{ + UNUSED(splay); + CHECKS(SplayTree, splay); + CHECKL(FUNCHECK(splay->compare)); + CHECKL(FUNCHECK(splay->nodeKey)); + CHECKL(FUNCHECK(splay->updateNode)); + /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */ + CHECKL(TreeCheck(splay->root)); + return TRUE; +} + + +/* SplayTreeInit -- initialise a splay tree + * + * ``compare`` must provide a total ordering on node keys. + * + * ``nodeKey`` extracts a key from a tree node for passing to ``compare``. + * + * ``updateNode`` will be applied to nodes from bottom to top when the + * tree is restructured in order to maintain client properties + * . If SplayTrivUpdate is be passed, faster + * algorithms are chosen for splaying. Compare SplaySplitDown with + * SplaySplitRev. 
+ */ + +void SplayTreeInit(SplayTree splay, + TreeCompareFunction compare, + TreeKeyFunction nodeKey, + SplayUpdateNodeFunction updateNode) +{ + AVER(splay != NULL); + AVER(FUNCHECK(compare)); + AVER(FUNCHECK(nodeKey)); + AVER(FUNCHECK(updateNode)); + + splay->compare = compare; + splay->nodeKey = nodeKey; + splay->updateNode = updateNode; + SplayTreeSetRoot(splay, TreeEMPTY); + splay->sig = SplayTreeSig; + + AVERT(SplayTree, splay); +} + + +/* SplayTreeFinish -- finish a splay tree + * + * Does not attempt to descend or finish any tree nodes. + * + * TODO: Should probably fail on non-empty tree, so that client code is + * forced to decide what to do about that. + */ + +void SplayTreeFinish(SplayTree splay) +{ + AVERT(SplayTree, splay); + splay->sig = SigInvalid; + SplayTreeSetRoot(splay, TreeEMPTY); + splay->compare = NULL; + splay->nodeKey = NULL; + splay->updateNode = NULL; +} + + +/* SplayTrivUpdate -- trivial update method + * + * This is passed to SplayTreeInit to indicate that no client property + * maintenance is required. It can also be called to do nothing. + */ + +void SplayTrivUpdate(SplayTree splay, Tree tree) +{ + AVERT(SplayTree, splay); + AVERT(Tree, tree); +} + + +/* compareLess, compareGreater -- trivial comparisons + * + * These comparisons can be passed to SplaySplay to find the leftmost + * or rightmost nodes in a tree quickly. + * + * NOTE: It's also possible to make specialised versions of SplaySplit + * that traverse left and right unconditionally. These weren't found + * to have a significant performance advantage when benchmarking. + * RB 2014-02-23 + */ + +static Compare compareLess(Tree tree, TreeKey key) +{ + UNUSED(tree); + UNUSED(key); + return CompareLESS; +} + +static Compare compareGreater(Tree tree, TreeKey key) +{ + UNUSED(tree); + UNUSED(key); + return CompareGREATER; +} + + +/* SplayDebugUpdate -- force update of client property + * + * A debugging utility to recursively update the client property of + * a subtree. 
May not be used in production MPS because it has + * indefinite stack usage. See .note.stack. + */ + +void SplayDebugUpdate(SplayTree splay, Tree tree) +{ + AVERT(SplayTree, splay); + AVERT(Tree, tree); + if (tree == TreeEMPTY) + return; + SplayDebugUpdate(splay, TreeLeft(tree)); + SplayDebugUpdate(splay, TreeRight(tree)); + splay->updateNode(splay, tree); +} + + +/* SplayDebugCount -- count and check order of tree + * + * This function may be called from a debugger or temporarily inserted + * during development to check a tree's integrity. It may not be called + * from the production MPS because it uses indefinite stack depth. + * See . + */ + +Count SplayDebugCount(SplayTree splay) +{ + AVERT(SplayTree, splay); + return TreeDebugCount(SplayTreeRoot(splay), splay->compare, splay->nodeKey); +} + + +/* SplayZig -- move to left child, prepending to right tree + * + * Link the top node of the middle tree into the left child of the + * right tree, then step to the left child. Returns new middle. + * + * . + * + * middle rightNext middle + * B E A E + * / \ / \ => / \ + * A C D F rightNext D F + * rightFirst / + * rightFirst B + * \ + * C + */ + +static Tree SplayZig(Tree middle, Tree *rightFirstIO, Tree *rightNextReturn) +{ + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(rightFirstIO != NULL); + AVERT_CRITICAL(Tree, *rightFirstIO); + TreeSetLeft(*rightFirstIO, middle); + *rightNextReturn = *rightFirstIO; + *rightFirstIO = middle; + return TreeLeft(middle); +} + +/* SplayZigZig -- move to left child, rotating on on to right tree + * + * Rotate the top node of the middle tree over the left child of the + * right tree, then step to the left child, completing a splay "zig zig" + * after an initial SplayZig. Returns new middle. 
+ * + * middle rightNext middle rightNext + * B E A E + * / \ / \ => / \ + * A C D F rightFirst B F + * rightFirst \ + * D + * / + * C + */ + +static Tree SplayZigZig(Tree middle, Tree *rightFirstIO, Tree rightNext) +{ + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(rightFirstIO != NULL); + AVERT_CRITICAL(Tree, *rightFirstIO); + TreeSetLeft(*rightFirstIO, TreeRight(middle)); + TreeSetRight(middle, *rightFirstIO); + TreeSetLeft(rightNext, middle); + *rightFirstIO = middle; + return TreeLeft(middle); +} + +/* SplayZag -- mirror image of SplayZig */ + +static Tree SplayZag(Tree middle, Tree *leftLastIO, Tree *leftPrevReturn) +{ + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(leftLastIO != NULL); + AVERT_CRITICAL(Tree, *leftLastIO); + TreeSetRight(*leftLastIO, middle); + *leftPrevReturn = *leftLastIO; + *leftLastIO = middle; + return TreeRight(middle); +} + +/* SplayZagZag -- mirror image of SplayZigZig */ + +static Tree SplayZagZag(Tree middle, Tree *leftLastIO, Tree leftPrev) +{ + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(leftLastIO != NULL); + AVERT_CRITICAL(Tree, *leftLastIO); + TreeSetRight(*leftLastIO, TreeLeft(middle)); + TreeSetLeft(middle, *leftLastIO); + TreeSetRight(leftPrev, middle); + *leftLastIO = middle; + return TreeRight(middle); +} + + +/* SplayState -- the state of splaying between "split" and "assemble" + * + * Splaying is divided into two phases: splitting the tree into three, + * and then assembling a final tree. This allows for optimisation of + * certain operations, the key one being SplayTreeNeighbours, which is + * critical for coalescing memory blocks (see CBSInsert). + * + * Note that SplaySplitDown and SplaySplitRev use the trees slightly + * differently. SplaySplitRev does not provide "left" and "right", and + * "leftLast" and "rightFirst" are pointer-reversed spines. 
+ */ + +typedef struct SplayStateStruct { + Tree middle; /* always non-empty, has the found node at the root */ + Tree left; /* nodes less than search key during split */ + Tree leftLast; /* rightmost node on right spine of "left" */ + Tree right; /* nodes greater than search key during split */ + Tree rightFirst; /* leftmost node on left spine of "right" */ +} SplayStateStruct, *SplayState; + + +/* SplaySplitDown -- divide the tree around a key + * + * Split a tree into three according to a key and a comparison, + * splaying nested left and right nodes. Preserves tree ordering. + * This is a top-down splay procedure, and does not use any recursion + * or require any parent pointers . + * + * Returns cmp, the relationship of the root of the middle tree to the key, + * and a SplayState. + * + * Does *not* call update to maintain client properties. See SplaySplitRev. + */ + +static Compare SplaySplitDown(SplayStateStruct *stateReturn, + SplayTree splay, TreeKey key, + TreeCompareFunction compare) +{ + TreeStruct sentinel; + Tree middle, leftLast, rightFirst, leftPrev, rightNext; + Compare cmp; + + AVERT(SplayTree, splay); + AVER(FUNCHECK(compare)); + AVER(!SplayTreeIsEmpty(splay)); + AVER(!SplayHasUpdate(splay)); + + TreeInit(&sentinel); + leftLast = &sentinel; + rightFirst = &sentinel; + middle = SplayTreeRoot(splay); + for (;;) { + cmp = compare(middle, key); + switch(cmp) { + default: + NOTREACHED; + /* defensive fall-through */ + case CompareEQUAL: + goto stop; + + case CompareLESS: + if (!TreeHasLeft(middle)) + goto stop; + middle = SplayZig(middle, &rightFirst, &rightNext); + cmp = compare(middle, key); + switch(cmp) { + default: + NOTREACHED; + /* defensive fall-through */ + case CompareEQUAL: + goto stop; + case CompareLESS: + if (!TreeHasLeft(middle)) + goto stop; + middle = SplayZigZig(middle, &rightFirst, rightNext); + break; + case CompareGREATER: + if (!TreeHasRight(middle)) + goto stop; + middle = SplayZag(middle, &leftLast, &leftPrev); + break; + } + 
break; + + case CompareGREATER: + if (!TreeHasRight(middle)) + goto stop; + middle = SplayZag(middle, &leftLast, &leftPrev); + cmp = compare(middle, key); + switch(cmp) { + default: + NOTREACHED; + /* defensive fall-through */ + case CompareEQUAL: + goto stop; + case CompareGREATER: + if (!TreeHasRight(middle)) + goto stop; + middle = SplayZagZag(middle, &leftLast, leftPrev); + break; + case CompareLESS: + if (!TreeHasLeft(middle)) + goto stop; + middle = SplayZig(middle, &rightFirst, &rightNext); + break; + } + break; + } + } + +stop: + stateReturn->middle = middle; + stateReturn->left = TreeRight(&sentinel); + stateReturn->leftLast = leftLast == &sentinel ? TreeEMPTY : leftLast; + stateReturn->right = TreeLeft(&sentinel); + stateReturn->rightFirst = rightFirst == &sentinel ? TreeEMPTY : rightFirst; + return cmp; +} + + +/* SplayAssembleDown -- assemble left right and middle trees into one + * + * Takes the result of a SplaySplit and forms a single tree with the + * root of the middle tree as the root. + * + * left middle right middle + * B P V P + * / \ / \ / \ => / \ + * A C N Q U X B V + * leftLast rightFirst / \ / \ + * A C U X + * \ / + * N Q + * + * The children of the middle tree are grafted onto the last and first + * nodes of the side trees, which become the children of the root. + * + * Does *not* maintain client properties. See SplayAssembleRev. + * + * . 
+ */ + +static void SplayAssembleDown(SplayTree splay, SplayState state) +{ + AVERT(SplayTree, splay); + AVER(state->middle != TreeEMPTY); + AVER(!SplayHasUpdate(splay)); + + if (state->left != TreeEMPTY) { + AVER_CRITICAL(state->leftLast != TreeEMPTY); + TreeSetRight(state->leftLast, TreeLeft(state->middle)); + TreeSetLeft(state->middle, state->left); + } + + if (state->right != TreeEMPTY) { + AVER_CRITICAL(state->rightFirst != TreeEMPTY); + TreeSetLeft(state->rightFirst, TreeRight(state->middle)); + TreeSetRight(state->middle, state->right); + } +} + + +/* SplayZigRev -- move to left child, prepending to reversed right tree + * + * Same as SplayZig, except that the left spine of the right tree is + * pointer-reversed, so that its left children point at their parents + * instead of their children. This is fixed up in SplayAssembleRev. + */ + +static Tree SplayZigRev(Tree middle, Tree *rightFirstIO) +{ + Tree child; + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(rightFirstIO != NULL); + AVERT_CRITICAL(Tree, *rightFirstIO); + child = TreeLeft(middle); + TreeSetLeft(middle, *rightFirstIO); + *rightFirstIO = middle; + return child; +} + +/* SplayZigZigRev -- move to left child, rotating onto reversed right tree + * + * Same as SplayZigZig, except that the right tree is pointer reversed + * (see SplayZigRev) + */ + +static Tree SplayZigZigRev(Tree middle, Tree *rightFirstIO) +{ + Tree child; + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(rightFirstIO != NULL); + AVERT_CRITICAL(Tree, *rightFirstIO); + child = TreeLeft(middle); + TreeSetLeft(middle, TreeLeft(*rightFirstIO)); + TreeSetLeft(*rightFirstIO, TreeRight(middle)); + TreeSetRight(middle, *rightFirstIO); + *rightFirstIO = middle; + return child; +} + +/* SplayZagRev -- mirror image of SplayZigRev */ + +static Tree SplayZagRev(Tree middle, Tree *leftLastIO) +{ + Tree child; + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(leftLastIO != NULL); + AVERT_CRITICAL(Tree, *leftLastIO); + child = TreeRight(middle); + 
TreeSetRight(middle, *leftLastIO); + *leftLastIO = middle; + return child; +} + +/* SplayZagZagRev -- mirror image of SplayZigZigRev */ + +static Tree SplayZagZagRev(Tree middle, Tree *leftLastIO) +{ + Tree child; + AVERT_CRITICAL(Tree, middle); + AVER_CRITICAL(leftLastIO != NULL); + AVERT_CRITICAL(Tree, *leftLastIO); + child = TreeRight(middle); + TreeSetRight(middle, TreeRight(*leftLastIO)); + TreeSetRight(*leftLastIO, TreeLeft(middle)); + TreeSetLeft(middle, *leftLastIO); + *leftLastIO = middle; + return child; +} + + +/* SplaySplitRev -- divide the tree around a key + * + * This is the same as SplaySplit, except that: + * - the left and right trees are pointer reversed on their spines + * - client properties for rotated nodes (not on the spines) are + * updated + */ + +static Compare SplaySplitRev(SplayStateStruct *stateReturn, + SplayTree splay, TreeKey key, + TreeCompareFunction compare) +{ + SplayUpdateNodeFunction updateNode; + Tree middle, leftLast, rightFirst; + Compare cmp; + + AVERT_CRITICAL(SplayTree, splay); + AVER_CRITICAL(FUNCHECK(compare)); + AVER_CRITICAL(!SplayTreeIsEmpty(splay)); + + updateNode = splay->updateNode; + leftLast = TreeEMPTY; + rightFirst = TreeEMPTY; + middle = SplayTreeRoot(splay); + for (;;) { + cmp = compare(middle, key); + switch(cmp) { + default: + NOTREACHED; + /* defensive fall-through */ + case CompareEQUAL: + goto stop; + + case CompareLESS: + if (!TreeHasLeft(middle)) + goto stop; + middle = SplayZigRev(middle, &rightFirst); + cmp = compare(middle, key); + switch(cmp) { + default: + NOTREACHED; + /* defensive fall-through */ + case CompareEQUAL: + goto stop; + case CompareLESS: + if (!TreeHasLeft(middle)) + goto stop; + middle = SplayZigZigRev(middle, &rightFirst); + updateNode(splay, TreeRight(rightFirst)); + break; + case CompareGREATER: + if (!TreeHasRight(middle)) + goto stop; + middle = SplayZagRev(middle, &leftLast); + break; + } + break; + + case CompareGREATER: + if (!TreeHasRight(middle)) + goto stop; + middle = 
SplayZagRev(middle, &leftLast); + cmp = compare(middle, key); + switch(cmp) { + default: + NOTREACHED; + /* defensive fall-through */ + case CompareEQUAL: + goto stop; + case CompareGREATER: + if (!TreeHasRight(middle)) + goto stop; + middle = SplayZagZagRev(middle, &leftLast); + updateNode(splay, TreeLeft(leftLast)); + break; + case CompareLESS: + if (!TreeHasLeft(middle)) + goto stop; + middle = SplayZigRev(middle, &rightFirst); + break; + } + break; + } + } + +stop: + stateReturn->middle = middle; + stateReturn->leftLast = leftLast; + stateReturn->rightFirst = rightFirst; + return cmp; +} + + +/* SplayUpdateLeftSpine -- undo pointer reversal, updating client property */ + +static Tree SplayUpdateLeftSpine(SplayTree splay, Tree node, Tree child) +{ + SplayUpdateNodeFunction updateNode; + + AVERT_CRITICAL(SplayTree, splay); + AVERT_CRITICAL(Tree, node); + AVERT_CRITICAL(Tree, child); + + updateNode = splay->updateNode; + while(node != TreeEMPTY) { + Tree parent = TreeLeft(node); + TreeSetLeft(node, child); /* un-reverse pointer */ + updateNode(splay, node); + child = node; + node = parent; + } + return child; +} + +/* SplayUpdateRightSpine -- mirror of SplayUpdateLeftSpine */ + +static Tree SplayUpdateRightSpine(SplayTree splay, Tree node, Tree child) +{ + SplayUpdateNodeFunction updateNode; + + AVERT_CRITICAL(SplayTree, splay); + AVERT_CRITICAL(Tree, node); + AVERT_CRITICAL(Tree, child); + + updateNode = splay->updateNode; + while (node != TreeEMPTY) { + Tree parent = TreeRight(node); + TreeSetRight(node, child); /* un-reverse pointer */ + updateNode(splay, node); + child = node; + node = parent; + } + return child; +} + + +/* SplayAssembleRev -- pointer reversed SplayAssemble + * + * Does the same job as SplayAssemble, but operates on pointer-reversed + * left and right trees, updating client properties. 
When we reach + * this function, the nodes on the spines of the left and right trees + * will have out-of-date client properties because their children have + * been changed by SplaySplitRev. + */ + +static void SplayAssembleRev(SplayTree splay, SplayState state) +{ + Tree left, right; + + AVERT_CRITICAL(SplayTree, splay); + AVER_CRITICAL(state->middle != TreeEMPTY); + + left = TreeLeft(state->middle); + left = SplayUpdateRightSpine(splay, state->leftLast, left); + TreeSetLeft(state->middle, left); + + right = TreeRight(state->middle); + right = SplayUpdateLeftSpine(splay, state->rightFirst, right); + TreeSetRight(state->middle, right); + + splay->updateNode(splay, state->middle); +} + + +/* SplaySplit -- call SplaySplitDown or SplaySplitRev as appropriate */ + +static Compare SplaySplit(SplayStateStruct *stateReturn, + SplayTree splay, TreeKey key, + TreeCompareFunction compare) +{ + if (SplayHasUpdate(splay)) + return SplaySplitRev(stateReturn, splay, key, compare); + else + return SplaySplitDown(stateReturn, splay, key, compare); +} + + +/* SplayAssemble -- call SplayAssembleDown or SplayAssembleRev as appropriate */ + +static void SplayAssemble(SplayTree splay, SplayState state) +{ + if (SplayHasUpdate(splay)) + SplayAssembleRev(splay, state); + else + SplayAssembleDown(splay, state); +} + + +/* SplaySplay -- splay the tree around a given key + * + * Uses SplaySplitRev/SplayAssembleRev or SplaySplitDown/SplayAssembleDown + * as appropriate, but also catches the empty tree case and shortcuts + * the common case where the wanted node is already at the root (due + * to a previous splay). The latter shortcut has a significant effect + * on run time. + * + * If a matching node is found, it is splayed to the root and the function + * returns CompareEQUAL, or if the tree is empty, will also return + * CompareEQUAL. Otherwise, CompareGREATER or CompareLESS is returned + * meaning either the key is greater or less than the new root. 
In this + * case the new root is the last node visited which is either the closest + * node left or the closest node right of the key. + * + * . + */ + +static Compare SplaySplay(SplayTree splay, TreeKey key, + TreeCompareFunction compare) +{ + Compare cmp; + SplayStateStruct stateStruct; + +#ifdef SPLAY_DEBUG + Count count = SplayDebugCount(splay); +#endif + + /* Short-circuit common cases. Splay trees often bring recently + accessed nodes to the root. */ + if (SplayTreeIsEmpty(splay) || + compare(SplayTreeRoot(splay), key) == CompareEQUAL) + return CompareEQUAL; + + if (SplayHasUpdate(splay)) { + cmp = SplaySplitRev(&stateStruct, splay, key, compare); + SplayAssembleRev(splay, &stateStruct); + } else { + cmp = SplaySplitDown(&stateStruct, splay, key, compare); + SplayAssembleDown(splay, &stateStruct); + } + + SplayTreeSetRoot(splay, stateStruct.middle); + +#ifdef SPLAY_DEBUG + AVER(count == SplayDebugCount(splay)); +#endif + + return cmp; +} + + +/* SplayTreeInsert -- insert a node into a splay tree + * + * This function is used to insert a node into the tree. Splays the + * tree at the node's key. If an attempt is made to insert a node that + * compares ``CompareEQUAL`` to an existing node in the tree, then + * ``FALSE`` will be returned and the node will not be inserted. + * + * NOTE: It would be possible to use split here, then assemble around + * the new node, leaving the neighbour where it was, but it's probably + * a good thing for key neighbours to be tree neighbours. 
+ */ + +Bool SplayTreeInsert(SplayTree splay, Tree node) +{ + Tree neighbour; + + AVERT(SplayTree, splay); + AVERT(Tree, node); + AVER(TreeLeft(node) == TreeEMPTY); + AVER(TreeRight(node) == TreeEMPTY); + + if (SplayTreeIsEmpty(splay)) { + SplayTreeSetRoot(splay, node); + return TRUE; + } + + switch (SplaySplay(splay, splay->nodeKey(node), splay->compare)) { + default: + NOTREACHED; + /* fall through */ + case CompareEQUAL: /* duplicate node */ + return FALSE; + + case CompareGREATER: /* left neighbour is at root */ + neighbour = SplayTreeRoot(splay); + SplayTreeSetRoot(splay, node); + TreeSetRight(node, TreeRight(neighbour)); + TreeSetLeft(node, neighbour); + TreeSetRight(neighbour, TreeEMPTY); + break; + + case CompareLESS: /* right neighbour is at root */ + neighbour = SplayTreeRoot(splay); + SplayTreeSetRoot(splay, node); + TreeSetLeft(node, TreeLeft(neighbour)); + TreeSetRight(node, neighbour); + TreeSetLeft(neighbour, TreeEMPTY); + break; + } + + splay->updateNode(splay, neighbour); + splay->updateNode(splay, node); + return TRUE; +} + + +/* SplayTreeDelete -- delete a node from a splay tree + * + * Delete a node from the tree. If the tree does not contain the given + * node then ``FALSE`` will be returned. The client must not pass a + * node whose key compares equal to a different node in the tree. + * + * The function first splays the tree at the given key. + * + * TODO: If the node has zero or one children, then the replacement + * would be the leftLast or rightFirst after a SplaySplit, and would + * avoid a search for a replacement in more cases. 
+ */
+
+Bool SplayTreeDelete(SplayTree splay, Tree node)
+{
+  Tree leftLast;
+  Compare cmp;
+
+  AVERT(SplayTree, splay);
+  AVERT(Tree, node);
+
+  if (SplayTreeIsEmpty(splay))
+    return FALSE;
+
+  cmp = SplaySplay(splay, splay->nodeKey(node), splay->compare);
+  AVER(cmp != CompareEQUAL || SplayTreeRoot(splay) == node);
+
+  if (cmp != CompareEQUAL) {
+    return FALSE;
+  } else if (!TreeHasLeft(node)) {
+    SplayTreeSetRoot(splay, TreeRight(node));
+    TreeClearRight(node);
+  } else if (!TreeHasRight(node)) {
+    SplayTreeSetRoot(splay, TreeLeft(node));
+    TreeClearLeft(node);
+  } else {
+    Tree rightHalf = TreeRight(node);
+    TreeClearRight(node);
+    SplayTreeSetRoot(splay, TreeLeft(node));
+    TreeClearLeft(node);
+    (void)SplaySplay(splay, NULL, compareGreater);
+    leftLast = SplayTreeRoot(splay);
+    AVER(leftLast != TreeEMPTY);
+    AVER(!TreeHasRight(leftLast));
+    TreeSetRight(leftLast, rightHalf);
+    splay->updateNode(splay, leftLast);
+  }
+
+  TreeFinish(node);
+
+  return TRUE;
+}
+
+
+/* SplayTreeFind -- search for a node in a splay tree matching a key
+ *
+ * Search the tree for a node that compares ``CompareEQUAL`` to a key.
+ * Splays the tree at the key. Returns ``FALSE`` if there is no such
+ * node in the tree, otherwise ``*nodeReturn`` will be set to the node.
+ */
+
+Bool SplayTreeFind(Tree *nodeReturn, SplayTree splay, TreeKey key)
+{
+  AVERT(SplayTree, splay);
+  AVER(nodeReturn != NULL);
+
+  if (SplayTreeIsEmpty(splay))
+    return FALSE;
+
+  if (SplaySplay(splay, key, splay->compare) != CompareEQUAL)
+    return FALSE;
+
+  *nodeReturn = SplayTreeRoot(splay);
+  return TRUE;
+}
+
+
+/* SplayTreeSuccessor -- splays a tree at the root's successor
+ *
+ * Must not be called on an empty tree. Successor need not exist,
+ * in which case TreeEMPTY is returned, and the tree is unchanged. 
+ */ + +static Tree SplayTreeSuccessor(SplayTree splay) +{ + Tree oldRoot, newRoot; + + AVERT(SplayTree, splay); + AVER(!SplayTreeIsEmpty(splay)); + + oldRoot = SplayTreeRoot(splay); + + if (!TreeHasRight(oldRoot)) + return TreeEMPTY; /* No successor */ + + /* temporarily chop off the left half-tree, inclusive of root */ + SplayTreeSetRoot(splay, TreeRight(oldRoot)); + TreeSetRight(oldRoot, TreeEMPTY); + (void)SplaySplay(splay, NULL, compareLess); + newRoot = SplayTreeRoot(splay); + AVER(newRoot != TreeEMPTY); + AVER(TreeLeft(newRoot) == TreeEMPTY); + TreeSetLeft(newRoot, oldRoot); + splay->updateNode(splay, oldRoot); + splay->updateNode(splay, newRoot); + + return newRoot; +} + + +/* SplayTreeNeighbours + * + * Search for the two nodes in a splay tree neighbouring a key. + * Splays the tree at the key. ``*leftReturn`` will be the neighbour + * which compares less than the key if such a neighbour exists; otherwise + * it will be ``TreeEMPTY``. ``*rightReturn`` will be the neighbour which + * compares greater than the key if such a neighbour exists; otherwise + * it will be ``TreeEMPTY``. The function returns ``FALSE`` if any node + * in the tree compares ``CompareEQUAL`` with the given key. + * + * TODO: Change to SplayTreeCoalesce that takes a function that can + * direct the deletion of one of the neighbours, since this is a + * good moment to do it, avoiding another search and splay. + * + * This implementation uses SplaySplit to find both neighbours in a + * single splay . 
+ */
+
+Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn,
+                         SplayTree splay, TreeKey key)
+{
+  SplayStateStruct stateStruct;
+  Bool found;
+  Compare cmp;
+#ifdef SPLAY_DEBUG
+  Count count = SplayDebugCount(splay);
+#endif
+
+  AVERT_CRITICAL(SplayTree, splay);
+  AVER_CRITICAL(leftReturn != NULL);
+  AVER_CRITICAL(rightReturn != NULL);
+
+  if (SplayTreeIsEmpty(splay)) {
+    *leftReturn = *rightReturn = TreeEMPTY;
+    return TRUE;
+  }
+
+  cmp = SplaySplit(&stateStruct, splay, key, splay->compare);
+
+  switch (cmp) {
+  default:
+    NOTREACHED;
+    /* fall through */
+  case CompareEQUAL:
+    found = FALSE;
+    break;
+
+  case CompareLESS:
+    AVER_CRITICAL(!TreeHasLeft(stateStruct.middle));
+    *rightReturn = stateStruct.middle;
+    *leftReturn = stateStruct.leftLast;
+    found = TRUE;
+    break;
+
+  case CompareGREATER:
+    AVER_CRITICAL(!TreeHasRight(stateStruct.middle));
+    *leftReturn = stateStruct.middle;
+    *rightReturn = stateStruct.rightFirst;
+    found = TRUE;
+    break;
+  }
+
+  SplayAssemble(splay, &stateStruct);
+  SplayTreeSetRoot(splay, stateStruct.middle);
+
+#ifdef SPLAY_DEBUG
+  AVER(count == SplayDebugCount(splay));
+#endif
+
+  return found;
+}
+
+
+/* SplayTreeFirst, SplayTreeNext -- iterators
+ *
+ * SplayTreeFirst returns TreeEMPTY if the tree is empty. Otherwise,
+ * it splays the tree to the first node, and returns the new root.
+ *
+ * SplayTreeNext takes a tree and splays it to the successor of a key
+ * and returns the new root. Returns TreeEMPTY if there are no
+ * successors.
+ *
+ * SplayTreeFirst and SplayTreeNext do not require the tree to remain
+ * unmodified.
+ *
+ * IMPORTANT: Iterating over the tree using these functions will leave
+ * the tree totally unbalanced, throwing away optimisations of the tree
+ * shape caused by previous splays. Consider using TreeTraverse instead. 
+ */ + +Tree SplayTreeFirst(SplayTree splay) +{ + Tree node; + + AVERT(SplayTree, splay); + + if (SplayTreeIsEmpty(splay)) + return TreeEMPTY; + + (void)SplaySplay(splay, NULL, compareLess); + node = SplayTreeRoot(splay); + AVER(node != TreeEMPTY); + AVER(TreeLeft(node) == TreeEMPTY); + + return node; +} + +Tree SplayTreeNext(SplayTree splay, TreeKey oldKey) +{ + AVERT(SplayTree, splay); + + if (SplayTreeIsEmpty(splay)) + return TreeEMPTY; + + /* Make old node the root. Probably already is. We don't mind if the + node has been deleted, or replaced by a node with the same key. */ + switch (SplaySplay(splay, oldKey, splay->compare)) { + default: + NOTREACHED; + /* fall through */ + case CompareLESS: + return SplayTreeRoot(splay); + + case CompareGREATER: + case CompareEQUAL: + return SplayTreeSuccessor(splay); + } +} + + +/* SplayNodeDescribe -- Describe a node in the splay tree + * + * Note that this breaks the restriction of .note.stack. + * This is alright as the function is debug only. 
+ */ + +static Res SplayNodeDescribe(Tree node, mps_lib_FILE *stream, + TreeDescribeFunction nodeDescribe) +{ + Res res; + + if (!TreeCheck(node)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + if (nodeDescribe == NULL) + return ResFAIL; + + res = WriteF(stream, 0, "( ", NULL); + if (res != ResOK) + return res; + + if (TreeHasLeft(node)) { + res = SplayNodeDescribe(TreeLeft(node), stream, nodeDescribe); + if (res != ResOK) + return res; + + res = WriteF(stream, 0, " / ", NULL); + if (res != ResOK) + return res; + } + + res = (*nodeDescribe)(node, stream); + if (res != ResOK) + return res; + + if (TreeHasRight(node)) { + res = WriteF(stream, 0, " \\ ", NULL); + if (res != ResOK) + return res; + + res = SplayNodeDescribe(TreeRight(node), stream, nodeDescribe); + if (res != ResOK) + return res; + } + + res = WriteF(stream, 0, " )", NULL); + if (res != ResOK) + return res; + + return ResOK; +} + + +/* SplayFindFirstCompare, SplayFindLastCompare -- filtering searches + * + * These are used by SplayFindFirst and SplayFindLast as comparison + * functions to SplaySplit in order to home in on a node using client + * tests. The way to understand them is that the comparison values + * they return have nothing to do with the tree ordering, but are instead + * like commands that tell SplaySplit whether to "go left", "stop", or + * "go right" according to the results of testNode and testTree. + * Since splaying preserves the order of the tree, any tests can be + * applied to navigate to a destination. + * + * In the MPS these are mainly used by the CBS to search for memory + * blocks above a certain size. Their performance is quite critical. 
+ */
+
+typedef struct SplayFindClosureStruct {
+  SplayTestNodeFunction testNode;
+  SplayTestTreeFunction testTree;
+  void *testClosure;
+  SplayTree splay;
+  Bool found;
+} SplayFindClosureStruct, *SplayFindClosure;
+
+static Compare SplayFindFirstCompare(Tree node, TreeKey key)
+{
+  SplayFindClosure my;
+  SplayTestNodeFunction testNode;
+  SplayTestTreeFunction testTree;
+  void *testClosure;
+  SplayTree splay;
+
+  AVERT_CRITICAL(Tree, node);
+  AVER_CRITICAL(key != NULL);
+
+  /* Lift closure values into variables so that they aren't aliased by
+     calls to the test functions. */
+  my = (SplayFindClosure)key;
+  testClosure = my->testClosure;
+  testNode = my->testNode;
+  testTree = my->testTree;
+  splay = my->splay;
+
+  if (TreeHasLeft(node) &&
+      (*testTree)(splay, TreeLeft(node), testClosure)) {
+    return CompareLESS;
+  } else if ((*testNode)(splay, node, testClosure)) {
+    my->found = TRUE;
+    return CompareEQUAL;
+  } else {
+    /* If there's a right subtree but it doesn't satisfy the tree test
+       then we want to terminate the splay right now. SplaySplay will
+       return CompareEQUAL, so the caller must check closure->found to
+       find out whether the result node actually satisfies testNode. */
+    if (TreeHasRight(node) &&
+        !(*testTree)(splay, TreeRight(node), testClosure)) {
+      my->found = FALSE;
+      return CompareEQUAL;
+    }
+    return CompareGREATER;
+  }
+}
+
+static Compare SplayFindLastCompare(Tree node, TreeKey key)
+{
+  SplayFindClosure my;
+  SplayTestNodeFunction testNode;
+  SplayTestTreeFunction testTree;
+  void *testClosure;
+  SplayTree splay;
+
+  AVERT_CRITICAL(Tree, node);
+  AVER_CRITICAL(key != NULL);
+
+  /* Lift closure values into variables so that they aren't aliased by
+     calls to the test functions. 
*/ + my = (SplayFindClosure)key; + testClosure = my->testClosure; + testNode = my->testNode; + testTree = my->testTree; + splay = my->splay; + + if (TreeHasRight(node) && + (*testTree)(splay, TreeRight(node), testClosure)) { + return CompareGREATER; + } else if ((*testNode)(splay, node, testClosure)) { + my->found = TRUE; + return CompareEQUAL; + } else { + /* See SplayFindFirstCompare. */ + if (TreeHasLeft(node) && + !(*testTree)(splay, TreeLeft(node), testClosure)) { + my->found = FALSE; + return CompareEQUAL; + } + return CompareLESS; + } +} + + +/* SplayFindFirst -- Find first node that satisfies client property + * + * This function finds the first node (in address order) in the given + * tree that satisfies some property defined by the client. The + * property is such that the client can detect, given a sub-tree, + * whether that sub-tree contains any nodes satisfying the property. + * If there is no satisfactory node, ``FALSE`` is returned, otherwise + * ``*nodeReturn`` is set to the node. + * + * The given callbacks testNode and testTree detect this property in + * a single node or a sub-tree rooted at a node, and both receive an + * arbitrary closure. + * + * TODO: This repeatedly splays failed matches to the root and rotates + * them, so it could have quite an unbalancing effect if size is small. + * Think about a better search, perhaps using TreeTraverse? 
+ */ + +Bool SplayFindFirst(Tree *nodeReturn, SplayTree splay, + SplayTestNodeFunction testNode, + SplayTestTreeFunction testTree, + void *testClosure) +{ + SplayFindClosureStruct closureStruct; + Bool found; + + AVER_CRITICAL(nodeReturn != NULL); + AVERT_CRITICAL(SplayTree, splay); + AVER_CRITICAL(FUNCHECK(testNode)); + AVER_CRITICAL(FUNCHECK(testTree)); + + if (SplayTreeIsEmpty(splay) || + !testTree(splay, SplayTreeRoot(splay), testClosure)) + return FALSE; /* no suitable nodes in tree */ + + closureStruct.testClosure = testClosure; + closureStruct.testNode = testNode; + closureStruct.testTree = testTree; + closureStruct.splay = splay; + closureStruct.found = FALSE; + + found = SplaySplay(splay, &closureStruct, + SplayFindFirstCompare) == CompareEQUAL && + closureStruct.found; + + while (!found) { + Tree oldRoot, newRoot; + + /* TODO: Rename to "seen" and "not yet seen" to make it clear what + these are for, and because there's nothing particularly + temporal about them. */ + oldRoot = SplayTreeRoot(splay); + newRoot = TreeRight(oldRoot); + + if (newRoot == TreeEMPTY || !(*testTree)(splay, newRoot, testClosure)) + return FALSE; /* no suitable nodes in the rest of the tree */ + + /* Temporarily chop off the left half-tree, inclusive of root, + so that the search excludes any nodes we've seen already. */ + SplayTreeSetRoot(splay, newRoot); + TreeSetRight(oldRoot, TreeEMPTY); + + found = SplaySplay(splay, &closureStruct, + SplayFindFirstCompare) == CompareEQUAL && + closureStruct.found; + + /* Restore the left tree, then rotate left so that the node we + just splayed is at the root. Update both. 
*/ + newRoot = SplayTreeRoot(splay); + TreeSetRight(oldRoot, newRoot); + SplayTreeSetRoot(splay, oldRoot); + TreeRotateLeft(&splay->root); + splay->updateNode(splay, oldRoot); + splay->updateNode(splay, newRoot); + } + + *nodeReturn = SplayTreeRoot(splay); + return TRUE; +} + + +/* SplayFindLast -- As SplayFindFirst but in reverse address order */ + +Bool SplayFindLast(Tree *nodeReturn, SplayTree splay, + SplayTestNodeFunction testNode, + SplayTestTreeFunction testTree, + void *testClosure) +{ + SplayFindClosureStruct closureStruct; + Bool found; + + AVER_CRITICAL(nodeReturn != NULL); + AVERT_CRITICAL(SplayTree, splay); + AVER_CRITICAL(FUNCHECK(testNode)); + AVER_CRITICAL(FUNCHECK(testTree)); + + if (SplayTreeIsEmpty(splay) || + !testTree(splay, SplayTreeRoot(splay), testClosure)) + return FALSE; /* no suitable nodes in tree */ + + closureStruct.testClosure = testClosure; + closureStruct.testNode = testNode; + closureStruct.testTree = testTree; + closureStruct.splay = splay; + + found = SplaySplay(splay, &closureStruct, + SplayFindLastCompare) == CompareEQUAL && + closureStruct.found; + + while (!found) { + Tree oldRoot, newRoot; + + oldRoot = SplayTreeRoot(splay); + newRoot = TreeLeft(oldRoot); + + if (newRoot == TreeEMPTY || !(*testTree)(splay, newRoot, testClosure)) + return FALSE; /* no suitable nodes in the rest of the tree */ + + /* Temporarily chop off the right half-tree, inclusive of root, + so that the search excludes any nodes we've seen already. */ + SplayTreeSetRoot(splay, newRoot); + TreeSetLeft(oldRoot, TreeEMPTY); + + found = SplaySplay(splay, &closureStruct, + SplayFindLastCompare) == CompareEQUAL && + closureStruct.found; + + /* Restore the right tree, then rotate right so that the node we + just splayed is at the root. Update both. 
*/ + newRoot = SplayTreeRoot(splay); + TreeSetLeft(oldRoot, newRoot); + SplayTreeSetRoot(splay, oldRoot); + TreeRotateRight(&splay->root); + splay->updateNode(splay, oldRoot); + splay->updateNode(splay, newRoot); + } + + *nodeReturn = SplayTreeRoot(splay); + return TRUE; +} + + +/* SplayNodeRefresh -- updates the client property that has changed at a node + * + * This function undertakes to call the client updateNode callback for each + * node affected by the change in properties at the given node (which has + * the given key) in an appropriate order. + * + * The function fulfils its job by first splaying at the given node, and + * updating the single node. In the MPS it is used by the CBS during + * coalescing, when the node is likely to be at (or adjacent to) the top + * of the tree anyway. + */ + +void SplayNodeRefresh(SplayTree splay, Tree node) +{ + Compare cmp; + + AVERT(SplayTree, splay); + AVERT(Tree, node); + AVER(!SplayTreeIsEmpty(splay)); /* must contain node, at least */ + + cmp = SplaySplay(splay, splay->nodeKey(node), splay->compare); + AVER(cmp == CompareEQUAL); + AVER(SplayTreeRoot(splay) == node); + + splay->updateNode(splay, node); +} + + +/* SplayNodeInit -- initialize client property without splaying */ + +void SplayNodeInit(SplayTree splay, Tree node) +{ + AVERT(SplayTree, splay); + AVERT(Tree, node); + AVER(!TreeHasLeft(node)); /* otherwise, call SplayNodeRefresh */ + AVER(!TreeHasRight(node)); /* otherwise, call SplayNodeRefresh */ + + splay->updateNode(splay, node); +} + + +/* SplayTreeDescribe -- Describe a splay tree + * + * . 
+ */ + +Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, Count depth, + TreeDescribeFunction nodeDescribe) +{ + Res res; + + if (!TESTT(SplayTree, splay)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + if (nodeDescribe == NULL) + return ResFAIL; + + res = WriteF(stream, depth, + "Splay $P {\n", (WriteFP)splay, + " compare $F\n", (WriteFF)splay->compare, + " nodeKey $F\n", (WriteFF)splay->nodeKey, + " updateNode $F\n", (WriteFF)splay->updateNode, + NULL); + if (res != ResOK) + return res; + + if (SplayTreeRoot(splay) != TreeEMPTY) { + res = WriteF(stream, depth, " tree ", NULL); + if (res != ResOK) + return res; + res = SplayNodeDescribe(SplayTreeRoot(splay), stream, nodeDescribe); + if (res != ResOK) + return res; + } + + res = WriteF(stream, depth, "\n} Splay $P\n", (WriteFP)splay, NULL); + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/splay.h b/mps/code/splay.h new file mode 100644 index 00000000000..bc7ef9328bc --- /dev/null +++ b/mps/code/splay.h @@ -0,0 +1,111 @@ +/* splay.h: SPLAY TREE HEADER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .source: + */ + +#ifndef splay_h +#define splay_h + +#include "mpmtypes.h" /* for Res, etc. */ +#include "tree.h" + + +typedef struct SplayTreeStruct *SplayTree; + +typedef Bool (*SplayTestNodeFunction)(SplayTree splay, Tree node, + void *closure); +typedef Bool (*SplayTestTreeFunction)(SplayTree splay, Tree node, + void *closure); + +typedef void (*SplayUpdateNodeFunction)(SplayTree splay, Tree node); +extern void SplayTrivUpdate(SplayTree splay, Tree node); + +#define SplayTreeSig ((Sig)0x5195B1A1) /* SIGnature SPLAY */ + +typedef struct SplayTreeStruct { + Sig sig; /* design.mps.sig.field */ + TreeCompareFunction compare; + TreeKeyFunction nodeKey; + SplayUpdateNodeFunction updateNode; + Tree root; +} SplayTreeStruct; + +#define SplayTreeRoot(splay) RVALUE((splay)->root) +#define SplayTreeIsEmpty(splay) (SplayTreeRoot(splay) == TreeEMPTY) + +extern Bool SplayTreeCheck(SplayTree splay); +extern void SplayTreeInit(SplayTree splay, + TreeCompareFunction compare, + TreeKeyFunction nodeKey, + SplayUpdateNodeFunction updateNode); +extern void SplayTreeFinish(SplayTree splay); + +extern Bool SplayTreeInsert(SplayTree splay, Tree node); +extern Bool 
SplayTreeDelete(SplayTree splay, Tree node); + +extern Bool SplayTreeFind(Tree *nodeReturn, SplayTree splay, TreeKey key); + +extern Bool SplayTreeNeighbours(Tree *leftReturn, + Tree *rightReturn, + SplayTree splay, TreeKey key); + +extern Tree SplayTreeFirst(SplayTree splay); +extern Tree SplayTreeNext(SplayTree splay, TreeKey oldKey); + +typedef Bool (*SplayFindFunction)(Tree *nodeReturn, SplayTree splay, + SplayTestNodeFunction testNode, + SplayTestTreeFunction testTree, + void *closure); +extern Bool SplayFindFirst(Tree *nodeReturn, SplayTree splay, + SplayTestNodeFunction testNode, + SplayTestTreeFunction testTree, + void *closure); +extern Bool SplayFindLast(Tree *nodeReturn, SplayTree splay, + SplayTestNodeFunction testNode, + SplayTestTreeFunction testTree, + void *closure); + +extern void SplayNodeRefresh(SplayTree splay, Tree node); +extern void SplayNodeInit(SplayTree splay, Tree node); + +extern Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, + Count depth, TreeDescribeFunction nodeDescribe); + +extern void SplayDebugUpdate(SplayTree splay, Tree tree); +extern Count SplayDebugCount(SplayTree splay); + + +#endif /* splay_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/spw3i3.c b/mps/code/spw3i3.c new file mode 100644 index 00000000000..a43633eff12 --- /dev/null +++ b/mps/code/spw3i3.c @@ -0,0 +1,65 @@ +/* spw3i3.c: STACK PROBE FOR 32-BIT WINDOWS + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2001 Global Graphics Software. + * + * This function reads a location that is depth words beyond the + * current stack pointer. On Intel platforms, the stack grows + * downwards, so this means reading from a location with a lesser + * address. + */ + + +#include "mpm.h" + +#if !defined(MPS_OS_W3) || !defined(MPS_ARCH_I3) +#error "spw3i3.c is specific to MPS_OS_W3 and MPS_ARCH_I3" +#endif + +#ifdef MPS_BUILD_PC + +/* "[ISO] Inline assembly code is not portable." */ +#pragma warn(disable: 2007) + +#endif /* MPS_BUILD_PC */ + + +void StackProbe(Size depth) +{ + __asm { + mov eax, depth + neg eax + mov eax, [esp+eax*4] /* do the actual probe */ + } +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/spw3i6.c b/mps/code/spw3i6.c new file mode 100644 index 00000000000..d06b4afcab0 --- /dev/null +++ b/mps/code/spw3i6.c @@ -0,0 +1,55 @@ +/* spw3i6.c: STACK PROBE FOR 64-BIT WINDOWS + * + * $Id$ + * Copyright (c) 2013-2020 Ravenbrook Limited. See end of file for license. + * + * The function StackProbe ensures that the stack has at least depth + * words available. It achieves this by exploiting an obscure but + * documented feature of Microsoft's function _alloca: "A stack + * overflow exception is generated if the space cannot be allocated." 
+ * + */ + +#include "mpm.h" + +#if !defined(MPS_OS_W3) +#error "spw3i6.c is specific to MPS_OS_W3" +#endif + +#include /* _alloca */ + + +void StackProbe(Size depth) +{ + (void)_alloca(depth * sizeof(Word)); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2013-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ss.c b/mps/code/ss.c new file mode 100644 index 00000000000..ce088101f15 --- /dev/null +++ b/mps/code/ss.c @@ -0,0 +1,99 @@ +/* ss.c: STACK SCANNING + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This scans the mutator's stack and fixes the registers that may + * contain roots. . 
+ * + * This is a generic implementation, but it makes assumptions that, + * while true on all the platforms we currently (version 1.115) + * support, may not be true on all platforms. See + * . + * + * .assume.desc: The stack is descending (and so stackHot is a lower + * address than stackCold). + * + * .assume.full: The stack convention is "full" (and so we must scan + * the word pointed to by stackHot but not the word pointed to by + * stackCold). + */ + +#include "mpm.h" + +SRCID(ss, "$Id$"); + + +/* StackHot -- capture a hot stack pointer + * + * On all supported platforms, the arguments are pushed on to the + * stack by the caller below its other local data, so as long as + * it does not use something like alloca, the address of the argument + * is a hot stack pointer. . + */ + +ATTRIBUTE_NOINLINE +void StackHot(void **stackOut) +{ + *stackOut = &stackOut; +} + + +/* StackScan -- scan the mutator's stack and registers */ + +Res StackScan(ScanState ss, void *stackCold, + mps_area_scan_t scan_area, void *closure) +{ + StackContextStruct scStruct; + Arena arena; + /* Avoid the error "variable might be clobbered by 'longjmp'" from + GCC by specifying volatile. See job004113. */ + void * volatile warmest; + + AVERT(ScanState, ss); + + arena = ss->arena; + + AVER(arena->stackWarm != NULL); + warmest = arena->stackWarm; + if (warmest == NULL) { + /* Somehow missed saving the context at the entry point + : do it now. */ + warmest = &scStruct; + STACK_CONTEXT_SAVE(&scStruct); + } + + AVER(warmest < stackCold); /* .assume.desc */ + + return TraceScanArea(ss, warmest, stackCold, scan_area, closure); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ss.h b/mps/code/ss.h new file mode 100644 index 00000000000..f05ff67b629 --- /dev/null +++ b/mps/code/ss.h @@ -0,0 +1,120 @@ +/* ss.h: STACK SCANNING + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This module saves the mutator context on entry to the MPS, and + * provides functions for decoding the context and scanning the root + * registers. . + */ + +#ifndef ss_h +#define ss_h + +#include "mpm.h" + + +/* StackContext -- some of the mutator's state + * + * The jumpBuffer is used to capture most of the mutator's state on + * entry to the MPS, but can't capture it all. See + * . 
+ */ + +#include + +typedef struct StackContextStruct { + jmp_buf jumpBuffer; +} StackContextStruct; + + +/* StackHot -- capture a hot stack pointer + * + * Sets *stackOut to a stack pointer that includes the current frame. + */ + +void StackHot(void **stackOut); + + +/* STACK_CONTEXT_BEGIN -- save context */ + +#define STACK_CONTEXT_BEGIN(arena) \ + BEGIN \ + StackContextStruct _sc; \ + STACK_CONTEXT_SAVE(&_sc); \ + AVER(arena->stackWarm == NULL); \ + StackHot(&arena->stackWarm); \ + AVER(arena->stackWarm < (void *)&_sc); /* */ \ + BEGIN + + +/* STACK_CONTEXT_END -- clear context */ + +#define STACK_CONTEXT_END(arena) \ + END; \ + AVER(arena->stackWarm != NULL); \ + arena->stackWarm = NULL; \ + END + + +/* STACK_CONTEXT_SAVE -- save the callee-saves and stack pointer */ + +#if defined(MPS_OS_XC) + +/* We call _setjmp rather than setjmp because we can be confident what + * it does via the source code at + * , + * and because _setjmp saves only the register set and the stack while + * setjmp also saves the signal mask, which we don't care about. See + * _setjmp(2). */ + +#define STACK_CONTEXT_SAVE(sc) ((void)_setjmp((sc)->jumpBuffer)) + +#else /* other platforms */ + +#define STACK_CONTEXT_SAVE(sc) ((void)setjmp((sc)->jumpBuffer)) + +#endif /* platform defines */ + + +/* StackScan -- scan the mutator's stack and registers + * + * This must be called between STACK_CONTEXT_BEGIN and + * STACK_CONTEXT_END. + */ + +extern Res StackScan(ScanState ss, void *stackCold, + mps_area_scan_t scan_area, void *closure); + + +#endif /* ss_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/steptest.c b/mps/code/steptest.c new file mode 100644 index 00000000000..88f99dee00d --- /dev/null +++ b/mps/code/steptest.c @@ -0,0 +1,510 @@ +/* steptest.c: TEST FOR ARENA STEPPING + * + * $Id$ + * Copyright (c) 1998-2020 Ravenbrook Limited. See end of file for license. + * + * Loosely based on . 
+ */ + +#include "fmtdy.h" +#include "fmtdytst.h" +#include "testlib.h" +#include "mpslib.h" +#include "mpm.h" +#include "mpscamc.h" +#include "mpsavm.h" +#include "mpstd.h" +#include "mps.h" + +#include /* pow */ +#include /* fflush, printf, putchar, stdout */ + +#define testArenaSIZE ((size_t)((size_t)64 << 20)) +#define avLEN 3 +#define exactRootsCOUNT 200 +#define ambigRootsCOUNT 50 +#define objCOUNT 2000000 +#define clockSetFREQ 10000 +#define multiStepFREQ 500000 +#define multiStepMULT 100 + +#define genCOUNT 3 +#define gen1SIZE 750 /* kB */ +#define gen2SIZE 2000 /* kB */ +#define gen3SIZE 5000 /* kB */ +#define gen1MORTALITY 0.85 +#define gen2MORTALITY 0.60 +#define gen3MORTALITY 0.40 + +/* testChain -- generation parameters for the test */ + +static mps_gen_param_s testChain[genCOUNT] = { + {gen1SIZE, gen1MORTALITY}, + {gen2SIZE, gen2MORTALITY}, + {gen3SIZE, gen3MORTALITY}, +}; + +/* objNULL needs to be odd so that it's ignored in exactRoots. */ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) + +static mps_pool_t pool; +static mps_ap_t ap; +static mps_addr_t exactRoots[exactRootsCOUNT]; +static mps_addr_t ambigRoots[ambigRootsCOUNT]; + +/* Things we want to measure. Times are all in microseconds. 
*/ + +static double alloc_time; /* Time spent allocating */ +static double max_alloc_time; /* Max time taken to allocate one object */ +static double step_time; /* Time spent in mps_arena_step returning 1 */ +static double max_step_time; /* Max time of mps_arena_step returning 1 */ +static double no_step_time; /* Time spent in mps_arena_step returning 0 */ +static double max_no_step_time; /* Max time of mps_arena_step returning 0 */ + +static double total_clock_time; /* Time spent reading the clock */ +static long clock_reads; /* Number of times clock is read */ +static long steps; /* # of mps_arena_step calls returning 1 */ +static long no_steps; /* # of mps_arena_step calls returning 0 */ +static size_t alloc_bytes; /* # of bytes allocated */ +static long commit_failures; /* # of times mps_commit fails */ + + +/* Operating-system dependent timing. Defines two functions, void + * prepare_clock(void) and double my_clock(void). my_clock() returns + * the number of microseconds of CPU time used so far by the process. + * prepare_clock() sets things up so that my_clock() can run + * efficiently. + */ + +#ifdef MPS_OS_W3 + +#include "mpswin.h" + +static HANDLE currentProcess; + +static void prepare_clock(void) +{ + currentProcess = GetCurrentProcess(); +} + +static double my_clock(void) +{ + FILETIME ctime, etime, ktime, utime; + double dk, du; + cdie(GetProcessTimes(currentProcess, &ctime, &etime, &ktime, &utime) != 0, + "GetProcessTimes"); + dk = ktime.dwHighDateTime * 4096.0 * 1024.0 * 1024.0 + + ktime.dwLowDateTime; + dk /= 10.0; + du = utime.dwHighDateTime * 4096.0 * 1024.0 * 1024.0 + + utime.dwLowDateTime; + du /= 10.0; + ++ clock_reads; + return (du+dk); +} + +#else +/* on Posix systems, we can use getrusage. 
*/ + +#include +#include +#include + +static void prepare_clock(void) +{ + /* do nothing */ +} + +static double my_clock(void) +{ + struct rusage ru; + getrusage(RUSAGE_SELF, &ru); + ++ clock_reads; + return (double)(ru.ru_utime.tv_sec + ru.ru_stime.tv_sec) * 1000000.0 + + (double)(ru.ru_utime.tv_usec + ru.ru_stime.tv_usec); +} +#endif + +/* Need to calibrate the clock. */ +/* In fact we need to do this repeatedly while the tests run because + * on some platforms the time taken to read the clock changes + * significantly during program execution. Yes, really (e.g. fri4gc + * on thrush.ravenbrook.com on 2002-06-28, clock_time goes from 5.43 + * us near process start to 7.45 us later). */ + +static double clock_time; /* current estimate of time to read the clock */ + +/* take at least this many microseconds to set the clock */ +#define CLOCK_TIME_SET 10000 + +/* set_clock_timing() sets clock_time. */ + +static void set_clock_timing(void) +{ + double i; + double t1, t2, t3; + + t2 = 0.0; + t3 = my_clock(); + i = 0; + do { + t1 = my_clock(); + /* do nothing here */ + t2 += my_clock()-t1; + ++i; + } while (t1 < t3 + CLOCK_TIME_SET); + clock_time = t2/i; + total_clock_time += my_clock() - t3 + clock_time; +} + +/* How much time has elapsed since a recent call to my_clock? + * Deducts the calibrated clock timing, clamping to zero. + * + * The idea is to have code like this: + * + * t = my_clock(); + * do_something(); + * t = time_since(t); + * + * and the result will be our best estimate of how much CPU time the + * call to do_something() took. + */ + +static double time_since(double t) +{ + t = my_clock() - t; + total_clock_time += clock_time + clock_time; + if (t < clock_time) + return 0.0; + else + return (t - clock_time); +} + +/* print a number of microseconds in a useful format. 
*/ + +#define MAXPRINTABLE 100.0 +#define MINPRINTABLE (MAXPRINTABLE / 1000.0) + +static void print_time(const char *before, double t, const char *after) +{ + char prefixes[] = "\0munpfazy"; /* don't print "ks" etc */ + char *x = prefixes+2; /* start at micro */ + double ot = t; + if (before) + printf("%s", before); + if (t > MAXPRINTABLE) { + while (x[-1] && t > MAXPRINTABLE) { + t /= 1000.0; + -- x; + } + if (t < MAXPRINTABLE) { + printf("%.3f %cs", t, *x); + } else { + printf("%.3f s", t/1000.0); + } + } else { + while (x[1] && t < MINPRINTABLE) { + t *= 1000.0; + ++ x; + } + if (t > MINPRINTABLE) + printf("%.3f %cs", t, *x); + else + printf("%g s", ot/1000000.0); + } + if (after) + printf("%s", after); +} + +/* Make a single Dylan object */ + +static mps_addr_t make(void) +{ + size_t length = rnd() % (avLEN * 2); + size_t size = (length+2) * sizeof(mps_word_t); + mps_addr_t p; + mps_res_t res; + + alloc_bytes += size; + + for(;;) { + mps_bool_t commit_res; + double t1, t2; + t1 = my_clock(); + MPS_RESERVE_BLOCK(res, p, ap, size); + t1 = time_since(t1); /* reserve time */ + if(res) + die(res, "MPS_RESERVE_BLOCK"); + res = dylan_init(p, size, exactRoots, exactRootsCOUNT); + if(res) + die(res, "dylan_init"); + t2 = my_clock(); + commit_res = mps_commit(ap, p, size); + t2 = time_since(t2); /* commit time */ + t1 += t2; /* total MPS time for this allocation */ + alloc_time += t1; + if (t1 > max_alloc_time) + max_alloc_time = t1; + if (commit_res) + break; + else + ++ commit_failures; + } + + return p; +} + +/* call mps_arena_step() */ + +static void test_step(mps_arena_t arena, double multiplier) +{ + mps_bool_t res; + double t1 = my_clock(); + res = mps_arena_step(arena, 0.1, multiplier); + cdie(ArenaGlobals(arena)->clamped, "arena was unclamped"); + t1 = time_since(t1); + if (res) { + if (t1 > max_step_time) + max_step_time = t1; + step_time += t1; + ++ steps; + } else { + if (t1 > max_no_step_time) + max_no_step_time = t1; + no_step_time += t1; + ++ no_steps; + } 
+} + +/* test -- the body of the test */ + +static void test(mps_arena_t arena, unsigned long step_period) +{ + mps_fmt_t format; + mps_chain_t chain; + mps_root_t exactRoot, ambigRoot; + unsigned long objs; + size_t i; + mps_message_t message; + size_t live, condemned, not_condemned; + size_t messages; + mps_word_t collections, old_collections; + double total_mps_time, total_time; + double t1; + + die(dylan_fmt(&format, arena), "fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + + die(mps_pool_create(&pool, arena, mps_class_amc(), format, chain), + "pool_create(amc)"); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = objNULL; + for(i = 0; i < ambigRootsCOUNT; ++i) + ambigRoots[i] = rnd_addr(); + + die(mps_root_create_table_masked(&exactRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + die(mps_root_create_table(&ambigRoot, arena, + mps_rank_ambig(), (mps_rm_t)0, + &ambigRoots[0], ambigRootsCOUNT), + "root_create_table(ambig)"); + + printf("Stepping every %lu allocations.\n", step_period); + + mps_message_type_enable(arena, mps_message_type_gc()); + + /* zero all our counters and timers. 
*/ + + objs = 0; + clock_reads = 0; + steps = no_steps = 0; + alloc_bytes = 0; + commit_failures = 0; + alloc_time = step_time = no_step_time = 0.0; + max_alloc_time = max_step_time = max_no_step_time = 0.0; + total_clock_time = 0.0; + collections = old_collections = 0; + + t1 = my_clock(); + + while(objs < objCOUNT) { + size_t r; + + r = (size_t)rnd(); + if(r & 1) { + i = (r >> 1) % exactRootsCOUNT; + if(exactRoots[i] != objNULL) + cdie(dylan_check(exactRoots[i]), "dying root check"); + exactRoots[i] = make(); + if(exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + } else { + i = (r >> 1) % ambigRootsCOUNT; + ambigRoots[(ambigRootsCOUNT-1) - i] = make(); + /* Create random interior pointers */ + ambigRoots[i] = (mps_addr_t)((char *)(ambigRoots[i/2]) + 1); + } + + ++objs; + + if (objs % step_period == 0) + test_step(arena, 0.0); + + if (objs % multiStepFREQ == 0) + test_step(arena, multiStepMULT); + + if (objs % clockSetFREQ == 0) + set_clock_timing(); + + collections = mps_collections(arena); + if (collections > old_collections) { + old_collections = collections; + putchar('.'); + (void)fflush(stdout); + } + } + + total_time = time_since(t1) - total_clock_time; + + if (collections > 0) + printf("\n"); + + messages = live = condemned = not_condemned = 0; + while (mps_message_get(&message, arena, mps_message_type_gc())) { + ++ messages; + live += mps_message_gc_live_size(arena, message); + condemned += mps_message_gc_condemned_size(arena, message); + not_condemned += mps_message_gc_not_condemned_size(arena, + message); + mps_message_discard(arena, message); + } + if (collections != messages) { + printf("%lu collections but %lu messages\n", + (unsigned long)collections, (unsigned long)messages); + collections = messages; + } + + total_mps_time = alloc_time + step_time + no_step_time; + printf("Collection statistics:\n"); + printf(" %"PRIuLONGEST" collections\n", 
(ulongest_t)collections); + printf(" %"PRIuLONGEST" bytes condemned.\n", (ulongest_t)condemned); + printf(" %"PRIuLONGEST" bytes not condemned.\n", + (ulongest_t)not_condemned); + printf(" %"PRIuLONGEST" bytes survived.\n", (ulongest_t)live); + if (condemned) { + printf(" Mortality %5.2f%%.\n", + (1.0 - (double)live/(double)condemned) * 100.0); + printf(" Condemned fraction %5.2f%%.\n", + ((double)condemned/(double)(condemned + not_condemned)) * 100.0); + } + if (collections) { + printf(" Condemned per collection %"PRIuLONGEST" bytes.\n", + (ulongest_t)condemned/collections); + printf(" Reclaimed per collection %"PRIuLONGEST" bytes.\n", + (ulongest_t)(condemned - live)/collections); + } + + printf("Allocation statistics:\n"); + printf(" %"PRIuLONGEST" objects (%"PRIuLONGEST" bytes) allocated.\n", + (ulongest_t)objs, (ulongest_t)alloc_bytes); + printf(" Commit failed %ld times.\n", commit_failures); + + printf("Timings:\n"); + print_time(" Allocation took ", alloc_time, ""); + print_time(", mean ", alloc_time / (double)objs, ""); + print_time(", max ", max_alloc_time, ".\n"); + if (steps) { + printf(" %ld steps took ", steps); + print_time("", step_time, ""); + print_time(", mean ", step_time / (double)steps, ""); + print_time(", max ", max_step_time, ".\n"); + } + if (no_steps) { + printf(" %ld non-steps took ", no_steps); + print_time("", no_step_time, ""); + print_time(", mean ", no_step_time / (double)no_steps, ""); + print_time(", max ", max_no_step_time, ".\n"); + } + if (alloc_time > 0.0) + printf(" Allocated %.2f bytes per us.\n", + (double)alloc_bytes/alloc_time); + if (step_time > 0.0) { + printf(" Reclaimed %.2f bytes per us of step.\n", + (double)(condemned - live)/step_time); + if (collections > 0) { + printf(" Took %.2f steps ", (double)steps / (double)collections); + print_time("(", step_time / (double)collections, ") per collection.\n"); + } + } + print_time(" Total time ", total_time, ".\n"); + print_time(" Total MPS time ", total_mps_time, ""); + 
printf(" (%5.2f%%, ", total_mps_time * 100.0 / total_time); + print_time("", total_mps_time / (double)alloc_bytes, " per byte, "); + print_time("", total_mps_time / (double)objs, " per object)\n"); + print_time(" (adjusted for clock timing: ", + total_clock_time, + " spent reading the clock;\n"); + printf(" %"PRIuLONGEST" clock reads; ", (ulongest_t)clock_reads); + print_time("", total_clock_time / (double)clock_reads, " per read;"); + print_time(" recently measured as ", clock_time, ").\n"); + + mps_arena_park(arena); + mps_ap_destroy(ap); + mps_root_destroy(exactRoot); + mps_root_destroy(ambigRoot); + mps_pool_destroy(pool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); +} + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + prepare_clock(); + testlib_init(argc, argv); + set_clock_timing(); + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "arena_create"); + mps_arena_clamp(arena); + test(arena, (unsigned long)pow(10, rnd() % 10)); + mps_arena_destroy(arena); + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 1998-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/table.c b/mps/code/table.c new file mode 100644 index 00000000000..f2525f18f06 --- /dev/null +++ b/mps/code/table.c @@ -0,0 +1,407 @@ +/* table.h: A dictionary mapping a Word to a void* + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .note.good-hash: As is common in hash table implementations, we + * assume that the hash function is good. + */ + +#include "table.h" +#include "mpm.h" + +#include + + +SRCID(table, "$Id$"); + + +/* tableHash -- return a hash value from an address + * + * This uses a single cycle of an MLCG, more commonly seen as a + * pseudorandom number generator. It works extremely well as a + * hash function. + * + * (In particular, it is substantially better than simply doing this: + * seed = (unsigned long)addr * 48271; + * Tested by RHSK 2010-12-28.) + * + * This MLCG is a full period generator: it cycles through every + * number from 1 to m-1 before repeating. Therefore, no two numbers + * in that range hash to the same value. Furthermore, it has prime + * modulus, which tends to avoid recurring patterns in the low-order + * bits, which is good because the hash will be used modulus the + * number of slots in the table. + * + * Of course it's only a 31-bit cycle, so we start by losing the top + * bit of the address, but that's hardly a great problem. + * + * See `rnd` in testlib.c for more technical details. 
+ * + * The implementation is quite subtle. See rnd() in testlib.c, where + * it has been exhaustively (ie: totally) tested. RHSK 2010-12-28. + * + * NOTE: According to NB, still a fine function for producing a 31-bit hash + * value, although of course it only hashes on the lower 31 bits of the + * key; we could cheaply make it choose a different 31 bits if we'd prefer + * (e.g. ((key >> 2) & 0x7FFFFFFF)), or combine more of the key bits (e.g. + * ((key ^ (key >> 31)) & 0x7fffffff)). + */ + +#define R_m 2147483647UL +#define R_a 48271UL + +typedef Word Hash; + +static Hash tableHash(TableKey key) +{ + Hash hash = (Hash)(key & 0x7FFFFFFF); + /* requires m == 2^31-1, a < 2^16 */ + Hash bot = R_a * (hash & 0x7FFF); + Hash top = R_a * (hash >> 15); + hash = bot + ((top & 0xFFFF) << 15) + (top >> 16); + if(hash > R_m) + hash -= R_m; + return hash; +} + + +Bool TableCheck(Table table) +{ + CHECKS(Table, table); + CHECKL(table->count <= table->length); + CHECKL(table->length == 0 || table->array != NULL); + CHECKL(FUNCHECK(table->alloc)); + CHECKL(FUNCHECK(table->free)); + /* can't check allocClosure -- it could be anything */ + CHECKL(table->unusedKey != table->deletedKey); + return TRUE; +} + + +static Bool entryIsActive(Table table, TableEntry entry) +{ + return !(entry->key == table->unusedKey || + entry->key == table->deletedKey); +} + + +/* tableFind -- finds the entry for this key, or NULL + * + * .worst: In the worst case, this looks at every slot before giving up, + * but that's what you have to do in a closed hash table, to make sure + * that all the items still fit in after growing the table. + */ + +static TableEntry tableFind(Table table, TableKey key, Bool skip_deleted) +{ + Hash hash; + Index i; + Word mask; + + /* .find.visit: Ensure the length is a power of two so that the stride + is coprime and so visits all entries in the array eventually. 
*/ + AVER(WordIsP2(table->length)); /* .find.visit */ + + mask = table->length - 1; + hash = tableHash(key) & mask; + i = hash; + do { + Word k = table->array[i].key; + if (k == key || + k == table->unusedKey || + (!skip_deleted && key == table->deletedKey)) + return &table->array[i]; + i = (i + (hash | 1)) & mask; /* .find.visit */ + } while(i != hash); + + return NULL; +} + + +/* TableGrow -- increase the capacity of the table + * + * Ensure the transform's hashtable can accommodate N entries (filled + * slots), without becoming cramped. If necessary, resize the + * hashtable by allocating a new one and rehashing all old entries. + * If insufficient memory, return error without modifying table. + * + * .hash.spacefraction: As with all closed hash tables, we must choose + * an appropriate proportion of slots to remain free. More free slots + * help avoid large-sized contiguous clumps of full cells and their + * associated linear search costs. + * + * .hash.initial: Any reasonable number. + * + * .hash.growth: A compromise between space inefficiency (growing bigger + * than required) and time inefficiency (growing too slowly, with all + * the rehash costs at every step). A factor of 2 means that at the + * point of growing to a size X table, hash-work equivalent to filling + * a size-X table has already been done. So we do at most 2x the + * hash-work we would have done if we had been able to guess the right + * table size initially. + * + * Numbers of slots maintain this relation: + * occupancy <= capacity < enough <= cSlots + */ + +#define SPACEFRACTION 0.75 /* .hash.spacefraction */ + +Res TableGrow(Table table, Count extraCapacity) +{ + TableEntry oldArray, newArray; + Count oldLength, newLength; + Count required, minimum; + Count i, found; + + required = table->count + extraCapacity; + if (required < table->count) /* overflow? */ + return ResLIMIT; + + /* Calculate the minimum table length that would allow for the required + capacity without growing again. 
*/ + minimum = (Count)((double)required / SPACEFRACTION); + if (minimum < required) /* overflow? */ + return ResLIMIT; + + /* Double the table length until it's larger than the minimum */ + oldLength = table->length; + newLength = oldLength; + while(newLength < minimum) { + Count doubled = newLength > 0 ? newLength * 2 : 1; /* .hash.growth */ + if (doubled <= newLength) /* overflow? */ + return ResLIMIT; + newLength = doubled; + } + + if (newLength == oldLength) /* already enough space? */ + return ResOK; + + /* TODO: An event would be good here */ + + oldArray = table->array; + newArray = table->alloc(table->allocClosure, + sizeof(TableEntryStruct) * newLength); + if(newArray == NULL) + return ResMEMORY; + + for(i = 0; i < newLength; ++i) { + newArray[i].key = table->unusedKey; + newArray[i].value = NULL; + } + + table->length = newLength; + table->array = newArray; + + found = 0; + for(i = 0; i < oldLength; ++i) { + if (entryIsActive(table, &oldArray[i])) { + TableEntry entry; + entry = tableFind(table, oldArray[i].key, FALSE /* none deleted */); + AVER(entry != NULL); + AVER(entry->key == table->unusedKey); + entry->key = oldArray[i].key; + entry->value = oldArray[i].value; + ++found; + } + } + AVER(found == table->count); + + if (oldLength > 0) { + AVER(oldArray != NULL); + table->free(table->allocClosure, + oldArray, + sizeof(TableEntryStruct) * oldLength); + } + + return ResOK; +} + + +/* TableCreate -- makes a new table */ + +Res TableCreate(Table *tableReturn, Count length, + TableAllocFunction tableAlloc, TableFreeFunction tableFree, + void *allocClosure, TableKey unusedKey, TableKey deletedKey) +{ + Table table; + Res res; + + AVER(tableReturn != NULL); + AVER(FUNCHECK(tableAlloc)); + AVER(FUNCHECK(tableFree)); + AVER(unusedKey != deletedKey); + + table = tableAlloc(allocClosure, sizeof(TableStruct)); + if(table == NULL) + return ResMEMORY; + + table->length = 0; + table->count = 0; + table->array = NULL; + table->alloc = tableAlloc; + table->free = 
tableFree; + table->allocClosure = allocClosure; + table->unusedKey = unusedKey; + table->deletedKey = deletedKey; + table->sig = TableSig; + + AVERT(Table, table); + + res = TableGrow(table, length); + if (res != ResOK) + return res; + + *tableReturn = table; + return ResOK; +} + + +/* TableDestroy -- destroy a table */ + +void TableDestroy(Table table) +{ + AVER(table != NULL); + if (table->length > 0) { + AVER(table->array != NULL); + table->free(table->allocClosure, + table->array, + sizeof(TableEntryStruct) * table->length); + } + table->sig = SigInvalid; + table->free(table->allocClosure, table, sizeof(TableStruct)); +} + + +/* TableLookup -- look up */ + +Bool TableLookup(TableValue *valueReturn, Table table, TableKey key) +{ + TableEntry entry = tableFind(table, key, TRUE /* skip deleted */); + + if(entry == NULL || !entryIsActive(table, entry)) + return FALSE; + *valueReturn = entry->value; + return TRUE; +} + + +/* TableDefine -- add a new mapping */ + +Res TableDefine(Table table, TableKey key, TableValue value) +{ + TableEntry entry; + + AVER(key != table->unusedKey); + AVER(key != table->deletedKey); + + if ((double)table->count >= (double)table->length * SPACEFRACTION) { + Res res = TableGrow(table, 1); + if (res != ResOK) + return res; + entry = tableFind(table, key, FALSE /* no deletions yet */); + AVER(entry != NULL); + if (entryIsActive(table, entry)) + return ResFAIL; + } else { + entry = tableFind(table, key, TRUE /* skip deleted */); + if (entry != NULL && entryIsActive(table, entry)) + return ResFAIL; + /* Search again to find the best slot, deletions included. 
*/ + entry = tableFind(table, key, FALSE /* don't skip deleted */); + AVER(entry != NULL); + } + + entry->key = key; + entry->value = value; + ++table->count; + + return ResOK; +} + + +/* TableRedefine -- redefine an existing mapping */ + +Res TableRedefine(Table table, TableKey key, TableValue value) +{ + TableEntry entry; + + AVER(key != table->unusedKey); + AVER(key != table->deletedKey); + + entry = tableFind(table, key, TRUE /* skip deletions */); + if (entry == NULL || !entryIsActive(table, entry)) + return ResFAIL; + AVER(entry->key == key); + entry->value = value; + return ResOK; +} + + +/* TableRemove -- remove a mapping */ + +Res TableRemove(Table table, TableKey key) +{ + TableEntry entry; + + AVER(key != table->unusedKey); + AVER(key != table->deletedKey); + + entry = tableFind(table, key, TRUE); + if (entry == NULL || !entryIsActive(table, entry)) + return ResFAIL; + entry->key = table->deletedKey; + --table->count; + return ResOK; +} + + +/* TableMap -- apply a function to all the mappings */ + +void TableMap(Table table, + void (*fun)(void *closure, TableKey key, TableValue value), + void *closure) +{ + Index i; + for (i = 0; i < table->length; i++) + if (entryIsActive(table, &table->array[i])) + (*fun)(closure, table->array[i].key, table->array[i].value); +} + + +/* TableCount -- count the number of mappings in the table */ + +Count TableCount(Table table) +{ + return table->count; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/table.h b/mps/code/table.h new file mode 100644 index 00000000000..9112455cabe --- /dev/null +++ b/mps/code/table.h @@ -0,0 +1,93 @@ +/* table.h: Interface for a dictionary + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * A table is a hashed mapping from keys to values. 
+ */ + +#ifndef table_h +#define table_h + +#include "mpmtypes.h" +#include + + +typedef struct TableStruct *Table; + +typedef Word TableKey; +typedef void *TableValue; + +typedef struct TableEntryStruct { + TableKey key; + TableValue value; +} TableEntryStruct, *TableEntry; + +typedef void *(*TableAllocFunction)(void *closure, size_t size); +typedef void (*TableFreeFunction)(void *closure, void *p, size_t size); + +#define TableSig ((Sig)0x5192AB13) /* SIGnature TABLE */ + +typedef struct TableStruct { + Sig sig; /* design.mps.sig.field */ + Count length; /* Number of slots in the array */ + Count count; /* Active entries in the table */ + TableEntry array; /* Array of table slots */ + TableAllocFunction alloc; + TableFreeFunction free; + void *allocClosure; + TableKey unusedKey; /* key marking unused (undefined) entries */ + TableKey deletedKey; /* key marking deleted entries */ +} TableStruct; + +extern Res TableCreate(Table *tableReturn, + Count length, + TableAllocFunction tableAlloc, + TableFreeFunction tableFree, + void *allocClosure, + TableKey unusedKey, + TableKey deletedKey); +extern void TableDestroy(Table table); +extern Bool TableCheck(Table table); +extern Res TableDefine(Table table, TableKey key, TableValue value); +extern Res TableRedefine(Table table, TableKey key, TableValue value); +extern Bool TableLookup(TableValue *valueReturn, Table table, TableKey key); +extern Res TableRemove(Table table, TableKey key); +extern Count TableCount(Table table); +extern void TableMap(Table table, + void(*fun)(void *closure, TableKey key, TableValue value), + void *closure); +extern Res TableGrow(Table table, Count extraCapacity); + + +#endif /* table_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/tagtest.c b/mps/code/tagtest.c new file mode 100644 index 00000000000..e1fa1c3cd9d --- /dev/null +++ b/mps/code/tagtest.c @@ -0,0 +1,297 @@ +/* tagtest.c: TAGGED POINTER TEST + * + * $Id$ + * Copyright (c) 2015-2020 Ravenbrook Limited. See end of file for license. + * + * .overview: This test case checks that the MPS correctly handles + * tagged pointers via the object format and tagged area scanning. + */ + +#include /* printf */ + +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpscamc.h" +#include "testlib.h" + +#define OBJCOUNT 1000 /* Number of conses to allocate */ + +typedef struct cons_s { + mps_word_t car, cdr; +} cons_s, *cons_t; + +typedef mps_word_t imm_t; /* Immediate value. */ +typedef mps_word_t fwd_t; /* Forwarding pointer. 
*/ + +static mps_word_t tag_bits; /* Number of tag bits */ +static mps_word_t tag_cons; /* Tag bits indicating pointer to cons */ +static mps_word_t tag_fwd; /* Tag bits indicating forwarding pointer */ +static mps_word_t tag_imm; /* Tag bits indicating immediate value */ +static mps_word_t tag_invalid; /* Invalid tag bits */ +static mps_addr_t refs[OBJCOUNT]; /* Tagged references to objects */ + +#define TAG_COUNT ((mps_word_t)1 << tag_bits) /* Number of distinct tags */ +#define TAG_MASK (TAG_COUNT - 1) /* Tag mask */ +#define TAG(word) ((mps_word_t)(word) & TAG_MASK) +#define TAGGED(value, type) (((mps_word_t)(value) & ~TAG_MASK) + tag_ ## type) +#define UNTAGGED(word, type) ((type ## _t)((mps_word_t)(word) & ~TAG_MASK)) + +static mps_word_t make_cons(mps_ap_t ap, mps_word_t car, mps_word_t cdr) +{ + cons_t obj; + mps_addr_t addr; + size_t size = sizeof(cons_s); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in cons"); + obj = addr; + obj->car = car; + obj->cdr = cdr; + } while (!mps_commit(ap, addr, size)); + return TAGGED(obj, cons); +} + +static void fwd(mps_addr_t old, mps_addr_t new) +{ + cons_t cons = old; + cons->car = TAGGED(0, fwd); + cons->cdr = (mps_word_t)new; +} + +static mps_addr_t isfwd(mps_addr_t addr) +{ + cons_t cons = addr; + if (TAG(cons->car) != tag_fwd) + return NULL; + return (mps_addr_t)cons->cdr; +} + +static void pad(mps_addr_t addr, size_t size) +{ + mps_word_t *word = addr; + mps_word_t *limit = (mps_word_t *)((char *)addr + size); + while (word < limit) { + *word = TAGGED(0, imm); + ++ word; + } +} + +static mps_arena_t arena; + +static mps_res_t scan(mps_ss_t ss, mps_addr_t base, + mps_addr_t limit) +{ + Insist(mps_arena_busy(arena)); + MPS_SCAN_BEGIN(ss) { + mps_word_t *p = base; + while (p < (mps_word_t *)limit) { + mps_word_t word = *p; + mps_word_t tag = TAG(word); + if (tag == tag_cons) { + mps_addr_t ref = UNTAGGED(word, cons); + if (MPS_FIX1(ss, ref)) { + mps_res_t res = 
MPS_FIX2(ss, &ref); + if (res != MPS_RES_OK) + return res; + *p = TAGGED(ref, cons); + } + } + ++p; + } + } MPS_SCAN_END(ss); + return MPS_RES_OK; +} + +static mps_addr_t skip(mps_addr_t addr) +{ + return (mps_addr_t)((char *)addr + sizeof(cons_s)); +} + + +static void collect(size_t expected) +{ + size_t finalized = 0; + mps_arena_collect(arena); + while (mps_message_poll(arena)) { + mps_message_t message; + mps_addr_t objaddr; + cdie(mps_message_get(&message, arena, mps_message_type_finalization()), + "message_get"); + mps_message_finalization_ref(&objaddr, arena, message); + Insist(TAG(objaddr) == 0); + mps_message_discard(arena, message); + ++ finalized; + } + printf("finalized=%lu expected=%lu\n", + (unsigned long)finalized, (unsigned long)expected); + Insist(finalized == expected); +} + + +/* test -- Run the test case in the specified mode. */ + +#define MODES(R, X) \ + R(X, CONS, "Scan words tagged \"cons\".") \ + R(X, INVALID, "Scan words tagged \"invalid\".") + +#define MODES_ENUM(X, id, comment) MODE_ ## id, + +enum { + MODES(MODES_ENUM, X) + MODE_LIMIT +}; + +#define MODES_NAME(X, id, comment) #id, + +static const char *mode_name[] = { + MODES(MODES_NAME, X) +}; + + +static void test(int mode) +{ + mps_thr_t thread; + mps_root_t root; + mps_fmt_t fmt; + mps_pool_t pool; + mps_ap_t ap; + size_t expected = 0; + size_t i; + + printf("test(%s)\n", mode_name[mode]); + + die(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none), "arena"); + mps_message_type_enable(arena, mps_message_type_finalization()); + die(mps_thread_reg(&thread, arena), "thread"); + + switch (mode) { + default: + Insist(0); + /* fall through */ + case MODE_CONS: + /* Scan words tagged "cons" -- everything will live. */ + die(mps_root_create_area_tagged(&root, arena, mps_rank_ambig(), 0, + refs, refs + OBJCOUNT, + mps_scan_area_tagged, TAG_MASK, tag_cons), + "root"); + expected = 0; + break; + case MODE_INVALID: + /* Scan words tagged "invalid" -- everything will die. 
*/ + die(mps_root_create_area_tagged(&root, arena, mps_rank_ambig(), 0, + refs, refs + OBJCOUNT, + mps_scan_area_tagged, TAG_MASK, tag_invalid), + "root"); + expected = OBJCOUNT; + break; + } + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FMT_SCAN, scan); + MPS_ARGS_ADD(args, MPS_KEY_FMT_SKIP, skip); + MPS_ARGS_ADD(args, MPS_KEY_FMT_FWD, fwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_ISFWD, isfwd); + MPS_ARGS_ADD(args, MPS_KEY_FMT_PAD, pad); + die(mps_fmt_create_k(&fmt, arena, args), "fmt"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, fmt); + die(mps_pool_create_k(&pool, arena, mps_class_amc(), args), "pool"); + } MPS_ARGS_END(args); + + die(mps_ap_create_k(&ap, pool, mps_args_none), "ap"); + + for (i = 0; i < OBJCOUNT; ++i) { + mps_word_t p, r; + mps_word_t q = TAGGED(i << tag_bits, imm); + mps_addr_t addr; + p = make_cons(ap, q, q); + Insist(TAG(p) == tag_cons); + r = TAGGED(p, imm); + UNTAGGED(p, cons)->cdr = r; + refs[i] = (mps_addr_t)p; + addr = (mps_addr_t)UNTAGGED(p, cons); + die(mps_finalize(arena, &addr), "finalize"); + } + + collect(expected); + + mps_arena_park(arena); + mps_ap_destroy(ap); + mps_pool_destroy(pool); + mps_fmt_destroy(fmt); + mps_root_destroy(root); + mps_thread_dereg(thread); + mps_arena_destroy(arena); +} + +int main(int argc, char *argv[]) +{ + mps_word_t tags[sizeof(mps_word_t)]; + size_t i; + int mode; + + testlib_init(argc, argv); + + /* Work out how many tags to use. */ + tag_bits = SizeLog2(sizeof(mps_word_t)); + Insist(TAG_COUNT <= NELEMS(tags)); + + /* Shuffle the tags. 
*/ + for (i = 0; i < TAG_COUNT; ++i) { + tags[i] = i; + } + for (i = 0; i < TAG_COUNT; ++i) { + size_t j = i + rnd() % (TAG_COUNT - i); + mps_word_t t = tags[i]; + tags[i] = tags[j]; + tags[j] = t; + } + tag_cons = tags[0]; + tag_fwd = tags[1]; + tag_imm = tags[2]; + tag_invalid = tags[3]; + + printf("tags: cons = %u, fwd = %u, imm = %u, invalid = %u\n", + (unsigned)tag_cons, (unsigned)tag_fwd, + (unsigned)tag_imm, (unsigned)tag_invalid); + + for (mode = 0; mode < MODE_LIMIT; ++mode) { + test(mode); + } + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2015-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/teletest.c b/mps/code/teletest.c new file mode 100644 index 00000000000..bead6dec820 --- /dev/null +++ b/mps/code/teletest.c @@ -0,0 +1,217 @@ +/* teletest.c: TELEMETRY TEST + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .source: The command parser here was taken and adapted from bttest.c. + */ + +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "testlib.h" +#include "mpslib.h" + +#include /* fflush, fgets, printf, stdin, stdout */ +#include /* exit, EXIT_SUCCESS, strtoul */ + +SRCID(teletest, "$Id$"); + + +static mps_arena_t arena; + +#define WORD_FORMAT "0x%0" PRIwWORD PRIuLONGEST +#define MAX_ARGS 3 +#define INPUT_BUFFER_SIZE 512 + +static mps_word_t args[MAX_ARGS]; +static char *stringArg; +static Count argCount; + + +static void doGet(void) +{ + mps_word_t old; + old = mps_telemetry_get(); + + (void)printf(WORD_FORMAT "\n", (ulongest_t)old); +} + + +static void doSet(void) +{ + mps_telemetry_set(args[0]); +} + + +static void doReset(void) +{ + mps_telemetry_reset(args[0]); +} + + +static void doIntern(void) +{ + mps_word_t id; + + id = mps_telemetry_intern(stringArg); + (void)printf(WORD_FORMAT "\n", (ulongest_t)id); +} + +static void doLabel(void) +{ + mps_telemetry_label((mps_addr_t)args[0], args[1]); +} + +static void doFlush(void) +{ + mps_telemetry_flush(); +} + +static void doQuit(void) +{ + mps_arena_destroy(arena); + exit(0); 
+} + + +static void doHelp(void) +{ + (void)printf("get -> Get filter\n" + "set -> Set filter\n" + "reset -> Reset filter\n"); + (void)printf("intern -> Intern string\n" + "label
Label address\n" + "flush Flush buffer\n" + "help Print this message\n" + "quit Quit\n"); +} + + +static struct commandShapeStruct { + const char *name; + Count int_args; + mps_bool_t string_arg; + void (*fun)(void); +} commandShapes[] = { + {"get", 0, 0, doGet}, + {"set", 1, 0, doSet}, + {"reset", 1, 0, doReset}, + {"intern", 0, 1, doIntern}, + {"label", 2, 0, doLabel}, + {"flush", 0, 0, doFlush}, + {"help", 0, 0, doHelp}, + {"quit", 0, 0, doQuit}, + {NULL, 0, 0, NULL} +}; + + +typedef struct commandShapeStruct *commandShape; + + +static void obeyCommand(char *command) +{ + commandShape shape = commandShapes; + while(shape->name != NULL) { + const char *csp = shape->name; + char *p = command; + while (*csp == *p) { + csp++; + p++; + } + if ((*csp == 0) && ((*p == '\n') || (*p == ' '))) { /* complete match */ + argCount = 0; + while ((*p == ' ') && (argCount < shape->int_args)) { + /* get an argument */ + char *newP; + mps_word_t word; + word = (mps_word_t)strtoul(p, &newP, 0); + args[argCount] = word; + if (newP == p) { /* strtoul failed */ + printf("couldn't parse an integer argument\n"); + return; + } + p = newP; + ++ argCount; + } + if(shape->string_arg) { + char *end; + while(*p == ' ') + ++p; + for(end = p; *end != '\n'; end++) + NOOP; + *end = '\0'; + stringArg = p; + } else { + stringArg = NULL; + } + if (argCount < shape->int_args) { + printf("insufficient arguments to command\n"); + } else if (*p != '\n' && stringArg == NULL) { + printf("too many arguments to command\n"); + } else { /* do the command */ + shape->fun(); + } + return; + } else { + ++ shape; /* try next command */ + } + } + printf("command not understood\n> %s\n", command); + doHelp(); +} + + +#define testArenaSIZE (((size_t)64)<<20) + +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + + die(mps_arena_create((mps_arena_t*)&arena, mps_arena_class_vm(), + testArenaSIZE), + "mps_arena_create"); + doHelp(); + while(1) { + char input[INPUT_BUFFER_SIZE]; + printf("telemetry test> 
"); + (void)fflush(stdout); + if (fgets(input, INPUT_BUFFER_SIZE , stdin)) { + obeyCommand(input); + } else { + break; + } + } + mps_arena_destroy(arena); + return EXIT_SUCCESS; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/testlib.c b/mps/code/testlib.c new file mode 100644 index 00000000000..1e7f73bbef1 --- /dev/null +++ b/mps/code/testlib.c @@ -0,0 +1,469 @@ +/* testlib.c: TEST LIBRARY + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. 
+ * + * .purpose: A library of functions that may be of use to unit tests. + */ + +#include "testlib.h" +#include "clock.h" /* for EVENT_CLOCK */ +#include "mps.h" +#include "misc.h" /* for NOOP */ + +#include /* fmod, log, HUGE_VAL */ +#include /* fflush, printf, stderr, sscanf, vfprintf */ +#include /* abort, exit, getenv */ +#include /* time */ + + +/* fail -- like assert, but (notionally) returns a value, so usable in an expression */ + +int fail(void) +{ + Insist(FALSE); + return 1111UL; +} + + +/* rnd -- a random number generator + * + * We use the (Multiplicative) Linear Congruential Generator + * Xn = a * Xn-1 mod m + * with: m = 2147483647 (2^31 - 1, a Mersenne prime), and a = 48271. + * This is a 'full-period' generator: all values in [1..(mod-1)] + * (ie. 0x00000001 to 0x7ffffffe inclusive) are returned once, and then + * the cycle begins again. The value 0 is not part of the cycle and + * is never returned. So the period = mod-1, ie. 2147483646. + * + * This generator is extremely simple and has been very well studied. + * It is free of major vices we might care about for this application. + * In particular, as m is prime, low order bits are random. Therefore + * to roll an N-sided die (N << m), "rnd() % N" is acceptable, giving + * a value in [0..N-1]. + * + * It was popularised by the much-cited Park & Miller paper: + * Stephen K Park & Keith W Miller (1988). Random number generators: + * good ones are hard to find. Communications of the ACM, + * 31:1192-1201. + * The recommended multiplier a was later updated from 16807 to 48271: + * Stephen K Park, Keith W Miller, Paul K. Stockmeyer (1993). + * Technical Correspondence. Communications of the ACM, 36:105-110. + * + * (Many more elaborate generators have been invented. The next simple + * step would be to combine with the MLCG m = 2147483399 a = 40692, to + * make the period "about 74 quadrillion". See the summary of chapter + * 3 in Knuth's "The Art of Computer Programming".) 
+ * + * This (fast) implementation uses the identity: + * 0x80000000 == 0x7FFFFFFF + 0x00000001 + * noted by David Carta (1990), where 0x7FFFFFFF == 2^31-1 == m, which + * means that bits above the first 31 can simply be shifted >> 31 and + * added, preserving Xn mod m. To remain within 32-bit unsigned + * arithmetic when multiplying the previous seed (31 bits) by a (16 + * bits), the seed is split into bottom and top halves; bits above + * the first 31 are simply "top >> 16". (Code by RHSK, inspired by + * Robin Whittle's article at "http://www.firstpr.com.au/dsp/rand31/"). + * + * Slower implementations, used for verification: + * rnd_verify_schrage uses the method of L. Schrage (1979 & 1983), + * namely splitting the seed by q, where q = m div a. + * rnd_verify_float simply uses floating point arithmetic. + */ + +static unsigned long seed = 1; +#define R_m 2147483647UL +#define R_a 48271UL +unsigned long rnd(void) +{ + /* requires m == 2^31-1, a < 2^16 */ + unsigned long bot = R_a * (seed & 0x7FFF); + unsigned long top = R_a * (seed >> 15); + seed = bot + ((top & 0xFFFF) << 15) + (top >> 16); + if(seed > R_m) + seed -= R_m; + return seed; + /* Have you modified this code? Run rnd_verify(3) please! 
RHSK */ +} + +static unsigned long seed_verify_schrage = 1; +#define R_q (R_m / R_a) +#define R_r (R_m % R_a) +static unsigned long rnd_verify_schrage(void) +{ + /* requires m < 2^31, q > r; see Park & Miller (1988) */ + unsigned long alpha = R_a * (seed_verify_schrage % R_q); /* < m */ + unsigned long beta = R_r * (seed_verify_schrage / R_q); /* < m */ + seed_verify_schrage = alpha - beta; + if(alpha < beta) + seed_verify_schrage += R_m; + return seed_verify_schrage; +} + +static unsigned long seed_verify_float = 1; +#define R_m_float 2147483647.0 +#define R_a_float 48271.0 +static unsigned long rnd_verify_float(void) +{ + double s; + s = (double)seed_verify_float; + s *= R_a_float; + s = fmod(s, R_m_float); + seed_verify_float = (unsigned long)s; + return seed_verify_float; +} + +/* rnd_verify -- verify that rnd() returns the correct results + * + * depth = how much time to spend verifying + * 0: very quick -- just verify the next rnd() value + * 1: quick -- verify the first 10000 calls from seed = 1 + * 2: slow (~ 1 minute) -- run the fast generator for a full cycle + * 3: very slow (several minutes) -- verify a full cycle + */ +void rnd_verify(int depth) +{ + unsigned long orig_seed = seed; + unsigned long i; + unsigned long r = 0; + + /* 0: the next value from rnd() matches rnd_verify_*() */ + if(depth >= 0) { + seed_verify_schrage = seed; + seed_verify_float = seed; + r = rnd(); + Insist(rnd_verify_schrage() == r); + Insist(rnd_verify_float() == r); + } + + /* 1: first 10000 (from Park & Miller, note: 1-based indexing!) 
*/ + if(depth >= 1) { + i = 1; + seed = 1; + seed_verify_schrage = seed; + seed_verify_float = seed; + for(i = 2; i <= 10001; i += 1) { + r = rnd(); + Insist(rnd_verify_schrage() == r); + Insist(rnd_verify_float() == r); + } + /* Insist(r == 1043618065UL); -- correct value for a = 16807 */ + Insist(r == 399268537UL); /* correct for a = 48271 */ + } + + /* 1: observe wrap-around (note: 0-based indexing) */ + if(depth >= 1) { + /* set-up seed value for i = 2147483645 */ + /* seed = 1407677000UL; -- correct value for a = 16807 */ + seed = 1899818559UL; /* correct for a = 48271 */ + seed_verify_schrage = seed; + seed_verify_float = seed; + r = rnd(); + Insist(rnd_verify_schrage() == r); + Insist(rnd_verify_float() == r); + Insist(r == 1); /* wrap-around */ + } + + /* 2 & 3: Full cycle (3 => verifying each value) */ + if(depth >= 2) { + int verify = (depth >= 3); + unsigned long r1 = 1; + + i = 0; + seed = 1; + seed_verify_schrage = seed; + seed_verify_float = seed; + while(1) { + i += 1; + r = rnd(); + if(verify) { + Insist(rnd_verify_schrage() == r); + Insist(rnd_verify_float() == r); + } + if(r == 1) { + printf("Full cycle complete%s:\n", + verify ? " (verifying every value)" + : " (fast implementation only)" ); + printf("Wrapped at i=%lu, r=%lu, r(i-1)=%lu.\n", + i, r, r1); + break; + } else { + r1 = r; + } + } + } + + seed = orig_seed; +} + +/* rnd_addr -- a random address generator + * + * rnd gives 31 random bits, we run it repeatedly to get enough bits. 
+ */ + +#define ADDR_BITS (sizeof(mps_addr_t) * CHAR_BIT) + +mps_addr_t rnd_addr(void) +{ + mps_word_t res = 0; + unsigned bits; + + for (bits = 0; bits < ADDR_BITS; bits += 31) + res = res << 31 | (mps_word_t)rnd(); + return (mps_addr_t)res; +} + +double rnd_double(void) +{ + return (double)rnd() / R_m_float; +} + +static unsigned sizelog2(size_t size) +{ + return (unsigned)(log((double)size) / log(2.0)); +} + +size_t rnd_grain(size_t arena_size) +{ + /* The grain size must be small enough to allow for a complete set + of zones in the initial chunk, but bigger than one word. */ + Insist(arena_size >> MPS_WORD_SHIFT >= sizeof(void *)); + return rnd_align(sizeof(void *), (size_t)1 << sizelog2(arena_size >> MPS_WORD_SHIFT)); +} + +size_t rnd_align(size_t min, size_t max) +{ + unsigned log2min = sizelog2(min); + unsigned log2max = sizelog2(max); + Insist(min <= max); + Insist((size_t)1 << log2min == min); + Insist((size_t)1 << log2max == max); + if (log2min < log2max) + return min << (rnd() % (log2max - log2min + 1)); + else + return min; +} + +double rnd_pause_time(void) +{ + double t = rnd_double(); + if (t == 0.0) + return HUGE_VAL; /* Would prefer to use INFINITY but it's not in C89. */ + else + return 1 / t - 1; +} + +rnd_state_t rnd_seed(void) +{ + /* Initialize seed based on seconds since epoch and on processor + * cycle count. */ + EventClock t2; + EVENT_CLOCK(t2); + return 1 + ((unsigned long)time(NULL) + (unsigned long)t2) % (R_m - 1); +} + + +/* randomize -- randomize the generator, or initialize to replay + * + * There have been 3 versions of the rnd-states reported by this + * function: + * + * 1. before RHSK got his hands on rnd(), ie. pre-2008. These seed + * values are not currently supported, but it might be easy to + * add support. + * + * 2. v2 states: the published "seed" (state) value was the seed + * *before* the 10 rnds to churn up and separate nearby values + * from time(). 
This was unfortunate: you can't write a rnd_state + * getter, because it would have to go 10 steps backwards, and + * that's impossible. + * (2008..2010-03-22) + * + * 3. v3 states: the published state is the state *after* all + * initialization is complete. Therefore you can easily store and + * re-use the published state. (From 2010-03-22, changelist + * 170093). + */ + +void randomize(int argc, char *argv[]) +{ + int n; + unsigned long seed0; + + if (argc > 1) { + n = sscanf(argv[1], "%lu", &seed0); + Insist(n == 1); + printf("%s: randomize(): resetting initial state (v3) to: %lu.\n", + argv[0], seed0); + rnd_state_set(seed0); + } else { + seed0 = rnd_seed(); + printf("%s: randomize(): choosing initial state (v3): %lu.\n", + argv[0], seed0); + rnd_state_set(seed0); + } + (void)fflush(stdout); /* ensure seed is not lost in case of failure */ +} + +unsigned long rnd_state(void) +{ + return seed; +} + +void rnd_state_set(unsigned long seed0) +{ + Insist(seed0 < R_m); + Insist(seed0 != 0); + seed = seed0; + + rnd_verify(0); + Insist(seed == seed0); +} + +/* rnd_state_set_2 -- legacy support for v2 rnd states + * + * In v2, the published "seed" (state) value was the seed *before* + * the 10 rnds to churn up and separate nearby values from time(). + * + * Set the seed, then convert it to a v3 state by doing those 10 rnds. 
+ */ +void rnd_state_set_v2(unsigned long seed0_v2) +{ + int i; + unsigned long seed0; + + rnd_state_set(seed0_v2); + for(i = 0; i < 10; i += 1) { + (void)rnd(); + } + + seed0 = rnd_state(); + printf("rnd_state_set_v2(): seed0_v2 = %lu, converted to state_v3 = %lu.\n", seed0_v2, seed0); + rnd_state_set(seed0); +} + + +/* res_strings -- human readable MPS result codes */ + +static struct { + const char *ident; + const char *doc; +} const res_strings[] = { +#define RES_STRINGS_ROW(X, ident, doc) {#ident, doc}, + _mps_RES_ENUM(RES_STRINGS_ROW, X) +}; + + +/* verror -- die with message */ + +ATTRIBUTE_FORMAT((printf, 1, 0)) +void verror(const char *format, va_list args) +{ + (void)fflush(stdout); /* synchronize */ + (void)vfprintf(stderr, format, args); + (void)fprintf(stderr, "\n"); + (void)fflush(stderr); /* make sure the message is output */ + mps_telemetry_flush(); + /* On Windows, the abort signal pops up a dialog box. This suspends + * the test suite until a button is pressed, which is not acceptable + * for offline testing, so if the MPS_TESTLIB_NOABORT environment + * variable is set, then the test case exits instead of aborting. + */ + if (getenv("MPS_TESTLIB_NOABORT")) { + exit(EXIT_FAILURE); + } else { + abort(); + } +} + + +/* error -- die with message */ + +ATTRIBUTE_FORMAT((printf, 1, 2)) +void error(const char *format, ...) 
+{ + va_list args; + + va_start(args, format); + verror(format, args); + /* va_end(args); */ /* provokes "unreachable code" error from MSVC */ +} + + +/* die_expect -- Test a return code, and exit on unexpected result */ + +void die_expect(mps_res_t res, mps_res_t expected, const char *s) +{ + if (res != expected) { + if (0 <= res && (unsigned)res < NELEMS(res_strings)) + error("\n%s: %s: %s\n", s, res_strings[res].ident, res_strings[res].doc); + else + error("\n%s: %d: unknown result code\n", s, res); + } +} + + +/* die -- Test a return code, and exit on error */ + +void die(mps_res_t res, const char *s) +{ + die_expect(res, MPS_RES_OK, s); +} + + +/* cdie -- Test a C boolean, and exit on error */ + +void cdie(int res, const char *s) +{ + if (!res) { + error("\n%s: %d\n", s, res); + } +} + + +/* assert_die -- always die on assertion */ + +void assert_die(const char *file, unsigned line, const char *condition) +{ + error("%s:%u: MPS ASSERTION FAILED: %s\n", file, line, condition); +} + + +/* testlib_init -- install assertion handler and seed RNG */ + +void testlib_init(int argc, char *argv[]) +{ + (void)mps_lib_assert_fail_install(assert_die); + randomize(argc, argv); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/testlib.h b/mps/code/testlib.h new file mode 100644 index 00000000000..9a414f2b5dd --- /dev/null +++ b/mps/code/testlib.h @@ -0,0 +1,327 @@ +/* testlib.h: TEST LIBRARY INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .purpose: A library of functions that may be of use to unit tests. + */ + +#ifndef testlib_h +#define testlib_h + +#include "mps.h" +#include "misc.h" /* for STR */ +#include "mpstd.h" + + +/* Suppress Visual C warnings at /W4 (warning level 4) */ +/* This is also done in config.h. */ + +#ifdef MPS_BUILD_MV + +/* "constant conditional" (provoked by MPS_END) */ +#pragma warning(disable: 4127) + +#endif /* MPS_BUILD_MV */ + + +/* Suppress Pelles C warnings at /W2 (warning level 2) */ +/* This is also done in config.h. */ + +#ifdef MPS_BUILD_PC + +/* "Unreachable code" (provoked by AVER, if condition is constantly true). 
*/ +#pragma warn(disable: 2154) + +#endif /* MPS_BUILD_PC */ + + +/* Function attributes */ +/* These are also defined in config.h */ + +/* Attribute for functions that take a printf-like format argument, so + * that the compiler can check the format specifiers against the types + * of the arguments. + * GCC: + * Clang: + */ +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) +#define ATTRIBUTE_FORMAT(ARGLIST) __attribute__((__format__ ARGLIST)) +#else +#define ATTRIBUTE_FORMAT(ARGLIST) +#endif + +/* Attribute for functions that must not be inlined. + * GCC: + * MSVC: + */ +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) +#define ATTRIBUTE_NOINLINE __attribute__((__noinline__)) +#elif defined(MPS_BUILD_MV) +#define ATTRIBUTE_NOINLINE __declspec(noinline) +#else +#define ATTRIBUTE_NOINLINE +#endif + + +/* alloca -- memory allocator + * + * Windows calls this function _alloca() instead of alloca(). + * + */ + +#if defined(MPS_OS_W3) + +#define alloca _alloca + +#endif + + +/* setenv -- set environment variable + * + * Windows lacks setenv(), but _putenv_s() has similar functionality. + * + * + * This macro version may evaluate the name argument twice. + */ + +#if defined(MPS_OS_W3) + +#define setenv(name, value, overwrite) \ + (((overwrite) || !getenv(name)) ? _putenv_s(name, value) : 0) + +#endif + + +/* ulongest_t -- longest unsigned integer type + * + * Define a longest unsigned integer type for testing, scanning, and + * printing. We'd like to use C99's uintmax_t and PRIuMAX here, but + * the MPS is in C89 and C99 isn't supported by Microsoft. + * + * We avoid using the types defined in mpstd.h because we want the + * tests to root out any incompatible assumptions by breaking. + */ + +#if defined(MPS_ARCH_A6) || defined(MPS_ARCH_I6) +#define PRIwWORD "16" +#elif defined(MPS_ARCH_I3) +#define PRIwWORD "8" +#else +#error "How many beans make five?" 
+#endif + +#if defined(MPS_OS_W3) && defined(MPS_ARCH_I6) +#define PRIuLONGEST "llu" +#define PRIdLONGEST "lld" +#define SCNuLONGEST "llu" +#define SCNXLONGEST "llX" +#define PRIXLONGEST "llX" +typedef unsigned long long ulongest_t; +typedef long long longest_t; +#define MPS_WORD_CONST(n) (n##ull) +#else +#define PRIuLONGEST "lu" +#define PRIdLONGEST "ld" +#define SCNuLONGEST "lu" +#define SCNXLONGEST "lX" +#define PRIXLONGEST "lX" +typedef unsigned long ulongest_t; +typedef long longest_t; +#define MPS_WORD_CONST(n) (n##ul) +#endif + + +#define PRIXPTR "0"PRIwWORD PRIXLONGEST + + +/* testlib_unused -- declares that a variable is unused + * + * It should be used to prevent compiler warnings about unused + * variables. Care should be exercised; the fact that a variable + * is unused may need justification. + */ + +#define testlib_unused(v) ((void)(v)) + + +/* max -- return larger value + * + * Note: evaluates its arguments twice. + */ + +#undef max +#define max(a, b) (((a) > (b)) ? (a) : (b)) + + +/* alignUp -- align word to alignment + * + * Note: evaluates argument a twice. + */ + +#define alignUp(w, a) (((w) + (a) - 1) & ~((size_t)(a) - 1)) + + +/* die -- succeed or die + * + * If the first argument is not ResOK then prints the second + * argument on stderr and exits the program. Otherwise does nothing. + * + * Typical use: + * die(mps_ap_create(&ap, pool, mps_rank_exact()), "APCreate"); + */ + +extern void die(mps_res_t res, const char *s); + + +/* die_expect -- get expected result or die + * + * If the first argument is not the same as the second argument, + * prints the third argument on stderr and exits the program. + * Otherwise does nothing. 
+ * + * Typical use: + * die_expect(res, MPS_RES_COMMIT_LIMIT, "Commit limit allocation"); + */ + +extern void die_expect(mps_res_t res, mps_res_t expected, const char *s); + + +/* cdie -- succeed or die + * + * If the first argument is not true (non-zero) then prints the second + * argument on stderr and exits the program. Otherwise does nothing. + * + * Typical use: + * cdie(foo != NULL, "No foo"); + */ + +extern void cdie(int res, const char *s); + + +/* assert_die -- always die on assertion + * + * The MPS assertion handler may not stop in the HOT variety, + * preventing tests from detecting defects. This one does. + */ + +void assert_die(const char *file, unsigned line, const char *condition); + + +/* error, verror -- die with message */ + +ATTRIBUTE_FORMAT((printf, 1, 2)) +extern void error(const char *format, ...); +ATTRIBUTE_FORMAT((printf, 1, 0)) +extern void verror(const char *format, va_list args); + + +/* Insist -- like assert, but even in release varieties */ + +#define Insist(cond) insist1(cond, #cond) + +#define insist1(cond, condstring) \ + if(cond) \ + NOOP; \ + else \ + cdie(cond, condstring "\n" __FILE__ "\n" STR(__LINE__)) + + +/* fail -- like assert, but (notionally) returns a value, so usable in an expression */ + +extern int fail(void); + + +/* rnd -- random number generator + * + * rnd() generates a sequence of integers in the range [1, 2^31-2]. + */ + +extern unsigned long rnd(void); +typedef unsigned long rnd_state_t; +extern rnd_state_t rnd_state(void); +extern void rnd_state_set(rnd_state_t state_v3); +extern void rnd_state_set_v2(rnd_state_t seed0_v2); /* legacy */ +extern rnd_state_t rnd_seed(void); + + +/* rnd_verify() -- checks behaviour of rnd() */ +extern void rnd_verify(int depth); + + +/* rnd_addr -- random number generator + * + * rnd_addr() generates a sequence of addresses all over the address space. 
+ */ + +extern mps_addr_t rnd_addr(void); + + +/* rnd_double -- uniformly distributed random number between 0.0 and 1.0 */ + +extern double rnd_double(void); + + +/* rnd_grain -- return a random grain size that's not too big for the + * given arena size */ + +extern size_t rnd_grain(size_t arena_size); + + +/* rnd_align -- random alignment */ + +extern size_t rnd_align(size_t min, size_t max); + + +/* rnd_pause_time -- random pause time */ + +extern double rnd_pause_time(void); + + +/* randomize -- randomize the generator, or initialize to replay + * + * randomize(argc, argv) randomizes the rnd generator (using time(3)) + * and prints out the randomization seed, or takes a seed (as a command- + * line argument) and initializes the generator to the same state. + */ + +extern void randomize(int argc, char *argv[]); + + +/* testlib_init -- install assertion handler and seed RNG */ + +extern void testlib_init(int argc, char *argv[]); + + +#endif /* testlib_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/testthr.h b/mps/code/testthr.h new file mode 100644 index 00000000000..a50dad1e5d0 --- /dev/null +++ b/mps/code/testthr.h @@ -0,0 +1,111 @@ +/* testthr.h: MULTI-THREADED TEST INTERFACE + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Simple interface to threads that makes it possible to + * write test cases that are portable between Windows (using the + * implementation in testthrw3.c) and Unix (using the implementation + * in testthrix.c). . + */ + +#ifndef testthr_h +#define testthr_h + +#include "mpstd.h" + + +/* testthr_routine_t -- type of thread routines + * + * Use the pthread type here and convert back and forth in the Windows + * implementation. + */ +typedef void *(*testthr_routine_t)(void *); + + +/* testthr_t -- type of thread identifiers + * + * It is necessary to define it here (even though it requires some + * #ifdefs) so that clients can allocate storage for them. + */ + +#if defined(MPS_OS_W3) + +#include "mpswin.h" + +/* On Windows, a thread is identified by a HANDLE. + * + * But use a structure so that the thread has somewhere to store its + * result for use by testthr_join. 
+ */ +typedef struct testthr_t { + HANDLE handle; + testthr_routine_t start; + void *arg; /* argument to pass to start */ + void *result; /* result returned from start */ +} testthr_t; + +#elif defined(MPS_OS_FR) || defined(MPS_OS_LI) || defined(MPS_OS_XC) + +#include + +/* In pthreads, a thread is identified by a pthread_t, which is + * allowed "to be defined as a structure" [IEEE Std 1003.1, sys/types.h] + * + */ +typedef pthread_t testthr_t; + +#else +#error "Unknown platform: can't determine a type for testthr_t." +#endif + + +/* testthr_create -- create a thread + * + * Store the identifier of the newly created thread in *thread_o, and + * call start, passing arg as the single parameter. + */ + +void testthr_create(testthr_t *thread_o, testthr_routine_t start, void *arg); + + +/* testthr_join -- wait for a thread to complete + * + * Suspend execution of the calling thread until the target thread + * terminates (if necessary), and if result_o is non-NULL, update + * *result_o with the return value of the thread's start. + */ + +void testthr_join(testthr_t *thread, void **result_o); + +#endif /* testthr_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/testthrix.c b/mps/code/testthrix.c new file mode 100644 index 00000000000..2f70b3e4353 --- /dev/null +++ b/mps/code/testthrix.c @@ -0,0 +1,54 @@ +/* testthrix.c: MULTI-THREADED TEST IMPLEMENTATION (POSIX THREADS) + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "testlib.h" +#include "testthr.h" + +#include /* strerror */ + +void testthr_create(testthr_t *thread_o, testthr_routine_t start, void *arg) +{ + int res = pthread_create(thread_o, NULL, start, arg); + if (res != 0) + error("pthread_create failed with result %d (%s)", res, strerror(res)); +} + +void testthr_join(testthr_t *thread, void **result_o) +{ + int res = pthread_join(*thread, result_o); + if (res != 0) + error("pthread_join failed with result %d (%s)", res, strerror(res)); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/testthrw3.c b/mps/code/testthrw3.c new file mode 100644 index 00000000000..4994fae4a19 --- /dev/null +++ b/mps/code/testthrw3.c @@ -0,0 +1,68 @@ +/* testthrw3.c: MULTI-THREADED TEST IMPLEMENTATION (WINDOWS) + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "testlib.h" +#include "testthr.h" + +static DWORD WINAPI testthr_start(LPVOID arg) +{ + testthr_t *thread = arg; + thread->result = (*thread->start)(thread->arg); + return 0; +} + +void testthr_create(testthr_t *thread_o, testthr_routine_t start, void *arg) +{ + HANDLE res; + thread_o->start = start; + thread_o->arg = arg; + res = CreateThread(NULL, 0, testthr_start, thread_o, 0, NULL); + if (res == NULL) + error("CreateThread failed with error %lu", + (unsigned long)GetLastError()); + else + thread_o->handle = res; +} + +void testthr_join(testthr_t *thread, void **result_o) +{ + DWORD res = WaitForSingleObject(thread->handle, INFINITE); + if (res != WAIT_OBJECT_0) + error("WaitForSingleObject failed with result %lu (error %lu)", + (unsigned long)res, (unsigned long)GetLastError()); + if (result_o) + *result_o = thread->result; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/th.h b/mps/code/th.h new file mode 100644 index 00000000000..08e01d6cc3d --- /dev/null +++ b/mps/code/th.h @@ -0,0 +1,108 @@ +/* th.h: THREAD MANAGER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: Provides thread suspension facilities to the shield. + * . Each thread has to be + * individually registered and deregistered with an arena. + */ + +#ifndef th_h +#define th_h + +#include "mpmtypes.h" +#include "ring.h" + + +#define ThreadSig ((Sig)0x519286ED) /* SIGnature THREaD */ + +extern Bool ThreadCheck(Thread thread); + + +/* ThreadCheckSimple + * + * Simple thread-safe check of a thread object. + */ + +extern Bool ThreadCheckSimple(Thread thread); + + +extern Res ThreadDescribe(Thread thread, mps_lib_FILE *stream, Count depth); + + +/* Register/Deregister + * + * Explicitly register/deregister a thread on the arena threadRing. + * Register returns a "Thread" value which needs to be used + * for deregistration. + * + * Threads must not be multiply registered in the same arena. + */ + +extern Res ThreadRegister(Thread *threadReturn, Arena arena); + +extern void ThreadDeregister(Thread thread, Arena arena); + + +/* ThreadRingSuspend/Resume + * + * These functions suspend/resume the threads on the ring. If the + * current thread is among them, it is not suspended, nor is any + * attempt to resume it made. 
Threads that can't be suspended/resumed + * because they are dead are moved to deadRing. + */ + +extern void ThreadRingSuspend(Ring threadRing, Ring deadRing); +extern void ThreadRingResume(Ring threadRing, Ring deadRing); + + +/* ThreadRingThread + * + * Return the thread from an element of the Arena's + * thread ring. + */ + +extern Thread ThreadRingThread(Ring threadRing); + + +extern Arena ThreadArena(Thread thread); + +extern Res ThreadScan(ScanState ss, Thread thread, void *stackCold, + mps_area_scan_t scan_area, + void *closure); + +extern void ThreadSetup(void); + + +#endif /* th_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/than.c b/mps/code/than.c new file mode 100644 index 00000000000..624265c6097 --- /dev/null +++ b/mps/code/than.c @@ -0,0 +1,178 @@ +/* than.c: ANSI THREADS MANAGER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This is a single-threaded implementation of the threads manager. + * Has stubs for thread suspension. + * . + */ + +#include "mpm.h" + +SRCID(than, "$Id$"); + + +typedef struct mps_thr_s { /* ANSI fake thread structure */ + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from arena->threadSerial */ + Arena arena; /* owning arena */ + RingStruct arenaRing; /* attaches to arena */ +} ThreadStruct; + + +Bool ThreadCheck(Thread thread) +{ + CHECKS(Thread, thread); + CHECKU(Arena, thread->arena); + CHECKL(thread->serial < thread->arena->threadSerial); + CHECKD_NOSIG(Ring, &thread->arenaRing); + return TRUE; +} + + +Bool ThreadCheckSimple(Thread thread) +{ + CHECKS(Thread, thread); + return TRUE; +} + + +Res ThreadRegister(Thread *threadReturn, Arena arena) +{ + Res res; + Thread thread; + Ring ring; + void *p; + + AVER(threadReturn != NULL); + + res = ControlAlloc(&p, arena, sizeof(ThreadStruct)); + if (res != ResOK) + return res; + thread = (Thread)p; + + thread->arena = arena; + RingInit(&thread->arenaRing); + + thread->sig = ThreadSig; + thread->serial = arena->threadSerial; + ++arena->threadSerial; + + AVERT(Thread, thread); + + ring = 
ArenaThreadRing(arena); + + RingAppend(ring, &thread->arenaRing); + + *threadReturn = thread; + return ResOK; +} + + +void ThreadDeregister(Thread thread, Arena arena) +{ + AVERT(Thread, thread); + AVERT(Arena, arena); + + RingRemove(&thread->arenaRing); + + thread->sig = SigInvalid; + + RingFinish(&thread->arenaRing); + + ControlFree(arena, thread, sizeof(ThreadStruct)); +} + + +void ThreadRingSuspend(Ring threadRing, Ring deadRing) +{ + AVERT(Ring, threadRing); + AVERT(Ring, deadRing); +} + +void ThreadRingResume(Ring threadRing, Ring deadRing) +{ + AVERT(Ring, threadRing); + AVERT(Ring, deadRing); +} + +Thread ThreadRingThread(Ring threadRing) +{ + Thread thread; + AVERT(Ring, threadRing); + thread = RING_ELT(Thread, arenaRing, threadRing); + AVERT(Thread, thread); + return thread; +} + + +/* Must be thread-safe. . */ + +Arena ThreadArena(Thread thread) +{ + AVER(TESTT(Thread, thread)); + return thread->arena; +} + + +Res ThreadScan(ScanState ss, Thread thread, void *stackCold, + mps_area_scan_t scan_area, + void *closure) +{ + UNUSED(thread); + return StackScan(ss, stackCold, scan_area, closure); +} + + +Res ThreadDescribe(Thread thread, mps_lib_FILE *stream, Count depth) +{ + Res res; + + res = WriteF(stream, depth, + "Thread $P ($U) {\n", (WriteFP)thread, (WriteFU)thread->serial, + " arena $P ($U)\n", + (WriteFP)thread->arena, (WriteFU)thread->arena->serial, + "} Thread $P ($U)\n", (WriteFP)thread, (WriteFU)thread->serial, + NULL); + if (res != ResOK) + return res; + + return ResOK; +} + + +void ThreadSetup(void) +{ + /* Nothing to do as ANSI platform does not have fork(). */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/thix.c b/mps/code/thix.c new file mode 100644 index 00000000000..c47f82ad3b8 --- /dev/null +++ b/mps/code/thix.c @@ -0,0 +1,375 @@ +/* thix.c: Threads Manager for Posix threads + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: This is a pthreads implementation of the threads manager. + * This implements . + * + * .design: . + * + * .thread.id: The thread id is used to identify the current thread. + * + * ASSUMPTIONS + * + * .error.resume: PThreadextResume is assumed to succeed unless the + * thread has been terminated. + * .error.suspend: PThreadextSuspend is assumed to succeed unless the + * thread has been terminated. + * + * .stack.full-descend: assumes full descending stack. + * i.e. stack pointer points to the last allocated location; + * stack grows downwards. 
+ * + * .stack.below-bottom: it's legal for the stack pointer to be at a + * higher address than the registered bottom of stack. This might + * happen if the stack of another thread doesn't contain any frames + * belonging to the client language. In this case, the stack should + * not be scanned. + * + * .stack.align: assume roots on the stack are always word-aligned, + * but don't assume that the stack pointer is necessarily + * word-aligned at the time of reading the context of another thread. + */ + +#include "mpm.h" + +#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) +#error "thix.c is specific to MPS_OS_FR or MPS_OS_LI" +#endif + +#include "prmcix.h" +#include "pthrdext.h" + +#include + +SRCID(thix, "$Id$"); + + +/* ThreadStruct -- thread descriptor */ + +typedef struct mps_thr_s { /* PThreads thread structure */ + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from arena->threadSerial */ + Arena arena; /* owning arena */ + RingStruct arenaRing; /* threads attached to arena */ + Bool alive; /* thread believed to be alive? 
*/ + PThreadextStruct thrextStruct; /* PThreads extension */ + pthread_t id; /* Pthread object of thread */ + MutatorContext context; /* Context if suspended, NULL if not */ +} ThreadStruct; + + +/* ThreadCheck -- check a thread */ + +Bool ThreadCheck(Thread thread) +{ + CHECKS(Thread, thread); + CHECKU(Arena, thread->arena); + CHECKL(thread->serial < thread->arena->threadSerial); + CHECKD_NOSIG(Ring, &thread->arenaRing); + CHECKL(BoolCheck(thread->alive)); + CHECKD(PThreadext, &thread->thrextStruct); + return TRUE; +} + +Bool ThreadCheckSimple(Thread thread) +{ + CHECKS(Thread, thread); + return TRUE; +} + + +/* ThreadRegister -- register a thread with an arena */ + +Res ThreadRegister(Thread *threadReturn, Arena arena) +{ + Res res; + Thread thread; + void *p; + + AVER(threadReturn != NULL); + AVERT(Arena, arena); + + res = ControlAlloc(&p, arena, sizeof(ThreadStruct)); + if(res != ResOK) + return res; + thread = (Thread)p; + + thread->id = pthread_self(); + + RingInit(&thread->arenaRing); + + thread->sig = ThreadSig; + thread->serial = arena->threadSerial; + ++arena->threadSerial; + thread->arena = arena; + thread->alive = TRUE; + thread->context = NULL; + + PThreadextInit(&thread->thrextStruct, thread->id); + + AVERT(Thread, thread); + + RingAppend(ArenaThreadRing(arena), &thread->arenaRing); + + *threadReturn = thread; + return ResOK; +} + + +/* ThreadDeregister -- deregister a thread from an arena */ + +void ThreadDeregister(Thread thread, Arena arena) +{ + AVERT(Thread, thread); + AVERT(Arena, arena); + + RingRemove(&thread->arenaRing); + + thread->sig = SigInvalid; + + RingFinish(&thread->arenaRing); + + PThreadextFinish(&thread->thrextStruct); + + ControlFree(arena, thread, sizeof(ThreadStruct)); +} + + +/* mapThreadRing -- map over threads on ring calling a function on + * each one. 
+ * + * Threads that are found to be dead (that is, if func returns FALSE) + * are marked as dead and moved to deadRing, in order to implement + * design.thread-manager.sol.thread.term.attempt. + */ + +static void mapThreadRing(Ring threadRing, Ring deadRing, Bool (*func)(Thread)) +{ + Ring node, next; + + AVERT(Ring, threadRing); + AVERT(Ring, deadRing); + AVER(FUNCHECK(func)); + + RING_FOR(node, threadRing, next) { + Thread thread = RING_ELT(Thread, arenaRing, node); + AVERT(Thread, thread); + AVER(thread->alive); + if (!(*func)(thread)) { + thread->alive = FALSE; + RingRemove(&thread->arenaRing); + RingAppend(deadRing, &thread->arenaRing); + } + } +} + + +/* ThreadRingSuspend -- suspend all threads on a ring, except the + * current one. + */ + +static Bool threadSuspend(Thread thread) +{ + Res res; + pthread_t self; + self = pthread_self(); + if (pthread_equal(self, thread->id)) /* .thread.id */ + return TRUE; + + /* .error.suspend: if PThreadextSuspend fails, we assume the thread + * has been terminated. */ + AVER(thread->context == NULL); + res = PThreadextSuspend(&thread->thrextStruct, &thread->context); + AVER(res == ResOK); + AVER(thread->context != NULL); + /* design.thread-manager.sol.thread.term.attempt */ + return res == ResOK; +} + + + +void ThreadRingSuspend(Ring threadRing, Ring deadRing) +{ + mapThreadRing(threadRing, deadRing, threadSuspend); +} + + +/* ThreadRingResume -- resume all threads on a ring (expect the current one) */ + + +static Bool threadResume(Thread thread) +{ + Res res; + pthread_t self; + self = pthread_self(); + if (pthread_equal(self, thread->id)) /* .thread.id */ + return TRUE; + + /* .error.resume: If PThreadextResume fails, we assume the thread + * has been terminated. 
*/ + AVER(thread->context != NULL); + res = PThreadextResume(&thread->thrextStruct); + AVER(res == ResOK); + thread->context = NULL; + /* design.thread-manager.sol.thread.term.attempt */ + return res == ResOK; +} + +void ThreadRingResume(Ring threadRing, Ring deadRing) +{ + mapThreadRing(threadRing, deadRing, threadResume); +} + + +/* ThreadRingThread -- return the thread at the given ring element */ + +Thread ThreadRingThread(Ring threadRing) +{ + Thread thread; + AVERT(Ring, threadRing); + thread = RING_ELT(Thread, arenaRing, threadRing); + AVERT(Thread, thread); + return thread; +} + + +/* ThreadArena -- get the arena of a thread + * + * Must be thread-safe. . + */ + +Arena ThreadArena(Thread thread) +{ + AVER(TESTT(Thread, thread)); + return thread->arena; +} + + +/* ThreadScan -- scan the state of a thread (stack and regs) */ + +Res ThreadScan(ScanState ss, Thread thread, void *stackCold, + mps_area_scan_t scan_area, + void *closure) +{ + pthread_t self; + Res res; + + AVERT(Thread, thread); + self = pthread_self(); + if(pthread_equal(self, thread->id)) { + /* scan this thread's stack */ + AVER(thread->alive); + res = StackScan(ss, stackCold, scan_area, closure); + if(res != ResOK) + return res; + } else if (thread->alive) { + MutatorContext context; + Word *stackBase, *stackLimit; + Addr stackPtr; + + context = thread->context; + AVER(context != NULL); + + stackPtr = MutatorContextSP(context); + /* .stack.align */ + stackBase = (Word *)AddrAlignUp(stackPtr, sizeof(Word)); + stackLimit = stackCold; + if (stackBase >= stackLimit) + return ResOK; /* .stack.below-bottom */ + + /* scan stack inclusive of current sp and exclusive of + * stackCold (.stack.full-descend) + */ + res = TraceScanArea(ss, stackBase, stackLimit, + scan_area, closure); + if(res != ResOK) + return res; + + /* scan the registers in the mutator context */ + res = MutatorContextScan(ss, context, scan_area, closure); + if(res != ResOK) + return res; + } + + return ResOK; +} + + +/* 
ThreadDescribe -- describe a thread */ + +Res ThreadDescribe(Thread thread, mps_lib_FILE *stream, Count depth) +{ + Res res; + + res = WriteF(stream, depth, + "Thread $P ($U) {\n", (WriteFP)thread, (WriteFU)thread->serial, + " arena $P ($U)\n", + (WriteFP)thread->arena, (WriteFU)thread->arena->serial, + " alive $S\n", WriteFYesNo(thread->alive), + " id $U\n", (WriteFU)thread->id, + "} Thread $P ($U)\n", (WriteFP)thread, (WriteFU)thread->serial, + NULL); + if(res != ResOK) + return res; + + return ResOK; +} + + +/* threadAtForkChild -- for each arena, move threads except for the + * current thread to the dead ring . + */ + +static Bool threadForkChild(Thread thread) +{ + AVERT(Thread, thread); + return pthread_equal(pthread_self(), thread->id); /* .thread.id */ +} + +static void threadRingForkChild(Arena arena) +{ + AVERT(Arena, arena); + mapThreadRing(ArenaThreadRing(arena), ArenaDeadRing(arena), threadForkChild); +} + +static void threadAtForkChild(void) +{ + GlobalsArenaMap(threadRingForkChild); +} + +void ThreadSetup(void) +{ + pthread_atfork(NULL, NULL, threadAtForkChild); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/thw3.c b/mps/code/thw3.c new file mode 100644 index 00000000000..dd573df14c0 --- /dev/null +++ b/mps/code/thw3.c @@ -0,0 +1,348 @@ +/* thw3.c: WIN32 THREAD MANAGER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * Implements thread registration, suspension, and stack and register + * scanning. . + * + * .thread.id: The thread id is used to identify the current thread. + * .thread.handle: The thread handle needs the enough access to + * be able to suspend threads and to get their context. i.e. + * .thread.handle.susp-res: THREAD_SUSPEND_RESUME access + * .thread.handle.get-context: THREAD_GET_CONTEXT access + * An appropriate handle is created on registration. + * + * + * ASSUMPTIONS + * + * .error: some major errors are assumed not to happen. + * .error.close-handle: CloseHandle is assumed to succeed. + * + * Other errors are assumed to only happen in certain circumstances. + * .error.resume: ResumeThread is assumed to succeed unless the thread + * has been destroyed (in fact, perversely, it appears to succeed even + * when the thread has been destroyed). + * .error.suspend: SuspendThread is assumed to succeed unless the thread + * has been destroyed. + * + * .stack.full-descend: assumes full descending stack, that is, stack + * pointer points to the last allocated location and stack grows + * downwards. 
+ * + * .stack.below-bottom: it's legal for the stack pointer to be at a + * higher address than the registered bottom of stack. This might + * happen if the stack of another thread doesn't contain any frames + * belonging to the client language. In this case, the stack should + * not be scanned. + * + * .stack.align: assume roots on the stack are always word-aligned, + * but don't assume that the stack pointer is necessarily word-aligned + * at the time of reading the context of another thread. + * + * .nt: uses Win32 specific stuff + * HANDLE + * DWORD + * GetCurrentProcess + * DuplicateHandle + * THREAD_SUSPEND_RESUME + * GetCurrentThreadId + * CloseHandle + * SuspendThread + * ResumeThread + */ + +#include "mpm.h" + +#if !defined(MPS_OS_W3) /* .nt */ +#error "thw3.c is specific to MPS_OS_W3" +#endif + +#include "prmcw3.h" +#include "mpswin.h" + +SRCID(thw3, "$Id$"); + + +typedef struct mps_thr_s { /* Win32 thread structure */ + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from arena->threadSerial */ + Arena arena; /* owning arena */ + RingStruct arenaRing; /* threads attached to arena */ + Bool alive; /* thread believed to be alive? */ + HANDLE handle; /* Handle of thread, see + * */ + DWORD id; /* Thread id of thread */ +} ThreadStruct; + + +Bool ThreadCheck(Thread thread) +{ + CHECKS(Thread, thread); + CHECKU(Arena, thread->arena); + CHECKL(thread->serial < thread->arena->threadSerial); + CHECKD_NOSIG(Ring, &thread->arenaRing); + return TRUE; +} + + +Bool ThreadCheckSimple(Thread thread) +{ + CHECKS(Thread, thread); + return TRUE; +} + + +Res ThreadRegister(Thread *threadReturn, Arena arena) +{ + Res res; + Thread thread; + HANDLE procHandle; + BOOL b; + void *p; + + AVER(threadReturn != NULL); + AVERT(Arena, arena); + + res = ControlAlloc(&p, arena, sizeof(ThreadStruct)); + if(res != ResOK) + return res; + thread = (Thread)p; /* avoid pun */ + + /* Duplicate handle gives us a new handle with updated privileges. 
+ * .thread.handle describes the ones needed. + */ + procHandle = GetCurrentProcess(); + + b = DuplicateHandle(procHandle, GetCurrentThread(), procHandle, + &thread->handle, + THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT, + FALSE, 0); + if(!b) + return ResRESOURCE; + + thread->id = GetCurrentThreadId(); + + RingInit(&thread->arenaRing); + + thread->sig = ThreadSig; + thread->serial = arena->threadSerial; + ++arena->threadSerial; + thread->arena = arena; + thread->alive = TRUE; + + AVERT(Thread, thread); + + RingAppend(ArenaThreadRing(arena), &thread->arenaRing); + + *threadReturn = thread; + return ResOK; +} + +void ThreadDeregister(Thread thread, Arena arena) +{ + Bool b; + + AVERT(Thread, thread); + AVERT(Arena, arena); + + RingRemove(&thread->arenaRing); + + thread->sig = SigInvalid; + + RingFinish(&thread->arenaRing); + + b = CloseHandle(thread->handle); + AVER(b); /* .error.close-handle */ + + ControlFree(arena, thread, sizeof(ThreadStruct)); +} + + +/* mapThreadRing -- map over threads on ring calling a function on + * each one except the current thread. + * + * Threads that are found to be dead (that is, if func returns FALSE) + * are moved to deadRing. + */ + +static void mapThreadRing(Ring threadRing, Ring deadRing, Bool (*func)(Thread)) +{ + Ring node, next; + DWORD id; + + AVERT(Ring, threadRing); + AVERT(Ring, deadRing); + AVER(FUNCHECK(func)); + + id = GetCurrentThreadId(); + RING_FOR(node, threadRing, next) { + Thread thread = RING_ELT(Thread, arenaRing, node); + AVERT(Thread, thread); + AVER(thread->alive); + if (id != thread->id /* .thread.id */ + && !(*func)(thread)) + { + thread->alive = FALSE; + RingRemove(&thread->arenaRing); + RingAppend(deadRing, &thread->arenaRing); + } + } +} + +static Bool suspendThread(Thread thread) +{ + /* .thread.handle.susp-res */ + /* .error.suspend */ + /* In the error case (SuspendThread returning -1), we */ + /* assume the thread has been terminated. 
*/ + /* [GetLastError appears to return 5 when SuspendThread is called */ + /* on a terminated thread, but I'm not sufficiently confident of this */ + /* to check -- drj 1998-04-09] */ + return SuspendThread(thread->handle) != (DWORD)-1; +} + +void ThreadRingSuspend(Ring threadRing, Ring deadRing) +{ + mapThreadRing(threadRing, deadRing, suspendThread); +} + +static Bool resumeThread(Thread thread) +{ + /* .thread.handle.susp-res */ + /* .error.resume */ + /* In the error case (ResumeThread returning -1), we */ + /* assume the thread has been terminated. */ + return ResumeThread(thread->handle) != (DWORD)-1; +} + +void ThreadRingResume(Ring threadRing, Ring deadRing) +{ + mapThreadRing(threadRing, deadRing, resumeThread); +} + + +Thread ThreadRingThread(Ring threadRing) +{ + Thread thread; + AVERT(Ring, threadRing); + thread = RING_ELT(Thread, arenaRing, threadRing); + AVERT(Thread, thread); + return thread; +} + +/* Must be thread-safe. . */ + +Arena ThreadArena(Thread thread) +{ + AVER(TESTT(Thread, thread)); + return thread->arena; +} + +Res ThreadDescribe(Thread thread, mps_lib_FILE *stream, Count depth) +{ + Res res; + + res = WriteF(stream, depth, + "Thread $P ($U) {\n", (WriteFP)thread, (WriteFU)thread->serial, + " arena $P ($U)\n", + (WriteFP)thread->arena, (WriteFU)thread->arena->serial, + " alive $S\n", WriteFYesNo(thread->alive), + " handle $W\n", (WriteFW)thread->handle, + " id $U\n", (WriteFU)thread->id, + "} Thread $P ($U)\n", (WriteFP)thread, (WriteFU)thread->serial, + NULL); + if(res != ResOK) + return res; + + return ResOK; +} + + +Res ThreadScan(ScanState ss, Thread thread, Word *stackCold, + mps_area_scan_t scan_area, void *closure) +{ + DWORD id; + Res res; + + id = GetCurrentThreadId(); + + if (id != thread->id) { /* .thread.id */ + MutatorContextStruct context; + Word *stackBase, *stackLimit; + Addr stackPtr; + + /* scan stack and register roots in other threads */ + /* .thread.handle.get-context */ + res = MutatorContextInitThread(&context, 
thread->handle); + if (res != ResOK) { + /* .error.get-context */ + /* We assume that the thread must have been destroyed. */ + /* We ignore the situation by returning immediately. */ + return ResOK; + } + + stackPtr = MutatorContextSP(&context); + /* .stack.align */ + stackBase = (Word *)AddrAlignUp(stackPtr, sizeof(Word)); + stackLimit = stackCold; + if (stackBase >= stackLimit) + return ResOK; /* .stack.below-bottom */ + + /* scan stack inclusive of current sp and exclusive of + * stackCold (.stack.full-descend) + */ + res = TraceScanArea(ss, stackBase, stackLimit, + scan_area, closure); + if (res != ResOK) + return res; + + /* Scan registers. */ + res = MutatorContextScan(ss, &context, scan_area, closure); + if (res != ResOK) + return res; + + } else { /* scan this thread's stack */ + res = StackScan(ss, stackCold, scan_area, closure); + if (res != ResOK) + return res; + } + + return ResOK; +} + + +void ThreadSetup(void) +{ + /* Nothing to do as MPS does not support fork() on Windows. */ +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/thxc.c b/mps/code/thxc.c new file mode 100644 index 00000000000..34ab02981dd --- /dev/null +++ b/mps/code/thxc.c @@ -0,0 +1,425 @@ +/* thxc.c: THREAD MANAGER (macOS) + * + * $Id$ + * Copyright (c) 2001-2023 Ravenbrook Limited. See end of file for license. + * + * .design: . + * + * + * TODO + * + * Too much code in common with than.c, thix.c. Consider how to reduce + * redundancy without making the code obscure. + * + * + * REFERENCES + * + * [Mach_man] Mach man pages within XNU; + * Apple Computer; + * . + */ + +#include "mpm.h" + +#if !defined(MPS_OS_XC) +#error "thxc.c is specific to MPS_OS_XC" +#endif + +#include "protxc.h" + +#include +#include +#include +#include +#include + + +SRCID(thxc, "$Id$"); + + +typedef struct mps_thr_s { /* macOS thread structure */ + Sig sig; /* design.mps.sig.field */ + Serial serial; /* from arena->threadSerial */ + Arena arena; /* owning arena */ + RingStruct arenaRing; /* attaches to arena */ + Bool alive; /* thread believed to be alive? */ + Bool forking; /* thread currently calling fork? 
*/ + thread_port_t port; /* thread kernel port */ +} ThreadStruct; + + +Bool ThreadCheck(Thread thread) +{ + CHECKS(Thread, thread); + CHECKU(Arena, thread->arena); + CHECKL(thread->serial < thread->arena->threadSerial); + CHECKD_NOSIG(Ring, &thread->arenaRing); + CHECKL(BoolCheck(thread->alive)); + CHECKL(BoolCheck(thread->forking)); + CHECKL(MACH_PORT_VALID(thread->port)); + return TRUE; +} + + +Bool ThreadCheckSimple(Thread thread) +{ + CHECKS(Thread, thread); + return TRUE; +} + + +Res ThreadRegister(Thread *threadReturn, Arena arena) +{ + Res res; + Thread thread; + Ring ring; + void *p; + + AVER(threadReturn != NULL); + + res = ControlAlloc(&p, arena, sizeof(ThreadStruct)); + if (res != ResOK) + return res; + thread = (Thread)p; + + thread->arena = arena; + RingInit(&thread->arenaRing); + + thread->serial = arena->threadSerial; + ++arena->threadSerial; + thread->alive = TRUE; + thread->forking = FALSE; + thread->port = mach_thread_self(); + AVER(MACH_PORT_VALID(thread->port)); + thread->sig = ThreadSig; + AVERT(Thread, thread); + + ProtThreadRegister(); + + ring = ArenaThreadRing(arena); + + RingAppend(ring, &thread->arenaRing); + + *threadReturn = thread; + return ResOK; +} + + +void ThreadDeregister(Thread thread, Arena arena) +{ + AVERT(Thread, thread); + AVERT(Arena, arena); + AVER(!thread->forking); + + RingRemove(&thread->arenaRing); + + thread->sig = SigInvalid; + + RingFinish(&thread->arenaRing); + + ControlFree(arena, thread, sizeof(ThreadStruct)); +} + + +/* mapThreadRing -- map over threads on ring calling a function on + * each one. + * + * Threads that are found to be dead (that is, if func returns FALSE) + * are marked as dead and moved to deadRing, in order to implement + * design.thread-manager.sol.thread.term.attempt. 
+ */ + +static void mapThreadRing(Ring threadRing, Ring deadRing, Bool (*func)(Thread)) +{ + Ring node, next; + + AVERT(Ring, threadRing); + AVERT(Ring, deadRing); + AVER(FUNCHECK(func)); + + RING_FOR(node, threadRing, next) { + Thread thread = RING_ELT(Thread, arenaRing, node); + AVERT(Thread, thread); + AVER(thread->alive); + if (!(*func)(thread)) { + thread->alive = FALSE; + RingRemove(&thread->arenaRing); + RingAppend(deadRing, &thread->arenaRing); + } + } +} + + +static Bool threadSuspend(Thread thread) +{ + kern_return_t kern_return; + mach_port_t self = mach_thread_self(); + AVER(MACH_PORT_VALID(self)); + if (thread->port == self) + return TRUE; + + kern_return = thread_suspend(thread->port); + /* No rendezvous is necessary: thread_suspend "prevents the thread + * from executing any more user-level instructions" */ + AVER(kern_return == KERN_SUCCESS); + /* Experimentally, values other then KERN_SUCCESS indicate the thread has + terminated . */ + /* design.thread-manager.sol.thread.term.attempt */ + return kern_return == KERN_SUCCESS; +} + + +/* ThreadRingSuspend -- suspend all threads on a ring, except the + * current one. + */ +void ThreadRingSuspend(Ring threadRing, Ring deadRing) +{ + mapThreadRing(threadRing, deadRing, threadSuspend); +} + + +static Bool threadResume(Thread thread) +{ + kern_return_t kern_return; + mach_port_t self = mach_thread_self(); + AVER(MACH_PORT_VALID(self)); + if (thread->port == self) + return TRUE; + + kern_return = thread_resume(thread->port); + /* Mach has no equivalent of EAGAIN. */ + AVER(kern_return == KERN_SUCCESS); + /* Experimentally, values other then KERN_SUCCESS indicate the thread has + terminated . */ + /* design.thread-manager.sol.thread.term.attempt */ + return kern_return == KERN_SUCCESS; +} + +/* ThreadRingResume -- resume all threads on a ring, except the + * current one. 
+ */ +void ThreadRingResume(Ring threadRing, Ring deadRing) +{ + mapThreadRing(threadRing, deadRing, threadResume); +} + + +Thread ThreadRingThread(Ring threadRing) +{ + Thread thread; + AVERT(Ring, threadRing); + thread = RING_ELT(Thread, arenaRing, threadRing); + AVERT(Thread, thread); + return thread; +} + + +/* Must be thread-safe. . */ + +Arena ThreadArena(Thread thread) +{ + AVER(TESTT(Thread, thread)); + return thread->arena; +} + + +/* ThreadScan -- scan the state of a thread (stack and regs) */ + +#include "prmcxc.h" + +Res ThreadScan(ScanState ss, Thread thread, void *stackCold, + mps_area_scan_t scan_area, void *closure) +{ + mach_port_t self; + Res res; + + AVERT(Thread, thread); + self = mach_thread_self(); + AVER(MACH_PORT_VALID(self)); + if (thread->port == self) { + /* scan this thread's stack */ + AVER(thread->alive); + res = StackScan(ss, stackCold, scan_area, closure); + if(res != ResOK) + return res; + } else if (thread->alive) { + MutatorContextStruct context; + THREAD_STATE_S threadState; + Word *stackBase, *stackLimit; + Addr stackPtr; + mach_msg_type_number_t count; + kern_return_t kern_return; + + /* Note: We could get the thread state and check the suspend count in + order to assert that the thread is suspended, but it's probably + unnecessary and is a lot of work to check a static condition. 
*/ + + MutatorContextInitThread(&context, &threadState); + + count = THREAD_STATE_COUNT; + AVER(sizeof(*context.threadState) == count * sizeof(natural_t)); + kern_return = thread_get_state(thread->port, + THREAD_STATE_FLAVOR, + (thread_state_t)context.threadState, + &count); + AVER(kern_return == KERN_SUCCESS); + AVER(count == THREAD_STATE_COUNT); + + stackPtr = MutatorContextSP(&context); + /* .stack.align */ + stackBase = (Word *)AddrAlignUp(stackPtr, sizeof(Word)); + stackLimit = stackCold; + if (stackBase >= stackLimit) + return ResOK; /* .stack.below-bottom */ + + /* scan stack inclusive of current sp and exclusive of + * stackCold (.stack.full-descend) + */ + res = TraceScanArea(ss, stackBase, stackLimit, + scan_area, closure); + if(res != ResOK) + return res; + + /* scan the registers in the mutator context */ + res = MutatorContextScan(ss, &context, scan_area, closure); + if(res != ResOK) + return res; + } + + return ResOK; +} + + +Res ThreadDescribe(Thread thread, mps_lib_FILE *stream, Count depth) +{ + Res res; + + res = WriteF(stream, depth, + "Thread $P ($U) {\n", (WriteFP)thread, (WriteFU)thread->serial, + " arena $P ($U)\n", + (WriteFP)thread->arena, (WriteFU)thread->arena->serial, + " alive $S\n", WriteFYesNo(thread->alive), + " port $U\n", (WriteFU)thread->port, + "} Thread $P ($U)\n", (WriteFP)thread, (WriteFU)thread->serial, + NULL); + if (res != ResOK) + return res; + + return ResOK; +} + + +/* threadAtForkPrepare -- for each arena, mark the current thread as + * forking . 
+ */ + +static Bool threadForkPrepare(Thread thread) +{ + mach_port_t self; + AVERT(Thread, thread); + AVER(!thread->forking); + self = mach_thread_self(); + AVER(MACH_PORT_VALID(self)); + thread->forking = (thread->port == self); + return TRUE; +} + +static void threadRingForkPrepare(Arena arena) +{ + AVERT(Arena, arena); + mapThreadRing(ArenaThreadRing(arena), ArenaDeadRing(arena), threadForkPrepare); +} + +static void threadAtForkPrepare(void) +{ + GlobalsArenaMap(threadRingForkPrepare); +} + + +/* threadAtForkParent -- for each arena, clear the forking flag for + * all threads . + */ + +static Bool threadForkParent(Thread thread) +{ + AVERT(Thread, thread); + thread->forking = FALSE; + return TRUE; +} + +static void threadRingForkParent(Arena arena) +{ + AVERT(Arena, arena); + mapThreadRing(ArenaThreadRing(arena), ArenaDeadRing(arena), threadForkParent); +} + +static void threadAtForkParent(void) +{ + GlobalsArenaMap(threadRingForkParent); +} + + +/* threadAtForkChild -- For each arena, move all threads to the dead + * ring, except for the thread that was marked as forking by the + * prepare handler , for which + * update its mach port . + */ + +static Bool threadForkChild(Thread thread) +{ + AVERT(Thread, thread); + if (thread->forking) { + thread->port = mach_thread_self(); + AVER(MACH_PORT_VALID(thread->port)); + thread->forking = FALSE; + return TRUE; + } else { + return FALSE; + } +} + +static void threadRingForkChild(Arena arena) +{ + AVERT(Arena, arena); + mapThreadRing(ArenaThreadRing(arena), ArenaDeadRing(arena), threadForkChild); +} + +static void threadAtForkChild(void) +{ + GlobalsArenaMap(threadRingForkChild); +} + +void ThreadSetup(void) +{ + pthread_atfork(threadAtForkPrepare, threadAtForkParent, threadAtForkChild); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2023 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/trace.c b/mps/code/trace.c new file mode 100644 index 00000000000..0a3d95f4517 --- /dev/null +++ b/mps/code/trace.c @@ -0,0 +1,1949 @@ +/* trace.c: GENERIC TRACER IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2001-2023 Ravenbrook Limited. + * See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * .design: design.mps.trace. 
+ */ + +#include "locus.h" +#include "mpm.h" +#include /* for LONG_MAX */ + +SRCID(trace, "$Id$"); + +/* Forward declarations */ +Rank traceBand(Trace); +Bool traceBandAdvance(Trace); +Bool traceBandFirstStretch(Trace); +void traceBandFirstStretchDone(Trace); + +/* Types */ + +enum { + traceAccountingPhaseRootScan = 1, + traceAccountingPhaseSegScan, + traceAccountingPhaseSingleScan +}; +typedef int traceAccountingPhase; + +/* ScanStateCheck -- check consistency of a ScanState object */ + +Bool ScanStateCheck(ScanState ss) +{ + TraceId ti; + Trace trace; + ZoneSet white; + + CHECKS(ScanState, ss); + CHECKL(FUNCHECK(ss->formatScan)); + CHECKL(FUNCHECK(ss->areaScan)); + /* Can't check ss->areaScanClosure. */ + CHECKL(FUNCHECK(ss->fix)); + /* Can't check ss->fixClosure. */ + CHECKL(ScanStateZoneShift(ss) == ss->arena->zoneShift); + white = ZoneSetEMPTY; + TRACE_SET_ITER(ti, trace, ss->traces, ss->arena) + white = ZoneSetUnion(white, ss->arena->trace[ti].white); + TRACE_SET_ITER_END(ti, trace, ss->traces, ss->arena); + CHECKL(ScanStateWhite(ss) == white); + CHECKU(Arena, ss->arena); + /* Summaries could be anything, and can't be checked. 
*/ + CHECKL(TraceSetCheck(ss->traces)); + CHECKL(TraceSetSuper(ss->arena->busyTraces, ss->traces)); + CHECKL(RankCheck(ss->rank)); + CHECKL(BoolCheck(ss->wasMarked)); + /* @@@@ checks for counts missing */ + return TRUE; +} + + +/* traceNoAreaScan -- area scan function that must not be called */ + +static mps_res_t traceNoAreaScan(mps_ss_t ss, void *base, void *limit, void *closure) +{ + UNUSED(closure); + return FormatNoScan(ss, base, limit); +} + + +/* ScanStateInit -- Initialize a ScanState object */ + +void ScanStateInit(ScanState ss, TraceSet ts, Arena arena, + Rank rank, ZoneSet white) +{ + TraceId ti; + Trace trace; + + AVERT(TraceSet, ts); + AVERT(Arena, arena); + AVERT(Rank, rank); + /* white is arbitrary and can't be checked */ + + /* .fix.single: NOTE: We can only currently support scanning for a + set of traces with the same fix method. To remove this + restriction, it would be necessary to dispatch to the fix methods + of sets of traces in TraceFix. See also impl.c.trans.park. */ + ss->fix = NULL; + ss->fixClosure = NULL; + TRACE_SET_ITER(ti, trace, ts, arena) { + if (ss->fix == NULL) { + ss->fix = trace->fix; + ss->fixClosure = trace->fixClosure; + } else { + AVER(ss->fix == trace->fix); + AVER(ss->fixClosure == trace->fixClosure); + } + } TRACE_SET_ITER_END(ti, trace, ts, arena); + AVER(ss->fix != NULL); + + /* If the fix method is the normal GC fix, then we optimise the test for + whether it's an emergency or not by updating the dispatch here, once. 
*/ + if (ss->fix == SegFix && ArenaEmergency(arena)) + ss->fix = SegFixEmergency; + + ss->formatScan = FormatNoScan; + ss->areaScan = traceNoAreaScan; + ss->rank = rank; + ss->traces = ts; + ScanStateSetZoneShift(ss, arena->zoneShift); + ScanStateSetUnfixedSummary(ss, RefSetEMPTY); + ss->fixedSummary = RefSetEMPTY; + ss->arena = arena; + ss->wasMarked = TRUE; + ScanStateSetWhite(ss, white); + STATISTIC(ss->fixRefCount = (Count)0); + STATISTIC(ss->segRefCount = (Count)0); + STATISTIC(ss->whiteSegRefCount = (Count)0); + STATISTIC(ss->nailCount = (Count)0); + STATISTIC(ss->snapCount = (Count)0); + STATISTIC(ss->forwardedCount = (Count)0); + STATISTIC(ss->preservedInPlaceCount = (Count)0); + STATISTIC(ss->copiedSize = (Size)0); + ss->scannedSize = (Size)0; /* see .work */ + ss->sig = ScanStateSig; + + AVERT(ScanState, ss); +} + + +/* ScanStateInitSeg -- Initialize a ScanState object for scanning a segment */ + +void ScanStateInitSeg(ScanState ss, TraceSet ts, Arena arena, + Rank rank, ZoneSet white, Seg seg) +{ + Format format; + AVERT(Seg, seg); + + ScanStateInit(ss, ts, arena, rank, white); + if (PoolFormat(&format, SegPool(seg))) { + ss->formatScan = format->scan; + } +} + + +/* ScanStateFinish -- Finish a ScanState object */ + +void ScanStateFinish(ScanState ss) +{ + AVERT(ScanState, ss); + ss->sig = SigInvalid; +} + + +/* TraceIdCheck -- check that a TraceId is valid */ + +Bool TraceIdCheck(TraceId ti) +{ + CHECKL(ti < TraceLIMIT); + UNUSED(ti); /* */ + return TRUE; +} + + +/* TraceSetCheck -- check that a TraceSet is valid */ + +Bool TraceSetCheck(TraceSet ts) +{ + CHECKL(ts < ((ULongest)1 << TraceLIMIT)); + UNUSED(ts); /* */ + return TRUE; +} + + +/* TraceCheck -- check consistency of Trace object */ + +Bool TraceCheck(Trace trace) +{ + CHECKS(Trace, trace); + CHECKU(Arena, trace->arena); + CHECKL(TraceIdCheck(trace->ti)); + CHECKL(trace == &trace->arena->trace[trace->ti]); + CHECKL(TraceSetIsMember(trace->arena->busyTraces, trace)); + 
CHECKL(ZoneSetSub(trace->mayMove, trace->white)); + CHECKD_NOSIG(Ring, &trace->genRing); + /* Use trace->state to check more invariants. */ + switch(trace->state) { + case TraceINIT: + CHECKL(!TraceSetIsMember(trace->arena->flippedTraces, trace)); + /* @@@@ What can be checked here? */ + break; + + case TraceUNFLIPPED: + CHECKL(!RingIsSingle(&trace->genRing)); + CHECKL(!TraceSetIsMember(trace->arena->flippedTraces, trace)); + /* @@@@ Assert that mutator is grey for trace. */ + break; + + case TraceFLIPPED: + CHECKL(!RingIsSingle(&trace->genRing)); + CHECKL(TraceSetIsMember(trace->arena->flippedTraces, trace)); + CHECKL(RankCheck(trace->band)); + /* @@@@ Assert that mutator is black for trace. */ + break; + + case TraceRECLAIM: + CHECKL(!RingIsSingle(&trace->genRing)); + CHECKL(TraceSetIsMember(trace->arena->flippedTraces, trace)); + /* @@@@ Assert that grey set is empty for trace. */ + break; + + case TraceFINISHED: + CHECKL(TraceSetIsMember(trace->arena->flippedTraces, trace)); + /* @@@@ Assert that grey and white sets is empty for trace. */ + break; + + default: + NOTREACHED; + break; + } + CHECKL(FUNCHECK(trace->fix)); + /* Can't check trace->fixClosure. */ + + /* @@@@ checks for counts missing */ + + /* check pre-allocated messages for this traceid */ + CHECKL(TraceIdMessagesCheck(trace->arena, trace->ti)); + + return TRUE; +} + +/* traceBand - current band of the trace. + * + * The current band is the band currently being discovered. Each band + * corresponds to a rank. The R band is all objects that are reachable + * only by tracing references of rank R or earlier _and_ are not in some + * earlier band (thus, the bands are disjoint). Whilst a particular + * band is current all the objects that become marked are the objects in + * that band. + */ +Rank traceBand(Trace trace) +{ + AVERT(Trace, trace); + + return trace->band; +} + +/* traceBandAdvance - advance to next band. 
+ * + * Advances (increments) the current band to the next band and returns TRUE + * if possible; + * otherwise, there are no more bands, so resets the band state and + * returns FALSE. + */ +Bool traceBandAdvance(Trace trace) +{ + AVER(trace->state == TraceFLIPPED); + + ++trace->band; + trace->firstStretch = TRUE; + if(trace->band >= RankLIMIT) { + trace->band = RankMIN; + return FALSE; + } + EVENT3(TraceBandAdvance, trace->arena, trace, trace->band); + return TRUE; +} + +/* traceBandFirstStretch - whether in first stretch or not. + * + * For a band R (see traceBand) the first stretch is defined as all the + * scanning work done up until the first point where we run out of grey + * rank R segments (and either scan something of an earlier rank or + * change bands). + * + * This function returns TRUE whilst we are in the first stretch, FALSE + * otherwise. + * + * Entering the first stretch is automatically performed by + * traceBandAdvance, but finishing it is detected in traceFindGrey. 
+ */ +Bool traceBandFirstStretch(Trace trace) +{ + return trace->firstStretch; +} + +void traceBandFirstStretchDone(Trace trace) +{ + trace->firstStretch = FALSE; +} + +/* traceUpdateCounts - dumps the counts from a ScanState into the Trace */ + +static void traceUpdateCounts(Trace trace, ScanState ss, + traceAccountingPhase phase) +{ + switch(phase) { + case traceAccountingPhaseRootScan: { + trace->rootScanSize += ss->scannedSize; + STATISTIC(trace->rootCopiedSize += ss->copiedSize); + STATISTIC(++trace->rootScanCount); + break; + } + case traceAccountingPhaseSegScan: { + trace->segScanSize += ss->scannedSize; /* see .work */ + STATISTIC(trace->segCopiedSize += ss->copiedSize); + STATISTIC(++trace->segScanCount); + break; + } + case traceAccountingPhaseSingleScan: { + STATISTIC(trace->singleScanSize += ss->scannedSize); + STATISTIC(trace->singleCopiedSize += ss->copiedSize); + break; + } + default: + NOTREACHED; + } + STATISTIC(trace->fixRefCount += ss->fixRefCount); + STATISTIC(trace->segRefCount += ss->segRefCount); + STATISTIC(trace->whiteSegRefCount += ss->whiteSegRefCount); + STATISTIC(trace->nailCount += ss->nailCount); + STATISTIC(trace->snapCount += ss->snapCount); + STATISTIC(trace->forwardedCount += ss->forwardedCount); + STATISTIC(trace->preservedInPlaceCount += ss->preservedInPlaceCount); +} + + +/* traceSetUpdateCounts -- update counts for a set of traces */ + +static void traceSetUpdateCounts(TraceSet ts, Arena arena, ScanState ss, + traceAccountingPhase phase) +{ + TraceId ti; Trace trace; + + AVERT(ScanState, ss); /* check that we're not copying garbage */ + + TRACE_SET_ITER(ti, trace, ts, arena) + traceUpdateCounts(trace, ss, phase); + TRACE_SET_ITER_END(ti, trace, ts, arena); +} + + +/* traceSetWhiteUnion + * + * Returns a ZoneSet describing the union of the white sets of all the + * specified traces. 
*/ + +static ZoneSet traceSetWhiteUnion(TraceSet ts, Arena arena) +{ + TraceId ti; + Trace trace; + ZoneSet white = ZoneSetEMPTY; + + TRACE_SET_ITER(ti, trace, ts, arena) + white = ZoneSetUnion(white, trace->white); + TRACE_SET_ITER_END(ti, trace, ts, arena); + + return white; +} + + +/* TraceIsEmpty -- return TRUE if trace has no condemned segments + * + * .empty.size: If the trace has a condemned size of zero, then it has + * no white segments, because we don't allow pools to whiten segments + * with no white objects in. + */ + +Bool TraceIsEmpty(Trace trace) +{ + AVERT(Trace, trace); + return trace->condemned == 0; +} + + +/* TraceAddWhite -- add a segment to the white set of a trace */ + +Res TraceAddWhite(Trace trace, Seg seg) +{ + Res res; + Pool pool; + Size condemnedBefore; + + AVERT(Trace, trace); + AVERT(Seg, seg); + AVER(!TraceSetIsMember(SegWhite(seg), trace)); /* .start.black */ + + pool = SegPool(seg); + AVERT(Pool, pool); + + condemnedBefore = trace->condemned; + + /* Give the pool the opportunity to turn the segment white. */ + /* If it fails, unwind. */ + res = SegWhiten(seg, trace); + if(res != ResOK) + return res; + + if (TraceSetIsMember(SegWhite(seg), trace)) { + /* Pools must not condemn empty segments, otherwise we can't tell + when a trace is empty and safe to destroy. See .empty.size. */ + AVER(trace->condemned > condemnedBefore); + + /* Add the segment to the approximation of the white set if the + pool made it white. 
*/ + trace->white = ZoneSetUnion(trace->white, ZoneSetOfSeg(trace->arena, seg)); + + /* if the pool is a moving GC, then condemned objects may move */ + if (PoolHasAttr(pool, AttrMOVINGGC)) { + trace->mayMove = ZoneSetUnion(trace->mayMove, + ZoneSetOfSeg(trace->arena, seg)); + } + } + + return ResOK; +} + + +/* TraceCondemnStart -- start selecting generations to condemn for a trace */ + +void TraceCondemnStart(Trace trace) +{ + AVERT(Trace, trace); + AVER(trace->state == TraceINIT); + AVER(trace->white == ZoneSetEMPTY); + AVER(RingIsSingle(&trace->genRing)); +} + + +/* TraceCondemnEnd -- condemn segments for trace + * + * Condemn the segments in the generations that were selected since + * TraceCondemnStart, and compute the predicted mortality of the + * condemned memory. If successful, update *mortalityReturn and return + * ResOK. + * + * We suspend the mutator threads so that the PoolWhiten methods can + * calculate white sets without the mutator allocating in buffers + * under our feet. See request.dylan.160098 + * . + * + * TODO: Consider how to avoid this suspend in order to implement + * incremental condemn. 
+ */ + +Res TraceCondemnEnd(double *mortalityReturn, Trace trace) +{ + Size casualtySize = 0; + Ring genNode, genNext; + Res res; + + AVER(mortalityReturn != NULL); + AVERT(Trace, trace); + AVER(trace->state == TraceINIT); + AVER(trace->white == ZoneSetEMPTY); + + ShieldHold(trace->arena); + RING_FOR(genNode, &trace->genRing, genNext) { + Size condemnedBefore, condemnedGen; + Ring segNode, segNext; + GenDesc gen = GenDescOfTraceRing(genNode, trace); + AVERT(GenDesc, gen); + condemnedBefore = trace->condemned; + RING_FOR(segNode, &gen->segRing, segNext) { + GCSeg gcseg = RING_ELT(GCSeg, genRing, segNode); + AVERC(GCSeg, gcseg); + res = TraceAddWhite(trace, &gcseg->segStruct); + if (res != ResOK) + goto failBegin; + } + AVER(trace->condemned >= condemnedBefore); + condemnedGen = trace->condemned - condemnedBefore; + casualtySize += (Size)((double)condemnedGen * gen->mortality); + } + ShieldRelease(trace->arena); + + if (TraceIsEmpty(trace)) + return ResFAIL; + + *mortalityReturn = (double)casualtySize / (double)trace->condemned; + return ResOK; + +failBegin: + /* If we successfully whitened one or more segments, but failed to + whiten them all, then the white sets would now be inconsistent. + This can't happen in practice (at time of writing) because all + PoolWhiten methods always succeed. If we ever have a pool class + that fails to whiten a segment, then this assertion will be + triggered. In that case, we'll have to recover here by blackening + the segments again. 
*/ + AVER(TraceIsEmpty(trace)); + ShieldRelease(trace->arena); + return res; +} + + +/* traceFlipBuffers -- flip all buffers in the arena */ + +static void traceFlipBuffers(Globals arena) +{ + Ring nodep, nextp; + + RING_FOR(nodep, &arena->poolRing, nextp) { + Pool pool = RING_ELT(Pool, arenaRing, nodep); + Ring nodeb, nextb; + + AVERT(Pool, pool); + RING_FOR(nodeb, &pool->bufferRing, nextb) { + BufferFlip(RING_ELT(Buffer, poolRing, nodeb)); + } + } +} + + +/* traceScanRootRes -- scan a root, with result code */ + +static Res traceScanRootRes(TraceSet ts, Rank rank, Arena arena, Root root) +{ + ZoneSet white; + Res res; + ScanStateStruct ss; + + white = traceSetWhiteUnion(ts, arena); + + ScanStateInit(&ss, ts, arena, rank, white); + + res = RootScan(&ss, root); + + traceSetUpdateCounts(ts, arena, &ss, traceAccountingPhaseRootScan); + ScanStateFinish(&ss); + return res; +} + + +/* traceScanRoot + * + * Scan a root, entering emergency mode on allocation failure. + */ + +static Res traceScanRoot(TraceSet ts, Rank rank, Arena arena, Root root) +{ + Res res; + + res = traceScanRootRes(ts, rank, arena, root); + + if (ResIsAllocFailure(res)) { + ArenaSetEmergency(arena, TRUE); + res = traceScanRootRes(ts, rank, arena, root); + /* Should be OK in emergency mode */ + AVER(!ResIsAllocFailure(res)); + } + + return res; +} + + +/* traceFlip -- blacken the mutator */ + +struct rootFlipClosureStruct { + TraceSet ts; + Arena arena; + Rank rank; +}; + +static Res rootFlip(Root root, void *p) +{ + struct rootFlipClosureStruct *rf = (struct rootFlipClosureStruct *)p; + Res res; + + AVERT(Root, root); + AVER(p != NULL); + AVERT(TraceSet, rf->ts); + AVERT(Arena, rf->arena); + AVERT(Rank, rf->rank); + + AVER(RootRank(root) <= RankEXACT); /* see .root.rank */ + + if(RootRank(root) == rf->rank) { + res = traceScanRoot(rf->ts, rf->rank, rf->arena, root); + if (res != ResOK) + return res; + } + + return ResOK; +} + + +/* traceFlip -- flip the mutator from grey to black w.r.t. 
a trace + * + * The main job of traceFlip is to scan references which can't be protected + * from the mutator, changing the colour of the mutator from grey to black + * with respect to a trace. The mutator threads are suspended while this + * is happening, and the mutator perceives an instantaneous change in all + * the references, enforced by the shield (barrier) system. + * + * NOTE: We don't have a way to shield the roots, so they are all scanned + * here. This is a coincidence. There is no theoretical reason that the + * roots have to be scanned at flip time, provided we could protect them + * from the mutator. (The thread registers are unlikely ever to be + * protectable on stock hardware, however, as they were -- kind of -- on + * Lisp machines.) + * + * NOTE: Ambiguous references may only exist in roots, because we can't + * shield the exact roots and defer them for later scanning (after ambiguous + * heap references). + * + * NOTE: We don't support weak or final roots because we can't shield them + * and defer scanning until later. See above. + * + * If roots and segments were more similar, we could melt a lot of these + * problems. + */ + +static Res traceFlip(Trace trace) +{ + Ring node, nextNode; + Arena arena; + Rank rank; + struct rootFlipClosureStruct rfc; + Res res; + + AVERT(Trace, trace); + rfc.ts = TraceSetSingle(trace); + + arena = trace->arena; + rfc.arena = arena; + ShieldHold(arena); + + AVER(trace->state == TraceUNFLIPPED); + AVER(!TraceSetIsMember(arena->flippedTraces, trace)); + + EVENT2(TraceFlipBegin, trace, arena); + + traceFlipBuffers(ArenaGlobals(arena)); + + /* Update location dependency structures. */ + /* mayMove is a conservative approximation of the zones of objects */ + /* which may move during this collection. */ + if(trace->mayMove != ZoneSetEMPTY) { + LDAge(arena, trace->mayMove); + } + + /* .root.rank: At the moment we must scan all roots, because we don't have */ + /* a mechanism for shielding them. 
There can't be any weak or final roots */ + /* either, since we must protect these in order to avoid scanning them too */ + /* early, before the pool contents. @@@@ This isn't correct if there are */ + /* higher ranking roots than data in pools. */ + + for(rank = RankMIN; rank <= RankEXACT; ++rank) { + rfc.rank = rank; + res = RootsIterate(ArenaGlobals(arena), rootFlip, (void *)&rfc); + if (res != ResOK) + goto failRootFlip; + } + + /* .flip.alloc: Allocation needs to become black now. While we flip */ + /* at the start, we can get away with always allocating black. This */ + /* needs to change when we flip later (i.e. have a read-barrier */ + /* collector), so that we allocate grey or white before the flip */ + /* and black afterwards. For instance, see */ + /* . */ + /* (surely we mean "write-barrier" not "read-barrier" above? */ + /* drj 2003-02-19) */ + + /* Now that the mutator is black we must prevent it from reading */ + /* grey objects so that it can't obtain white pointers. */ + for(rank = RankMIN; rank < RankLIMIT; ++rank) + RING_FOR(node, ArenaGreyRing(arena, rank), nextNode) { + Seg seg = SegOfGreyRing(node); + SegFlip(seg, trace); + } + + /* @@@@ When write barrier collection is implemented, this is where */ + /* write protection should be removed for all segments which are */ + /* no longer blacker than the mutator. Possibly this can be done */ + /* lazily as they are touched. */ + + /* Mark the trace as flipped. */ + trace->state = TraceFLIPPED; + arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace); + + EVENT2(TraceFlipEnd, trace, arena); + + ShieldRelease(arena); + return ResOK; + +failRootFlip: + ShieldRelease(arena); + return res; +} + + +/* TraceCreate -- create a Trace object + * + * Allocates and initializes a new Trace object with a TraceId which is + * not currently active. + * + * Returns ResLIMIT if there aren't any available trace IDs. 
+ * + * Trace objects are allocated directly from a small array in the arena + * structure which is indexed by the TraceId. This is so that it's + * always possible to start a trace (provided there's a free TraceId) + * even if there's no available memory. + * + * This code is written to be adaptable to allocating Trace objects + * dynamically. */ + +ATTRIBUTE_UNUSED +static void traceCreatePoolGen(GenDesc gen) +{ + Ring n, nn; + RING_FOR(n, &gen->locusRing, nn) { + PoolGen pgen = RING_ELT(PoolGen, genRing, n); + EVENT11(TraceCreatePoolGen, gen, gen->capacity, gen->mortality, gen->zones, + pgen->pool, pgen->totalSize, pgen->freeSize, pgen->newSize, + pgen->oldSize, pgen->newDeferredSize, pgen->oldDeferredSize); + } +} + +Res TraceCreate(Trace *traceReturn, Arena arena, TraceStartWhy why) +{ + TraceId ti; + Trace trace; + + AVER(traceReturn != NULL); + AVERT(Arena, arena); + + /* Find a free trace ID */ + TRACE_SET_ITER(ti, trace, TraceSetComp(arena->busyTraces), arena) + goto found; + TRACE_SET_ITER_END(ti, trace, TraceSetComp(arena->busyTraces), arena); + return ResLIMIT; /* no trace IDs available */ + +found: + trace = ArenaTrace(arena, ti); + AVER(trace->sig == SigInvalid); /* */ + + trace->arena = arena; + trace->why = why; + trace->white = ZoneSetEMPTY; + trace->mayMove = ZoneSetEMPTY; + trace->ti = ti; + trace->state = TraceINIT; + trace->band = RankMIN; + trace->fix = SegFix; + trace->fixClosure = NULL; + RingInit(&trace->genRing); + STATISTIC(trace->preTraceArenaReserved = ArenaReserved(arena)); + trace->condemned = (Size)0; /* nothing condemned yet */ + trace->notCondemned = (Size)0; + trace->foundation = (Size)0; /* nothing grey yet */ + trace->quantumWork = (Work)0; /* computed in TraceStart */ + STATISTIC(trace->greySegCount = (Count)0); + STATISTIC(trace->greySegMax = (Count)0); + STATISTIC(trace->rootScanCount = (Count)0); + trace->rootScanSize = (Size)0; + STATISTIC(trace->rootCopiedSize = (Size)0); + STATISTIC(trace->segScanCount = (Count)0); + 
trace->segScanSize = (Size)0; /* see .work */ + STATISTIC(trace->segCopiedSize = (Size)0); + STATISTIC(trace->singleScanCount = (Count)0); + STATISTIC(trace->singleScanSize = (Size)0); + STATISTIC(trace->singleCopiedSize = (Size)0); + STATISTIC(trace->fixRefCount = (Count)0); + STATISTIC(trace->segRefCount = (Count)0); + STATISTIC(trace->whiteSegRefCount = (Count)0); + STATISTIC(trace->nailCount = (Count)0); + STATISTIC(trace->snapCount = (Count)0); + STATISTIC(trace->readBarrierHitCount = (Count)0); + STATISTIC(trace->pointlessScanCount = (Count)0); + STATISTIC(trace->forwardedCount = (Count)0); + trace->forwardedSize = (Size)0; /* see .message.data */ + STATISTIC(trace->preservedInPlaceCount = (Count)0); + trace->preservedInPlaceSize = (Size)0; /* see .message.data */ + STATISTIC(trace->reclaimCount = (Count)0); + STATISTIC(trace->reclaimSize = (Size)0); + trace->sig = TraceSig; + arena->busyTraces = TraceSetAdd(arena->busyTraces, trace); + AVERT(Trace, trace); + + EVENT3(TraceCreate, trace, arena, (EventFU)why); + + STATISTIC({ + /* Iterate over all chains, all GenDescs within a chain, and all + * PoolGens within a GenDesc. */ + Ring node; + Ring nextNode; + + RING_FOR(node, &arena->chainRing, nextNode) { + Chain chain = RING_ELT(Chain, chainRing, node); + Index i; + for (i = 0; i < chain->genCount; ++i) { + GenDesc gen = &chain->gens[i]; + traceCreatePoolGen(gen); + } + } + + /* Now do topgen GenDesc, and all PoolGens within it. 
*/ + traceCreatePoolGen(&arena->topGen); + }); + + *traceReturn = trace; + return ResOK; +} + + +/* traceDestroyCommon -- common functionality for TraceDestroy* */ + +static void traceDestroyCommon(Trace trace) +{ + Ring node, nextNode; + + RING_FOR(node, &trace->genRing, nextNode) { + GenDesc gen = GenDescOfTraceRing(node, trace); + GenDescEndTrace(gen, trace); + } + RingFinish(&trace->genRing); + + /* Ensure that address space is returned to the operating system for + * traces that don't have any condemned objects (there might be + * manually allocated objects that were freed). See job003999. */ + ArenaCompact(trace->arena, trace); + + EVENT2(TraceDestroy, trace->arena, trace); + + /* Hopefully the trace reclaimed some memory, so clear any emergency. + * Do this before removing the trace from busyTraces, to avoid + * violating . */ + ArenaSetEmergency(trace->arena, FALSE); + + trace->sig = SigInvalid; + trace->arena->busyTraces = TraceSetDel(trace->arena->busyTraces, trace); + trace->arena->flippedTraces = TraceSetDel(trace->arena->flippedTraces, trace); +} + + +/* TraceDestroyInit -- destroy a trace object in state INIT */ + +void TraceDestroyInit(Trace trace) +{ + AVERT(Trace, trace); + AVER(trace->state == TraceINIT); + AVER(trace->condemned == 0); + AVER(!TraceSetIsMember(trace->arena->flippedTraces, trace)); + + traceDestroyCommon(trace); +} + + +/* TraceDestroyFinished -- destroy a trace object in state FINISHED + * + * Finish and deallocate a Trace object, freeing up a TraceId. + * + * This code does not allow a Trace to be destroyed while it is active. + * It would be possible to allow this, but the colours of segments + * etc. would need to be reset to black. This also means the error + * paths in this file don't work. 
@@@@ */ + +void TraceDestroyFinished(Trace trace) +{ + AVERT(Trace, trace); + AVER(trace->state == TraceFINISHED); + + STATISTIC(EVENT14(TraceStatScan, trace, trace->arena, + trace->rootScanCount, trace->rootScanSize, + trace->rootCopiedSize, + trace->segScanCount, trace->segScanSize, + trace->segCopiedSize, + trace->singleScanCount, trace->singleScanSize, + trace->singleCopiedSize, + trace->readBarrierHitCount, trace->greySegMax, + trace->pointlessScanCount)); + STATISTIC(EVENT11(TraceStatFix, trace, trace->arena, + trace->fixRefCount, trace->segRefCount, + trace->whiteSegRefCount, + trace->nailCount, trace->snapCount, + trace->forwardedCount, trace->forwardedSize, + trace->preservedInPlaceCount, + trace->preservedInPlaceSize)); + STATISTIC(EVENT4(TraceStatReclaim, trace, trace->arena, + trace->reclaimCount, trace->reclaimSize)); + + traceDestroyCommon(trace); +} + + +/* traceReclaim -- reclaim the remaining objects white for this trace */ + +static void traceReclaim(Trace trace) +{ + Arena arena; + Ring genNode, genNext; + + AVER(trace->state == TraceRECLAIM); + + + arena = trace->arena; + EVENT2(TraceReclaim, trace, arena); + RING_FOR(genNode, &trace->genRing, genNext) { + Ring segNode, segNext; + GenDesc gen = GenDescOfTraceRing(genNode, trace); + AVERT(GenDesc, gen); + RING_FOR(segNode, &gen->segRing, segNext) { + GCSeg gcseg = RING_ELT(GCSeg, genRing, segNode); + Seg seg = &gcseg->segStruct; + + /* There shouldn't be any grey stuff left for this trace. */ + AVER_CRITICAL(!TraceSetIsMember(SegGrey(seg), trace)); + if (TraceSetIsMember(SegWhite(seg), trace)) { + Addr base = SegBase(seg); + AVER_CRITICAL(PoolHasAttr(SegPool(seg), AttrGC)); + STATISTIC(++trace->reclaimCount); + SegReclaim(seg, trace); + + /* If the segment still exists, it should no longer be white. */ + /* Note that the seg returned by this SegOfAddr may not be */ + /* the same as the one above, but in that case it's new and */ + /* still shouldn't be white for this trace. 
*/ + + /* The code from the class-specific reclaim methods to */ + /* unwhiten the segment could in fact be moved here. */ + { + Seg nonWhiteSeg = NULL; /* prevents compiler warning */ + AVER_CRITICAL(!SegOfAddr(&nonWhiteSeg, arena, base) + || !TraceSetIsMember(SegWhite(nonWhiteSeg), trace)); + UNUSED(nonWhiteSeg); /* */ + } + } + } + } + + trace->state = TraceFINISHED; + + ArenaCompact(arena, trace); /* let arenavm drop chunks */ + + TracePostMessage(trace); /* trace end */ + /* Immediately pre-allocate messages for next time; failure is okay */ + (void)TraceIdMessagesCreate(arena, trace->ti); +} + +/* TraceRankForAccess -- Returns rank to scan at if we hit a barrier. + * + * We assume a single trace as otherwise we need to implement rank + * filters on scanning. + * + * .scan.conservative: It's safe to scan at EXACT unless the band is + * WEAK and in that case the segment should be weak. + * + * If the trace band is EXACT then we scan EXACT. This might prevent + * finalisation messages and may preserve objects pointed to only by weak + * references but tough luck -- the mutator wants to look. + * + * If the trace band is FINAL and the segment is FINAL, we scan it FINAL. + * Any objects not yet preserved deserve to die, and we're only giving + * them a temporary reprieve. All the objects on the segment should be FINAL, + * otherwise they might get sent finalization messages. + * + * If the trace band is FINAL, and the segment is not FINAL, we scan at EXACT. + * This is safe to do for FINAL and WEAK references. + * + * If the trace band is WEAK then the segment must be weak only, and we + * scan at WEAK. All other segments for this trace should be scanned by now. + * We must scan at WEAK to avoid bringing any objects back to life. + * + * See the message + * for a description of these semantics. 
+ */ +Rank TraceRankForAccess(Arena arena, Seg seg) +{ + TraceSet ts; + Trace trace; + TraceId ti; + Rank band; + RankSet rankSet; + + AVERT(Arena, arena); + AVERT(Seg, seg); + + band = RankLIMIT; /* initialize with invalid rank */ + ts = arena->flippedTraces; + AVER(TraceSetIsSingle(ts)); + TRACE_SET_ITER(ti, trace, ts, arena) + band = traceBand(trace); + TRACE_SET_ITER_END(ti, trace, ts, arena); + rankSet = SegRankSet(seg); + switch(band) { + case RankAMBIG: + NOTREACHED; + break; + case RankEXACT: + return RankEXACT; + case RankFINAL: + if(rankSet == RankSetSingle(RankFINAL)) { + return RankFINAL; + } + /* It's safe to scan at exact in the final band so do so if there are + * any non-final references. */ + return RankEXACT; + case RankWEAK: + AVER(rankSet == RankSetSingle(RankWEAK)); + return RankWEAK; + default: + NOTREACHED; + break; + } + NOTREACHED; + return RankEXACT; +} + +/* traceFindGrey -- find a grey segment + * + * This function finds the next segment to scan. It does this according + * to the current band of the trace. See design/trace/ + * + * This code also performs various checks about the ranks of the object + * graph. Explanations of the checks would litter the code, so the + * explanations are here, and the code references these. + * + * .check.ambig.not: RankAMBIG segments never appear on the grey ring. + * The current tracer cannot support ambiguous reference except as + * roots, so it's a bug if we ever find any. This behaviour is not set + * in stone, it's possible to imagine changing the tracer so that we can + * support ambiguous objects one day. For example, a fully conservative + * non-moving mode. + * + * .check.band.begin: At the point where we start working on a new band + * of Rank R, there are no grey objects at earlier ranks. If there + * were, we would've found them whilst the current band was the previous + * band. We don't check this, but I rely on this fact in the next + * check, .check.weak.no-preserve. 
+ * + * .check.weak.band: Weak references cannot cause objects to be + * newly preserved (marked). Because of .check.band.begin all the + * scanning work performed when the current band is a weak rank will be + * scanning objects at that rank. There is currently only one weak + * rank, RankWEAK. + * + * .check.final.one-pass: Because all the RankFINAL references are + * allocated in PoolMRG and effectively treated as roots, all the + * RankFINAL references will be scanned in one push (possibly split up, + * incrementally). Once they have been scanned, no new RankFINAL + * references will be discovered (the mutator is not permitted to + * allocate RankFINAL references wherever they like). In fact because + * of various coincidences (no Ambig segments so band Exact never + * discovers an Ambig segment and then more Exact segments; the only + * other rank is weak so never discovers any new segments) it is the + * case that for any band R there is an initial burst of scanning + * segments at rank R then after that we see no more rank R segments + * whilst working in this band. That's what we check, although we + * expect to have to change the check if we introduce more ranks, or + * start changing the semantics of them. A flag is used to implement + * this check. See . + * + * For further discussion on the semantics of rank based tracing see + * + */ + +static Bool traceFindGrey(Seg *segReturn, Rank *rankReturn, + Arena arena, TraceId ti) +{ + Rank rank; + Trace trace; + Ring node, nextNode; + + AVER(segReturn != NULL); + AVERT(TraceId, ti); + + trace = ArenaTrace(arena, ti); + + while(1) { + Rank band = traceBand(trace); + + /* Within the R band we look for segments of rank R first, */ + /* then successively earlier ones. Slight hack: We never */ + /* expect to find any segments of RankAMBIG, so we use */ + /* this as a terminating condition for the loop. 
*/ + for(rank = band; rank > RankAMBIG; --rank) { + RING_FOR(node, ArenaGreyRing(arena, rank), nextNode) { + Seg seg = SegOfGreyRing(node); + + AVERT(Seg, seg); + AVER(SegGrey(seg) != TraceSetEMPTY); + AVER(RankSetIsMember(SegRankSet(seg), rank)); + + if(TraceSetIsMember(SegGrey(seg), trace)) { + /* .check.band.weak */ + AVER(band != RankWEAK || rank == band); + if(rank != band) { + traceBandFirstStretchDone(trace); + } else { + /* .check.final.one-pass */ + AVER(traceBandFirstStretch(trace)); + } + *segReturn = seg; + *rankReturn = rank; + EVENT4(TraceFindGrey, arena, trace, seg, rank); + return TRUE; + } + } + } + /* .check.ambig.not */ + AVER(RingIsSingle(ArenaGreyRing(arena, RankAMBIG))); + if(!traceBandAdvance(trace)) { + /* No grey segments for this trace. */ + return FALSE; + } + } +} + + +/* ScanStateSetSummary -- set the summary of scanned references + * + * This function sets unfixedSummary and fixedSummary such that + * ScanStateSummary will return the summary passed. Subsequently fixed + * references are accumulated into this result. */ + +void ScanStateSetSummary(ScanState ss, RefSet summary) +{ + AVERT(ScanState, ss); + /* Can't check summary, as it can be anything. */ + + ScanStateSetUnfixedSummary(ss, RefSetEMPTY); + ss->fixedSummary = summary; + AVER(ScanStateSummary(ss) == summary); +} + + +/* ScanStateSummary -- calculate the summary of scanned references + * + * The summary of the scanned references is the summary of the unfixed + * references, minus the white set, plus the summary of the fixed + * references. This is because TraceFix is called for all references in + * the white set, and accumulates a summary of references after they + * have been fixed. 
*/ + +RefSet ScanStateSummary(ScanState ss) +{ + AVERT(ScanState, ss); + + return RefSetUnion(ss->fixedSummary, + RefSetDiff(ScanStateUnfixedSummary(ss), + ScanStateWhite(ss))); +} + + +/* ScanStateUpdateSummary -- update segment summary after scan + * + * wasTotal is TRUE if we know that all references were scanned, FALSE + * if some references might not have been scanned. + */ +void ScanStateUpdateSummary(ScanState ss, Seg seg, Bool wasTotal) +{ + RefSet summary; + + AVERT(ScanState, ss); + AVERT(Seg, seg); + AVERT(Bool, wasTotal); + + /* Only apply the write barrier if it is not deferred. */ + if (seg->defer == 0) { + /* If we scanned every reference in the segment then we have a + complete summary we can set. Otherwise, we just have + information about more zones that the segment refers to. */ + if (wasTotal) + summary = ScanStateSummary(ss); + else + summary = RefSetUnion(SegSummary(seg), ScanStateSummary(ss)); + } else { + summary = RefSetUNIV; + } + SegSetSummary(seg, summary); +} + +/* traceScanSegRes -- scan a segment to remove greyness + * + * @@@@ During scanning, the segment should be write-shielded to prevent + * any other threads from updating it while fix is being applied to it + * (because fix is not atomic). At the moment, we don't bother, because + * we know that all threads are suspended. */ + +static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg) +{ + Bool wasTotal; + ZoneSet white; + Res res; + + /* The reason for scanning a segment is that it's grey. */ + AVER(TraceSetInter(ts, SegGrey(seg)) != TraceSetEMPTY); + + white = traceSetWhiteUnion(ts, arena); + + /* Only scan a segment if it refers to the white set. */ + if(ZoneSetInter(white, SegSummary(seg)) == ZoneSetEMPTY) { + SegBlacken(seg, ts); + /* Setup result code to return later. 
*/ + res = ResOK; + } else { /* scan it */ + ScanStateStruct ssStruct; + ScanState ss = &ssStruct; + ScanStateInitSeg(ss, ts, arena, rank, white, seg); + + /* Expose the segment to make sure we can scan it. */ + ShieldExpose(arena, seg); + res = SegScan(&wasTotal, seg, ss); + /* Cover, regardless of result */ + ShieldCover(arena, seg); + + traceSetUpdateCounts(ts, arena, ss, traceAccountingPhaseSegScan); + /* Count segments scanned pointlessly */ + STATISTIC({ + TraceId ti; Trace trace; + Count whiteSegRefCount = 0; + + TRACE_SET_ITER(ti, trace, ts, arena) + whiteSegRefCount += trace->whiteSegRefCount; + TRACE_SET_ITER_END(ti, trace, ts, arena); + if(whiteSegRefCount == 0) + TRACE_SET_ITER(ti, trace, ts, arena) + ++trace->pointlessScanCount; + TRACE_SET_ITER_END(ti, trace, ts, arena); + }); + + /* Following is true whether or not scan was total. */ + /* . */ + /* .verify.segsummary: were the seg contents, as found by this + * scan, consistent with the recorded SegSummary? + */ + AVER(RefSetSub(ScanStateUnfixedSummary(ss), SegSummary(seg))); /* */ + + /* Write barrier deferral -- see . */ + /* Did the segment refer to the white set? */ + if (ZoneSetInter(ScanStateUnfixedSummary(ss), white) == ZoneSetEMPTY) { + /* Boring scan. One step closer to raising the write barrier. */ + if (seg->defer > 0) + --seg->defer; + } else { + /* Interesting scan. Defer raising the write barrier. */ + if (seg->defer < WB_DEFER_DELAY) + seg->defer = WB_DEFER_DELAY; + } + + ScanStateUpdateSummary(ss, seg, res == ResOK && wasTotal); + ScanStateFinish(ss); + } + + if(res == ResOK) { + /* The segment is now black only if scan was successful. */ + /* Remove the greyness from it. */ + SegSetGrey(seg, TraceSetDiff(SegGrey(seg), ts)); + } + + return res; +} + + +/* traceScanSeg + * + * Scans a segment, switching to emergency mode if there is an allocation + * failure. 
+ */ + +static Res traceScanSeg(TraceSet ts, Rank rank, Arena arena, Seg seg) +{ + Res res; + + res = traceScanSegRes(ts, rank, arena, seg); + if(ResIsAllocFailure(res)) { + ArenaSetEmergency(arena, TRUE); + res = traceScanSegRes(ts, rank, arena, seg); + /* Should be OK in emergency mode. */ + AVER(!ResIsAllocFailure(res)); + } + + return res; +} + + +/* TraceSegAccess -- handle barrier hit on a segment */ + +void TraceSegAccess(Arena arena, Seg seg, AccessSet mode) +{ + Res res; + AccessSet shieldHit; + Bool readHit, writeHit; + + AVERT(Arena, arena); + AVERT(Seg, seg); + AVERT(AccessSet, mode); + + shieldHit = BS_INTER(mode, SegSM(seg)); + readHit = BS_INTER(shieldHit, AccessREAD) != AccessSetEMPTY; + writeHit = BS_INTER(shieldHit, AccessWRITE) != AccessSetEMPTY; + + /* If it's a read access, then the segment must be grey for a trace */ + /* which is flipped. */ + AVER(!readHit || + TraceSetInter(SegGrey(seg), arena->flippedTraces) != TraceSetEMPTY); + + /* If it's a write access, then the segment must have a summary that */ + /* is smaller than the mutator's summary (which is assumed to be */ + /* RefSetUNIV). */ + AVER(!writeHit || SegSummary(seg) != RefSetUNIV); + + EVENT3(TraceAccess, arena, seg, mode); + + /* Write barrier deferral -- see . */ + if (writeHit) + seg->defer = WB_DEFER_HIT; + + if (readHit) { + Rank rank; + TraceSet traces; + + AVER(SegRankSet(seg) != RankSetEMPTY); + + /* Pick set of traces to scan for: */ + traces = arena->flippedTraces; + rank = TraceRankForAccess(arena, seg); + res = traceScanSeg(traces, rank, arena, seg); + + /* Allocation failures should be handled my emergency mode, and we don't + expect any other kind of failure in a normal GC that causes access + faults. */ + AVER(res == ResOK); + + /* The pool should've done the job of removing the greyness that */ + /* was causing the segment to be protected, so that the mutator */ + /* can go ahead and access it. 
*/ + AVER(TraceSetInter(SegGrey(seg), traces) == TraceSetEMPTY); + + STATISTIC({ + Trace trace; + TraceId ti; + TRACE_SET_ITER(ti, trace, traces, arena) + ++trace->readBarrierHitCount; + TRACE_SET_ITER_END(ti, trace, traces, arena); + }); + } + + /* The write barrier handling must come after the read barrier, */ + /* because the latter may set the summary and raise the write barrier. */ + if (writeHit) + SegSetSummary(seg, RefSetUNIV); + + /* The segment must now be accessible. */ + AVER(BS_INTER(mode, SegSM(seg)) == AccessSetEMPTY); +} + + +/* _mps_fix2 (a.k.a. "TraceFix") -- second stage of fixing a reference + * + * _mps_fix2 is on the [critical path](../design/critical-path.txt). A + * one-instruction difference in the early parts of this code will have a + * significant impact on overall run time. The priority is to eliminate + * irrelevant references early and fast using the colour information stored + * in the tract table. + * + * The name "TraceFix" is pervasive in the MPS and its documents to describe + * this function. Optimisation and strict aliasing rules have meant that we + * need to use the external name for it here. + */ + +mps_res_t _mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io) +{ + ScanState ss = PARENT(ScanStateStruct, ss_s, mps_ss); + Ref ref; + Chunk chunk; + Index i; + Tract tract; + Seg seg; + Res res; + + /* Special AVER macros are used on the critical path. */ + /* */ + AVERT_CRITICAL(ScanState, ss); + AVER_CRITICAL(mps_ref_io != NULL); + + ref = (Ref)*mps_ref_io; + + /* The zone test should already have been passed by MPS_FIX1 in mps.h. 
*/ + AVER_CRITICAL(ZoneSetInter(ScanStateWhite(ss), + ZoneSetAddAddr(ss->arena, ZoneSetEMPTY, ref)) != + ZoneSetEMPTY); + + STATISTIC(++ss->fixRefCount); + EVENT_CRITICAL4(TraceFix, ss, mps_ref_io, ref, ss->rank); + + /* This sequence of tests is equivalent to calling TractOfAddr(), + * but inlined so that we can distinguish between "not pointing to + * chunk" and "pointing to chunk but not to tract" so that we can + * check the rank in the latter case. See + * + * + * If compilers fail to do a good job of inlining ChunkOfAddr and + * TreeFind then it may become necessary to inline at least the + * comparison against the root of the tree. See + * + */ + if (!ChunkOfAddr(&chunk, ss->arena, ref)) + /* Reference points outside MPS-managed address space: ignore. */ + goto done; + + i = INDEX_OF_ADDR(chunk, ref); + if (!BTGet(chunk->allocTable, i)) { + /* Reference points into a chunk but not to an allocated tract. + * */ + AVER_CRITICAL(ss->rank < RankEXACT); /* */ + goto done; + } + + tract = PageTract(&chunk->pageTable[i]); + if (!TRACT_SEG(&seg, tract)) { + /* Reference points to a tract but not a segment, so it can't be white. */ + goto done; + } + + /* See for where we arrange to fool + this test when walking references in the roots. */ + if (TraceSetInter(SegWhite(seg), ss->traces) == TraceSetEMPTY) { + /* Reference points to a segment that is not white for any of the + * active traces. */ + STATISTIC({ + ++ss->segRefCount; + EVENT_CRITICAL1(TraceFixSeg, seg); + }); + goto done; + } + + STATISTIC(++ss->segRefCount); + STATISTIC(++ss->whiteSegRefCount); + EVENT_CRITICAL1(TraceFixSeg, seg); + res = (*ss->fix)(seg, ss, &ref); + if (res != ResOK) { + /* SegFixEmergency must not fail. 
*/ + AVER_CRITICAL(ss->fix != SegFixEmergency); + /* Fix protocol (de facto): if Fix fails, ref must be unchanged + * Justification for this restriction: + * A: it simplifies; + * B: it's reasonable (given what may cause Fix to fail); + * C: the code (here) already assumes this: it returns without + * updating ss->fixedSummary. RHSK 2007-03-21. + */ + AVER_CRITICAL(ref == (Ref)*mps_ref_io); + return res; + } + +done: + /* */ + ss->fixedSummary = RefSetAdd(ss->arena, ss->fixedSummary, ref); + + *mps_ref_io = (mps_addr_t)ref; + return ResOK; +} + + +/* traceScanSingleRefRes -- scan a single reference, with result code */ + +static Res traceScanSingleRefRes(TraceSet ts, Rank rank, Arena arena, + Seg seg, Ref *refIO) +{ + RefSet summary; + ZoneSet white; + Res res; + ScanStateStruct ss; + + EVENT4(TraceScanSingleRef, ts, rank, arena, refIO); + + white = traceSetWhiteUnion(ts, arena); + if(ZoneSetInter(SegSummary(seg), white) == ZoneSetEMPTY) { + return ResOK; + } + + ScanStateInit(&ss, ts, arena, rank, white); + ShieldExpose(arena, seg); + + TRACE_SCAN_BEGIN(&ss) { + res = TRACE_FIX12(&ss, refIO); + } TRACE_SCAN_END(&ss); + ss.scannedSize = sizeof *refIO; + + summary = SegSummary(seg); + summary = RefSetAdd(arena, summary, *refIO); + SegSetSummary(seg, summary); + ShieldCover(arena, seg); + + traceSetUpdateCounts(ts, arena, &ss, traceAccountingPhaseSingleScan); + ScanStateFinish(&ss); + + return res; +} + + +/* TraceScanSingleRef -- scan a single reference + * + * This one can't fail. It may put the traces into emergency mode in + * order to achieve this. 
*/ + +void TraceScanSingleRef(TraceSet ts, Rank rank, Arena arena, + Seg seg, Ref *refIO) +{ + Res res; + + AVERT(TraceSet, ts); + AVERT(Rank, rank); + AVERT(Arena, arena); + AVERT(Seg, seg); + AVER(refIO != NULL); + + res = traceScanSingleRefRes(ts, rank, arena, seg, refIO); + if(res != ResOK) { + ArenaSetEmergency(arena, TRUE); + res = traceScanSingleRefRes(ts, rank, arena, seg, refIO); + /* Ought to be OK in emergency mode now. */ + } + AVER(ResOK == res); +} + + +/* TraceScanFormat -- scan a formatted area of memory for references + * + * This is a wrapper for format scanning functions, which should not + * otherwise be called directly from within the MPS. This function + * checks arguments and takes care of accounting for the scanned + * memory. + */ +Res TraceScanFormat(ScanState ss, Addr base, Addr limit) +{ + AVERT_CRITICAL(ScanState, ss); + AVER_CRITICAL(base != NULL); + AVER_CRITICAL(limit != NULL); + AVER_CRITICAL(base < limit); + + /* scannedSize is accumulated whether or not ss->formatScan + * succeeds, so it's safe to accumulate now so that we can tail-call + * ss->formatScan. */ + ss->scannedSize += AddrOffset(base, limit); + + return ss->formatScan(&ss->ss_s, base, limit); +} + + +/* TraceScanArea -- scan an area of memory for references + * + * This is a wrapper for area scanning functions, which should not + * otherwise be called directly from within the MPS. This function + * checks arguments and takes care of accounting for the scanned + * memory. + */ +Res TraceScanArea(ScanState ss, Word *base, Word *limit, + mps_area_scan_t scan_area, + void *closure) +{ + AVERT(ScanState, ss); + AVER(base != NULL); + AVER(limit != NULL); + AVER(base < limit); + + EVENT3(TraceScanArea, ss, base, limit); + + /* scannedSize is accumulated whether or not scan_area succeeds, so + it's safe to accumulate now so that we can tail-call + scan_area. 
*/ + ss->scannedSize += AddrOffset(base, limit); + + return scan_area(&ss->ss_s, base, limit, closure); +} + + +/* TraceStart -- condemn a set of objects and start collection + * + * TraceStart should be passed a trace with state TraceINIT, i.e., + * recently returned from TraceCreate, with some condemned segments + * added. mortality is the fraction of the condemned set expected not + * to survive. finishingTime is relative to the current polling clock, + * see . + * + * .start.black: All segments are black w.r.t. a newly allocated trace. + * However, if TraceStart initialized segments to black when it + * calculated the grey set then this condition could be relaxed, making + * it easy to destroy traces half-way through. */ + +static Res rootGrey(Root root, void *p) +{ + Trace trace = (Trace)p; + + AVERT(Root, root); + AVERT(Trace, trace); + + if(ZoneSetInter(RootSummary(root), trace->white) != ZoneSetEMPTY) { + RootGrey(root, trace); + } + + return ResOK; +} + + +/* TraceStart -- start a trace whose white set has been established + * + * The main job of TraceStart is to set up the grey list for a trace. The + * trace is first created with TraceCreate, objects are whitened, then + * TraceStart is called to initialise the tracing process. + * + * NOTE: At present, TraceStart also flips the mutator, so there is no + * grey-mutator tracing. + */ + +Res TraceStart(Trace trace, double mortality, double finishingTime) +{ + Arena arena; + Res res; + Seg seg; + + AVERT(Trace, trace); + AVER(trace->state == TraceINIT); + AVER(0.0 <= mortality); + AVER(mortality <= 1.0); + AVER(finishingTime >= 0.0); + AVER(trace->condemned > 0); + + arena = trace->arena; + + /* From the already set up white set, derive a grey set. */ + + /* @@@@ Instead of iterating over all the segments, we could */ + /* iterate over all pools which are scannable and thence over */ + /* all their segments. This might be better if the minority */ + /* of segments are scannable. 
Perhaps we should choose */
+  /* dynamically which method to use. */
+
+  if(SegFirst(&seg, arena)) {
+    do {
+      Size size = SegSize(seg);
+      AVER(!TraceSetIsMember(SegGrey(seg), trace));
+
+      /* A segment can only be grey if it contains some references. */
+      /* This is indicated by the rankSet being non-empty. Such */
+      /* segments may only belong to scannable pools. */
+      if(SegRankSet(seg) != RankSetEMPTY) {
+        /* Turn the segment grey if there might be a reference in it */
+        /* to the white set. This is done by seeing if the summary */
+        /* of references in the segment intersects with the */
+        /* approximation to the white set. */
+        if(ZoneSetInter(SegSummary(seg), trace->white) != ZoneSetEMPTY) {
+          /* Note: can a white seg get greyed as well? At this point */
+          /* we still assume it may. (This assumption runs out in */
+          /* PoolTrivGrey). */
+          SegGreyen(seg, trace);
+          if(TraceSetIsMember(SegGrey(seg), trace)) {
+            trace->foundation += size;
+          }
+        }
+
+        if(PoolHasAttr(SegPool(seg), AttrGC)
+           && !TraceSetIsMember(SegWhite(seg), trace))
+        {
+          trace->notCondemned += size;
+        }
+      }
+    } while (SegNext(&seg, arena, seg));
+  }
+
+  res = RootsIterate(ArenaGlobals(arena), rootGrey, (void *)trace);
+  AVER(res == ResOK);
+
+  /* Calculate the rate of scanning. */
+  {
+    Size sSurvivors = (Size)((double)trace->condemned * (1.0 - mortality));
+    double nPolls = finishingTime / ArenaPollALLOCTIME;
+
+    /* There must be at least one poll. */
+    if(nPolls < 1.0)
+      nPolls = 1.0;
+    /* We use casting to long to truncate nPolls down to the nearest */
+    /* integer, so try to make sure it fits. */
+    if(nPolls >= (double)LONG_MAX)
+      nPolls = (double)LONG_MAX;
+    /* One quantum of work equals total tracing work divided by number
+     * of polls, plus one to ensure it's not zero. */
+    trace->quantumWork
+      = (trace->foundation + sSurvivors) / (unsigned long)nPolls + 1;
+  }
+
+  /* TODO: compute rate of scanning here.
*/ + + EVENT9(TraceStart, trace->arena, trace, mortality, finishingTime, + trace->condemned, trace->notCondemned, trace->foundation, + trace->white, trace->quantumWork); + + trace->state = TraceUNFLIPPED; + TracePostStartMessage(trace); + + /* All traces must flip at beginning at the moment. */ + return traceFlip(trace); +} + + +/* traceWork -- a measure of the work done for this trace. + * + * . + */ + +#define traceWork(trace) ((Work)((trace)->segScanSize + (trace)->rootScanSize)) + + +/* TraceAdvance -- progress a trace by one step */ + +void TraceAdvance(Trace trace) +{ + Arena arena; + Work oldWork, newWork; + + AVERT(Trace, trace); + arena = trace->arena; + oldWork = traceWork(trace); + + switch (trace->state) { + case TraceUNFLIPPED: + /* all traces are flipped in TraceStart at the moment */ + NOTREACHED; + break; + case TraceFLIPPED: { + Seg seg; + Rank rank; + + if (traceFindGrey(&seg, &rank, arena, trace->ti)) { + Res res; + res = traceScanSeg(TraceSetSingle(trace), rank, arena, seg); + /* Allocation failures should be handled by emergency mode, and we + * don't expect any other error in a normal GC trace. */ + AVER(res == ResOK); + } else { + trace->state = TraceRECLAIM; + } + break; + } + case TraceRECLAIM: + traceReclaim(trace); + break; + default: + NOTREACHED; + break; + } + + newWork = traceWork(trace); + AVER(newWork >= oldWork); + arena->tracedWork += (double)(newWork - oldWork); +} + + +/* TraceStartCollectAll: start a trace which condemns everything in + * the arena. + * + * "why" is a TraceStartWhy* enum member that specifies why the + * collection is starting. 
*/ + +Res TraceStartCollectAll(Trace *traceReturn, Arena arena, TraceStartWhy why) +{ + Trace trace = NULL; + Res res; + double mortality, finishingTime; + Ring chainNode, chainNext; + + AVERT(Arena, arena); + AVER(arena->busyTraces == TraceSetEMPTY); + + res = TraceCreate(&trace, arena, why); + AVER(res == ResOK); /* succeeds because no other trace is busy */ + + TraceCondemnStart(trace); + + /* Condemn all generations in all chains, plus the top generation. */ + RING_FOR(chainNode, &arena->chainRing, chainNext) { + size_t i; + Chain chain = RING_ELT(Chain, chainRing, chainNode); + AVERT(Chain, chain); + for (i = 0; i < chain->genCount; ++i) { + GenDesc gen = &chain->gens[i]; + AVERT(GenDesc, gen); + GenDescStartTrace(gen, trace); + } + } + GenDescStartTrace(&arena->topGen, trace); + + res = TraceCondemnEnd(&mortality, trace); + if(res != ResOK) /* should try some other trace, really @@@@ */ + goto failCondemn; + finishingTime = (double)ArenaAvail(arena) + - (double)trace->condemned * (1.0 - mortality); + if(finishingTime < 0) { + /* Run out of time, should really try a smaller collection. @@@@ */ + finishingTime = 0.0; + } + res = TraceStart(trace, mortality, finishingTime); + if (res != ResOK) + goto failStart; + *traceReturn = trace; + return ResOK; + +failStart: + /* TODO: We can't back-out from a failed TraceStart that has + already done some scanning, so this error path is somewhat bogus if it + destroys the trace. In the current system, TraceStartCollectAll is + only used for a normal GC, so TraceStart should not fail and this case + should never be reached. There's a chance the mutator will survive + if the assertion isn't hit, so drop through anyway. */ + NOTREACHED; +failCondemn: + TraceDestroyInit(trace); + return res; +} + + +/* TracePoll -- Check if there's any tracing work to be done + * + * Consider starting a trace if none is running; advance the running + * trace (if any) by one quantum. 
+ * + * The collectWorldReturn and collectWorldAllowed arguments are as for + * PolicyStartTrace. + * + * If there may be more work to do, update *workReturn with a measure + * of the work done and return TRUE. Otherwise return FALSE. + */ + +Bool TracePoll(Work *workReturn, Bool *collectWorldReturn, Globals globals, + Bool collectWorldAllowed) +{ + Trace trace; + Arena arena; + Work oldWork, newWork, work, endWork; + + AVERT(Globals, globals); + arena = GlobalsArena(globals); + + if (arena->busyTraces != TraceSetEMPTY) { + trace = ArenaTrace(arena, (TraceId)0); + } else { + /* No traces are running: consider starting one now. */ + if (!PolicyStartTrace(&trace, collectWorldReturn, arena, + collectWorldAllowed)) + return FALSE; + } + + AVER(arena->busyTraces == TraceSetSingle(trace)); + oldWork = traceWork(trace); + endWork = oldWork + trace->quantumWork; + do { + TraceAdvance(trace); + } while (trace->state != TraceFINISHED && traceWork(trace) < endWork); + newWork = traceWork(trace); + AVER(newWork >= oldWork); + work = newWork - oldWork; + if (trace->state == TraceFINISHED) + TraceDestroyFinished(trace); + *workReturn = work; + return TRUE; +} + + +/* TraceDescribe -- describe a trace */ + +Res TraceDescribe(Trace trace, mps_lib_FILE *stream, Count depth) +{ + Ring node, next; + Res res; + const char *state; + + if (!TESTT(Trace, trace)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + switch (trace->state) { + case TraceINIT: state = "INIT"; break; + case TraceUNFLIPPED: state = "UNFLIPPED"; break; + case TraceFLIPPED: state = "FLIPPED"; break; + case TraceRECLAIM: state = "RECLAIM"; break; + case TraceFINISHED: state = "FINISHED"; break; + default: state = "unknown"; break; + } + + res = WriteF(stream, depth, + "Trace $P ($U) {\n", (WriteFP)trace, (WriteFU)trace->ti, + " arena $P ($U)\n", (WriteFP)trace->arena, + (WriteFU)trace->arena->serial, + " why \"$S\"\n", (WriteFS)TraceStartWhyToString(trace->why), + " state $S\n", (WriteFS)state, + " band 
$U\n", (WriteFU)trace->band, + " white $B\n", (WriteFB)trace->white, + " mayMove $B\n", (WriteFB)trace->mayMove, + " condemned $U\n", (WriteFU)trace->condemned, + " notCondemned $U\n", (WriteFU)trace->notCondemned, + " foundation $U\n", (WriteFU)trace->foundation, + " quantumWork $U\n", (WriteFU)trace->quantumWork, + " rootScanSize $U\n", (WriteFU)trace->rootScanSize, + STATISTIC_WRITE(" rootCopiedSize $U\n", + (WriteFU)trace->rootCopiedSize) + " segScanSize $U\n", (WriteFU)trace->segScanSize, + STATISTIC_WRITE(" segCopiedSize $U\n", + (WriteFU)trace->segCopiedSize) + " forwardedSize $U\n", (WriteFU)trace->forwardedSize, + " preservedInPlaceSize $U\n", (WriteFU)trace->preservedInPlaceSize, + NULL); + if (res != ResOK) + return res; + + RING_FOR(node, &trace->genRing, next) { + GenDesc gen = GenDescOfTraceRing(node, trace); + res = GenDescDescribe(gen, stream, depth + 2); + if (res != ResOK) + return res; + } + + res = WriteF(stream, depth, + "} Trace $P\n", (WriteFP)trace, + NULL); + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2023 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/traceanc.c b/mps/code/traceanc.c new file mode 100644 index 00000000000..a6f4f3f49cc --- /dev/null +++ b/mps/code/traceanc.c @@ -0,0 +1,688 @@ +/* traceanc.c: ANCILLARY SUPPORT FOR TRACER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. + * See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * All this ancillary stuff was making trace.c very cluttered. + * Put it here instead. RHSK 2008-12-09. + * + * CONTENTS + * + * - TraceStartMessage. Posted when a trace starts. + * + * - TraceMessage. Posted when a trace ends. + * + * - TraceIdMessages. Pre-allocated messages for traceid. + * + * - ArenaRelease, ArenaClamp, ArenaPark. + */ + +#include "mpm.h" + + + +/* -------- TraceStartMessage -------- */ + + +/* TraceStartMessage -- posted when a trace starts + * + * Internal names: + * trace start + * TraceStartMessage, tsMessage (struct *) + * MessageTypeGCSTART (enum) + * + * External names: + * mps_message_type_gc_start (enum macro) + * MPS_MESSAGE_TYPE_GC_START (enum) + * + * (Note: this should properly be called "trace begin", but it's much + * too late to change it now!) + * + * . 
+ */ + +#define TraceStartMessageSig ((Sig)0x51926535) /* SIGnature TRaceStartMeSsage */ + +/* .whybuf: + * .whybuf.len: Length (in chars) of a char buffer used to store the + * reason why a collection started in the TraceStartMessageStruct + * (used by mps_message_type_gc_start). If the reason is too long to + * fit, it must be truncated. + * .whybuf.nul: Check insists that the last char in the array is NUL + * (even if there is another NUL earlier in the buffer); this makes + * the NUL-termination check fast. + */ +#define TRACE_START_MESSAGE_WHYBUF_LEN 128 + +typedef struct TraceStartMessageStruct { + Sig sig; /* design.mps.sig.field */ + char why[TRACE_START_MESSAGE_WHYBUF_LEN]; /* .whybuf */ + MessageStruct messageStruct; +} TraceStartMessageStruct; + +#define TraceStartMessageMessage(traceStartMessage) \ + (&((traceStartMessage)->messageStruct)) +#define MessageTraceStartMessage(message) \ + (PARENT(TraceStartMessageStruct, messageStruct, message)) + +Bool TraceStartMessageCheck(TraceStartMessage tsMessage) +{ + CHECKS(TraceStartMessage, tsMessage); + CHECKD(Message, TraceStartMessageMessage(tsMessage)); + CHECKL(MessageGetType(TraceStartMessageMessage(tsMessage)) == + MessageTypeGCSTART); + + /* Check that why is NUL terminated. 
See .whybuf.nul */ + CHECKL(tsMessage->why[NELEMS(tsMessage->why)-1] == '\0'); + + return TRUE; +} + +static void TraceStartMessageDelete(Message message) +{ + TraceStartMessage tsMessage; + Arena arena; + + AVERT(Message, message); + tsMessage = MessageTraceStartMessage(message); + AVERT(TraceStartMessage, tsMessage); + + arena = MessageArena(message); + tsMessage->sig = SigInvalid; + MessageFinish(message); + + ControlFree(arena, (void *)tsMessage, sizeof(TraceStartMessageStruct)); +} + +static const char *TraceStartMessageWhy(Message message) +{ + TraceStartMessage tsMessage; + + AVERT(Message, message); + tsMessage = MessageTraceStartMessage(message); + AVERT(TraceStartMessage, tsMessage); + + return tsMessage->why; +} + +static MessageClassStruct TraceStartMessageClassStruct = { + MessageClassSig, /* sig */ + "TraceGCStart", /* name */ + MessageTypeGCSTART, /* Message Type */ + TraceStartMessageDelete, /* Delete */ + MessageNoFinalizationRef, /* FinalizationRef */ + MessageNoGCLiveSize, /* GCLiveSize */ + MessageNoGCCondemnedSize, /* GCCondemnedSize */ + MessageNoGCNotCondemnedSize, /* GCNotCondemnedSize */ + TraceStartMessageWhy, /* GCStartWhy */ + MessageClassSig /* */ +}; + +static void traceStartMessageInit(Arena arena, TraceStartMessage tsMessage) +{ + AVERT(Arena, arena); + + MessageInit(arena, TraceStartMessageMessage(tsMessage), + &TraceStartMessageClassStruct, MessageTypeGCSTART); + tsMessage->why[0] = '\0'; + tsMessage->why[NELEMS(tsMessage->why)-1] = '\0'; /* .whybuf.nul */ + + tsMessage->sig = TraceStartMessageSig; + AVERT(TraceStartMessage, tsMessage); +} + +/* TraceStartWhyToString -- why-code to text + * + * Converts a TraceStartWhy* code into a constant string describing + * why a trace started. 
+ */ + +const char *TraceStartWhyToString(TraceStartWhy why) +{ + const char *r; + + switch (why) { +#define X(WHY, SHORT, LONG) case TraceStartWhy ## WHY: r = (LONG); break; + TRACE_START_WHY_LIST(X) +#undef X + default: + NOTREACHED; + r = "Unknown reason (internal error)."; + break; + } + + /* Must fit in buffer without truncation; see .whybuf.len */ + AVER(StringLength(r) < TRACE_START_MESSAGE_WHYBUF_LEN); + + return r; +} + + +/* traceStartWhyToTextBuffer + * + * Converts a TraceStartWhy* code into a string describing why a trace + * started, and copies that into the text buffer the caller provides. + * s specifies the beginning of the buffer to write the string + * into, len specifies the length of the buffer. + * The string written will be NUL terminated, and truncated if + * necessary. + */ + +static void traceStartWhyToTextBuffer(char *s, size_t len, TraceStartWhy why) +{ + const char *r; + size_t i; + + AVER(s); + /* len can be anything, including 0. */ + AVER(why < TraceStartWhyLIMIT); + + r = TraceStartWhyToString(why); + + for(i=0; istate == TraceUNFLIPPED); + + arena = trace->arena; + AVERT(Arena, arena); + + ti = trace->ti; + AVERT(TraceId, ti); + + tsMessage = arena->tsMessage[ti]; + if(tsMessage) { + AVERT(TraceStartMessage, tsMessage); + + traceStartWhyToTextBuffer(tsMessage->why, + sizeof tsMessage->why, trace->why); + + arena->tsMessage[ti] = NULL; + MessagePost(arena, TraceStartMessageMessage(tsMessage)); + } else { + arena->droppedMessages += 1; + } + + /* We have consumed the pre-allocated message */ + AVER(!arena->tsMessage[ti]); +} + + + +/* -------- TraceMessage (trace end) -------- */ + + +/* TraceMessage -- posted when a trace ends + * + * Internal names: + * trace end + * TraceMessage, tMessage (struct *) + * MessageTypeGC (enum) + * + * External names: + * mps_message_type_gc (enum macro) + * MPS_MESSAGE_TYPE_GC (enum) + * + * (Note: this should properly be called "trace end", but it's much + * too late to change it now!) + * + * . 
+ */ + + +/* TraceMessage -- type of trace end messages */ + +#define TraceMessageSig ((Sig)0x51926359) /* SIGnature TRace MeSsaGe */ + +typedef struct TraceMessageStruct { + Sig sig; /* design.mps.sig.field */ + Size liveSize; + Size condemnedSize; + Size notCondemnedSize; + MessageStruct messageStruct; +} TraceMessageStruct; + +#define TraceMessageMessage(traceMessage) (&((traceMessage)->messageStruct)) +#define MessageTraceMessage(message) \ + (PARENT(TraceMessageStruct, messageStruct, message)) + +Bool TraceMessageCheck(TraceMessage tMessage) +{ + CHECKS(TraceMessage, tMessage); + CHECKD(Message, TraceMessageMessage(tMessage)); + CHECKL(MessageGetType(TraceMessageMessage(tMessage)) == + MessageTypeGC); + /* We can't check anything about the statistics. In particular, */ + /* liveSize may exceed condemnedSize because they are only estimates. */ + + return TRUE; +} + +static void TraceMessageDelete(Message message) +{ + TraceMessage tMessage; + Arena arena; + + AVERT(Message, message); + tMessage = MessageTraceMessage(message); + AVERT(TraceMessage, tMessage); + + arena = MessageArena(message); + tMessage->sig = SigInvalid; + MessageFinish(message); + + ControlFree(arena, (void *)tMessage, sizeof(TraceMessageStruct)); +} + +static Size TraceMessageLiveSize(Message message) +{ + TraceMessage tMessage; + + AVERT(Message, message); + tMessage = MessageTraceMessage(message); + AVERT(TraceMessage, tMessage); + + return tMessage->liveSize; +} + +static Size TraceMessageCondemnedSize(Message message) +{ + TraceMessage tMessage; + + AVERT(Message, message); + tMessage = MessageTraceMessage(message); + AVERT(TraceMessage, tMessage); + + return tMessage->condemnedSize; +} + +static Size TraceMessageNotCondemnedSize(Message message) +{ + TraceMessage tMessage; + + AVERT(Message, message); + tMessage = MessageTraceMessage(message); + AVERT(TraceMessage, tMessage); + + return tMessage->notCondemnedSize; +} + +static MessageClassStruct TraceMessageClassStruct = { + 
MessageClassSig, /* sig */ + "TraceGC", /* name */ + MessageTypeGC, /* Message Type */ + TraceMessageDelete, /* Delete */ + MessageNoFinalizationRef, /* FinalizationRef */ + TraceMessageLiveSize, /* GCLiveSize */ + TraceMessageCondemnedSize, /* GCCondemnedSize */ + TraceMessageNotCondemnedSize, /* GCNotCondemnedSize */ + MessageNoGCStartWhy, /* GCStartWhy */ + MessageClassSig /* */ +}; + +static void traceMessageInit(Arena arena, TraceMessage tMessage) +{ + AVERT(Arena, arena); + + MessageInit(arena, TraceMessageMessage(tMessage), + &TraceMessageClassStruct, MessageTypeGC); + tMessage->liveSize = (Size)0; + tMessage->condemnedSize = (Size)0; + tMessage->notCondemnedSize = (Size)0; + + tMessage->sig = TraceMessageSig; + AVERT(TraceMessage, tMessage); +} + +/* TracePostMessage -- complete and post trace end message + * + * .message.data: The trace end message contains the live size + * (forwardedSize + preservedInPlaceSize), the condemned size + * (condemned), and the not-condemned size (notCondemned). + */ + +void TracePostMessage(Trace trace) +{ + Arena arena; + TraceId ti; + TraceMessage tMessage; + + AVERT(Trace, trace); + AVER(trace->state == TraceFINISHED); + + arena = trace->arena; + AVERT(Arena, arena); + + ti = trace->ti; + AVERT(TraceId, ti); + + tMessage = arena->tMessage[ti]; + if(tMessage) { + AVERT(TraceMessage, tMessage); + + tMessage->liveSize = trace->forwardedSize + trace->preservedInPlaceSize; + tMessage->condemnedSize = trace->condemned; + tMessage->notCondemnedSize = trace->notCondemned; + + arena->tMessage[ti] = NULL; + MessagePost(arena, TraceMessageMessage(tMessage)); + } else { + arena->droppedMessages += 1; + } + + /* We have consumed the pre-allocated message */ + AVER(!arena->tMessage[ti]); +} + + + +/* -------- TraceIdMessages -------- */ + + +/* TraceIdMessagesCheck - pre-allocated messages for this traceid. + * + * Messages are absent when already sent, or when (exceptionally) + * ControlAlloc failed at the end of the previous trace. 
If present, + * they must be valid. + * + * Messages are pre-allocated all-or-nothing. So if we've got a + * start but no end, that's wrong. + * + * Note: this function does not take a pointer to a struct, so it is + * not a 'proper' _Check function. It can be used in CHECKL, but + * not CHECKD etc. + */ + +Bool TraceIdMessagesCheck(Arena arena, TraceId ti) +{ + CHECKL(!arena->tsMessage[ti] || TraceStartMessageCheck(arena->tsMessage[ti])); + CHECKL(!arena->tsMessage[ti] || arena->tMessage[ti]); + CHECKL(!arena->tMessage[ti] || TraceMessageCheck(arena->tMessage[ti])); + + return TRUE; +} + +/* TraceIdMessagesCreate -- pre-allocate all messages for this traceid + * + * . + * + * For remote control of ControlAlloc, to simulate low memory: + * #define ControlAlloc !TIMCA_remote() ? ResFAIL : ControlAlloc + * extern Bool TIMCA_remote(void); + * See TIMCA_remote() in zmess.c + */ + +Res TraceIdMessagesCreate(Arena arena, TraceId ti) +{ + void *p; + TraceStartMessage tsMessage; + TraceMessage tMessage; + Res res; + + /* Ensure we don't leak memory */ + AVER(!arena->tsMessage[ti]); + AVER(!arena->tMessage[ti]); + + res = ControlAlloc(&p, arena, sizeof(TraceStartMessageStruct)); + if(res != ResOK) + goto failTraceStartMessage; + tsMessage = p; + + res = ControlAlloc(&p, arena, sizeof(TraceMessageStruct)); + if(res != ResOK) + goto failTraceMessage; + tMessage = p; + + traceStartMessageInit(arena, tsMessage); + AVERT(TraceStartMessage, tsMessage); + + traceMessageInit(arena, tMessage); + AVERT(TraceMessage, tMessage); + + arena->tsMessage[ti] = tsMessage; + arena->tMessage[ti] = tMessage; + + AVER(TraceIdMessagesCheck(arena, ti)); + + return ResOK; + +failTraceMessage: + ControlFree(arena, tsMessage, sizeof(TraceStartMessageStruct)); +failTraceStartMessage: + AVER(TraceIdMessagesCheck(arena, ti)); + return res; +} + +/* TraceIdMessagesDestroy -- destroy any pre-allocated messages + * + * Only used during ArenaDestroy. + * + * . 
+ */ + +void TraceIdMessagesDestroy(Arena arena, TraceId ti) +{ + TraceStartMessage tsMessage; + TraceMessage tMessage; + + AVER(TraceIdMessagesCheck(arena, ti)); + + tsMessage = arena->tsMessage[ti]; + if(tsMessage) { + arena->tsMessage[ti] = NULL; + TraceStartMessageDelete(TraceStartMessageMessage(tsMessage)); + } + + tMessage = arena->tMessage[ti]; + if(tMessage) { + arena->tMessage[ti] = NULL; + TraceMessageDelete(TraceMessageMessage(tMessage)); + } + + AVER(!arena->tsMessage[ti]); + AVER(!arena->tMessage[ti]); + AVER(TraceIdMessagesCheck(arena, ti)); +} + + + +/* ----- ArenaRelease, ArenaClamp, ArenaPark, ArenaPostmortem ----- */ + + +/* ArenaRelease, ArenaClamp, ArenaPark, ArenaPostmortem -- + * allow/prevent collection work. + */ + + +/* ArenaClamp -- clamp the arena (no optional collection increments) */ + +void ArenaClamp(Globals globals) +{ + AVERT(Globals, globals); + globals->clamped = TRUE; +} + + +/* ArenaRelease -- release the arena (allow optional collection + * increments) */ + +void ArenaRelease(Globals globals) +{ + AVERT(Globals, globals); + globals->clamped = FALSE; + ArenaPoll(globals); +} + + +/* ArenaPark -- finish all current collections and clamp the arena, + * thus leaving the arena parked. */ + +void ArenaPark(Globals globals) +{ + TraceId ti; + Trace trace; + Arena arena; + Clock start; + + AVERT(Globals, globals); + arena = GlobalsArena(globals); + + globals->clamped = TRUE; + start = ClockNow(); + + while(arena->busyTraces != TraceSetEMPTY) { + /* Advance all active traces. */ + TRACE_SET_ITER(ti, trace, arena->busyTraces, arena) + TraceAdvance(trace); + if(trace->state == TraceFINISHED) { + TraceDestroyFinished(trace); + } + TRACE_SET_ITER_END(ti, trace, arena->busyTraces, arena); + } + + ArenaAccumulateTime(arena, start, ClockNow()); + + /* All traces have finished so there must not be an emergency. 
*/ + AVER(!ArenaEmergency(arena)); +} + + +/* arenaExpose -- discard all protection from MPS-managed memory + * + * This is called by ArenaPostmortem, which we expect only to be used + * after a fatal error. So we use the lowest-level description of the + * MPS-managed memory (the chunk ring page tables) to avoid the risk + * of higher-level structures (like the segments) having been + * corrupted. + * + * After calling this function memory may not be in a consistent + * state, so it is not safe to continue running the MPS. + */ + +static void arenaExpose(Arena arena) +{ + Ring node, next; + RING_FOR(node, &arena->chunkRing, next) { + Chunk chunk = RING_ELT(Chunk, arenaRing, node); + Index i; + for (i = 0; i < chunk->pages; ++i) { + if (Method(Arena, arena, chunkPageMapped)(chunk, i)) { + ProtSet(PageIndexBase(chunk, i), PageIndexBase(chunk, i + 1), + AccessSetEMPTY); + } + } + } +} + + +/* ArenaPostmortem -- enter the postmortem state */ + +void ArenaPostmortem(Globals globals) +{ + Arena arena = GlobalsArena(globals); + + /* Ensure lock is released. */ + while (LockIsHeld(globals->lock)) { + LockReleaseRecursive(globals->lock); + } + + /* Remove the arena from the global arena ring so that it no longer + * handles protection faults. (Don't call arenaDenounce because that + * needs to claim the global ring lock, but that might already be + * held, for example if we are inside ArenaAccess.) */ + RingRemove(&globals->globalRing); + + /* Clamp the arena so that ArenaPoll does nothing. */ + ArenaClamp(globals); + + /* Remove all protection from mapped pages. */ + arenaExpose(arena); +} + + +/* ArenaStartCollect -- start a collection of everything in the + * arena; leave unclamped. 
*/ + +Res ArenaStartCollect(Globals globals, TraceStartWhy why) +{ + Arena arena; + Res res; + Trace trace; + + AVERT(Globals, globals); + arena = GlobalsArena(globals); + + ArenaPark(globals); + res = TraceStartCollectAll(&trace, arena, why); + if(res != ResOK) + goto failStart; + ArenaRelease(globals); + return ResOK; + +failStart: + ArenaRelease(globals); + return res; +} + +/* ArenaCollect -- collect everything in arena; leave parked */ + +Res ArenaCollect(Globals globals, TraceStartWhy why) +{ + Res res; + + AVERT(Globals, globals); + res = ArenaStartCollect(globals, why); + if(res != ResOK) + return res; + + ArenaPark(globals); + return ResOK; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/mps/code/tract.c b/mps/code/tract.c
new file mode 100644
index 00000000000..baa01ef611d
--- /dev/null
+++ b/mps/code/tract.c
@@ -0,0 +1,528 @@
+/* tract.c: PAGE TABLES
+ *
+ * $Id$
+ * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license.
+ *
+ * .ullagepages: Pages whose page index is < allocBase are recorded as
+ * free but never allocated as alloc starts searching after the tables.
+ * TractOfAddr uses the fact that these pages are marked as free in order
+ * to detect "references" to these pages as being bogus.
+ *
+ * .chunk.at.base: The chunks are stored in a balanced binary tree.
+ * Looking up an address in this tree is on the critical path, and
+ * therefore vital that it runs quickly. It is an implementation
+ * detail of chunks that they are always stored at the base of the
+ * region of address space they represent. Thus chunk happens to
+ * always be the same as chunk->base. We take advantage of this in the
+ * tree search by using chunk as its own key (instead of looking up
+ * chunk->base): this saves a dereference and perhaps a cache miss.
+ * See ChunkKey and ChunkCompare for this optimization. The necessary
+ * property is asserted in ChunkCheck.
+ *
+ * .chunk.at.base.no-coalesce: The fact that the chunk structure is
+ * stored at the base of the chunk also ensures that free address
+ * ranges in adjacent chunks are not coalesced by the arena's CBS. 
See + * + */ + +#include "tract.h" +#include "boot.h" +#include "bt.h" +#include "mpm.h" + +SRCID(tract, "$Id$"); + + +/* TractArena -- get the arena of a tract */ + +#define TractArena(tract) PoolArena(TractPool(tract)) + + +/* TractCheck -- check the integrity of a tract */ + +Bool TractCheck(Tract tract) +{ + if (TractHasPool(tract)) { + CHECKU(Pool, TractPool(tract)); + CHECKL(AddrIsArenaGrain(TractBase(tract), TractArena(tract))); + } + if (TractHasSeg(tract)) { + CHECKU(Seg, TractSeg(tract)); + } + return TRUE; +} + + +/* TractInit -- initialize a tract */ + +void TractInit(Tract tract, Pool pool, Addr base) +{ + AVER_CRITICAL(tract != NULL); + AVERT_CRITICAL(Pool, pool); + + tract->pool = pool; + tract->base = base; + tract->seg = NULL; + + AVERT(Tract, tract); + +} + + +/* TractFinish -- finish a tract */ + +void TractFinish(Tract tract) +{ + AVERT(Tract, tract); + + /* Check that there's no segment - and hence no shielding. */ + AVER(!TractHasSeg(tract)); + tract->pool = NULL; +} + + + +/* .tract.critical: These tract functions are low-level and used + * throughout. They are therefore on the + * [critical path](../design/critical-path.txt) and their + * AVERs are so-marked. + */ + + +/* TractBase -- return the base address of a tract */ + +Addr (TractBase)(Tract tract) +{ + Addr base; + AVERT_CRITICAL(Tract, tract); /* .tract.critical */ + + base = tract->base; + return base; +} + + +/* TractLimit -- return the limit address of a tract */ + +Addr TractLimit(Tract tract, Arena arena) +{ + AVERT_CRITICAL(Tract, tract); /* .tract.critical */ + AVERT_CRITICAL(Arena, arena); + return AddrAdd(TractBase(tract), ArenaGrainSize(arena)); +} + + +/* Chunk functions */ + + +/* ChunkCheck -- check a chunk */ + +Bool ChunkCheck(Chunk chunk) +{ + CHECKS(Chunk, chunk); + CHECKU(Arena, chunk->arena); + CHECKL(chunk->serial < chunk->arena->chunkSerial); + /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. 
*/ + CHECKL(TreeCheck(&chunk->chunkTree)); + CHECKL(ChunkPagesToSize(chunk, 1) == ChunkPageSize(chunk)); + CHECKL(ShiftCheck(ChunkPageShift(chunk))); + + CHECKL(chunk->base != (Addr)0); + CHECKL(chunk->base < chunk->limit); + /* check chunk structure is at its own base: see .chunk.at.base. */ + CHECKL(chunk->base == (Addr)chunk); + CHECKL((Addr)(chunk+1) <= chunk->limit); + CHECKL(ChunkSizeToPages(chunk, ChunkSize(chunk)) == chunk->pages); + /* check that the tables fit in the chunk */ + CHECKL(chunk->allocBase <= chunk->pages); + CHECKL(chunk->allocBase >= chunk->pageTablePages); + + CHECKD_NOSIG(BT, chunk->allocTable); + /* check that allocTable is in the chunk overhead */ + CHECKL((Addr)chunk->allocTable >= chunk->base); + CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages)) + <= PageIndexBase(chunk, chunk->allocBase)); + + /* check they don't overlap (knowing the order) */ + CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages)) + <= (Addr)chunk->pageTable); + + CHECKL(chunk->pageTable != NULL); + CHECKL((Addr)chunk->pageTable >= chunk->base); + CHECKL((Addr)&chunk->pageTable[chunk->pageTablePages] + <= PageIndexBase(chunk, chunk->allocBase)); + CHECKL(NONNEGATIVE(INDEX_OF_ADDR(chunk, (Addr)chunk->pageTable))); + /* check there's enough space in the page table */ + CHECKL(INDEX_OF_ADDR(chunk, AddrSub(chunk->limit, 1)) < chunk->pages); + CHECKL(chunk->pageTablePages < chunk->pages); + + /* Could check the consistency of the tables, but not O(1). */ + return TRUE; +} + + +/* ChunkInit -- initialize generic part of chunk */ + +Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved, + BootBlock boot) +{ + Size size; + Count pages; + Shift pageShift; + Size pageTableSize; + Addr allocBase; + void *p; + Res res; + + /* chunk is supposed to be uninitialized, so don't check it. 
*/ + AVERT(Arena, arena); + AVER(base != NULL); + AVER(AddrIsAligned(base, ArenaGrainSize(arena))); + AVER(base < limit); + AVER(AddrIsAligned(limit, ArenaGrainSize(arena))); + AVERT(BootBlock, boot); + + chunk->serial = (arena->chunkSerial)++; + chunk->arena = arena; + RingInit(&chunk->arenaRing); + + chunk->pageSize = ArenaGrainSize(arena); + chunk->pageShift = pageShift = SizeLog2(chunk->pageSize); + chunk->base = base; + chunk->limit = limit; + chunk->reserved = reserved; + size = ChunkSize(chunk); + + /* .overhead.pages: Chunk overhead for the page allocation table. */ + chunk->pages = pages = size >> pageShift; + res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN); + if (res != ResOK) + goto failAllocTable; + chunk->allocTable = p; + + pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize); + chunk->pageTablePages = pageTableSize >> pageShift; + + res = Method(Arena, arena, chunkInit)(chunk, boot); + if (res != ResOK) + goto failClassInit; + + /* @@@@ Is BootAllocated always right? */ + /* Last thing we BootAlloc'd is pageTable. We requested pageSize */ + /* alignment, and pageTableSize is itself pageSize aligned, so */ + /* BootAllocated should also be pageSize aligned. */ + AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize)); + chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift); + + /* Init allocTable after class init, because it might be mapped there. */ + BTResRange(chunk->allocTable, 0, pages); + + /* Check that there is some usable address space remaining in the chunk. */ + allocBase = PageIndexBase(chunk, chunk->allocBase); + AVER(allocBase < chunk->limit); + + /* Add the chunk's free address space to the arena's freeLand, so that + we can allocate from it. 
*/ + if (arena->hasFreeLand) { + res = ArenaFreeLandInsert(arena, allocBase, chunk->limit); + if (res != ResOK) + goto failLandInsert; + } + + TreeInit(&chunk->chunkTree); + + chunk->sig = ChunkSig; + AVERT(Chunk, chunk); + + ArenaChunkInsert(arena, chunk); + + return ResOK; + +failLandInsert: + Method(Arena, arena, chunkFinish)(chunk); + /* .no-clean: No clean-ups needed past this point for boot, as we will + discard the chunk. */ +failClassInit: +failAllocTable: + return res; +} + + +/* ChunkFinish -- finish the generic fields of a chunk */ + +void ChunkFinish(Chunk chunk) +{ + Arena arena; + + AVERT(Chunk, chunk); + + AVER(BTIsResRange(chunk->allocTable, 0, chunk->pages)); + arena = ChunkArena(chunk); + + if (arena->hasFreeLand) { + Res res = ArenaFreeLandDelete(arena, + PageIndexBase(chunk, chunk->allocBase), + chunk->limit); + /* Can't fail because the range can't split because we passed the + whole chunk and chunks never coalesce. */ + AVER(res == ResOK); + } + + ArenaChunkRemoved(arena, chunk); + + chunk->sig = SigInvalid; + + TreeFinish(&chunk->chunkTree); + RingRemove(&chunk->arenaRing); + + /* Finish all other fields before class finish, because they might be */ + /* unmapped there. */ + Method(Arena, arena, chunkFinish)(chunk); +} + + +/* ChunkCompare -- Compare key to [base,limit) */ + +Compare ChunkCompare(Tree tree, TreeKey key) +{ + Addr base1, base2, limit2; + Chunk chunk; + + AVERT_CRITICAL(Tree, tree); + AVER_CRITICAL(tree != TreeEMPTY); + + /* See .chunk.at.base. */ + chunk = ChunkOfTree(tree); + AVERT_CRITICAL(Chunk, chunk); + + base1 = AddrOfTreeKey(key); + base2 = chunk->base; + limit2 = chunk->limit; + + if (base1 < base2) + return CompareLESS; + else if (base1 >= limit2) + return CompareGREATER; + else + return CompareEQUAL; +} + + +/* ChunkKey -- Return the key corresponding to a chunk */ + +TreeKey ChunkKey(Tree tree) +{ + /* See .chunk.at.base. 
*/ + Chunk chunk = ChunkOfTree(tree); + return TreeKeyOfAddrVar(chunk); +} + + +/* ChunkOfAddr -- return the chunk which encloses an address */ + +Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr) +{ + Tree tree; + + AVER_CRITICAL(chunkReturn != NULL); + AVERT_CRITICAL(Arena, arena); + /* addr is arbitrary */ + + if (TreeFind(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr), + ChunkCompare) + == CompareEQUAL) + { + Chunk chunk = ChunkOfTree(tree); + AVER_CRITICAL(chunk->base <= addr); + AVER_CRITICAL(addr < chunk->limit); + *chunkReturn = chunk; + return TRUE; + } + return FALSE; +} + + +/* IndexOfAddr -- return the index of the page containing an address + * + * Function version of INDEX_OF_ADDR, for debugging purposes. + */ + +Index IndexOfAddr(Chunk chunk, Addr addr) +{ + AVERT(Chunk, chunk); + /* addr is arbitrary */ + + return INDEX_OF_ADDR(chunk, addr); +} + + +/* ChunkNodeDescribe -- describe a single node in the tree of chunks, + * for SplayTreeDescribe + */ + +Res ChunkNodeDescribe(Tree node, mps_lib_FILE *stream) +{ + Chunk chunk; + + if (!TreeCheck(node)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + chunk = ChunkOfTree(node); + if (!TESTT(Chunk, chunk)) + return ResFAIL; + + return WriteF(stream, 0, "[$P,$P)", (WriteFP)chunk->base, + (WriteFP)chunk->limit, NULL); +} + + +/* Page table functions */ + +/* .tract.critical: These Tract functions are low-level and are on + * the [critical path](../design/critical-path.txt) in various ways. The + * more common therefore use AVER_CRITICAL. + */ + + +/* TractOfAddr -- return the tract the given address is in, if any + * + * If the address is within the bounds of the arena, calculate the + * page table index from the address and see if the page is allocated. + * If so, return it. 
+ */ + +Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr) +{ + Bool b; + Index i; + Chunk chunk; + + /* */ + AVER_CRITICAL(tractReturn != NULL); /* .tract.critical */ + AVERT_CRITICAL(Arena, arena); + + b = ChunkOfAddr(&chunk, arena, addr); + if (!b) + return FALSE; + /* */ + i = INDEX_OF_ADDR(chunk, addr); + /* .addr.free: If the page is recorded as being free then */ + /* either the page is free or it is */ + /* part of the arena tables (see .ullagepages). */ + if (BTGet(chunk->allocTable, i)) { + *tractReturn = PageTract(ChunkPage(chunk, i)); + return TRUE; + } + + return FALSE; +} + + +/* TractOfBaseAddr -- return a tract given a base address + * + * The address must have been allocated to some pool. + */ + +Tract TractOfBaseAddr(Arena arena, Addr addr) +{ + Tract tract = NULL; + Bool found; + + AVERT_CRITICAL(Arena, arena); + AVER_CRITICAL(AddrIsAligned(addr, ArenaGrainSize(arena))); + + /* Check first in the cache, see . */ + if (arena->lastTractBase == addr) { + tract = arena->lastTract; + } else { + found = TractOfAddr(&tract, arena, addr); + AVER_CRITICAL(found); + } + + AVER_CRITICAL(TractBase(tract) == addr); + return tract; +} + + +/* PageAlloc + * + * Sets up the page descriptor for an allocated page to turn it into a Tract. 
+ */ + +void PageAlloc(Chunk chunk, Index pi, Pool pool) +{ + Tract tract; + Addr base; + Page page; + + AVERT_CRITICAL(Chunk, chunk); + AVER_CRITICAL(pi >= chunk->allocBase); + AVER_CRITICAL(pi < chunk->pages); + AVER_CRITICAL(!BTGet(chunk->allocTable, pi)); + AVERT_CRITICAL(Pool, pool); + + page = ChunkPage(chunk, pi); + tract = PageTract(page); + base = PageIndexBase(chunk, pi); + BTSet(chunk->allocTable, pi); + TractInit(tract, pool, base); +} + + +/* PageInit -- initialize a page (as free) */ + +void PageInit(Chunk chunk, Index pi) +{ + Page page; + + AVERT_CRITICAL(Chunk, chunk); + AVER_CRITICAL(pi < chunk->pages); + + page = ChunkPage(chunk, pi); + + BTRes(chunk->allocTable, pi); + page->pool = NULL; +} + + +/* PageFree -- free an allocated page */ + +void PageFree(Chunk chunk, Index pi) +{ + AVERT(Chunk, chunk); + AVER(pi >= chunk->allocBase); + AVER(pi < chunk->pages); + AVER(BTGet(chunk->allocTable, pi)); + + PageInit(chunk, pi); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/tract.h b/mps/code/tract.h new file mode 100644 index 00000000000..6bfd33d126a --- /dev/null +++ b/mps/code/tract.h @@ -0,0 +1,236 @@ +/* tract.h: PAGE TABLE INTERFACE + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + + +#ifndef tract_h +#define tract_h + +#include "mpmtypes.h" +#include "bt.h" +#include "ring.h" +#include "tree.h" + + +/* TractStruct -- tract structure + * + * .tract: Tracts represent the grains of memory allocation from + * the arena. . 
+ */ + +typedef struct TractStruct { /* Tract structure */ + Pool pool; /* MUST BE FIRST */ + Seg seg; /* NULL or segment containing tract */ + Addr base; /* Base address of the tract */ +} TractStruct; + + +extern Addr (TractBase)(Tract tract); +#define TractBase(tract) ((tract)->base) +extern Addr TractLimit(Tract tract, Arena arena); + +#define TractHasPool(tract) (TractPool(tract) != NULL) +#define TractPool(tract) RVALUE((tract)->pool) +#define TractHasSeg(tract) ((tract)->seg != NULL) +#define TractSeg(tract) RVALUE((tract)->seg) + +extern Bool TractCheck(Tract tract); +extern void TractInit(Tract tract, Pool pool, Addr base); +extern void TractFinish(Tract tract); + + +/* TRACT_*SEG -- Test / set / unset seg->tract associations + * + * These macros all multiply evaluate the tract parameter + */ + +#define TRACT_SEG(segReturn, tract) \ + (TractHasSeg(tract) && ((*(segReturn) = (tract)->seg), TRUE)) + +#define TRACT_SET_SEG(tract, _seg) \ + BEGIN (tract)->seg = (_seg); END + +#define TRACT_UNSET_SEG(tract) \ + BEGIN (tract)->seg = NULL; END + + +/* PageUnion -- page descriptor + * + * .page-table: The page table (defined as a PageUnion array) + * is central to the design of the arena. + * + * .page: The "pool" field must be the first field of the "tail" + * field of this union. . 
+ */ + +typedef union PageUnion { /* page structure */ + Pool pool; /* discriminator */ + TractStruct alloc; /* allocated tract, pool != NULL */ +} PageUnion; + + +#define PageTract(page) (&(page)->alloc) +#define PageOfTract(tract) PARENT(PageUnion, alloc, tract) +#define PagePool(page) RVALUE((page)->pool) +#define PageIsAllocated(page) (PagePool(page) != NULL) + + +/* Chunks */ + + +#define ChunkSig ((Sig)0x519C804C) /* SIGnature CHUNK */ + +typedef struct ChunkStruct { + Sig sig; /* design.mps.sig.field */ + Serial serial; /* serial within the arena */ + Arena arena; /* parent arena */ + RingStruct arenaRing; /* node in ring of all chunks in arena */ + TreeStruct chunkTree; /* node in tree of all chunks in arena */ + Size pageSize; /* size of pages */ + Shift pageShift; /* log2 of page size, for shifts */ + Addr base; /* base address of chunk */ + Addr limit; /* limit address of chunk */ + Index allocBase; /* index of first page allocatable to clients */ + Index pages; /* index of the page after the last allocatable page */ + BT allocTable; /* page allocation table */ + Page pageTable; /* the page table */ + Count pageTablePages; /* number of pages occupied by page table */ + Size reserved; /* reserved address space for chunk (including overhead + such as losses due to alignment): must not change + (or arena reserved calculation will break) */ +} ChunkStruct; + + +#define ChunkArena(chunk) RVALUE((chunk)->arena) +#define ChunkSize(chunk) AddrOffset((chunk)->base, (chunk)->limit) +#define ChunkPageSize(chunk) RVALUE((chunk)->pageSize) +#define ChunkPageShift(chunk) RVALUE((chunk)->pageShift) +#define ChunkPagesToSize(chunk, pages) ((Size)(pages) << (chunk)->pageShift) +#define ChunkSizeToPages(chunk, size) ((Count)((size) >> (chunk)->pageShift)) +#define ChunkPage(chunk, pi) (&(chunk)->pageTable[pi]) +#define ChunkOfTree(tree) PARENT(ChunkStruct, chunkTree, tree) +#define ChunkReserved(chunk) RVALUE((chunk)->reserved) + +extern Bool ChunkCheck(Chunk chunk); 
+extern Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, + Size reserved, BootBlock boot); +extern void ChunkFinish(Chunk chunk); +extern Compare ChunkCompare(Tree tree, TreeKey key); +extern TreeKey ChunkKey(Tree tree); +extern Bool ChunkCacheEntryCheck(ChunkCacheEntry entry); +extern void ChunkCacheEntryInit(ChunkCacheEntry entry); +extern Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr); +extern Res ChunkNodeDescribe(Tree node, mps_lib_FILE *stream); + + +/* AddrPageBase -- the base of the page this address is on */ + +#define AddrPageBase(chunk, addr) \ + AddrAlignDown((addr), ChunkPageSize(chunk)) + + +/* Page table functions */ + +extern Tract TractOfBaseAddr(Arena arena, Addr addr); +extern Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr); + + +/* INDEX_OF_ADDR -- return the index of the page containing an address + * + * .index.addr: The address passed may be equal to the limit of the + * arena, in which case the last page index plus one is returned. (It + * is, in a sense, the limit index of the page table.) + */ + +#define INDEX_OF_ADDR(chunk, addr) \ + ((Index)ChunkSizeToPages(chunk, AddrOffset((chunk)->base, addr))) + +extern Index IndexOfAddr(Chunk chunk, Addr addr); + + +/* PageIndexBase -- map page index to base address of page + * + * + */ + +#define PageIndexBase(chunk, i) \ + AddrAdd((chunk)->base, ChunkPagesToSize(chunk, i)) + + +/* TractAverContiguousRange -- verify that range is contiguous */ + +#define TractAverContiguousRange(arena, rangeBase, rangeLimit) \ + BEGIN \ + Chunk _ch = NULL; \ + \ + UNUSED(_ch); \ + AVER(ChunkOfAddr(&_ch, arena, rangeBase)); \ + AVER((rangeLimit) <= _ch->limit); \ + END + + +/* TRACT_TRACT_FOR -- iterate over a range of tracts in a chunk + * + * . + * Parameters arena & limit are evaluated multiple times. + * Check first tract & last tract lie with the same chunk. 
+ */ + +#define TRACT_TRACT_FOR(tract, addr, arena, firstTract, limit) \ + tract = (firstTract); addr = TractBase(tract); \ + TractAverContiguousRange(arena, addr, limit); \ + for(; tract != NULL; \ + (addr = AddrAdd(addr, ArenaGrainSize(arena))), \ + (addr < (limit) ? \ + (tract = PageTract(PageOfTract(tract) + 1)) : \ + (tract = NULL) /* terminate loop */)) + + +/* TRACT_FOR -- iterate over a range of tracts in a chunk + * + * . + * Parameters arena & limit are evaluated multiple times. + */ + +#define TRACT_FOR(tract, addr, arena, base, limit) \ + TRACT_TRACT_FOR(tract, addr, arena, TractOfBaseAddr(arena, base), limit) + + +extern void PageAlloc(Chunk chunk, Index pi, Pool pool); +extern void PageInit(Chunk chunk, Index pi); +extern void PageFree(Chunk chunk, Index pi); + + +#endif /* tract_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/trans.c b/mps/code/trans.c new file mode 100644 index 00000000000..3c7f83883e8 --- /dev/null +++ b/mps/code/trans.c @@ -0,0 +1,371 @@ +/* trans.c: TRANSFORMS IMPLEMENTATION + * + * $Id$ + * Copyright 2011-2023 Ravenbrook Limited. See end of file for license. + * + * A transform is a special kind of garbage collection that replaces + * references to a set of objects. The transform is piggybacked onto + * a garbage collection by overriding the fix method for a trace + * (design.mps.trace.fix). The mapping used to replace the references + * is built up in a hash table by the client. See + * design.mps.transform. 
+ */ + +#include "trans.h" +#include "table.h" + + +#define TransformSig ((Sig)0x51926A45) /* SIGnature TRANSform */ + +typedef struct mps_transform_s { + Sig sig; /* */ + Arena arena; /* owning arena */ + Table oldToNew; /* map to apply to refs */ + Epoch epoch; /* epoch in which transform was created */ + Bool aborted; /* no longer transforming, just GCing */ +} TransformStruct; + + +Bool TransformCheck(Transform transform) +{ + CHECKS(Transform, transform); + CHECKU(Arena, transform->arena); + /* .check.boot: avoid bootstrap problem in transformTableAlloc where + transformTableFree checks the transform while the table is being + destroyed */ + if (transform->oldToNew != NULL) + CHECKD(Table, transform->oldToNew); + CHECKL(BoolCheck(transform->aborted)); + CHECKL(transform->epoch <= ArenaEpoch(transform->arena)); + return TRUE; +} + + +/* Allocator functions for the Table oldToNew */ + +static void *transformTableAlloc(void *closure, size_t size) +{ + Transform transform = (Transform)closure; + Res res; + void *p; + + AVERT(Transform, transform); + + res = ControlAlloc(&p, transform->arena, size); + if (res != ResOK) + return NULL; + + return p; +} + +static void transformTableFree(void *closure, void *p, size_t size) +{ + Transform transform = (Transform)closure; + AVERT(Transform, transform); + ControlFree(transform->arena, p, size); +} + + +Res TransformCreate(Transform *transformReturn, Arena arena) +{ + Transform transform; + Res res; + void *p; + + AVER(transformReturn != NULL); + AVERT(Arena, arena); + + res = ControlAlloc(&p, arena, sizeof(TransformStruct)); + if (res != ResOK) + goto failAlloc; + transform = (Transform)p; + + transform->oldToNew = NULL; + transform->arena = arena; + transform->epoch = ArenaEpoch(arena); + transform->aborted = FALSE; + + transform->sig = TransformSig; + + AVERT(Transform, transform); + + res = TableCreate(&transform->oldToNew, + 0, /* no point guessing size before TransformAddOldNew */ + transformTableAlloc, + 
transformTableFree, + transform, + 0, 1); /* use invalid refs as special keys */ + if (res != ResOK) + goto failTable; + + *transformReturn = transform; + return ResOK; + +failTable: + ControlFree(arena, transform, sizeof(TransformStruct)); +failAlloc: + return res; +} + + +void TransformDestroy(Transform transform) +{ + Arena arena; + Table oldToNew; + + AVERT(Transform, transform); + + /* TODO: Log some transform statistics. */ + + /* Workaround bootstrap problem, see .check.boot */ + oldToNew = transform->oldToNew; + transform->oldToNew = NULL; + TableDestroy(oldToNew); + + arena = TransformArena(transform); + transform->sig = SigInvalid; + ControlFree(arena, transform, sizeof(TransformStruct)); +} + + +/* TransformArena -- return transform's arena + * + * Must be thread-safe as it is called outside the arena lock. See + * + */ + +Arena TransformArena(Transform transform) +{ + Arena arena; + AVER(TESTT(Transform, transform)); + arena = transform->arena; + AVER(TESTT(Arena, arena)); + return arena; +} + + +Res TransformAddOldNew(Transform transform, + Ref old_list[], + Ref new_list[], + Count count) +{ + Res res; + Index i; + Arena arena; + + AVERT(Transform, transform); + AVER(old_list != NULL); + AVER(new_list != NULL); + /* count: cannot check */ + + /* .assume.parked: If the mutator isn't adding references while the + arena is parked, we might need to access the client-provided + lists (old_list, new_list), using ArenaRead. Insisting on + parking keeps things simple. 
 */
+ arena = transform->arena;
+ AVER(ArenaGlobals(arena)->clamped); /* .assume.parked */
+ AVER(arena->busyTraces == TraceSetEMPTY); /* .assume.parked */
+
+ res = TableGrow(transform->oldToNew, count);
+ if (res != ResOK)
+ return res;
+
+ for (i = 0; i < count; ++i) {
+ if (old_list[i] == NULL)
+ continue; /* permitted, but no transform to do */
+ if (old_list[i] == new_list[i])
+ continue; /* ignore identity-transforms */
+
+ /* .old-white: Old refs must be in managed memory, because
+ transformFix is only reached when a reference is to something
+ in the condemned set. Other references are eliminated by
+ TraceFix, and we can't (currently) transform them. */
+ {
+ Seg seg;
+ AVER(SegOfAddr(&seg, transform->arena, old_list[i]));
+ }
+
+ res = TableDefine(transform->oldToNew, (Word)old_list[i], new_list[i]);
+ AVER(res != ResFAIL); /* It's a static error to add the same old twice. */
+ if (res != ResOK)
+ return res;
+ }
+
+ AVERT(Transform, transform);
+
+ return ResOK;
+}
+
+
+/* TransformApply -- transform references on the heap */
+
+static Res transformFix(Seg seg, ScanState ss, Ref *refIO)
+{
+ Ref ref;
+ Transform transform;
+ Res res;
+
+ AVERT_CRITICAL(Seg, seg);
+ AVERT_CRITICAL(ScanState, ss);
+ AVER_CRITICAL(refIO != NULL);
+
+ transform = ss->fixClosure;
+ AVERT_CRITICAL(Transform, transform);
+
+ /* .aborted: If the transform has been aborted, drop through to
+ normal GC fix, making the transform a normal GC. */
+ if (!transform->aborted) {
+ void *refNew;
+
+ ref = *refIO;
+
+ if (TableLookup(&refNew, transform->oldToNew, (Word)ref)) {
+ if (ss->rank == RankAMBIG) {
+ /* .rank-order: We rely on the fact that ambiguous references
+ are fixed first, so that no exact references have been
+ transformed yet. See design.mps.trace.rank. */
+ transform->aborted = TRUE;
+ } else {
+ /* NOTE: We could fix refNew in the table before copying it,
+ since any summaries etc. collected in the scan state will still
+ apply when it's copied. 
That could save a few snap-outs. */ + *refIO = refNew; + } + } + } + + /* Now progress to a normal GC fix. */ + /* TODO: Make a clean interface to this kind of dynamic binding. */ + ss->fix = ss->arena->emergency ? SegFixEmergency : SegFix; + TRACE_SCAN_BEGIN(ss) { + res = TRACE_FIX12(ss, refIO); + } TRACE_SCAN_END(ss); + ss->fix = transformFix; + + return res; +} + + +static void transformCondemn(void *closure, Word old, void *value) +{ + Seg seg = NULL; /* suppress "may be used uninitialized" from GCC 11.3.0 */ + GenDesc gen; + Bool b; + Trace trace = closure; + + AVERT(Trace, trace); + UNUSED(value); + + /* Find segment containing old address. */ + b = SegOfAddr(&seg, trace->arena, (Ref)old); + AVER(b); /* should've been enforced by .old-white */ + + /* Condemn generation containing seg if not already condemned. */ + gen = PoolSegPoolGen(SegPool(seg), seg)->gen; + AVERT(GenDesc, gen); + if (RingIsSingle(&gen->trace[trace->ti].traceRing)) + GenDescStartTrace(gen, trace); +} + + +Res TransformApply(Bool *appliedReturn, Transform transform) +{ + Res res; + Arena arena; + Globals globals; + Trace trace; + double mortality; + + AVER(appliedReturn != NULL); + AVERT(Transform, transform); + + arena = TransformArena(transform); + + /* If there have been any flips since the transform was created, the old + and new pointers will be invalid, since they are not scanned as roots. + The client program must park the arena before applying the transform. */ + if (transform->epoch != ArenaEpoch(arena)) + return ResPARAM; + + globals = ArenaGlobals(arena); + AVERT(Globals, globals); + + /* .park: Parking the arena ensures that there is a trace available + and that no other traces are running, so that the tracer will + dispatch to transformFix correctly. See + impl.c.trace.fix.single. 
*/ + ArenaPark(globals); + + res = TraceCreate(&trace, arena, TraceStartWhyEXTENSION); + AVER(res == ResOK); /* parking should make a trace available */ + if (res != ResOK) + return res; + + /* Condemn the generations containing the transform's old objects, + so that all references to them are scanned. */ + TraceCondemnStart(trace); + TableMap(transform->oldToNew, transformCondemn, trace); + res = TraceCondemnEnd(&mortality, trace); + if (res != ResOK) { + /* Nothing to transform. */ + TraceDestroyInit(trace); + goto done; + } + + trace->fix = transformFix; + trace->fixClosure = transform; + + res = TraceStart(trace, 1.0, 0.0); + AVER(res == ResOK); /* transformFix can't fail */ + + /* If transformFix during traceFlip found ambiguous references and + aborted the transform then the rest of the trace is just a normal + GC (see .aborted). Note that aborting a trace part-way through + is pretty much impossible without corrupting the mutator graph. + + We could optimise this safely at a later date if required, with:: + + if (transform->aborted) { + trace->fix = PoolFix; + trace->fixClosure = NULL; + } + */ + + /* Force the trace to complete now. */ + ArenaPark(globals); + +done: + *appliedReturn = !transform->aborted; + + return ResOK; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2011-2023 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/trans.h b/mps/code/trans.h new file mode 100644 index 00000000000..a572138601a --- /dev/null +++ b/mps/code/trans.h @@ -0,0 +1,63 @@ +/* trans.h: TRANSFORMS INTERFACE + * + * $Id$ + * Copyright 2011-2022 Ravenbrook Limited. See end of file for license. + */ + +#ifndef trans_h +#define trans_h + +#include "mpm.h" + + +typedef struct mps_transform_s *Transform; + +typedef struct OldNewStruct *OldNew; + +extern Res TransformCreate(Transform *transformReturn, Arena arena); + +extern Res TransformAddOldNew(Transform transform, + Ref old_list[], + Ref new_list[], + Count count); + +extern Res TransformApply(Bool *appliedReturn, Transform transform); + +extern void TransformDestroy(Transform transform); + +extern Bool TransformCheck(Transform transform); + +extern Arena TransformArena(Transform transform); + + +#endif /* trans_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2011-2022 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/tree.c b/mps/code/tree.c new file mode 100644 index 00000000000..3a57f7ed214 --- /dev/null +++ b/mps/code/tree.c @@ -0,0 +1,597 @@ +/* tree.c: BINARY TREE IMPLEMENTATION + * + * $Id$ + * Copyright (C) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * Simple binary trees with utilities, for use as building blocks. + * Keep it simple, like Rings (see ring.h). + * + * The performance requirements on tree implementation will depend on + * how each individual function is applied in the MPS. + * + * .note.stack: It's important that the MPS have a bounded stack size, + * and this is a problem for tree algorithms. Basically, we have to + * avoid recursion. . 
+ */ + +#include "tree.h" +#include "mpm.h" + +SRCID(tree, "$Id$"); + + +Bool TreeCheck(Tree tree) +{ + if (tree != TreeEMPTY) { + CHECKL(tree != NULL); + CHECKL(tree->left == TreeEMPTY || tree->left != NULL); + CHECKL(tree->right == TreeEMPTY || tree->right != NULL); + } + return TRUE; +} + +Bool TreeCheckLeaf(Tree tree) +{ + CHECKL(TreeCheck(tree)); + CHECKL(tree != TreeEMPTY); + CHECKL(tree->left == TreeEMPTY); + CHECKL(tree->right == TreeEMPTY); + return TRUE; +} + + +/* TreeDebugCount -- count and check order of tree + * + * This function may be called from a debugger or temporarily inserted + * during development to check a tree's integrity. It may not be called + * from the production MPS because it uses indefinite stack depth. + * See .note.stack. + */ + +static Count TreeDebugCountBetween(Tree node, + TreeCompareFunction compare, + TreeKeyFunction key, + TreeKey min, TreeKey max) +{ + if (node == TreeEMPTY) + return 0; + AVERT(Tree, node); + AVER(min == NULL || compare(node, min) != CompareGREATER); + AVER(max == NULL || compare(node, max) != CompareLESS); + return TreeDebugCountBetween(TreeLeft(node), compare, key, min, key(node)) + + 1 + + TreeDebugCountBetween(TreeRight(node), compare, key, key(node), max); +} + +Count TreeDebugCount(Tree tree, TreeCompareFunction compare, + TreeKeyFunction key) +{ + AVERT(Tree, tree); + return TreeDebugCountBetween(tree, compare, key, NULL, NULL); +} + + +/* TreeFind -- search for a node matching the key + * + * If a matching node is found, sets *treeReturn to that node and returns + * CompareEQUAL. Otherwise returns values useful for inserting a node with + * the key. If the tree is empty, returns CompareEQUAL and sets *treeReturn + * to NULL. Otherwise, sets *treeReturn to a potential parent for the new + * node and returns CompareLESS if the new node should be its left child, + * or CompareGREATER for its right. 
+ */ + +Compare TreeFind(Tree *treeReturn, Tree root, TreeKey key, + TreeCompareFunction compare) +{ + Tree node, parent; + Compare cmp = CompareEQUAL; + + AVERT_CRITICAL(Tree, root); + AVER_CRITICAL(treeReturn != NULL); + AVER_CRITICAL(FUNCHECK(compare)); + /* key is arbitrary */ + + parent = NULL; + node = root; + while (node != TreeEMPTY) { + parent = node; + cmp = compare(node, key); + switch (cmp) { + case CompareLESS: + node = node->left; + break; + case CompareEQUAL: + *treeReturn = node; + return cmp; + case CompareGREATER: + node = node->right; + break; + default: + NOTREACHED; + *treeReturn = NULL; + return cmp; + } + } + + *treeReturn = parent; + return cmp; +} + + +/* TreeFindNext -- search for node containing key, or next node + * + * If there is a node that is greater than key, set *treeReturn to that + * node and return TRUE. + * + * Otherwise, key is greater than all nodes in the tree, so leave + * *treeReturn unchanged and return FALSE. + */ + +Bool TreeFindNext(Tree *treeReturn, Tree root, TreeKey key, + TreeCompareFunction compare) +{ + Tree node, best = NULL; + Bool result = FALSE; + + AVERT(Tree, root); + AVER(treeReturn != NULL); + AVER(FUNCHECK(compare)); + /* key is arbitrary */ + + node = root; + while (node != TreeEMPTY) { + Compare cmp = compare(node, key); + switch (cmp) { + case CompareLESS: + best = node; + result = TRUE; + node = node->left; + break; + case CompareEQUAL: + case CompareGREATER: + node = node->right; + break; + default: + NOTREACHED; + return FALSE; + } + } + + *treeReturn = best; + return result; +} + + +/* TreeInsert -- insert a node into a tree + * + * If the key doesn't exist in the tree, inserts a node as a leaf of the + * tree, returning the resulting tree in *treeReturn, and returns TRUE. + * Otherwise, *treeReturn points to the existing matching node, the tree + * is not modified, and returns FALSE. 
+ */ + +Bool TreeInsert(Tree *treeReturn, Tree root, Tree node, + TreeKey key, TreeCompareFunction compare) +{ + Tree parent; + Compare cmp; + + AVER(treeReturn != NULL); + AVERT(Tree, root); + AVER(TreeCheckLeaf(node)); + AVER(FUNCHECK(compare)); + /* key is arbitrary */ + + cmp = TreeFind(&parent, root, key, compare); + switch (cmp) { + case CompareLESS: + parent->left = node; + break; + case CompareEQUAL: + if (parent != NULL) { + *treeReturn = parent; + return FALSE; + } + AVER(root == TreeEMPTY); + root = node; + break; + case CompareGREATER: + parent->right = node; + break; + default: + NOTREACHED; + *treeReturn = NULL; + return FALSE; + } + + *treeReturn = root; + return TRUE; +} + + +#if 0 /* This code is currently not in use in the MPS */ + +/* TreeTraverseMorris -- traverse tree inorder in constant space + * + * The tree may not be accessed or modified during the traversal, and + * the traversal must complete in order to repair the tree. + * + * The visitor should return FALSE to terminate the traversal early, + * in which case FALSE is returned. + * + * TreeTraverse is generally superior if comparisons are cheap, but + * TreeTraverseMorris does not require any comparison function. + * + * + * + * Joseph M. Morris (1979). "Traversing Binary Trees Simply and Cheaply". + * Information Processing Letters 9:5 pp. 197-200. 
+ */ + +Bool TreeTraverseMorris(Tree tree, TreeVisitor visit, + void *closure) +{ + Tree node; + Bool visiting = TRUE; + + AVERT(Tree, tree); + AVER(FUNCHECK(visit)); + /* closure arbitrary */ + + node = tree; + while (node != TreeEMPTY) { + if (node->left == TreeEMPTY) { + if (visiting) + visiting = visit(node, closure); + node = node->right; + } else { + Tree pre = node->left; + for (;;) { + if (pre->right == TreeEMPTY) { + pre->right = node; + node = node->left; + break; + } + if (pre->right == node) { + pre->right = TreeEMPTY; + if (visiting) + visiting = visit(node, closure); + else if (node == tree) + return FALSE; + node = node->right; + break; + } + pre = pre->right; + } + } + } + + return visiting; +} + +#endif /* not currently in use */ + + +/* TreeTraverse -- traverse tree in-order using pointer reversal + * + * The tree may not be accessed or modified during the traversal, and + * the traversal must complete in order to repair the tree. + * + * The visitor should return FALSE to terminate the traversal early, + * in which case FALSE is returned. + * + * TreeTraverseMorris is an alternative when no cheap comparison is available. 
+ */ + +static Tree stepDownLeft(Tree node, Tree *parentIO) +{ + Tree parent = *parentIO; + Tree child = TreeLeft(node); + TreeSetLeft(node, parent); + *parentIO = node; + return child; +} + +static Tree stepDownRight(Tree node, Tree *parentIO) +{ + Tree parent = *parentIO; + Tree child = TreeRight(node); + TreeSetRight(node, parent); + *parentIO = node; + return child; +} + +static Tree stepUpRight(Tree node, Tree *parentIO) +{ + Tree parent = *parentIO; + Tree grandparent = TreeLeft(parent); + TreeSetLeft(parent, node); + *parentIO = grandparent; + return parent; +} + +static Tree stepUpLeft(Tree node, Tree *parentIO) +{ + Tree parent = *parentIO; + Tree grandparent = TreeRight(parent); + TreeSetRight(parent, node); + *parentIO = grandparent; + return parent; +} + +Bool TreeTraverse(Tree tree, + TreeCompareFunction compare, + TreeKeyFunction key, + TreeVisitor visit, void *closure) +{ + Tree parent, node; + + AVERT(Tree, tree); + AVER(FUNCHECK(visit)); + /* closure arbitrary */ + + parent = TreeEMPTY; + node = tree; + + if (node == TreeEMPTY) + return TRUE; + +down: + if (TreeHasLeft(node)) { + node = stepDownLeft(node, &parent); + AVER(compare(parent, key(node)) == CompareLESS); + goto down; + } + if (!visit(node, closure)) + goto abort; + if (TreeHasRight(node)) { + node = stepDownRight(node, &parent); + AVER(compare(parent, key(node)) != CompareLESS); + goto down; + } + +up: + if (parent == TreeEMPTY) + return TRUE; + if (compare(parent, key(node)) != CompareLESS) { + node = stepUpLeft(node, &parent); + goto up; + } + node = stepUpRight(node, &parent); + if (!visit(node, closure)) + goto abort; + if (!TreeHasRight(node)) + goto up; + node = stepDownRight(node, &parent); + goto down; + +abort: + if (parent == TreeEMPTY) + return FALSE; + if (compare(parent, key(node)) != CompareLESS) + node = stepUpLeft(node, &parent); + else + node = stepUpRight(node, &parent); + goto abort; +} + + +/* TreeRotateLeft -- Rotate right child edge of node + * + * Rotates node, 
right child of node, and left child of right + * child of node, leftwards in the order stated. Preserves tree + * ordering. + */ + +void TreeRotateLeft(Tree *treeIO) +{ + Tree tree, right; + + AVER(treeIO != NULL); + tree = *treeIO; + AVERT(Tree, tree); + right = TreeRight(tree); + AVERT(Tree, right); + + TreeSetRight(tree, TreeLeft(right)); + TreeSetLeft(right, tree); + + *treeIO = right; +} + + +/* TreeRotateRight -- Rotate left child edge of node + * + * Rotates node, left child of node, and right child of left + * child of node, leftwards in the order stated. Preserves tree + * ordering. + */ + +void TreeRotateRight(Tree *treeIO) +{ + Tree tree, left; + + AVER(treeIO != NULL); + tree = *treeIO; + AVERT(Tree, tree); + left = TreeLeft(tree); + AVERT(Tree, left); + + TreeSetLeft(*treeIO, TreeRight(left)); + TreeSetRight(left, *treeIO); + + *treeIO = left; +} + + +/* TreeReverseLeftSpine -- reverse the pointers on the left spine + * + * Descends the left spine of a tree, updating each node's left child + * to point to its parent instead. The root's left child is set to + * TreeEMPTY. Returns the leftmost child, or TreeEMPTY if the tree + * was empty. + */ + +Tree TreeReverseLeftSpine(Tree tree) +{ + Tree node, parent; + + AVERT(Tree, tree); + + parent = TreeEMPTY; + node = tree; + while (node != TreeEMPTY) { + Tree child = TreeLeft(node); + TreeSetLeft(node, parent); + parent = node; + node = child; + } + + return parent; +} + + +/* TreeReverseRightSpine -- reverse the pointers on the right spine + * + * Descends the right spine of a tree, updating each node's right child + * to point to its parent instead. The root's right child is set to + * TreeEMPTY. Returns the rightmost child or TreeEMPTY if the tree + * was empty. 
+ */ + +Tree TreeReverseRightSpine(Tree tree) +{ + Tree node, parent; + + AVERT(Tree, tree); + + parent = TreeEMPTY; + node = tree; + while (node != TreeEMPTY) { + Tree child = TreeRight(node); + TreeSetRight(node, parent); + parent = node; + node = child; + } + + return parent; +} + + +/* TreeToVine -- unbalance a tree into a single right spine */ + +Count TreeToVine(Tree *link) +{ + Count count = 0; + + AVER(link != NULL); + AVERT(Tree, *link); + + while (*link != TreeEMPTY) { + while (TreeHasLeft(*link)) + TreeRotateRight(link); + link = &((*link)->right); + ++count; + } + + return count; +} + + +/* TreeBalance -- rebalance a tree + * + * Linear time, constant space rebalance. + * + * Quentin F. Stout and Bette L. Warren, + * "Tree Rebalancing in Optimal Time and Space", + * Communications of the ACM, Vol. 29, No. 9 (September 1986), p. 902-908 + */ + +void TreeBalance(Tree *treeIO) +{ + Count depth; + + AVER(treeIO != NULL); + AVERT(Tree, *treeIO); + + depth = TreeToVine(treeIO); + + if (depth > 2) { + Count n = depth - 1; + do { + Count m = n / 2, i; + Tree *link = treeIO; + for (i = 0; i < m; ++i) { + TreeRotateLeft(link); + link = &((*link)->right); + } + n = n - m - 1; + } while (n > 1); + } +} + + +/* TreeTraverseAndDelete -- traverse a tree while deleting nodes + * + * The visitor function must return TRUE to delete the current node, + * or FALSE to keep it. + * + * . + */ +void TreeTraverseAndDelete(Tree *treeIO, TreeVisitor visitor, + void *closure) +{ + Tree *treeref = treeIO; + + AVER(treeIO != NULL); + AVERT(Tree, *treeIO); + AVER(FUNCHECK(visitor)); + /* closure arbitrary */ + + TreeToVine(treeIO); + + while (*treeref != TreeEMPTY) { + Tree tree = *treeref; /* Current node. */ + Tree *nextref = &tree->right; /* Location of pointer to next node. */ + Tree next = *nextref; /* Next node. */ + if ((*visitor)(tree, closure)) { + /* Delete current node. */ + *treeref = next; + } else { + /* Keep current node. 
*/ + treeref = nextref; + } + } + TreeBalance(treeIO); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/tree.h b/mps/code/tree.h new file mode 100644 index 00000000000..f8f5a9687ee --- /dev/null +++ b/mps/code/tree.h @@ -0,0 +1,173 @@ +/* tree.h: BINARY TREE HEADER + * + * $Id$ + * Copyright (C) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * Simple binary trees with utilities, for use as building blocks. + * Keep it simple, like Rings (see ring.h). 
+ */ + +#ifndef tree_h +#define tree_h + +#include "check.h" +#include "mpmtypes.h" + + +/* TreeStruct -- binary tree structure + * + * The tree structure is used in a field in other structures in order + * to link them together in a binary tree. + */ + +typedef struct TreeStruct *Tree; +typedef struct TreeStruct { + Tree left, right; +} TreeStruct; + +typedef Res (*TreeDescribeFunction)(Tree tree, mps_lib_FILE *stream); + + +/* TreeKeyFunction and TreeCompareFunction -- ordered binary trees + * + * Binary trees are almost always ordered, and these types provide the + * abstraction for ordering. A TreeCompareFunction method returns + * whether a key is less than, equal to, or greater than the key in a + * tree node. A TreeKeyFunction extracts a key from a node, depending + * on how TreeStruct is embedded within its parent structure. + */ + +typedef void *TreeKey; +typedef Compare (*TreeCompareFunction)(Tree tree, TreeKey key); +typedef TreeKey (*TreeKeyFunction)(Tree tree); + + +/* When storing Addrs in a tree, it is fastest to cast the Addr + * directly to a TreeKey. This assumes that Addr and TreeKey are + * compatible, possibly breaking . On an exotic + * platform where the types are not convertible, take the address of + * the variable in TreeKeyOfAddrVar, and dereference the address in + * AddrOfTreeKey. + */ +#define TreeKeyOfAddrVar(var) ((TreeKey)(var)) +#define AddrOfTreeKey(key) ((Addr)(key)) + + +/* TreeEMPTY -- the empty tree + * + * TreeEMPTY is the tree with no nodes, and hence unable to satisfy its + * olfactory senses. Empty trees should not be represented with NULL, + * as this is ambiguous. However, TreeEMPTY is in fact a null pointer for + * performance. To check whether you have it right, try temporarily + * defining TreeEMPTY to (Tree)1 or similar. 
+ */ + +#define TreeEMPTY ((Tree)0) + + +extern Bool TreeCheck(Tree tree); +extern Bool TreeCheckLeaf(Tree tree); +extern Count TreeDebugCount(Tree tree, TreeCompareFunction compare, + TreeKeyFunction key); + +#define TreeInit(tree) \ + BEGIN \ + Tree _tree = (tree); \ + AVER(_tree != NULL); \ + _tree->left = TreeEMPTY; \ + _tree->right = TreeEMPTY; \ + AVERT(Tree, _tree); \ + END + +#define TreeFinish(tree) \ + BEGIN \ + Tree _tree = (tree); \ + AVERT(Tree, _tree); \ + END + +#define TREE_ELT(type, field, node) \ + PARENT(type ## Struct, field, node) + +#define TreeLeft(tree) RVALUE((tree)->left) + +#define TreeRight(tree) RVALUE((tree)->right) + +#define TreeSetLeft(tree, child) \ + BEGIN \ + (tree)->left = (child); \ + END + +#define TreeSetRight(tree, child) \ + BEGIN \ + (tree)->right = (child); \ + END + +#define TreeClearLeft(tree) \ + BEGIN \ + (tree)->left = TreeEMPTY; \ + END + +#define TreeClearRight(tree) \ + BEGIN \ + (tree)->right = TreeEMPTY; \ + END + +#define TreeHasLeft(tree) (TreeLeft(tree) != TreeEMPTY) +#define TreeHasRight(tree) (TreeRight(tree) != TreeEMPTY) + +extern Compare TreeFind(Tree *treeReturn, Tree root, + TreeKey key, TreeCompareFunction compare); +extern Bool TreeFindNext(Tree *treeReturn, Tree root, + TreeKey key, TreeCompareFunction compare); + +extern Bool TreeInsert(Tree *treeReturn, Tree root, Tree node, + TreeKey key, TreeCompareFunction compare); + +typedef Bool TreeVisitor(Tree tree, void *closure); +extern Bool TreeTraverse(Tree tree, + TreeCompareFunction compare, + TreeKeyFunction key, + TreeVisitor visit, void *closure); +extern Bool TreeTraverseMorris(Tree tree, TreeVisitor visit, + void *closure); + +extern void TreeRotateLeft(Tree *nodeIO); +extern void TreeRotateRight(Tree *nodeIO); +extern Tree TreeReverseLeftSpine(Tree tree); +extern Tree TreeReverseRightSpine(Tree tree); +extern Count TreeToVine(Tree *treeIO); +extern void TreeBalance(Tree *treeIO); + +extern void TreeTraverseAndDelete(Tree *treeIO, TreeVisitor 
visitor, + void *closure); + +#endif /* tree_h */ + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/version.c b/mps/code/version.c new file mode 100644 index 00000000000..ce9dbb619cc --- /dev/null +++ b/mps/code/version.c @@ -0,0 +1,103 @@ +/* version.c: VERSION INSPECTION + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. + * See end of file for license. + * + * PURPOSE + * + * The purpose of this module is to provide a means by which the + * version of the MM library being used can be determined. 
+ * + * DESIGN + * + * .design: See , but -- to let you in on a + * secret -- it works by declaring a string with all the necessary info + * in. + */ + +#include "mpm.h" + +SRCID(version, "$Id$"); + + +/* MPS_RELEASE -- the release name + * + * .release.use: This macro is used (i) to prepare MPSVersionString + * (see below) and so identify any binary built using this source + * file; (ii) by the Sphinx documentation (see manual/source/conf.py) + * to identify the documentation; (iii) by the Autoconf script (see + * configure.ac) to identify the configure script. + * + * .release.meaning: This names the next release that is expected to + * be built from these sources. + * + * .release.procedure: After making a version branch, update this + * string in the master sources to name the next version. After making + * a point release, update this string to name the next point release. + * + * .release.old: before 2006-02-01 the style was "release.epcore.chub". + */ + +#define MPS_RELEASE "release/1.118.0" + + +/* MPSCopyrightNotice -- copyright notice for the binary + * + * .copyright.year: This one should have the current year in it + * (assuming we've made any substantial changes to the library this year). + */ + +extern char MPSCopyrightNotice[]; +char MPSCopyrightNotice[] = + "Portions copyright (c) 2010-2020 Ravenbrook Limited and Global Graphics Software."; + + +/* MPSVersion -- return version string + * + * The value of MPSVersion is a declared object comprising the + * concatenation of all the version info. The "@(#)" prefix is the + * convention used by the BSD Unix command what(1); see also + * . + */ + +extern char MPSVersionString[]; +char MPSVersionString[] = + "@(#)Ravenbrook MPS, " + "product." MPS_PROD_STRING ", " MPS_RELEASE ", platform." MPS_PF_STRING + ", variety." MPS_VARIETY_STRING ", compiled on " __DATE__ " " __TIME__; + +char *MPSVersion(void) +{ + return MPSVersionString; +} + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/vm.c b/mps/code/vm.c new file mode 100644 index 00000000000..107e3dc6326 --- /dev/null +++ b/mps/code/vm.c @@ -0,0 +1,121 @@ +/* vm.c: VIRTUAL MEMORY IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + * + * This is the portable part of the virtual memory implementation. 
+ */ + +#include "mpm.h" +#include "vm.h" + +SRCID(vm, "$Id$"); + + +/* VMCheck -- check a VM structure */ + +Bool VMCheck(VM vm) +{ + CHECKS(VM, vm); + CHECKL(vm->base != (Addr)0); + CHECKL(vm->limit != (Addr)0); + CHECKL(vm->base < vm->limit); + CHECKL(ArenaGrainSizeCheck(vm->pageSize)); + CHECKL(AddrIsAligned(vm->base, vm->pageSize)); + CHECKL(AddrIsAligned(vm->limit, vm->pageSize)); + CHECKL(vm->block != NULL); + CHECKL((Addr)vm->block <= vm->base); + CHECKL(vm->mapped <= vm->reserved); + return TRUE; +} + + +/* VMPageSize -- return the page size cached in the VM */ + +Size (VMPageSize)(VM vm) +{ + AVERT(VM, vm); + + return VMPageSize(vm); +} + + +/* VMBase -- return the base address of the memory reserved */ + +Addr (VMBase)(VM vm) +{ + AVERT(VM, vm); + + return VMBase(vm); +} + + +/* VMLimit -- return the limit address of the memory reserved */ + +Addr (VMLimit)(VM vm) +{ + AVERT(VM, vm); + + return VMLimit(vm); +} + + +/* VMReserved -- return the amount of address space reserved */ + +Size (VMReserved)(VM vm) +{ + AVERT(VM, vm); + + return VMReserved(vm); +} + + +/* VMMapped -- return the amount of memory actually mapped */ + +Size (VMMapped)(VM vm) +{ + AVERT(VM, vm); + + return VMMapped(vm); +} + + +/* VMCopy -- copy VM descriptor */ + +void VMCopy(VM dest, VM src) +{ + AVER(dest != NULL); + AVERT(VM, src); + + (void)mps_lib_memcpy(dest, src, sizeof(VMStruct)); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/vm.h b/mps/code/vm.h new file mode 100644 index 00000000000..0d3d5b72360 --- /dev/null +++ b/mps/code/vm.h @@ -0,0 +1,83 @@ +/* vm.h: VIRTUAL MEMORY INTERFACE + * + * $Id$ + * Copyright (c) 2014-2020 Ravenbrook Limited. See end of file for license. + */ + +#ifndef vm_h +#define vm_h + +#include "mpmtypes.h" + + +/* VMStruct -- virtual memory structure + * + * Unlike most other datatypes we permit this structure to be moved + * around in memory, and in particular, allocated temporarily on the + * stack, to help with bootstrapping. Look for uses of VMCopy. 
+ */ + +#define VMSig ((Sig)0x519B3999) /* SIGnature VM */ + +typedef struct VMStruct { + Sig sig; /* design.mps.sig.field */ + Size pageSize; /* operating system page size */ + void *block; /* unaligned base of mmap'd memory */ + Addr base, limit; /* aligned boundaries of reserved space */ + Size reserved; /* total reserved address space */ + Size mapped; /* total mapped memory */ +} VMStruct; + + +#define VMPageSize(vm) RVALUE((vm)->pageSize) +#define VMBase(vm) RVALUE((vm)->base) +#define VMLimit(vm) RVALUE((vm)->limit) +#define VMReserved(vm) RVALUE((vm)->reserved) +#define VMMapped(vm) RVALUE((vm)->mapped) + +extern Size PageSize(void); +extern Size (VMPageSize)(VM vm); +extern Bool VMCheck(VM vm); +extern Res VMParamFromArgs(void *params, size_t paramSize, ArgList args); +extern Res VMInit(VM vmReturn, Size size, Size grainSize, void *params); +extern void VMFinish(VM vm); +extern Addr (VMBase)(VM vm); +extern Addr (VMLimit)(VM vm); +extern Res VMMap(VM vm, Addr base, Addr limit); +extern void VMUnmap(VM vm, Addr base, Addr limit); +extern Size (VMReserved)(VM vm); +extern Size (VMMapped)(VM vm); +extern void VMCopy(VM dest, VM src); + + +#endif /* vm_h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/vman.c b/mps/code/vman.c new file mode 100644 index 00000000000..fba4afe7d5a --- /dev/null +++ b/mps/code/vman.c @@ -0,0 +1,177 @@ +/* vman.c: ANSI VM: MALLOC-BASED PSEUDO MEMORY MAPPING + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + */ + +#include "mpm.h" +#include "vm.h" + +#include /* for malloc and free */ + +SRCID(vman, "$Id$"); + + +/* PageSize -- return the page size */ + +Size PageSize(void) +{ + return VMAN_PAGE_SIZE; +} + + +Res VMParamFromArgs(void *params, size_t paramSize, ArgList args) +{ + AVER(params != NULL); + AVERT(ArgList, args); + UNUSED(paramSize); + return ResOK; +} + + +/* VMInit -- reserve some virtual address space, and create a VM structure */ + +Res VMInit(VM vm, Size size, Size grainSize, void *params) +{ + void *vbase; + Size pageSize, reserved; + + AVER(vm != NULL); + AVERT(ArenaGrainSize, grainSize); + AVER(size > 0); + AVER(params != NULL); + + pageSize = PageSize(); + + /* Grains must consist of whole pages. */ + AVER(grainSize % pageSize == 0); + + /* Check that the rounded-up sizes will fit in a Size. 
*/ + size = SizeRoundUp(size, grainSize); + if (size < grainSize || size > (Size)(size_t)-1) + return ResRESOURCE; + /* Note that because we add a whole grainSize here (not grainSize - + * pageSize), we are not in danger of overflowing vm->limit even if + * malloc were perverse enough to give us a block at the end of + * memory. Compare vmix.c#.assume.not-last. */ + reserved = size + grainSize; + if (reserved < grainSize || reserved > (Size)(size_t)-1) + return ResRESOURCE; + + vbase = malloc((size_t)reserved); + if (vbase == NULL) + return ResMEMORY; + (void)mps_lib_memset(vbase, VMJunkBYTE, reserved); + + vm->pageSize = pageSize; + vm->block = vbase; + vm->base = AddrAlignUp(vbase, grainSize); + vm->limit = AddrAdd(vm->base, size); + AVER(vm->base < vm->limit); /* can't overflow, as discussed above */ + AVER(vm->limit < AddrAdd((Addr)vm->block, reserved)); + vm->reserved = reserved; + vm->mapped = (Size)0; + + vm->sig = VMSig; + AVERT(VM, vm); + + EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm)); + return ResOK; +} + + +/* VMFinish -- release all address space and finish VM structure */ + +void VMFinish(VM vm) +{ + AVERT(VM, vm); + /* Descriptor must not be stored inside its own VM at this point. */ + AVER(PointerAdd(vm, sizeof *vm) <= vm->block + || PointerAdd(vm->block, VMReserved(vm)) <= (Pointer)vm); + /* All address space must have been unmapped. 
*/ + AVER(VMMapped(vm) == (Size)0); + + EVENT1(VMFinish, vm); + + vm->sig = SigInvalid; + + (void)mps_lib_memset(vm->block, VMJunkBYTE, vm->reserved); + free(vm->block); +} + + +/* VMMap -- map the given range of memory */ + +Res VMMap(VM vm, Addr base, Addr limit) +{ + Size size; + + AVER(base != (Addr)0); + AVER(VMBase(vm) <= base); + AVER(base < limit); + AVER(limit <= VMLimit(vm)); + AVER(AddrIsAligned(base, vm->pageSize)); + AVER(AddrIsAligned(limit, vm->pageSize)); + + size = AddrOffset(base, limit); + (void)mps_lib_memset((void *)base, VMJunkBYTE, size); + + vm->mapped += size; + AVER(VMMapped(vm) <= VMReserved(vm)); + + EVENT3(VMMap, vm, base, limit); + return ResOK; +} + + +/* VMUnmap -- unmap the given range of memory */ + +void VMUnmap(VM vm, Addr base, Addr limit) +{ + Size size; + + AVER(base != (Addr)0); + AVER(VMBase(vm) <= base); + AVER(base < limit); + AVER(limit <= VMLimit(vm)); + AVER(AddrIsAligned(base, vm->pageSize)); + AVER(AddrIsAligned(limit, vm->pageSize)); + + size = AddrOffset(base, limit); + AVER(VMMapped(vm) >= size); + + (void)mps_lib_memset((void *)base, VMJunkBYTE, size); + vm->mapped -= size; + + EVENT3(VMUnmap, vm, base, limit); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/vmix.c b/mps/code/vmix.c new file mode 100644 index 00000000000..4bdd0caefad --- /dev/null +++ b/mps/code/vmix.c @@ -0,0 +1,270 @@ +/* vmix.c: VIRTUAL MEMORY MAPPING (POSIX) + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .purpose: This is the implementation of the virtual memory mapping + * interface (vm.h) for Unix-like operating systems. It was created + * by copying vmfr.c (the FreeBSD) implementation (as that seemed to + * use the most standards conforming interfaces). vmfr.c was itself + * copied from vli.c (Linux) which was itself copied from vmo1.c (OSF/1 + * / DIGITAL UNIX / Tru64). + * + * .deployed: Currently used on Darwin (macOS) and FreeBSD. + * + * .design: . .design.mmap: mmap(2) is used to + * reserve address space by creating a mapping with page access none. + * mmap(2) is used to map pages onto store by creating a copy-on-write + * (MAP_PRIVATE) mapping with the flag MAP_ANON. + * + * .non-standard: Note that the MAP_ANON flag is non-standard; it is + * available on Darwin and FreeBSD. .non-standard.linux: Linux seems + * to use MAP_ANONYMOUS instead. 
Some Linux systems make MAP_ANON + * available and deprecate it. .non-standard.sesame: On Linux getting + * a definition of MAP_ANON requires a _BSD_SOURCE to be defined prior + * to ; see config.h. + * + * .assume.not-last: The implementation of VMInit assumes that + * mmap() will not choose a region which contains the last page + * in the address space, so that the limit of the mapped area + * is representable. + * + * .assume.mmap.err: ENOMEM is the only error we really expect to + * get from mmap. The others are either caused by invalid params + * or features we don't use. See mmap(2) for details. + * + * .remap: Possibly this should use mremap to reduce the number of + * distinct mappings. According to our current testing, it doesn't + * seem to be a problem. + */ + +#include "mpm.h" + +#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC) +#error "vmix.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC" +#endif + +#include "vm.h" + +#include /* errno */ +#include /* sig_atomic_t */ +#include /* see .feature.li in config.h */ +#include /* mmap, munmap */ +#include /* sysconf, _SC_PAGESIZE */ + +SRCID(vmix, "$Id$"); + + +/* PageSize -- return operating system page size */ + +Size PageSize(void) +{ + long pageSize; + + /* Find out the operating system page size + (see design.mps.vm.impl.ix.page.size) */ + pageSize = sysconf(_SC_PAGESIZE); + + /* Check the page size will fit in a Size. 
*/ + AVER((unsigned long)pageSize <= (unsigned long)(Size)-1); + + return (Size)pageSize; +} + + +Res VMParamFromArgs(void *params, size_t paramSize, ArgList args) +{ + AVER(params != NULL); + AVERT(ArgList, args); + UNUSED(paramSize); + return ResOK; +} + + +/* VMInit -- reserve some virtual address space, and create a VM structure */ + +Res VMInit(VM vm, Size size, Size grainSize, void *params) +{ + Size pageSize, reserved; + void *vbase; + + AVER(vm != NULL); + AVERT(ArenaGrainSize, grainSize); + AVER(size > 0); + AVER(params != NULL); + + pageSize = PageSize(); + + /* Grains must consist of whole pages. */ + AVER(grainSize % pageSize == 0); + + /* Check that the rounded-up sizes will fit in a Size. */ + size = SizeRoundUp(size, grainSize); + if (size < grainSize || size > (Size)(size_t)-1) + return ResRESOURCE; + reserved = size + grainSize - pageSize; + if (reserved < grainSize || reserved > (Size)(size_t)-1) + return ResRESOURCE; + + /* See .assume.not-last. */ + vbase = mmap(0, reserved, + PROT_NONE, MAP_ANON | MAP_PRIVATE, + -1, 0); + /* On Darwin the MAP_FAILED return value is not documented, but does + * work. MAP_FAILED _is_ documented by POSIX. + */ + if (vbase == MAP_FAILED) { + int e = errno; + AVER(e == ENOMEM); /* .assume.mmap.err */ + return ResRESOURCE; + } + + vm->pageSize = pageSize; + vm->block = vbase; + vm->base = AddrAlignUp(vbase, grainSize); + vm->limit = AddrAdd(vm->base, size); + AVER(vm->base < vm->limit); /* .assume.not-last */ + AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved)); + vm->reserved = reserved; + vm->mapped = 0; + + vm->sig = VMSig; + AVERT(VM, vm); + + EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm)); + return ResOK; +} + + +/* VMFinish -- release all address space and finish VM structure */ + +void VMFinish(VM vm) +{ + int r; + + AVERT(VM, vm); + /* Descriptor must not be stored inside its own VM at this point. 
*/ + AVER(PointerAdd(vm, sizeof *vm) <= vm->block + || PointerAdd(vm->block, VMReserved(vm)) <= (Pointer)vm); + /* All address space must have been unmapped. */ + AVER(VMMapped(vm) == (Size)0); + + EVENT1(VMFinish, vm); + + vm->sig = SigInvalid; + + r = munmap(vm->block, vm->reserved); + AVER(r == 0); +} + + +/* Value to use for protection of newly allocated pages. + * We use a global variable and not a constant so that we can clear + * the executable flag from future requests if Apple Hardened Runtime + * is detected. See for details. */ + +static sig_atomic_t vm_prot = PROT_READ | PROT_WRITE | PROT_EXEC; + + +/* VMMap -- map the given range of memory */ + +Res VMMap(VM vm, Addr base, Addr limit) +{ + Size size; + void *result; + + AVERT(VM, vm); + AVER(sizeof(void *) == sizeof(Addr)); + AVER(base < limit); + AVER(base >= VMBase(vm)); + AVER(limit <= VMLimit(vm)); + AVER(AddrIsAligned(base, vm->pageSize)); + AVER(AddrIsAligned(limit, vm->pageSize)); + + size = AddrOffset(base, limit); + + result = mmap((void *)base, (size_t)size, (int)vm_prot, + MAP_ANON | MAP_PRIVATE | MAP_FIXED, + -1, 0); + if (MAYBE_HARDENED_RUNTIME && result == MAP_FAILED && errno == EACCES + && (vm_prot & PROT_WRITE) && (vm_prot & PROT_EXEC)) + { + /* Apple Hardened Runtime is enabled, so that we cannot have + * memory that is simultaneously writable and executable. Handle + * this by dropping the executable part of the request. See + * for details. 
*/ + vm_prot = PROT_READ | PROT_WRITE; + result = mmap((void *)base, (size_t)size, vm_prot, + MAP_ANON | MAP_PRIVATE | MAP_FIXED, + -1, 0); + } + if (result == MAP_FAILED) { + AVER(errno == ENOMEM); /* .assume.mmap.err */ + return ResMEMORY; + } + + vm->mapped += size; + AVER(VMMapped(vm) <= VMReserved(vm)); + + EVENT3(VMMap, vm, base, limit); + return ResOK; +} + + +/* VMUnmap -- unmap the given range of memory */ + +void VMUnmap(VM vm, Addr base, Addr limit) +{ + Size size; + void *addr; + + AVERT(VM, vm); + AVER(base < limit); + AVER(base >= VMBase(vm)); + AVER(limit <= VMLimit(vm)); + AVER(AddrIsAligned(base, vm->pageSize)); + AVER(AddrIsAligned(limit, vm->pageSize)); + + size = AddrOffset(base, limit); + AVER(size <= VMMapped(vm)); + + /* see */ + addr = mmap((void *)base, (size_t)size, + PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_FIXED, + -1, 0); + AVER(addr == (void *)base); + + vm->mapped -= size; + + EVENT3(VMUnmap, vm, base, limit); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/vmw3.c b/mps/code/vmw3.c new file mode 100644 index 00000000000..de12e0bd2a9 --- /dev/null +++ b/mps/code/vmw3.c @@ -0,0 +1,259 @@ +/* vmw3.c: VIRTUAL MEMORY MAPPING FOR WIN32 + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * .design: . + * + * .purpose: This is the implementation of the virtual memory mapping + * interface (vm.h) for Win32s. + * + * The documentation for Win32 used is the "Win32 Programmer's Reference" + * provided with Microsoft Visual C++ 2.0. + * + * VirtualAlloc is used to reserve address space and to "commit" (map) + * address ranges onto storage. VirtualFree is used to release and + * "decommit" (unmap) pages. These functions are documented in the + * Win32 SDK help, under System Services/Memory Management. + * + * .assume.free.success: We assume that VirtualFree will never return + * an error; this is because we always pass in legal parameters + * (hopefully). + * + * .assume.not-last: We assume that VirtualAlloc will never return + * a block of memory that occupies the last page in memory, so + * that limit is representable and bigger than base. + * + * .assume.lpvoid-addr: We assume that the windows type LPVOID and + * the MM type Addr are assignment-compatible. + * + * .assume.sysalign: We assume that the page size on the system + * is a power of two. + * + * Notes + * 1. 
GetSystemInfo returns a thing called szAllocationGranularity + * the purpose of which is unclear but which might affect the + * reservation of address space. Experimentally, it does not. + * Microsoft's documentation is extremely unclear on this point. + * richard 1995-02-15 + */ + +#include "mpm.h" + +#if !defined(MPS_OS_W3) +#error "vmw3.c is specific to MPS_OS_W3" +#endif + +#include "mpswin.h" +#include "vm.h" + +SRCID(vmw3, "$Id$"); + + +/* PageSize -- return the operating system page size */ + +Size PageSize(void) +{ + SYSTEM_INFO si; + + /* Find out the page size from the OS */ + GetSystemInfo(&si); + + /* Check the page size will fit in a Size. */ + AVER(si.dwPageSize <= (Size)(SIZE_T)-1); + + return (Size)si.dwPageSize; +} + + +typedef struct VMParamsStruct { + /* TODO: Add sig and check with AVERT in VMInit and CHECKD in + VMArenaCheck. */ + Bool topDown; +} VMParamsStruct, *VMParams; + +static const VMParamsStruct vmParamsDefaults = { + /* .topDown = */ FALSE, +}; + +Res VMParamFromArgs(void *params, size_t paramSize, ArgList args) +{ + VMParams vmParams; + ArgStruct arg; + AVER(params != NULL); + AVERT(ArgList, args); + AVER(paramSize >= sizeof(VMParamsStruct)); + UNUSED(paramSize); + vmParams = (VMParams)params; + memcpy(vmParams, &vmParamsDefaults, sizeof(VMParamsStruct)); + if (ArgPick(&arg, args, MPS_KEY_VMW3_TOP_DOWN)) + vmParams->topDown = arg.val.b; + return ResOK; +} + + +/* VMInit -- reserve some virtual address space, and create a VM structure */ + +Res VMInit(VM vm, Size size, Size grainSize, void *params) +{ + LPVOID vbase; + Size pageSize, reserved; + VMParams vmParams = params; + + AVER(vm != NULL); + AVERT(ArenaGrainSize, grainSize); + AVER(size > 0); + AVER(params != NULL); + + AVER(COMPATTYPE(LPVOID, Addr)); /* .assume.lpvoid-addr */ + AVER(COMPATTYPE(SIZE_T, Size)); + + pageSize = PageSize(); + + /* Grains must consist of whole pages. */ + AVER(grainSize % pageSize == 0); + + /* Check that the rounded-up sizes will fit in a Size. 
*/ + size = SizeRoundUp(size, grainSize); + if (size < grainSize || size > (Size)(SIZE_T)-1) + return ResRESOURCE; + reserved = size + grainSize - pageSize; + if (reserved < grainSize || reserved > (Size)(SIZE_T)-1) + return ResRESOURCE; + + /* Allocate the address space. */ + vbase = VirtualAlloc(NULL, + reserved, + vmParams->topDown ? + MEM_RESERVE | MEM_TOP_DOWN : + MEM_RESERVE, + PAGE_NOACCESS); + if (vbase == NULL) + return ResRESOURCE; + + AVER(AddrIsAligned(vbase, pageSize)); + + vm->pageSize = pageSize; + vm->block = vbase; + vm->base = AddrAlignUp(vbase, grainSize); + vm->limit = AddrAdd(vm->base, size); + AVER(vm->base < vm->limit); /* .assume.not-last */ + AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved)); + vm->reserved = reserved; + vm->mapped = 0; + + vm->sig = VMSig; + AVERT(VM, vm); + + EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm)); + return ResOK; +} + + +/* VMFinish -- release address space and finish the VM structure */ + +void VMFinish(VM vm) +{ + BOOL b; + + AVERT(VM, vm); + /* Descriptor must not be stored inside its own VM at this point. */ + AVER(PointerAdd(vm, sizeof *vm) <= vm->block + || PointerAdd(vm->block, VMReserved(vm)) <= (Pointer)vm); + /* All address space must have been unmapped. */ + AVER(VMMapped(vm) == (Size)0); + + EVENT1(VMFinish, vm); + + vm->sig = SigInvalid; + + b = VirtualFree((LPVOID)vm->block, (SIZE_T)0, MEM_RELEASE); + AVER(b != 0); +} + + +/* VMMap -- map the given range of memory */ + +Res VMMap(VM vm, Addr base, Addr limit) +{ + LPVOID b; + + AVERT(VM, vm); + AVER(AddrIsAligned(base, vm->pageSize)); + AVER(AddrIsAligned(limit, vm->pageSize)); + AVER(VMBase(vm) <= base); + AVER(base < limit); + AVER(limit <= VMLimit(vm)); + + /* .improve.query-map: We could check that the pages we are about to + * map are unmapped using VirtualQuery. 
*/ + + b = VirtualAlloc((LPVOID)base, (SIZE_T)AddrOffset(base, limit), + MEM_COMMIT, PAGE_EXECUTE_READWRITE); + if (b == NULL) + return ResMEMORY; + AVER((Addr)b == base); /* base should've been aligned */ + + vm->mapped += AddrOffset(base, limit); + AVER(VMMapped(vm) <= VMReserved(vm)); + + EVENT3(VMMap, vm, base, limit); + return ResOK; +} + + +/* VMUnmap -- unmap the given range of memory */ + +void VMUnmap(VM vm, Addr base, Addr limit) +{ + BOOL b; + Size size; + + AVERT(VM, vm); + AVER(AddrIsAligned(base, vm->pageSize)); + AVER(AddrIsAligned(limit, vm->pageSize)); + AVER(VMBase(vm) <= base); + AVER(base < limit); + AVER(limit <= VMLimit(vm)); + + size = AddrOffset(base, limit); + AVER(size <= VMMapped(vm)); + + /* .improve.query-unmap: Could check that the pages we are about */ + /* to unmap are mapped, using VirtualQuery. */ + b = VirtualFree((LPVOID)base, (SIZE_T)size, MEM_DECOMMIT); + AVER(b != 0); /* .assume.free.success */ + vm->mapped -= size; + + EVENT3(VMUnmap, vm, base, limit); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/w3i3mv.nmk b/mps/code/w3i3mv.nmk new file mode 100644 index 00000000000..891df209616 --- /dev/null +++ b/mps/code/w3i3mv.nmk @@ -0,0 +1,51 @@ +# w3i3mv.nmk: WINDOWS (IA-32) NMAKE FILE -*- makefile -*- +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = w3i3mv + +MPMPF = \ + [lockw3] \ + [mpsiw3] \ + [prmci3] \ + [prmcw3] \ + [prmcw3i3] \ + [protw3] \ + [spw3i3] \ + [thw3] \ + [vmw3] + +!INCLUDE commpre.nmk +!INCLUDE mv.nmk +!INCLUDE commpost.nmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/w3i3pc.nmk b/mps/code/w3i3pc.nmk new file mode 100644 index 00000000000..c0bf15bf20b --- /dev/null +++ b/mps/code/w3i3pc.nmk @@ -0,0 +1,51 @@ +# w3i3pc.nmk: WINDOWS (IA-32) NMAKE FILE -*- makefile -*- +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = w3i3pc + +MPMPF = \ + [lockw3] \ + [mpsiw3] \ + [prmci3] \ + [prmcw3] \ + [prmcw3i3] \ + [protw3] \ + [spw3i3] \ + [thw3] \ + [vmw3] + +!INCLUDE commpre.nmk +!INCLUDE pc.nmk +!INCLUDE commpost.nmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/w3i6mv.bat b/mps/code/w3i6mv.bat new file mode 100644 index 00000000000..9dbe8b8bac6 --- /dev/null +++ b/mps/code/w3i6mv.bat @@ -0,0 +1,10 @@ +@rem w3i6mv.bat -- set up and invoke build from raw CMD on Windows +@rem This can be invoked from a plain CMD from the MPS workspace +@rem like this:: +@rem code\w3i6mv.bat +@rem or from Git Bash in Travis CI build machines like this:: +@rem MSYS2_ARG_CONV_EXCL='*' cmd /k 'code\w3i6mv.bat' +@rem which is how it's invoked from .travis.yml +call "C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64 +cd code +nmake /f w3i6mv.nmk diff --git a/mps/code/w3i6mv.nmk b/mps/code/w3i6mv.nmk new file mode 100644 index 00000000000..f3fe4753389 --- /dev/null +++ b/mps/code/w3i6mv.nmk @@ -0,0 +1,51 @@ +# w3i6mv.nmk: WINDOWS (x86-64) NMAKE FILE -*- makefile -*- +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = w3i6mv + +MPMPF = \ + [lockw3] \ + [mpsiw3] \ + [prmci6] \ + [prmcw3] \ + [prmcw3i6] \ + [protw3] \ + [spw3i6] \ + [thw3] \ + [vmw3] + +!INCLUDE commpre.nmk +!INCLUDE mv.nmk +!INCLUDE commpost.nmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/w3i6pc.nmk b/mps/code/w3i6pc.nmk new file mode 100644 index 00000000000..fcf5ebfe4e1 --- /dev/null +++ b/mps/code/w3i6pc.nmk @@ -0,0 +1,55 @@ +# -*- makefile -*- +# +# w3i6pc.nmk: NMAKE FILE FOR WINDOWS/x64/PELLES C +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + +PFM = w3i6pc + +CFLAGSTARGETPRE = /Tamd64-coff + +MPMPF = \ + [lockw3] \ + [mpsiw3] \ + [prmci6] \ + [prmcw3] \ + [prmcw3i6] \ + [protw3] \ + [spw3i6] \ + [thw3] \ + [vmw3] + +!INCLUDE commpre.nmk +!INCLUDE pc.nmk +!INCLUDE commpost.nmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/mps/code/w3mv/.gitignore b/mps/code/w3mv/.gitignore new file mode 100755 index 00000000000..81b5d6c8620 --- /dev/null +++ b/mps/code/w3mv/.gitignore @@ -0,0 +1,10 @@ +# code/w3mv/.p4ignore -- Perforce files to ignore from Visual Studio +# $Id$ +# Subproject build results +*/Debug +*/Release +# Temporary user preferences +*.vcxproj.user +# Mystery rubbish +*.sdf +*.opensdf diff --git a/mps/code/w3mv/.p4ignore b/mps/code/w3mv/.p4ignore new file mode 120000 index 00000000000..3e4e48b0b5f --- /dev/null +++ b/mps/code/w3mv/.p4ignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/mps/code/w3mv/amcss/amcss.vcxproj b/mps/code/w3mv/amcss/amcss.vcxproj new file mode 100755 index 00000000000..e263332de92 --- /dev/null +++ b/mps/code/w3mv/amcss/amcss.vcxproj @@ -0,0 +1,153 @@ + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE} + Win32Proj + amcss + + + + Application + true + Unicode + + + Application + true + Unicode + + + Application + false + true + Unicode + + + Application + false + true + Unicode + + + + + + + + + + + + + + + + + + + true + + + true + + + false + + + false + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + true + true + + + + + + + + + + + + + {339b244a-c76f-4663-a39d-fa90b97f5381} + + + + + + \ No newline at end of file diff --git a/mps/code/w3mv/eventcnv/eventcnv.vcxproj b/mps/code/w3mv/eventcnv/eventcnv.vcxproj new file mode 100755 index 00000000000..fdd75edade6 --- /dev/null +++ b/mps/code/w3mv/eventcnv/eventcnv.vcxproj @@ -0,0 +1,148 @@ + 
+ + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + {945E1C40-AADC-4F3D-B59E-3028338805A5} + Win32Proj + eventcnv + + + + Application + true + Unicode + + + Application + true + Unicode + + + Application + false + true + Unicode + + + Application + false + true + Unicode + + + + + + + + + + + + + + + + + + + true + + + true + + + false + + + false + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + + + Console + true + true + true + + + + + + + + {339b244a-c76f-4663-a39d-fa90b97f5381} + + + + + + diff --git a/mps/code/w3mv/mps.sln b/mps/code/w3mv/mps.sln new file mode 100755 index 00000000000..c1c1c78ccb2 --- /dev/null +++ b/mps/code/w3mv/mps.sln @@ -0,0 +1,44 @@ + +Microsoft Visual Studio Solution File, Format Version 11.00 +# Visual Studio 2010 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "amcss", "amcss\amcss.vcxproj", "{8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mps", "mps\mps.vcxproj", "{339B244A-C76F-4663-A39D-FA90B97F5381}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "eventcnv", "eventcnv\eventcnv.vcxproj", "{945E1C40-AADC-4F3D-B59E-3028338805A5}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Win32 = Debug|Win32 + Debug|x64 = Debug|x64 + Release|Win32 = Release|Win32 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Debug|Win32.ActiveCfg = Debug|Win32 + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Debug|Win32.Build.0 = 
Debug|Win32 + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Debug|x64.ActiveCfg = Debug|x64 + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Debug|x64.Build.0 = Debug|x64 + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Release|Win32.ActiveCfg = Release|Win32 + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Release|Win32.Build.0 = Release|Win32 + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Release|x64.ActiveCfg = Release|x64 + {8412D60D-8D05-4D41-AB7C-8FEA3A7F32CE}.Release|x64.Build.0 = Release|x64 + {339B244A-C76F-4663-A39D-FA90B97F5381}.Debug|Win32.ActiveCfg = Debug|Win32 + {339B244A-C76F-4663-A39D-FA90B97F5381}.Debug|Win32.Build.0 = Debug|Win32 + {339B244A-C76F-4663-A39D-FA90B97F5381}.Debug|x64.ActiveCfg = Debug|x64 + {339B244A-C76F-4663-A39D-FA90B97F5381}.Debug|x64.Build.0 = Debug|x64 + {339B244A-C76F-4663-A39D-FA90B97F5381}.Release|Win32.ActiveCfg = Release|Win32 + {339B244A-C76F-4663-A39D-FA90B97F5381}.Release|Win32.Build.0 = Release|Win32 + {339B244A-C76F-4663-A39D-FA90B97F5381}.Release|x64.ActiveCfg = Release|Win32 + {945E1C40-AADC-4F3D-B59E-3028338805A5}.Debug|Win32.ActiveCfg = Debug|Win32 + {945E1C40-AADC-4F3D-B59E-3028338805A5}.Debug|Win32.Build.0 = Debug|Win32 + {945E1C40-AADC-4F3D-B59E-3028338805A5}.Debug|x64.ActiveCfg = Debug|x64 + {945E1C40-AADC-4F3D-B59E-3028338805A5}.Debug|x64.Build.0 = Debug|x64 + {945E1C40-AADC-4F3D-B59E-3028338805A5}.Release|Win32.ActiveCfg = Release|Win32 + {945E1C40-AADC-4F3D-B59E-3028338805A5}.Release|Win32.Build.0 = Release|Win32 + {945E1C40-AADC-4F3D-B59E-3028338805A5}.Release|x64.ActiveCfg = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/mps/code/w3mv/mps/mps.vcxproj b/mps/code/w3mv/mps/mps.vcxproj new file mode 100755 index 00000000000..3cc75482e1c --- /dev/null +++ b/mps/code/w3mv/mps/mps.vcxproj @@ -0,0 +1,132 @@ + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + + + + {339B244A-C76F-4663-A39D-FA90B97F5381} + 
Win32Proj + mps + + + + StaticLibrary + true + Unicode + + + StaticLibrary + true + Unicode + + + StaticLibrary + false + true + Unicode + + + StaticLibrary + false + true + Unicode + + + + + + + + + + + + + + + + + + + + + + + Level4 + Disabled + CONFIG_VAR_COOL;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + + + Windows + true + + + + + + + Level4 + Disabled + CONFIG_VAR_COOL;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + + + Windows + true + + + + + Level4 + + + MaxSpeed + true + true + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + + + Windows + true + true + true + + + + + Level4 + + + MaxSpeed + true + true + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + + + Windows + true + true + true + + + + + + \ No newline at end of file diff --git a/mps/code/walk.c b/mps/code/walk.c new file mode 100644 index 00000000000..c1738d91a01 --- /dev/null +++ b/mps/code/walk.c @@ -0,0 +1,528 @@ +/* walk.c: OBJECT WALKER + * + * $Id$ + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+ */ + +#include "mpm.h" +#include "mps.h" + +SRCID(walk, "$Id$"); + + +/* Heap Walking + */ + + +#define FormattedObjectsStepClosureSig ((Sig)0x519F05C1) /* SIGnature Formatted Objects Step CLosure */ + +typedef struct FormattedObjectsStepClosureStruct *FormattedObjectsStepClosure; + +typedef struct FormattedObjectsStepClosureStruct { + Sig sig; /* design.mps.sig.field */ + mps_formatted_objects_stepper_t f; + void *p; + size_t s; +} FormattedObjectsStepClosureStruct; + + +ATTRIBUTE_UNUSED +static Bool FormattedObjectsStepClosureCheck(FormattedObjectsStepClosure c) +{ + CHECKS(FormattedObjectsStepClosure, c); + CHECKL(FUNCHECK(c->f)); + /* p and s fields are arbitrary closures which cannot be checked */ + return TRUE; +} + + +static void ArenaFormattedObjectsStep(Addr object, Format format, Pool pool, + void *p, size_t s) +{ + FormattedObjectsStepClosure c; + /* Can't check object */ + AVERT(Format, format); + AVERT(Pool, pool); + c = p; + AVERT(FormattedObjectsStepClosure, c); + AVER(s == UNUSED_SIZE); + + (*c->f)((mps_addr_t)object, (mps_fmt_t)format, (mps_pool_t)pool, + c->p, c->s); +} + + +/* ArenaFormattedObjectsWalk -- iterate over all objects + * + * So called because it walks all formatted objects in an arena. 
*/ + +static void ArenaFormattedObjectsWalk(Arena arena, FormattedObjectsVisitor f, + void *p, size_t s) +{ + Seg seg; + FormattedObjectsStepClosure c; + Format format; + + AVERT(Arena, arena); + AVER(FUNCHECK(f)); + AVER(f == ArenaFormattedObjectsStep); + /* Know that p is a FormattedObjectsStepClosure */ + c = p; + AVERT(FormattedObjectsStepClosure, c); + /* Know that s is UNUSED_SIZE */ + AVER(s == UNUSED_SIZE); + + if (SegFirst(&seg, arena)) { + do { + if (PoolFormat(&format, SegPool(seg))) { + ShieldExpose(arena, seg); + SegWalk(seg, format, f, p, s); + ShieldCover(arena, seg); + } + } while(SegNext(&seg, arena, seg)); + } +} + + +/* mps_arena_formatted_objects_walk -- iterate over all objects + * + * Client interface to ArenaFormattedObjectsWalk. */ + +void mps_arena_formatted_objects_walk(mps_arena_t mps_arena, + mps_formatted_objects_stepper_t f, + void *p, size_t s) +{ + Arena arena = (Arena)mps_arena; + FormattedObjectsStepClosureStruct c; + + ArenaEnter(arena); + AVERT(Arena, arena); + AVER(FUNCHECK(f)); + /* p and s are arbitrary closures, hence can't be checked */ + c.sig = FormattedObjectsStepClosureSig; + c.f = f; + c.p = p; + c.s = s; + ArenaFormattedObjectsWalk(arena, ArenaFormattedObjectsStep, &c, UNUSED_SIZE); + ArenaLeave(arena); +} + + + +/* Root Walking + * + * This involves more code than it should. The roots are walked by + * scanning them. But there's no direct support for invoking the scanner + * without there being a trace, and there's no direct support for + * creating a trace without also condemning part of the heap. (@@@@ This + * looks like a useful candidate for inclusion in the future). For now, + * the root walker contains its own code for creating a minimal trace + * and scan state. + * + * ASSUMPTIONS + * + * .assume.parked: The root walker must be invoked with a parked + * arena. It's only strictly necessary for there to be no current trace, + * but the client has no way to ensure this apart from parking the + * arena. 
+ * + * .assume.rootaddr: The client closure is called with a parameter which + * is the address of a reference to an object referenced from a + * root. The client may desire this address to be the address of the + * actual reference in the root (so that the debugger can be used to + * determine details about the root). This is not always possible, since + * the root might actually be a register, or the format scan method + * might not pass this address directly to the fix method. If the format + * code does pass on the address, the client can be sure to be passed + * the address of any root other than a register or stack. */ + + +/* rootsStepClosure -- closure environment for root walker + * + * Defined as a subclass of ScanState. */ + +#define rootsStepClosureSig ((Sig)0x51965C10) /* SIGnature Roots Step CLOsure */ + +typedef struct rootsStepClosureStruct *rootsStepClosure; +typedef struct rootsStepClosureStruct { + ScanStateStruct ssStruct; /* generic scan state object */ + mps_roots_stepper_t f; /* client closure function */ + void *p; /* client closure data */ + size_t s; /* client closure data */ + Root root; /* current root, or NULL */ + Sig sig; /* design.mps.sig.field.end.outer */ +} rootsStepClosureStruct; + +#define rootsStepClosure2ScanState(rsc) (&(rsc)->ssStruct) +#define ScanState2rootsStepClosure(ss) \ + PARENT(rootsStepClosureStruct, ssStruct, ss) + + +/* rootsStepClosureCheck -- check a rootsStepClosure */ + +ATTRIBUTE_UNUSED +static Bool rootsStepClosureCheck(rootsStepClosure rsc) +{ + CHECKS(rootsStepClosure, rsc); + CHECKD(ScanState, &rsc->ssStruct); + CHECKL(FUNCHECK(rsc->f)); + /* p and s fields are arbitrary closures which cannot be checked */ + if (rsc->root != NULL) { + CHECKD_NOSIG(Root, rsc->root); /* */ + } + return TRUE; +} + + +/* rootsStepClosureInit -- Initialize a rootsStepClosure + * + * Initialize the parent ScanState too. 
*/ + +static void rootsStepClosureInit(rootsStepClosure rsc, + Globals arena, Trace trace, + SegFixMethod rootFix, + mps_roots_stepper_t f, void *p, size_t s) +{ + ScanState ss; + + /* First initialize the ScanState superclass */ + ss = &rsc->ssStruct; + ScanStateInit(ss, TraceSetSingle(trace), GlobalsArena(arena), RankMIN, + trace->white); + + /* Initialize the fix method in the ScanState */ + ss->fix = rootFix; + + /* Initialize subclass specific data */ + rsc->f = f; + rsc->p = p; + rsc->s = s; + rsc->root = NULL; + + rsc->sig = rootsStepClosureSig; + + AVERT(rootsStepClosure, rsc); +} + + +/* rootsStepClosureFinish -- Finish a rootsStepClosure + * + * Finish the parent ScanState too. */ + +static void rootsStepClosureFinish(rootsStepClosure rsc) +{ + ScanState ss; + + ss = rootsStepClosure2ScanState(rsc); + rsc->sig = SigInvalid; + ScanStateFinish(ss); +} + + +/* RootsWalkFix -- the fix method used during root walking + * + * This doesn't cause further scanning of transitive references, it just + * calls the client closure. 
*/ + +static Res RootsWalkFix(Seg seg, ScanState ss, Ref *refIO) +{ + rootsStepClosure rsc; + Ref ref; + + AVERT(Seg, seg); + AVERT(ScanState, ss); + AVER(refIO != NULL); + rsc = ScanState2rootsStepClosure(ss); + AVERT(rootsStepClosure, rsc); + + ref = *refIO; + + /* Call the client closure - .assume.rootaddr */ + rsc->f((mps_addr_t*)refIO, (mps_root_t)rsc->root, rsc->p, rsc->s); + + AVER(ref == *refIO); /* can walk object graph - but not modify it */ + + return ResOK; +} + + +/* rootWalk -- the step function for ArenaRootsWalk */ + +static Res rootWalk(Root root, void *p) +{ + ScanState ss = (ScanState)p; + + AVERT(ScanState, ss); + + if (RootRank(root) == ss->rank) { + /* set the root for the benefit of the fix method */ + ScanState2rootsStepClosure(ss)->root = root; + /* Scan it */ + ScanStateSetSummary(ss, RefSetEMPTY); + return RootScan(ss, root); + } else + return ResOK; +} + + +/* rootWalkGrey -- make the root grey for the trace passed as p */ + +static Res rootWalkGrey(Root root, void *p) +{ + Trace trace = p; + + AVERT(Root, root); + AVERT(Trace, trace); + + RootGrey(root, trace); + return ResOK; +} + + +/* ArenaRootsWalk -- walks all the root in the arena */ + +static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f, + void *p, size_t s) +{ + Arena arena; + rootsStepClosureStruct rscStruct; + rootsStepClosure rsc = &rscStruct; + Trace trace; + ScanState ss; + Rank rank; + Res res; + Seg seg; + + AVERT(Globals, arenaGlobals); + AVER(FUNCHECK(f)); + /* p and s are arbitrary client-provided closure data. */ + arena = GlobalsArena(arenaGlobals); + + /* Scan all the roots with a minimal trace. Invoke the scanner with a */ + /* rootsStepClosure, which is a subclass of ScanState and contains the */ + /* client-provided closure. Supply a special fix method in order to */ + /* call the client closure. This fix method must perform no tracing */ + /* operations of its own. 
*/ + + res = TraceCreate(&trace, arena, TraceStartWhyWALK); + /* Have to fail if no trace available. Unlikely due to .assume.parked. */ + if (res != ResOK) + return res; + + /* .roots-walk.first-stage: In order to fool MPS_FIX12 into calling + _mps_fix2 for a reference in a root, the reference must pass the + first-stage test (against the summary of the trace's white + set), so make the summary universal. */ + trace->white = ZoneSetUNIV; + + /* .roots-walk.second-stage: In order to fool _mps_fix2 into calling + our fix function (RootsWalkFix), the reference must be to a + segment that is white for the trace, so make all segments white + for the trace. */ + if (SegFirst(&seg, arena)) { + do { + SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + } while (SegNext(&seg, arena, seg)); + } + + /* Make the roots grey so that they are scanned */ + res = RootsIterate(arenaGlobals, rootWalkGrey, trace); + /* Make this trace look like any other trace. */ + arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace); + + rootsStepClosureInit(rsc, arenaGlobals, trace, RootsWalkFix, f, p, s); + ss = rootsStepClosure2ScanState(rsc); + + for(rank = RankMIN; rank < RankLIMIT; ++rank) { + ss->rank = rank; + AVERT(ScanState, ss); + res = RootsIterate(arenaGlobals, rootWalk, (void *)ss); + if (res != ResOK) + break; + } + + /* Turn segments black again. */ + if (SegFirst(&seg, arena)) { + do { + SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); + } while (SegNext(&seg, arena, seg)); + } + + rootsStepClosureFinish(rsc); + /* Make this trace look like any other finished trace. */ + trace->state = TraceFINISHED; + TraceDestroyFinished(trace); + AVER(!ArenaEmergency(arena)); /* There was no allocation. 
*/ + + return res; +} + + +/* mps_arena_roots_walk -- Client interface for walking */ + +void mps_arena_roots_walk(mps_arena_t mps_arena, mps_roots_stepper_t f, + void *p, size_t s) +{ + Arena arena = (Arena)mps_arena; + Res res; + + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + AVER(FUNCHECK(f)); + /* p and s are arbitrary closures, hence can't be checked */ + + AVER(ArenaGlobals(arena)->clamped); /* .assume.parked */ + AVER(arena->busyTraces == TraceSetEMPTY); /* .assume.parked */ + + res = ArenaRootsWalk(ArenaGlobals(arena), f, p, s); + AVER(res == ResOK); + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); +} + + +/* walkNoFix -- third-stage fix function for poolWalk. + * + * The second-stage fix is not called via poolWalk; so this is not + * called either. The NOTREACHED checks that this is the case. + */ +static Res walkNoFix(Seg seg, ScanState ss, Addr *refIO) +{ + AVERT(Seg, seg); + AVERT(ScanState, ss); + AVER(refIO != NULL); + + NOTREACHED; + + return ResUNIMPL; +} + + +/* poolWalkScan -- format scanner for poolWalk */ + +static mps_res_t poolWalkScan(mps_ss_t mps_ss, void *base, void *limit) +{ + ScanState ss = PARENT(ScanStateStruct, ss_s, mps_ss); + + AVERT(ScanState, ss); + AVER(base != NULL); + AVER(limit != NULL); + AVER(base < limit); + + return ss->areaScan(mps_ss, base, limit, ss->areaScanClosure); +} + + +/* poolWalk -- walk formatted areas in a pool + * + * See . + */ + +static Res poolWalk(Arena arena, Pool pool, mps_area_scan_t area_scan, void *closure) +{ + Trace trace; + TraceSet ts; + ScanStateStruct ss; + Ring node, nextNode; + Res res = ResOK; + + AVERT(Arena, arena); + AVERT(Pool, pool); + AVER(FUNCHECK(area_scan)); + /* closure is arbitrary and can't be checked */ + + AVER(ArenaGlobals(arena)->clamped); /* .assume.parked */ + AVER(arena->busyTraces == TraceSetEMPTY); /* .assume.parked */ + + /* Synthesize a flipped trace with an empty white set. 
The empty + * white set means that the MPS_FIX1 test will always fail and + * _mps_fix2 will never be called. */ + res = TraceCreate(&trace, arena, TraceStartWhyWALK); + /* Fail if no trace available. Unlikely due to .assume.parked. */ + if (res != ResOK) + return res; + trace->white = ZoneSetEMPTY; + trace->state = TraceFLIPPED; + arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace); + ts = TraceSetSingle(trace); + + ScanStateInit(&ss, ts, arena, RankEXACT, trace->white); + ss.formatScan = poolWalkScan; + ss.areaScan = area_scan; + ss.areaScanClosure = closure; + ss.fix = walkNoFix; + + RING_FOR(node, &pool->segRing, nextNode) { + Bool wasTotal; + Seg seg = SegOfPoolRing(node); + Bool needSummary = SegRankSet(seg) != RankSetEMPTY; + + if (needSummary) + ScanStateSetSummary(&ss, RefSetEMPTY); + + /* Expose the segment to make sure we can scan it. */ + ShieldExpose(arena, seg); + res = SegScan(&wasTotal, seg, &ss); + ShieldCover(arena, seg); + + if (needSummary) + ScanStateUpdateSummary(&ss, seg, res == ResOK && wasTotal); + + if (res != ResOK) + break; + } + + ScanStateFinish(&ss); + trace->state = TraceFINISHED; + TraceDestroyFinished(trace); + AVER(!ArenaEmergency(arena)); /* There was no allocation. */ + + return res; +} + + +mps_res_t mps_pool_walk(mps_pool_t pool, mps_area_scan_t area_scan, void *closure) +{ + Arena arena; + Res res; + + AVER(TESTT(Pool, pool)); + arena = PoolArena(pool); + ArenaEnter(arena); + AVER(FUNCHECK(area_scan)); + /* closure is arbitrary and can't be checked */ + + res = poolWalk(arena, pool, area_scan, closure); + ArenaLeave(arena); + return res; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2001-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/walkt0.c b/mps/code/walkt0.c new file mode 100644 index 00000000000..dd2b5d8fbae --- /dev/null +++ b/mps/code/walkt0.c @@ -0,0 +1,341 @@ +/* walkt0.c: WALK TEST 0 + * + * $Id$ + * Copyright (c) 1998-2020 Ravenbrook Limited. See end of file for license. + * + * Loosely based on . 
+ */ + +#include "fmtdy.h" +#include "fmtdytst.h" +#include "testlib.h" +#include "mpslib.h" +#include "mpscamc.h" +#include "mpscams.h" +#include "mpscawl.h" +#include "mpsclo.h" +#include "mpscsnc.h" +#include "mpsavm.h" +#include "mps.h" +#include "mpm.h" + +#include /* printf */ + +#define testArenaSIZE ((size_t)((size_t)64 << 20)) +#define avLEN 3 +#define exactRootsCOUNT 200 +#define objCOUNT 20000 + +#define genCOUNT 3 +#define gen1SIZE 750 /* kB */ +#define gen2SIZE 2000 /* kB */ +#define gen3SIZE 5000 /* kB */ +#define gen1MORTALITY 0.85 +#define gen2MORTALITY 0.60 +#define gen3MORTALITY 0.40 + +/* testChain -- generation parameters for the test */ + +static mps_gen_param_s testChain[genCOUNT] = { + {gen1SIZE, gen1MORTALITY}, + {gen2SIZE, gen2MORTALITY}, + {gen3SIZE, gen3MORTALITY}, +}; + +/* objNULL needs to be odd so that it's ignored in exactRoots. */ +#define objNULL ((mps_addr_t)MPS_WORD_CONST(0xDECEA5ED)) + +static mps_ap_t ap; +static mps_addr_t exactRoots[exactRootsCOUNT]; +static size_t alloc_bytes; + +/* Make a single Dylan object */ + +static mps_addr_t make(void) +{ + size_t length = rnd() % (avLEN * 2); + size_t size = (length+2) * sizeof(mps_word_t); + mps_addr_t p; + mps_res_t res; + + alloc_bytes += size; + + for(;;) { + mps_bool_t commit_res; + MPS_RESERVE_BLOCK(res, p, ap, size); + if(res) + die(res, "MPS_RESERVE_BLOCK"); + res = dylan_init(p, size, exactRoots, exactRootsCOUNT); + if(res) + die(res, "dylan_init"); + commit_res = mps_commit(ap, p, size); + if(commit_res) + break; + } + + return p; +} + +/* A formatted objects stepper function. Passed to + * mps_arena_formatted_objects_walk. + * + * Tests the (pool, format) values that MPS passes to it for each + * object, by... 
+ * + * ...1: making explicit queries with: + * mps_arena_has_addr + * mps_addr_pool + * mps_addr_fmt + * + * ...2: comparing with what we expect for: + * pool + * fmt + * + * ...3: accumulating the count and size of objects found + */ +typedef struct object_stepper_data { + mps_arena_t arena; + mps_pool_t expect_pool; + mps_fmt_t expect_fmt; + size_t count; /* number of non-padding objects found */ + size_t objSize; /* total size of non-padding objects */ + size_t padSize; /* total size of padding objects */ +} object_stepper_data_s, *object_stepper_data_t; + +static void object_stepper(mps_addr_t object, mps_fmt_t format, + mps_pool_t pool, void *p, size_t s) +{ + object_stepper_data_t sd; + mps_arena_t arena; + mps_bool_t b; + mps_pool_t query_pool; + mps_fmt_t query_fmt; + size_t size; + + Insist(s == sizeof *sd); + sd = p; + arena = sd->arena; + + Insist(mps_arena_has_addr(arena, object)); + + b = mps_addr_pool(&query_pool, arena, object); + Insist(b); + Insist(query_pool == pool); + Insist(pool == sd->expect_pool); + + b = mps_addr_fmt(&query_fmt, arena, object); + Insist(b); + Insist(query_fmt == format); + Insist(format == sd->expect_fmt); + + size = AddrOffset(object, dylan_skip(object)); + if (dylan_ispad(object)) { + sd->padSize += size; + } else { + ++ sd->count; + sd->objSize += size; + } +} + + +/* area_scan -- area scanning function for mps_pool_walk */ + +static mps_res_t area_scan(mps_ss_t ss, void *base, void *limit, void *closure) +{ + object_stepper_data_t sd = closure; + mps_res_t res; + while (base < limit) { + size_t size = AddrOffset(base, dylan_skip(base)); + mps_addr_t prev = base; + if (dylan_ispad(base)) { + sd->padSize += size; + } else { + ++ sd->count; + sd->objSize += size; + } + res = dylan_scan1(ss, &base); + if (res != MPS_RES_OK) return res; + Insist(prev < base); + } + Insist(base == limit); + return MPS_RES_OK; +} + + +/* A roots stepper function. Passed to mps_arena_roots_walk. 
*/ + +typedef struct roots_stepper_data { + mps_root_t exactRoot; + size_t count; +} roots_stepper_data_s, *roots_stepper_data_t; + +static void roots_stepper(mps_addr_t *ref, mps_root_t root, void *p, size_t s) +{ + roots_stepper_data_t data = p; + Insist(ref != NULL); + Insist(p != NULL); + Insist(s == sizeof *data); + Insist(root == data->exactRoot); + ++ data->count; +} + + +/* test -- the body of the test */ + +static void test(mps_arena_t arena, mps_pool_class_t pool_class) +{ + mps_chain_t chain; + mps_fmt_t format; + mps_pool_t pool; + mps_root_t exactRoot; + size_t i; + size_t totalSize, freeSize, allocSize, bufferSize; + unsigned long objs; + object_stepper_data_s objectStepperData, *sd; + roots_stepper_data_s rootsStepperData, *rsd; + int walk; + + die(dylan_fmt(&format, arena), "fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_FORMAT, format); + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, chain); + die(mps_pool_create_k(&pool, arena, pool_class, args), "pool_create"); + } MPS_ARGS_END(args); + + die(mps_ap_create(&ap, pool, mps_rank_exact()), "ap_create"); + + for(i = 0; i < exactRootsCOUNT; ++i) + exactRoots[i] = objNULL; + + die(mps_root_create_table_masked(&exactRoot, arena, + mps_rank_exact(), (mps_rm_t)0, + &exactRoots[0], exactRootsCOUNT, + (mps_word_t)1), + "root_create_table(exact)"); + + objs = 0; + + while(objs < objCOUNT) { + size_t r; + + r = objs; + i = r % exactRootsCOUNT; + if(exactRoots[i] != objNULL) { + cdie(dylan_check(exactRoots[i]), "dying root check"); + } + exactRoots[i] = make(); + if(exactRoots[(exactRootsCOUNT-1) - i] != objNULL) + dylan_write(exactRoots[(exactRootsCOUNT-1) - i], + exactRoots, exactRootsCOUNT); + + ++objs; + } + + mps_arena_park(arena); + + rsd = &rootsStepperData; + rsd->exactRoot = exactRoot; + rsd->count = 0; + mps_arena_roots_walk(arena, roots_stepper, rsd, sizeof *rsd); + printf("%lu %lu\n", (unsigned long)rsd->count, 
(unsigned long)exactRootsCOUNT); + Insist(rsd->count == exactRootsCOUNT); + + for (walk = 0; walk < 2; ++walk) + { + sd = &objectStepperData; + sd->arena = arena; + sd->expect_pool = pool; + sd->expect_fmt = format; + sd->count = 0; + sd->objSize = 0; + sd->padSize = 0; + if (walk) { + mps_arena_formatted_objects_walk(arena, object_stepper, + sd, sizeof *sd); + } else { + die(mps_pool_walk(pool, area_scan, sd), "mps_pool_walk"); + } + Insist(sd->count == objs); + + totalSize = mps_pool_total_size(pool); + freeSize = mps_pool_free_size(pool); + allocSize = totalSize - freeSize; + bufferSize = AddrOffset(ap->init, ap->limit); + printf("%s: obj=%lu pad=%lu total=%lu free=%lu alloc=%lu buffer=%lu\n", + ClassName(pool_class), + (unsigned long)sd->objSize, + (unsigned long)sd->padSize, + (unsigned long)totalSize, + (unsigned long)freeSize, + (unsigned long)allocSize, + (unsigned long)bufferSize); + Insist(sd->objSize + sd->padSize + bufferSize == allocSize); + } + + mps_arena_collect(arena); + + mps_ap_destroy(ap); + mps_root_destroy(exactRoot); + mps_pool_destroy(pool); + mps_chain_destroy(chain); + mps_fmt_destroy(format); + mps_arena_release(arena); +} + +int main(int argc, char *argv[]) +{ + mps_arena_t arena; + mps_thr_t thread; + + testlib_init(argc, argv); + + die(mps_arena_create(&arena, mps_arena_class_vm(), + testArenaSIZE), + "arena_create"); + die(mps_thread_reg(&thread, arena), "thread_reg"); + + test(arena, mps_class_amc()); + test(arena, mps_class_amcz()); + test(arena, mps_class_ams()); + test(arena, mps_class_awl()); + test(arena, mps_class_lo()); + test(arena, mps_class_snc()); + + mps_thread_dereg(thread); + mps_arena_destroy(arena); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 1998-2020 Ravenbrook Limited . 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/xca6ll.gmk b/mps/code/xca6ll.gmk new file mode 100644 index 00000000000..c4834070759 --- /dev/null +++ b/mps/code/xca6ll.gmk @@ -0,0 +1,58 @@ +# -*- makefile -*- +# +# xca6ll.gmk: BUILD FOR macOS/ARM64/Clang/LLVM PLATFORM +# +# $Id$ +# Copyright (c) 2001-2021 Ravenbrook Limited. See end of file for license. +# +# .prefer.xcode: The documented and preferred way to develop the MPS +# for this platform is to use the Xcode project (mps.xcodeproj). This +# makefile provides a way to compile the MPS one source file at a +# time, rather than all at once via mps.c (which can hide errors due +# to missing headers). 
+ +PFM = xca6ll + +MPMPF = \ + lockix.c \ + prmcanan.c \ + prmcxc.c \ + prmcxca6.c \ + protix.c \ + protxc.c \ + span.c \ + thxc.c \ + vmix.c + +include ll.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2021 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/xci3gc.gmk b/mps/code/xci3gc.gmk new file mode 100644 index 00000000000..bb50300aea5 --- /dev/null +++ b/mps/code/xci3gc.gmk @@ -0,0 +1,54 @@ +# -*- makefile -*- +# +# xci3gc.gmk: BUILD FOR macOS/IA-32/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# Naively copied from xcppgc.gmk, could do with going over properly. 
+ +PFM = xci3gc + +MPMPF = \ + lockix.c \ + prmci3.c \ + prmcxc.c \ + prmcxci3.c \ + protix.c \ + protxc.c \ + span.c \ + thxc.c \ + vmix.c + +include gc.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/xci3ll.gmk b/mps/code/xci3ll.gmk new file mode 100644 index 00000000000..4da264a3f4c --- /dev/null +++ b/mps/code/xci3ll.gmk @@ -0,0 +1,61 @@ +# -*- makefile -*- +# +# xci3ll.gmk: BUILD FOR macOS/IA-32/Clang/LLVM PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. 
+# +# .prefer.xcode: The documented and preferred way to develop the MPS +# for this platform is to use the Xcode project (mps.xcodeproj). This +# makefile provides a way to compile the MPS one source file at a +# time, rather than all at once via mps.c (which can hide errors due +# to missing headers). + +PFM = xci3ll + +MPMPF = \ + lockix.c \ + prmci3.c \ + prmcxc.c \ + prmcxci3.c \ + protix.c \ + protxc.c \ + span.c \ + thxc.c \ + vmix.c + +include ll.gmk + +CC = clang -arch i386 + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/mps/code/xci6gc.gmk b/mps/code/xci6gc.gmk new file mode 100644 index 00000000000..9b616dff6f0 --- /dev/null +++ b/mps/code/xci6gc.gmk @@ -0,0 +1,58 @@ +# -*- makefile -*- +# +# xci6gc.gmk: BUILD FOR macOS/x86-64/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# .prefer.xcode: The documented and preferred way to develop the MPS +# for this platform is to use the Xcode project (mps.xcodeproj). This +# makefile provides a way to compile the MPS one source file at a +# time, rather than all at once via mps.c (which can hide errors due +# to missing headers). + +PFM = xci6gc + +MPMPF = \ + lockix.c \ + prmci6.c \ + prmcxc.c \ + prmcxci6.c \ + protix.c \ + protxc.c \ + span.c \ + thxc.c \ + vmix.c + +include gc.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/xci6ll.gmk b/mps/code/xci6ll.gmk new file mode 100644 index 00000000000..0bf37a751b6 --- /dev/null +++ b/mps/code/xci6ll.gmk @@ -0,0 +1,58 @@ +# -*- makefile -*- +# +# xci6ll.gmk: BUILD FOR macOS/x86-64/Clang/LLVM PLATFORM +# +# $Id$ +# Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. +# +# .prefer.xcode: The documented and preferred way to develop the MPS +# for this platform is to use the Xcode project (mps.xcodeproj). This +# makefile provides a way to compile the MPS one source file at a +# time, rather than all at once via mps.c (which can hide errors due +# to missing headers). + +PFM = xci6ll + +MPMPF = \ + lockix.c \ + prmci6.c \ + prmcxc.c \ + prmcxci6.c \ + protix.c \ + protxc.c \ + span.c \ + thxc.c \ + vmix.c + +include ll.gmk +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/mps/code/zcoll.c b/mps/code/zcoll.c new file mode 100644 index 00000000000..f3d472e761a --- /dev/null +++ b/mps/code/zcoll.c @@ -0,0 +1,947 @@ +/* zcoll.c: Collection test + * + * $Id$ + * Copyright (c) 2008-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * OBJECTIVE + * + * Test MPS collections. In particular, reporting of how collections + * progress. + * + * Please add tests for other collection behaviour into this file. + * (It's easier to maintain a few big tests than myriad small tests). + * Expand the script language as necessary! RHSK 2008-12-22. + * + * + * DESIGN OVERVIEW + * + * Each script runs in a newly created arena. + * + * [preliminary, incomplete, code still being written] + * The commands are: + * Arena -- governs initial arena size, required, must be first + * Make -- makes some objects, stores a proportion (chosen at + * random) in the specified myroot array slots, and + * drops the rest (which therefore become garbage) + * Katalog -- (will be renamed Catalog) makes a Catalog, which + * is a 40 MB 4-level tree of 10^5 objects; see .catalog; + * see also .catalog.broken. 
+ * Collect -- request a synchronous full garbage collection + * + * + * CODE OVERVIEW + * + * main() has the list of testscripts. + * + * testscriptA() sets up a new arena and calls testscriptB(). + * + * testscriptB() creates pools and objects for this test script. + * + * testscriptC() actually runs the script. + * + * + * DEPENDENCIES + * + * This test uses the dylan object format, but the reliance on this + * particular format is not great and could be removed. + * + * + * BUGS, FUTURE IMPROVEMENTS, ETC + * + * HISTORY + * + * This code was created by first copying . + */ + +#include "testlib.h" +#include "mpslib.h" +#include "mps.h" +#include "mpscamc.h" +#include "mpsavm.h" +#include "fmtdy.h" +#include "fmtdytst.h" +#include "mpstd.h" + +#include /* fflush, printf, putchar, puts, stdout */ + + +/* testChain -- generation parameters for the test */ +#define genCOUNT 2 +static mps_gen_param_s testChain[genCOUNT] = { + { 100, 0.85 }, { 170, 0.45 } }; + + +/* myroot -- arrays of references that are the root */ +#define myrootAmbigCOUNT 30000 +static void *myrootAmbig[myrootAmbigCOUNT]; +#define myrootExactCOUNT 30000 +static void *myrootExact[myrootExactCOUNT]; + +static mps_root_t root_stackreg; +static void *stack_start; +static mps_thr_t stack_thr; + + +static ulongest_t cols(size_t bytes) +{ + double M; /* Mebibytes */ + ulongest_t cM; /* hundredths of a Mebibyte */ + + M = (double)bytes / ((ulongest_t)1<<20); + cM = (ulongest_t)(M * 100.0 + 0.5); /* round to nearest */ + return cM; +} + +/* showStatsAscii -- present collection stats, 'graphically' + * + */ +static void showStatsAscii(size_t notcon, size_t con, size_t live, size_t alimit) +{ + ulongest_t n = cols(notcon); + ulongest_t c = cols(notcon + con); + ulongest_t l = cols(notcon + live); /* a fraction of con */ + ulongest_t a = cols(alimit); + ulongest_t count; + ulongest_t i; + + /* if we can show alimit within 200 cols, do so */ + count = (a < 200) ? 
a + 1 : c; + + for(i = 0; i < count; i++) { + putchar((i == a) ? 'A' + : (i < n) ? 'n' + : (i < l) ? 'L' + : (i < c) ? '_' + : ' '); + } + printf("\n"); +} + + +/* print_M -- print count of bytes as Mebibytes or Megabytes + * + * Print as a whole number, "m" for the decimal point, and + * then the decimal fraction. + * + * Input: 208896 + * Output: (Mebibytes) 0m199 + * Output: (Megabytes) 0m209 + */ +#if 0 +#define bPerM ((size_t)1 << 20) /* Mebibytes */ +#else +#define bPerM ((size_t)1000000) /* Megabytes */ +#endif +static void print_M(size_t bytes) +{ + size_t M; /* M thingies */ + double Mfrac; /* fraction of an M thingy */ + + M = bytes / bPerM; + Mfrac = (double)(bytes % bPerM); + Mfrac = (Mfrac / bPerM); + + printf("%1"PRIuLONGEST"m%03.f", (ulongest_t)M, Mfrac * 1000); +} + + +/* showStatsText -- present collection stats + * + * prints: + * Coll End 0m137[->0m019 14%-live] (0m211-not ) + */ +static void showStatsText(size_t notcon, size_t con, size_t live) +{ + double liveFrac = (double)live / (double)con; + + print_M(con); + printf("[->"); + print_M(live); + printf("% 3.f%%-live]", liveFrac * 100); + printf(" ("); + print_M(notcon); + printf("-not "); + printf(")\n"); +} + +/* get -- get messages + * + */ +static void get(mps_arena_t arena) +{ + mps_message_type_t type; + + while (mps_message_queue_type(&type, arena)) { + mps_message_t message; + static mps_clock_t mclockBegin = 0; + static mps_clock_t mclockEnd = 0; + mps_word_t *obj; + mps_word_t objind; + mps_addr_t objaddr; + + cdie(mps_message_get(&message, arena, type), + "get"); + + switch(type) { + case mps_message_type_gc_start(): { + mclockBegin = mps_message_clock(arena, message); + printf(" %5"PRIuLONGEST": (%5"PRIuLONGEST")", + (ulongest_t)mclockBegin, (ulongest_t)(mclockBegin - mclockEnd)); + printf(" Coll Begin (%s)\n", + mps_message_gc_start_why(arena, message)); + break; + } + case mps_message_type_gc(): { + size_t con = mps_message_gc_condemned_size(arena, message); + size_t notcon = 
mps_message_gc_not_condemned_size(arena, message); + /* size_t other = 0; -- cannot determine; new method reqd */ + size_t live = mps_message_gc_live_size(arena, message); + size_t alimit = mps_arena_reserved(arena); + + mclockEnd = mps_message_clock(arena, message); + + printf(" %5"PRIuLONGEST": (%5"PRIuLONGEST")", + (ulongest_t)mclockEnd, (ulongest_t)(mclockEnd - mclockBegin)); + printf(" Coll End "); + showStatsText(notcon, con, live); + if (rnd()==0) + showStatsAscii(notcon, con, live, alimit); + break; + } + case mps_message_type_finalization(): { + mps_message_finalization_ref(&objaddr, arena, message); + obj = objaddr; + objind = DYLAN_INT_INT(DYLAN_VECTOR_SLOT(obj, 0)); + printf(" Finalization for object %"PRIuLONGEST" at %p\n", + (ulongest_t)objind, objaddr); + break; + } + default: { + cdie(0, "message type"); + break; + } + } + + mps_message_discard(arena, message); + } +} + + +/* .catalog: The Catalog client: + * + * This is an MPS client for testing the MPS. It simulates + * converting a multi-page "Catalog" document from a page-description + * into a bitmap. + * + * The intention is that this task will cause memory usage that is + * fairly realistic (much more so than randomly allocated objects + * with random interconnections. The patterns in common with real + * clients are: + * - the program input and its task are 'fractal', with a + * self-similar hierarchy; + * - object allocation is prompted by each successive element of + * the input/task; + * - objects are often used to store a transformed version of the + * program input; + * - there may be several stages of transformation; + * - at each stage, the old object (holding the untransformed data) + * may become dead; + * - sometimes a tree of objects becomes dead once an object at + * some level of the hierarchy has been fully processed; + * - there is more than one hierarchy, and objects in different + * hierarchies interact. 
+ * + * The entity-relationship diagram is: + * Catalog -< Page -< Article -< Polygon + * v + * | + * Palette --------------------< Colour + * + * The first hierarchy is a Catalog, containing Pages, each + * containing Articles (bits of artwork etc), each composed of + * Polygons. Each polygon has a single colour. + * + * The second hierarchy is a top-level Palette, containing Colours. + * Colours (in this client) are expensive, large objects (perhaps + * because of complex colour modelling or colour blending). + * + * The things that matter for their effect on MPS behaviour are: + * - when objects are allocated, and how big they are; + * - how the reference graph mutates over time; + * - how the mutator accesses objects (barrier hits). + */ + + +#define CatalogRootIndex 0 +#define CatalogSig MPS_WORD_CONST(0x0000CA2A) /* CATAlog */ +#define CatalogFix 1 +#define CatalogVar 10 +#define PageSig MPS_WORD_CONST(0x0000BA9E) /* PAGE */ +#define PageFix 1 +#define PageVar 100 +#define ArtSig MPS_WORD_CONST(0x0000A621) /* ARTIcle */ +#define ArtFix 1 +#define ArtVar 100 +#define PolySig MPS_WORD_CONST(0x0000B071) /* POLYgon */ +#define PolyFix 1 +#define PolyVar 100 + + +static void CatalogCheck(void) +{ + mps_word_t w; + void *Catalog, *Page, *Art, *Poly; + unsigned long Catalogs = 0, Pages = 0, Arts = 0, Polys = 0; + size_t i, j, k; + + /* retrieve Catalog from root */ + Catalog = myrootExact[CatalogRootIndex]; + if(!Catalog) + return; + Insist(DYLAN_VECTOR_SLOT(Catalog, 0) == DYLAN_INT(CatalogSig)); + Catalogs += 1; + + for(i = 0; i < CatalogVar; i += 1) { + /* retrieve Page from Catalog */ + w = DYLAN_VECTOR_SLOT(Catalog, CatalogFix + i); + /* printf("Page = 0x%8x\n", (unsigned int) w); */ + if(w == DYLAN_INT(0)) + break; + Page = (void *)w; + Insist(DYLAN_VECTOR_SLOT(Page, 0) == DYLAN_INT(PageSig)); + Pages += 1; + + for(j = 0; j < PageVar; j += 1) { + /* retrieve Art from Page */ + w = DYLAN_VECTOR_SLOT(Page, PageFix + j); + if(w == DYLAN_INT(0)) + break; + Art = 
(void *)w; + Insist(DYLAN_VECTOR_SLOT(Art, 0) == DYLAN_INT(ArtSig)); + Arts += 1; + + for(k = 0; k < ArtVar; k += 1) { + /* retrieve Poly from Art */ + w = DYLAN_VECTOR_SLOT(Art, ArtFix + k); + if(w == DYLAN_INT(0)) + break; + Poly = (void *)w; + Insist(DYLAN_VECTOR_SLOT(Poly, 0) == DYLAN_INT(PolySig)); + Polys += 1; + } + } + } + printf("Catalog ok with: Catalogs: %lu, Pages: %lu, Arts: %lu, Polys: %lu.\n", + Catalogs, Pages, Arts, Polys); +} + + +/* CatalogDo -- make a Catalog and its tree of objects + * + * .catalog.broken: this code, when compiled with + * moderate optimization, may have ambiguous interior pointers but + * lack corresponding ambiguous base pointers to MPS objects. This + * means the interior pointers are unmanaged references, and the + * code goes wrong. The hack in poolamc.c#4 cures this, but not very + * nicely. For further discussion, see: + * + */ +static void CatalogDo(mps_arena_t arena, mps_ap_t ap) +{ + mps_word_t v; + void *Catalog, *Page, *Art, *Poly; + size_t i, j, k; + + die(make_dylan_vector(&v, ap, CatalogFix + CatalogVar), "Catalog"); + DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(CatalogSig); + Catalog = (void *)v; + + /* store Catalog in root */ + myrootExact[CatalogRootIndex] = Catalog; + get(arena); + + (void)fflush(stdout); + CatalogCheck(); + + for(i = 0; i < CatalogVar; i += 1) { + die(make_dylan_vector(&v, ap, PageFix + PageVar), "Page"); + DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(PageSig); + Page = (void *)v; + + /* store Page in Catalog */ + DYLAN_VECTOR_SLOT(Catalog, CatalogFix + i) = (mps_word_t)Page; + get(arena); + + printf("Page %"PRIuLONGEST": make articles\n", (ulongest_t)i); + (void)fflush(stdout); + + for(j = 0; j < PageVar; j += 1) { + die(make_dylan_vector(&v, ap, ArtFix + ArtVar), "Art"); + DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(ArtSig); + Art = (void *)v; + + /* store Art in Page */ + DYLAN_VECTOR_SLOT(Page, PageFix + j) = (mps_word_t)Art; + get(arena); + + for(k = 0; k < ArtVar; k += 1) { + die(make_dylan_vector(&v, ap, 
PolyFix + PolyVar), "Poly"); + DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(PolySig); + Poly = (void *)v; + + /* store Poly in Art */ + DYLAN_VECTOR_SLOT(Art, ArtFix + k) = (mps_word_t)Poly; + /* get(arena); */ + } + } + } + (void)fflush(stdout); + CatalogCheck(); +} + + +/* MakeThing -- make an object of the size requested (in bytes) + * + * Any size is accepted. MakeThing may round it up (MakeThing always + * makes a dylan vector, which has a minimum size of 8 bytes). Vector + * slots, if any, are initialized to DYLAN_INT(0). + * + * After making the object, calls get(), to retrieve MPS messages. + * + * make_dylan_vector [fmtdytst.c] says: + * size = (slots + 2) * sizeof(mps_word_t); + * That is: a dylan vector has two header words before the first slot. + */ +static void* MakeThing(mps_arena_t arena, mps_ap_t ap, size_t size) +{ + mps_word_t v; + ulongest_t words; + ulongest_t slots; + + words = (size + (sizeof(mps_word_t) - 1) ) / sizeof(mps_word_t); + if(words < 2) + words = 2; + + slots = words - 2; + die(make_dylan_vector(&v, ap, slots), "make_dylan_vector"); + get(arena); + + return (void *)v; +} + +static void BigdropSmall(mps_arena_t arena, mps_ap_t ap, size_t big, char small_ref) +{ + static unsigned keepCount = 0; + unsigned i; + + mps_arena_park(arena); + for(i = 0; i < 100; i++) { + (void) MakeThing(arena, ap, big); + if(small_ref == 'A') { + myrootAmbig[keepCount++ % myrootAmbigCOUNT] = MakeThing(arena, ap, 1); + } else if(small_ref == 'E') { + myrootExact[keepCount++ % myrootExactCOUNT] = MakeThing(arena, ap, 1); + } else { + cdie(0, "BigdropSmall: small must be 'A' or 'E'.\n"); + } + } +} + + +/* df -- diversity function + * + * Either deterministic based on "number", or 'random' (ie. call rnd). 
+ */ + +static unsigned long df(unsigned randm, unsigned number) +{ + if(randm == 0) { + return number; + } else { + return rnd(); + } +} + +static void Make(mps_arena_t arena, mps_ap_t ap, unsigned randm, unsigned keep1in, unsigned keepTotal, unsigned keepRootspace, unsigned sizemethod) +{ + unsigned keepCount = 0; + unsigned objCount = 0; + + Insist(keepRootspace <= myrootExactCOUNT); + + objCount = 0; + while(keepCount < keepTotal) { + mps_word_t v; + unsigned slots = 2; /* minimum */ + switch(sizemethod) { + case 0: { + /* minimum */ + slots = 2; + break; + } + case 1: { + slots = 2; + if(df(randm, objCount) % 10000 == 0) { + printf("*"); + slots = 300000; + } + break; + } + case 2: { + slots = 2; + if(df(randm, objCount) % 6661 == 0) { /* prime */ + printf("*"); + slots = 300000; + } + break; + } + default: { + printf("bad script command: sizemethod %u unknown.\n", sizemethod); + cdie(FALSE, "bad script command!"); + break; + } + } + die(make_dylan_vector(&v, ap, slots), "make_dylan_vector"); + DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(objCount); + DYLAN_VECTOR_SLOT(v, 1) = (mps_word_t)NULL; + objCount++; + if(df(randm, objCount) % keep1in == 0) { + /* keep this one */ + myrootExact[df(randm, keepCount) % keepRootspace] = (void*)v; + keepCount++; + } + get(arena); + } + printf(" ...made and kept: %u objects, storing cyclically in " + "first %u roots " + "(actually created %u objects, in accord with " + "keep-1-in %u).\n", + keepCount, keepRootspace, objCount, keep1in); +} + + +static void Rootdrop(char rank_char) +{ + size_t i; + + if(rank_char == 'A') { + for(i = 0; i < myrootAmbigCOUNT; ++i) { + myrootAmbig[i] = NULL; + } + } else if(rank_char == 'E') { + for(i = 0; i < myrootExactCOUNT; ++i) { + myrootExact[i] = NULL; + } + } else { + cdie(0, "Rootdrop: rank must be 'A' or 'E'.\n"); + } +} + + +#define stackwipedepth 50000 +static void stackwipe(void) +{ + size_t iw; + unsigned long aw[stackwipedepth]; + + /* Do some pointless work that the compiler won't 
optimise away, so that + this function wipes over the stack by filling stuff into the "aw" + array. */ + + /* https://xkcd.com/710/ */ + /* I don't want my friends to stop calling; I just want the */ + /* compiler to stop optimising away my code. */ + + /* Do you ever get two even numbers next to each other? Hmmmm :-) */ + for(iw = 0; iw < stackwipedepth; iw++) { + if((iw & 1) == 0) { + aw[iw] = 1; + } else { + aw[iw] = 0; + } + } + for(iw = 1; iw < stackwipedepth; iw++) { + if(aw[iw - 1] + aw[iw] != 1) { + printf("Errrr....\n"); + break; + } + } +} + + +static void StackScan(mps_arena_t arena, int on) +{ + if(on) { + Insist(root_stackreg == NULL); + die(mps_root_create_thread(&root_stackreg, arena, + stack_thr, stack_start), + "root_stackreg"); + Insist(root_stackreg != NULL); + } else { + Insist(root_stackreg != NULL); + mps_root_destroy(root_stackreg); + root_stackreg = NULL; + Insist(root_stackreg == NULL); + } +} + + +/* checksi -- check count of sscanf items is correct + */ + +static void checksi(int si, int si_shouldBe, const char *script, const char *scriptAll) +{ + if(si != si_shouldBe) { + printf("bad script command (sscanf found wrong number of params) %s (full script %s).\n", script, scriptAll); + cdie(FALSE, "bad script command!"); + } +} + +/* testscriptC -- actually runs a test script + * + */ +static void testscriptC(mps_arena_t arena, mps_ap_t ap, const char *script) +{ + const char *scriptAll = script; + int si, sb; /* sscanf items, sscanf bytes */ + + while(*script != '\0') { + switch(*script) { + case 'C': { + si = sscanf(script, "Collect%n", + &sb); + checksi(si, 0, script, scriptAll); + script += sb; + printf(" Collect\n"); + stackwipe(); + die(mps_arena_collect(arena), "mps_arena_collect"); + mps_arena_release(arena); + break; + } + case 'K': { + si = sscanf(script, "Katalog()%n", + &sb); + checksi(si, 0, script, scriptAll); + script += sb; + printf(" Katalog()\n"); + CatalogDo(arena, ap); + break; + } + case 'B': { + ulongest_t big = 0; + 
char small_ref = ' '; + si = sscanf(script, "BigdropSmall(big %"SCNuLONGEST", small %c)%n", + &big, &small_ref, &sb); + checksi(si, 2, script, scriptAll); + script += sb; + printf(" BigdropSmall(big %"PRIuLONGEST", small %c)\n", + big, small_ref); + BigdropSmall(arena, ap, big, small_ref); + break; + } + case 'M': { + unsigned randm = 0; + unsigned keep1in = 0; + unsigned keepTotal = 0; + unsigned keepRootspace = 0; + unsigned sizemethod = 0; + si = sscanf(script, "Make(random %u, keep-1-in %u, keep %u, rootspace %u, sizemethod %u)%n", + &randm, &keep1in, &keepTotal, &keepRootspace, &sizemethod, &sb); + checksi(si, 5, script, scriptAll); + script += sb; + printf(" Make(random %u, keep-1-in %u, keep %u, rootspace %u, sizemethod %u).\n", + randm, keep1in, keepTotal, keepRootspace, sizemethod); + Make(arena, ap, randm, keep1in, keepTotal, keepRootspace, sizemethod); + break; + } + case 'R': { + char drop_ref = ' '; + si = sscanf(script, "Rootdrop(rank %c)%n", + &drop_ref, &sb); + checksi(si, 1, script, scriptAll); + script += sb; + printf(" Rootdrop(rank %c)\n", drop_ref); + Rootdrop(drop_ref); + break; + } + case 'S': { + unsigned on = 0; + si = sscanf(script, "StackScan(%u)%n", + &on, &sb); + checksi(si, 1, script, scriptAll); + script += sb; + printf(" StackScan(%u)\n", on); + StackScan(arena, on != 0); + break; + } + case 'Z': { + unsigned long s0; + si = sscanf(script, "ZRndStateSet(%lu)%n", + &s0, &sb); + checksi(si, 1, script, scriptAll); + script += sb; + printf(" ZRndStateSet(%lu)\n", s0); + rnd_state_set(s0); + break; + } + case ' ': + case ',': + case '.': { + script++; + break; + } + default: { + printf("unknown script command '%c' (script %s).\n", + *script, scriptAll); + cdie(FALSE, "unknown script command!"); + return; + } + } + get(arena); + } + +} + + +/* testscriptB -- create pools and objects; call testscriptC */ + +typedef struct testDataStruct { + mps_arena_t arena; + mps_thr_t thr; + const char *script; +} testDataStruct; + +static void 
testscriptB(testDataStruct *testData) +{ + mps_arena_t arena; + mps_thr_t thr; + const char *script; + mps_fmt_t fmt; + mps_chain_t chain; + mps_pool_t amc; + size_t i; + mps_root_t root_table_Ambig; + mps_root_t root_table_Exact; + mps_ap_t ap; + void *stack_starts_here; /* stack scanning starts here */ + + arena = testData->arena; + thr = testData->thr; + script = testData->script; + + die(mps_fmt_create_A(&fmt, arena, dylan_fmt_A()), "fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + die(mps_pool_create(&amc, arena, mps_class_amc(), fmt, chain), + "pool_create amc"); + + for(i = 0; i < myrootAmbigCOUNT; ++i) { + myrootAmbig[i] = NULL; + } + die(mps_root_create_table(&root_table_Ambig, arena, mps_rank_ambig(), (mps_rm_t)0, + myrootAmbig, (size_t)myrootAmbigCOUNT), + "root_create - ambig"); + + for(i = 0; i < myrootExactCOUNT; ++i) { + myrootExact[i] = NULL; + } + die(mps_root_create_table(&root_table_Exact, arena, mps_rank_exact(), (mps_rm_t)0, + myrootExact, (size_t)myrootExactCOUNT), + "root_create - exact"); + + die(mps_ap_create(&ap, amc, mps_rank_exact()), "ap_create"); + + /* root_stackreg: stack & registers are ambiguous roots = mutator's workspace */ + stack_start = &stack_starts_here; + stack_thr = thr; + die(mps_root_create_thread(&root_stackreg, arena, + stack_thr, stack_start), + "root_stackreg"); + + + mps_message_type_enable(arena, mps_message_type_gc_start()); + mps_message_type_enable(arena, mps_message_type_gc()); + mps_message_type_enable(arena, mps_message_type_finalization()); + + testscriptC(arena, ap, script); + + printf(" Destroy roots, pools, arena etc.\n\n"); + mps_arena_park(arena); + mps_root_destroy(root_stackreg); + mps_ap_destroy(ap); + mps_root_destroy(root_table_Exact); + mps_root_destroy(root_table_Ambig); + mps_pool_destroy(amc); + mps_chain_destroy(chain); + mps_fmt_destroy(fmt); +} + + +/* testscriptA -- create arena and thr; call testscriptB + */ +static void testscriptA(const char 
*script) +{ + mps_arena_t arena; + int si, sb; /* sscanf items, sscanf bytes */ + unsigned long arenasize = 0; + mps_thr_t thr; + testDataStruct testData; + + si = sscanf(script, "Arena(size %lu)%n", &arenasize, &sb); + cdie(si == 1, "bad script command: Arena(size %%lu)"); + script += sb; + printf(" Create arena, size = %lu.\n", arenasize); + + /* arena */ + MPS_ARGS_BEGIN(args) { + /* Randomize pause time as a regression test for job004011. */ + MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, rnd_pause_time()); + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arenasize); + die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), + "arena_create\n"); + } MPS_ARGS_END(args); + + /* thr: used to stop/restart multiple threads */ + die(mps_thread_reg(&thr, arena), "thread"); + + /* call testscriptB! */ + testData.arena = arena; + testData.thr = thr; + testData.script = script; + testscriptB(&testData); + + mps_thread_dereg(thr); + mps_arena_destroy(arena); +} + + +/* main -- runs various test scripts + * + */ +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + + /* 1<<19 == 524288 == 1/2 Mebibyte */ + /* 16<<20 == 16777216 == 16 Mebibyte */ + + /* 1<<19 == 524288 == 1/2 Mebibyte */ + /* This is bogus! sizemethod 1 can make a 300,000-slot dylan vector, ie. 1.2MB. 
*/ + /* Try 10MB arena */ + /* testscriptA("Arena(size 10485760), Make(keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), Collect."); */ + if(1) { + testscriptA("Arena(size 10000000), " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), Collect, " + "Rootdrop(rank E), StackScan(0), Collect, Collect, StackScan(1), " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), Collect, " + "Rootdrop(rank E), Collect, Collect."); + } + if(1) { + testscriptA("Arena(size 4000000), " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), Collect, " + "Rootdrop(rank E), StackScan(0), Collect, Collect, StackScan(1), " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), Collect, " + "Rootdrop(rank E), Collect, Collect."); + } + if(1) { + testscriptA("Arena(size 4000000), " + "Make(random 1, keep-1-in 5, keep 10000, rootspace 30000, sizemethod 1), " + "Rootdrop(rank E), Collect, " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), " + "Rootdrop(rank E), Collect, " + "Make(random 1, keep-1-in 5, keep 100000, rootspace 30000, sizemethod 1), " + "Rootdrop(rank E), Collect, " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), " + "Rootdrop(rank E), Collect."); + } + if(1) { + testscriptA("Arena(size 10485760), " + "Make(random 0, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 2), " + "Collect, " + "Rootdrop(rank E), Collect, Collect, " + "Make(random 0, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 2), " + "Collect, " + "Rootdrop(rank E), Collect, Collect."); + } + if(1) { + testscriptA("Arena(size 10485760), " + "ZRndStateSet(239185672), " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), Collect, " + "Rootdrop(rank E), StackScan(0), Collect, Collect, StackScan(1), " + "ZRndStateSet(239185672), " + "Make(random 1, keep-1-in 5, keep 50000, rootspace 30000, sizemethod 1), Collect, " + "Rootdrop(rank E), 
Collect, Collect."); + } + + /* LSP -- Large Segment Padding (job001811) + * + * BigdropSmall creates a big object & drops ref to it, + * then a small object but keeps a ref to it. Do this 100 + * times. (It also parks the arena, to avoid incremental + * collections). + * + * If big is 28000, it is <= 28672 bytes and therefore fits on a seg + * of 7 pages. AMC classes this as a Medium Segment and uses the + * remainder, placing the subsequent small object there. If the ref + * to small is "A" = ambig, the entire 7-page seg is retained. + * + * If big is > 28672 bytes (7 pages), it requires a seg of >= 8 + * pages. AMC classes this as a Large Segment, and does LSP (Large + * Segment Padding), to prevent the subsequent small object being + * placed in the remainder. If the ref to small is "A" = ambig, + * only its 1-page seg is retained. This greatly reduces the + * retention page-count. + * + * If the ref to small is "E" = exact, then the small object is + * preserved-by-copy onto a new seg. In this case there is no + * seg/page retention, so LSP does not help. It has a small cost: + * total pages increase from 700 to 900. So in this case (no ambig + * retention at all, pessimal allocation pattern) LSP would slightly + * increase the frequency of minor collections. + */ + /* 7p = 28672b; 8p = 32768b */ + /* 28000 = Medium segment */ + /* 29000 = Large segment */ + testscriptA("Arena(size 16777216), BigdropSmall(big 28000, small A), Collect."); + testscriptA("Arena(size 16777216), BigdropSmall(big 29000, small A), Collect."); + testscriptA("Arena(size 16777216), BigdropSmall(big 28000, small E), Collect."); + testscriptA("Arena(size 16777216), BigdropSmall(big 29000, small E), Collect."); + + /* 16<<20 == 16777216 == 16 Mebibyte */ + /* See .catalog.broken. + testscriptA("Arena(size 16777216), Katalog(), Collect."); + */ + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. 
COPYRIGHT AND LICENSE + * + * Copyright (C) 2008-2020 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/zmess.c b/mps/code/zmess.c new file mode 100644 index 00000000000..e0431f63c38 --- /dev/null +++ b/mps/code/zmess.c @@ -0,0 +1,588 @@ +/* zmess.c: Message test + * + * $Id$ + * Copyright (c) 2008-2020 Ravenbrook Limited. See end of file for license. + * Portions copyright (C) 2002 Global Graphics Software. + * + * OBJECTIVE + * + * Test MPS messages. In particular: + * - Check prompt finalization even when there are several segs + * of guardians. This test replaces fin1658a.c. See job001658. 
+ * - Check GC messages are correctly generated, posted, and queued, + * regardless of when the client gets them. (Note: "get" means + * "mps_message_get", throughout). See job001989. + * + * Please add tests for other message behaviour into this file. + * Expand the script language as necessary! RHSK 2008-12-19. + * + * + * DESIGN OVERVIEW + * + * Client (this test) does various actions that are known to provoke + * MPS messages. Client (this test) gets these messages at variable + * times. + * + * Verification is: + * - client gets all the expected messages, and no others, at the + * expected time; + * - no asserts or failures. + * + * Additionally: client checks the message order. MPS specification + * does not currently guarantee that messages are queued in order of + * posting, but in fact they should be, and it is a useful check. + * (But finalization messages from a single collection may be posted + * in any order). + * + * The actions, messages to check for, and get times, are scripted + * using a simple text code: + * C - action: request garbage-collection; + * F - action: make a (registered)finalized object unreachable + * (note: this drops the myroot ref, but some objects are + * deliberately kept alive by additional references; see + * .keep-alive) + * b - message produced: collection begin (mps_message_type_gc_start); + * e - message produced: collection end (mps_message_type_gc); + * f - message produced: finalization (mps_message_type_finalization); + * . - get messages. + * ! - get messages without discarding (see .discard). + * + * Example: + * script "Cbe.FFCbffe.Cbe" + * means: + * Request a collection and check for _gc_start and _gc messages + * (in that order, and no other messages). Then drop refs to two + * objects, request another collection, and check for _gc_start, + * the two finalization messages (in either order), and _gc. 
Then + * request a third collection and end the test WITHOUT GETTING + * the last two messages (note: no "."), to test that + * mps_arena_destroy copes with ungot messages. + * + * Each script runs in a newly created arena. The arena is clamped so + * that collections only happen when the script requests them. + * + * + * CODE OVERVIEW + * + * main() has the list of testscripts. + * + * testscriptA() sets up a new arena and calls testscriptB(). + * + * testscriptB() creates pools and objects for this test script. + * + * testscriptC() actually runs the script. + * + * + * DEPENDENCIES + * + * This test uses the dylan object format, but the reliance on this + * particular format is not great and could be removed. + * + * + * BUGS, FUTURE IMPROVEMENTS, ETC + * + * There are a few special objects with refs to each other (see + * .keep-alive). For clarity and flexibility, there should be special + * actions to drop the myroot ref to these, eg. '0', '1', '2', 'Y', 'Z'. + * Whereas (for clarity) 'F' should be an action that drops the myroot + * ref to a plain (non-kept-alive) object, thereby simply provoking a + * single finalization message. + * + * Actions could be expanded to include: + * - mps_arena_start_collect; + * - mps_arena_step; + * - automatic (not client-requested) collections. + * etc. + * + * HISTORY + * + * This code was created by first copying . 
+ */
+
+#include "testlib.h"
+#include "mpslib.h"
+#include "mps.h"
+#include "mpscamc.h"
+#include "mpsavm.h"
+#include "fmtdy.h"
+#include "fmtdytst.h"
+#include "mpstd.h"
+
+#include <stdio.h> /* printf */
+
+
+#define testArenaSIZE ((size_t)16<<20)
+
+/* usually (ArenaGrainSize / sizeof(Ref)) = 1024 */
+/* so choose 3000 to force 3 segments of guardians */
+#define myrootCOUNT 3000
+
+/* testChain -- generation parameters for the test */
+#define genCOUNT 2
+static mps_gen_param_s testChain[genCOUNT] = {
+  { 150, 0.85 }, { 170, 0.45 } };
+
+
+/* note: static, so auto-initialised to NULL */
+static void *myroot[myrootCOUNT];
+enum {
+  rootSTATE,
+  deadSTATE,
+  finalizableSTATE,
+  finalizedSTATE
+};
+static int state[myrootCOUNT];
+
+
+/* report -- get and check messages
+ *
+ * Get messages, report what was got, check they are the expected
+ * messages, and (for finalization messages) check that these objects
+ * should have been finalized (because we made them unreachable).
+ *
+ * .discard: The client should always call mps_message_discard when
+ * it has finished with the message. But calling with the "discard"
+ * parameter set to false lets us check how the MPS handles naughty
+ * clients. The undiscarded messages must be cleared up by
+ * ArenaDestroy. 
+ */ +static void report(mps_arena_t arena, const char *pm, Bool discard) +{ + char mFound = '\0'; + mps_message_type_t type; + + while (mps_message_queue_type(&type, arena)) { + mps_message_t message; + mps_word_t *obj; + mps_word_t objind; + mps_addr_t objaddr; + + cdie(mps_message_get(&message, arena, type), + "get"); + + switch(type) { + case mps_message_type_gc_start(): { + printf(" Begin Collection\n"); + mFound = 'b'; + break; + } + case mps_message_type_gc(): { + printf(" End Collection\n"); + mFound = 'e'; + break; + } + case mps_message_type_finalization(): { + mps_message_finalization_ref(&objaddr, arena, message); + obj = objaddr; + objind = DYLAN_INT_INT(DYLAN_VECTOR_SLOT(obj, 0)); + printf(" Finalization for object %"PRIuLONGEST" at %p\n", + (ulongest_t)objind, objaddr); + cdie(myroot[objind] == NULL, "finalized live"); + cdie(state[objind] == finalizableSTATE, "not finalizable"); + state[objind] = finalizedSTATE; + mFound = 'f'; + break; + } + default: { + cdie(0, "message type"); + break; + } + } + + if(discard) { + mps_message_discard(arena, message); /* .discard */ + } + + cdie('\0' != *pm, "Found message, but did not expect any"); + cdie(mFound == *pm, "Found message type != Expected message type"); + pm++; + } + + mFound = '\0'; + cdie(mFound == *pm, "No message found, but expected one"); +} + + +/* testscriptC -- actually runs a test script + * + */ +static void testscriptC(mps_arena_t arena, const char *script) +{ + unsigned isLoNext = 1; + unsigned loNext = 0; + unsigned hiNext = myrootCOUNT - 1; + unsigned i; + const char *scriptAll = script; + char am[100]; /* Array of Messages (expected but not yet got) */ + char *pmNext = am; /* Pointer to where Next Message will be stored */ + + while(*script != '\0') { + switch(*script) { + case '.': { + *pmNext = '\0'; + printf(" Getting messages (expecting \"%s\")...\n", am); + report(arena, am, TRUE); + printf(" ...done.\n"); + pmNext = am; + break; + } + case '!': { + /* Like '.', but not discarding 
got messages; see .discard */ + *pmNext = '\0'; + printf(" Getting messages (expecting \"%s\")...\n", am); + report(arena, am, FALSE); /* FALSE: see .discard */ + printf(" ...done.\n"); + printf(" NOTE: DELIBERATELY FAILING TO DISCARD MESSAGES, " + "TO SEE HOW MPS COPES.\n"); /* .discard */ + pmNext = am; + break; + } + case 'C': { + printf(" Collect\n"); + die(mps_arena_collect(arena), "mps_arena_collect"); + break; + } + case 'F': { + /* (perhaps) make an object Finalizable + * + * .alternate: We alternately pick objects from the low and + * high ends of the myroot array. This is used to test for + * the defect described in job001658. + */ + Insist(loNext <= hiNext); + i = isLoNext ? loNext++ : hiNext--; + isLoNext = 1 - isLoNext; + + printf(" Drop myroot ref to object %u -- " + "this might make it Finalizable\n", i); + /* drop myroot ref, to perhaps make i finalizable */ + /* (but see .keep-alive) */ + myroot[i] = NULL; + state[i] = finalizableSTATE; + break; + } + case 'b': + case 'e': + case 'f': { + /* expect that MPS has posted a particular message */ + *pmNext++ = *script; + break; + } + default: { + printf("unknown script command %c (script %s).\n", + *script, scriptAll); + cdie(FALSE, "unknown script command"); + return; + } + } + Insist(am <= pmNext && pmNext < am + NELEMS(am)); + script++; + } +} + + +/* testscriptB -- create pools and objects; call testscriptC */ + +typedef struct testDataStruct { + mps_arena_t arena; + mps_thr_t thr; + const char *script; +} testDataStruct; + +static void testscriptB(testDataStruct *testData) +{ + mps_arena_t arena; + mps_thr_t thr; + const char *script; + mps_fmt_t fmt; + mps_chain_t chain; + mps_pool_t amc; + mps_root_t root_table; + mps_ap_t ap; + mps_root_t root_stackreg; + size_t i; + int N = myrootCOUNT - 1; + void *stack_starts_here; /* stack scanning starts here */ + + arena = testData->arena; + thr = testData->thr; + script = testData->script; + + die(mps_fmt_create_A(&fmt, arena, dylan_fmt_A()), 
"fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + die(mps_pool_create(&amc, arena, mps_class_amc(), fmt, chain), + "pool_create amc"); + for(i = 0; i < myrootCOUNT; ++i) { + myroot[i] = NULL; + } + die(mps_root_create_table(&root_table, arena, mps_rank_exact(), (mps_rm_t)0, + myroot, (size_t)myrootCOUNT), + "root_create"); + die(mps_ap_create(&ap, amc, mps_rank_exact()), "ap_create"); + + /* root_stackreg: stack & registers are ambiguous roots = mutator's workspace */ + die(mps_root_create_thread(&root_stackreg, arena, + thr, &stack_starts_here), + "root_stackreg"); + + /* Make myrootCOUNT registered-for-finalization objects. */ + /* Each is a dylan vector with 2 slots, inited to: (index, NULL) */ + for(i = 0; i < myrootCOUNT; ++i) { + mps_word_t v; + mps_addr_t v_ref; + die(make_dylan_vector(&v, ap, 2), "make_dylan_vector"); + DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(i); + DYLAN_VECTOR_SLOT(v, 1) = (mps_word_t)NULL; + v_ref = (mps_addr_t)v; + die(mps_finalize(arena, &v_ref), "finalize"); + myroot[i] = (void*)v; + state[i] = rootSTATE; + } + + /* .keep-alive: Create some additional inter-object references. + * + * 1 and N-1 don't die until myroot refs to both have been nulled. + * + * 2 and 3 don't die until myroot refs to both have been nulled. + * + * We do this to check that reachability via non-root refs prevents + * finalization. + */ + + /* Leave 0 and N containing NULL refs */ + + /* Make 1 and N-1 refer to each other */ + DYLAN_VECTOR_SLOT(myroot[1] , 1) = (mps_word_t)myroot[N-1]; + DYLAN_VECTOR_SLOT(myroot[N-1], 1) = (mps_word_t)myroot[1]; + + /* Make 2 and 3 refer to each other */ + DYLAN_VECTOR_SLOT(myroot[2], 1) = (mps_word_t)myroot[3]; + DYLAN_VECTOR_SLOT(myroot[3], 1) = (mps_word_t)myroot[2]; + + /* Stop stack scanning, otherwise stack or register dross from */ + /* these setup functions can cause unwanted object retention, */ + /* which would mean we don't get the finalization messages we */ + /* expect. 
*/ + mps_root_destroy(root_stackreg); + + mps_message_type_enable(arena, mps_message_type_gc_start()); + mps_message_type_enable(arena, mps_message_type_gc()); + mps_message_type_enable(arena, mps_message_type_finalization()); + + testscriptC(arena, script); + + mps_arena_park(arena); + mps_ap_destroy(ap); + mps_root_destroy(root_table); + mps_pool_destroy(amc); + mps_chain_destroy(chain); + mps_fmt_destroy(fmt); +} + + +/* testscriptA -- create arena and thr; call testscriptB + */ +static void testscriptA(const char *script) +{ + mps_arena_t arena; + mps_thr_t thr; + testDataStruct testData; + + printf("Script: \"%s\"\n Create arena etc.\n", script); + + /* arena */ + die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), + "arena_create"); + mps_arena_clamp(arena); + + /* thr: used to stop/restart multiple threads */ + die(mps_thread_reg(&thr, arena), "thread"); + + /* call testscriptB! */ + testData.arena = arena; + testData.thr = thr; + testData.script = script; + testscriptB(&testData); + + mps_thread_dereg(thr); + mps_arena_destroy(arena); + + printf(" Destroy arena etc.\n\n"); + +} + +/* TIMCA_remote -- TraceIdMessagesCreate Alloc remote control + * + * In low memory situations, ControlAlloc may be unable to allocate + * memory for GC messages. This needs to work flawlessly, but is + * hard to test. + * + * To simulate it for testing purposes, add the following lines to + * traceanc.c, before the definition of TraceIdMessagesCreate: + * #define ControlAlloc !TIMCA_remote() ? ResFAIL : ControlAlloc + * extern Bool TIMCA_remote(void); + * (See changelist 166959). + * + * TIMCA_remote returns a Bool, true for let "ControlAlloc succeed". 
+ */ + +#ifdef TEST_CONTROLALLOC_FAILURE + +static const char *TIMCA_str = ""; +static int TIMCA_done = 0; +static void TIMCA_setup(const char *string) +{ + /* TIMCA_setup -- TraceIdMessagesCreate Alloc remote control + * + * 1..9 -- succeed this many times + * 0 -- fail once + * NUL -- succeed from now on + * + * Eg: "1400" succeeds 5 times, fails 2 times, then succeeds forever. + */ + TIMCA_str = string; + TIMCA_done = 0; +} + +extern Bool TIMCA_remote(void); +Bool TIMCA_remote(void) +{ + Bool succeed; + + if(*TIMCA_str == '\0') { + succeed = TRUE; + } else if(*TIMCA_str == '0') { + succeed = FALSE; + TIMCA_str++; + } else { + Insist(*TIMCA_str >= '1' && *TIMCA_str <= '9'); + succeed = TRUE; + TIMCA_done++; + if(TIMCA_done == *TIMCA_str - '0') { + TIMCA_done = 0; + TIMCA_str++; + } + } + + return succeed; +} + +#endif /* TEST_CONTROLALLOC_FAILURE */ + + +/* main -- runs various test scripts + * + */ +int main(int argc, char *argv[]) +{ + testlib_init(argc, argv); + + /* Scripts that should fail (uncomment to show failure is detected) */ + /*testscriptA("C.");*/ + /*testscriptA("b.");*/ + + /* The most basic scripts */ + testscriptA("."); + testscriptA("Cbe."); + testscriptA("Cbe.Cbe."); + + /* Get messages, but not promptly */ + testscriptA(".Cbe.CbeCbeCbe."); + + /* Ungot messages at ArenaDestroy */ + testscriptA("Cbe"); + testscriptA("Cbe.CbeCbeCbe"); + + /* Fail to call mps_message_discard */ + testscriptA("Cbe!"); + testscriptA("Cbe!CbeCbeCbe!"); + + /* Simple finalization + * + * These tests rely on the particular order in which the "F" command + * nulls-out references. Not every "F" makes an object finalizable. + * See .keep-alive. + */ + testscriptA("FFCbffe."); + testscriptA("FFCbffe.FFCbffe."); + testscriptA("FFCbffe.FCbe.F.Cbffe.FFCbfe.FF.Cbfffe."); + + /* Various other scripts */ + testscriptA("Cbe.FFCbffe.Cbe"); + + /* Simulate low memory situations + * + * These scripts only work with a manually edited traceanc.c -- + * see TIMCA_remote() above. 
+ *
+ * When TraceIdMessagesCreate is trying to pre-allocate GC messages,
+ * either "0" or "10" makes it fail -- "0" fails the trace start
+ * message alloc, whereas "10" fails the trace end message alloc.
+ * In either case TraceIdMessagesCreate promptly gives up, and
+ * neither start nor end message will be sent for the next trace.
+ *
+ * .
+ */
+#ifdef TEST_CONTROLALLOC_FAILURE
+  {
+    /* ArenaCreate unable to pre-allocate: THESE SHOULD FAIL */
+    /* manually edit if(0) -> if(1) to test these */
+    if(0) {
+      TIMCA_setup("0"); testscriptA("Fail at create 1");
+    }
+    if(0) {
+      TIMCA_setup("10"); testscriptA("Fail at create 2");
+    }
+
+    /* ArenaDestroy with no pre-allocated messages */
+    TIMCA_setup("20"); testscriptA("Cbe.");
+    TIMCA_setup("210"); testscriptA("Cbe.");
+
+    /* Collect with no pre-allocated messages: drops messages, */
+    /* hence "C." instead of "Cbe.". Also, in diagnostic varieties, */
+    /* these should produce a "droppedMessages" diagnostic at */
+    /* ArenaDestroy. */
+    TIMCA_setup("2022"); testscriptA("Cbe.C.Cbe.");
+    TIMCA_setup("21022"); testscriptA("Cbe.C.Cbe.");
+
+    /* 2 Collects and ArenaDestroy with no pre-allocated messages */
+    TIMCA_setup("2000"); testscriptA("Cbe.C.C.");
+    TIMCA_setup("201010"); testscriptA("Cbe.C.C.");
+
+    TIMCA_setup(""); /* must reset it! */
+  }
+#endif
+
+  printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
+  return 0;
+}
+
+
+/* C. COPYRIGHT AND LICENSE
+ *
+ * Copyright (C) 2008-2020 Ravenbrook Limited .
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ztfm.c b/mps/code/ztfm.c new file mode 100644 index 00000000000..f5a497130c5 --- /dev/null +++ b/mps/code/ztfm.c @@ -0,0 +1,1459 @@ +/* ztfm.c: Transforms test + * + * $Id$ + * Copyright (c) 2011-2022 Ravenbrook Limited. See end of file for license. + * + * .overview: This test creates data structures and then applies MPS + * transforms (see design.mps.transform) to them and verifies the + * result. It uses various checksums to verify that the structures + * are equivalent before and after the transform. 
+ *
+ * .issues: TODO: This test has issues and needs refactoring:
+ *
+ * - GitHub issue #242
+ *   "Testing of transforms is unclear and overengineered"
+ *
+ * - GitHub issue #243
+ *   "ztfm.c fails to meet its requirement to test transforms on all
+ *   platforms"
+ */
+
+#include "fmtdy.h"
+#include "fmtdytst.h"
+#include "mps.h"
+#include "mpsavm.h"
+#include "mpscamc.h"
+#include "mpslib.h"
+#include "testlib.h"
+
+#include <stdio.h> /* printf */
+
+/* Hacky progress output with switches. */
+/* TODO: Tidy this up by extending testlib if necessary. */
+#define progressf(args) \
+  printf args;
+#define Xprogressf(args) \
+  do{if(0)printf args;}while(FALSE)
+
+/* testChain -- generation parameters for the test */
+#define genCOUNT 2
+static mps_gen_param_s testChain[genCOUNT] = {
+  { 100, 0.85 }, { 170, 0.45 } };
+
+
+/* myroot -- arrays of references that are the root */
+#define myrootAmbigCOUNT 30000
+static void *myrootAmbig[myrootAmbigCOUNT];
+#define myrootExactCOUNT 1000000
+static void *myrootExact[myrootExactCOUNT];
+
+static mps_root_t root_stackreg;
+static void *stack_start;
+static mps_thr_t stack_thr;
+
+
+/* =========== Transform ==============*/
+static void get(mps_arena_t arena);
+
+static ulongest_t serial = 0;
+
+
+/* Tree nodes
+ *
+ * The node structure is punned with a Dylan vector. The first two
+ * fields map to the Dylan wrapper and vector length. The fields with
+ * suffix "dyi" will be tagged as Dylan integers. TODO: Fix this
+ * somewhat unsafe punning.
+ *
+ * TODO: Is this actually a tree or a graph?
+ *
+ * To make a node:
+ * - use next unique serial;
+ * - choose an id (eg. next in sequence);
+ * - choose a version (eg. 0).
+ *
+ * To make a New node from (or in connection with) an Old:
+ * - use next unique serial;
+ * - copy id of Old;
+ * - ver = ver(Old) + 1. 
+ *
+ * To invalidate an Old node, when adding an OldNew pair for it:
+ * - ver = -1
+ */
+
+struct node_t {
+  mps_word_t word0;          /* Dylan wrapper pointer */
+  mps_word_t word1;          /* Dylan vector length */
+  mps_word_t serial_dyi;     /* unique across every node ever */
+  mps_word_t id_dyi;         /* .id: replacement nodes copy this */
+  mps_word_t ver_dyi;        /* .version: distinguish new from old */
+  struct node_t *left;
+  struct node_t *right;
+  mps_word_t tour_dyi;       /* latest tour that visited this node */
+  mps_word_t tourIdHash_dyi; /* hash of node ids, computed last tour */
+};
+
+
+/* Tour -- visit every node in the world, calculating a report with hashes
+ *
+ * A tour starts with the node at world[0], tours the graph reachable
+ * from it, then any further bits of graph reachable from world[1],
+ * and so on.
+ *
+ * As it does so, the tour computes a tourReport, characterising the
+ * state of everything reachable (from world) in the form of a few numbers.
+ *
+ * Each tour explores the subgraph rooted at each node exactly once.
+ * That is: if the tour re-encounters a node already visited on that
+ * tour, it uses the values already computed for that node.
+ *
+ * The tourIdHash deliberately depends on the order in which nodes are
+ * encountered. Therefore, the tourIdHash of a tour depends on the
+ * entirety of the state of the world and all the world-reachable nodes.
+ *
+ * The tourVerSum, being a simple sum, does not depend on the order of
+ * visiting.
+ */
+
+/* Maximum count of node versions. TODO: Should not be an enum. 
*/ +enum { + cVer = 10 +}; + +typedef struct tourReportStruct { + ulongest_t tour; /* tour serial */ + ulongest_t tourIdHash; /* hash of node ids, computed last tour */ + ulongest_t acNodesVer[cVer]; /* count of nodes of each Ver */ +} *tourReport; + +static void tour_subgraph(tourReport tr_o, struct node_t *node); + +static ulongest_t tourSerial = 0; + +static void tourWorld(tourReport tr_o, mps_addr_t *world, ulongest_t countWorld) +{ + ulongest_t i; + + tourSerial += 1; + tr_o->tour = tourSerial; + tr_o->tourIdHash = 0; + for(i = 0; i < cVer; i++) { + tr_o->acNodesVer[i] = 0; + } + Xprogressf(( "[tour %"SCNuLONGEST"] BEGIN, world: %p, countWorld: %"SCNuLONGEST"\n", + tourSerial, (void *)world, countWorld)); + + for(i = 0; i < countWorld; i++) { + struct node_t *node; + node = (struct node_t *)world[i]; + tour_subgraph(tr_o, node); + tr_o->tourIdHash += i * (node ? DYI_INT(node->tourIdHash_dyi) : 0); + } +} + +static void tour_subgraph(tourReport tr_o, struct node_t *node) +{ + ulongest_t tour; + ulongest_t ver; + ulongest_t id; + struct node_t *left; + struct node_t *right; + ulongest_t tourIdHashLeft; + ulongest_t tourIdHashRight; + ulongest_t tourIdHash; + + Insist(tr_o != NULL); + + /* node == NULL is permitted */ + if(node == NULL) + return; + + tour = tr_o->tour; + if(DYI_INT(node->tour_dyi) == tour) + return; /* already visited */ + + /* this is a newly discovered node */ + Insist(DYI_INT(node->tour_dyi) < tour); + + /* mark as visited */ + node->tour_dyi = INT_DYI(tour); + + /* 'local' idHash = id, used for any re-encounters while computing the computed idHash */ + node->tourIdHash_dyi = node->id_dyi; + + /* record this node in the array of ver counts */ + ver = DYI_INT(node->ver_dyi); + Insist(ver < cVer); + tr_o->acNodesVer[ver] += 1; + + /* tour the subgraphs (NULL is permitted) */ + left = node->left; + right = node->right; + tour_subgraph(tr_o, left); + tour_subgraph(tr_o, right); + + /* computed idHash of subgraph at this node */ + id = 
DYI_INT(node->id_dyi); + tourIdHashLeft = left ? DYI_INT(left->tourIdHash_dyi) : 0; + tourIdHashRight = right ? DYI_INT(right->tourIdHash_dyi) : 0; + tourIdHash = (13*id + 17*tourIdHashLeft + 19*tourIdHashRight) & DYLAN_UINT_MASK; + Insist(tourIdHash <= DYLAN_UINT_MAX); + node->tourIdHash_dyi = INT_DYI(tourIdHash); + + Insist(DYI_INT(node->tour_dyi) == tour); + Xprogressf(( "[tour %"SCNuLONGEST"] new completed node: %p, ver: %"SCNuLONGEST", tourIdHash: %"SCNuLONGEST"\n", + tour, (void*)node, ver, tourIdHash )); +} + +static struct tourReportStruct trBefore; +static struct tourReportStruct trAfter; + +static void before(mps_addr_t *world, ulongest_t countWorld) +{ + tourWorld(&trBefore, world, countWorld); +} + +static void after(mps_addr_t *world, ulongest_t countWorld, + ulongest_t verOld, + longest_t deltaCVerOld, + ulongest_t verNew, + longest_t deltaCVerNew) +{ + longest_t dCVerOld; + longest_t dCVerNew; + + tourWorld(&trAfter, world, countWorld); + + dCVerOld = ((long)trAfter.acNodesVer[verOld] - (long)trBefore.acNodesVer[verOld]); + dCVerNew = ((long)trAfter.acNodesVer[verNew] - (long)trBefore.acNodesVer[verNew]); + + progressf(("tourWorld: (%"PRIuLONGEST" %"PRIuLONGEST":%"PRIuLONGEST"/%"PRIuLONGEST":%"PRIuLONGEST") -> (%"PRIuLONGEST" %"PRIuLONGEST":%+"PRIdLONGEST"/%"PRIuLONGEST":%+"PRIdLONGEST"), %s\n", + trBefore.tourIdHash, + verOld, + trBefore.acNodesVer[verOld], + verNew, + trBefore.acNodesVer[verNew], + + trAfter.tourIdHash, + verOld, + dCVerOld, + verNew, + dCVerNew, + + trBefore.tourIdHash == trAfter.tourIdHash ? 
"same" : "XXXXX DIFFERENT XXXXX" + )); + Insist(trBefore.tourIdHash == trAfter.tourIdHash); + Insist(dCVerOld == deltaCVerOld); + Insist(dCVerNew == deltaCVerNew); +} + + +static mps_res_t mps_arena_transform_objects_list(mps_bool_t *transform_done_o, + mps_arena_t mps_arena, + mps_addr_t *old_list, + size_t old_list_count, + mps_addr_t *new_list, + size_t new_list_count) +{ + mps_res_t res; + mps_transform_t transform; + mps_bool_t applied = FALSE; + + Insist(old_list_count == new_list_count); + + res = mps_transform_create(&transform, mps_arena); + if(res == MPS_RES_OK) { + /* We have a transform */ + res = mps_transform_add_oldnew(transform, old_list, new_list, old_list_count); + if(res == MPS_RES_OK) { + res = mps_transform_apply(&applied, transform); + } + Insist(!applied || res == MPS_RES_OK); + mps_transform_destroy(transform); + } + + /* Always set *transform_done_o (even if there is also a non-ResOK */ + /* return code): it is a status report, not a material return. */ + *transform_done_o = applied; + return res; +} + +static void Transform(mps_arena_t arena, mps_ap_t ap) +{ + ulongest_t i; + ulongest_t keepCount = 0; + mps_word_t v; + struct node_t *node; + mps_res_t res; + mps_bool_t transform_done; + ulongest_t old, new; + ulongest_t perset; + + mps_arena_park(arena); + + { + /* Test with sets of pre-built nodes, a known distance apart. + * + * This gives control over whether new nodes are on the same + * segment as the olds or not. + */ + + ulongest_t iPerset; + ulongest_t aPerset[] = {0, 1, 1, 10, 10, 1000, 1000}; + ulongest_t cPerset = NELEMS(aPerset); + ulongest_t stepPerset; + ulongest_t countWorld = 0; + + /* randomize the order of set sizes from aPerset */ + stepPerset = 1 + (rnd() % (cPerset - 1)); + + progressf(("INT_DYI(1): %"PRIuLONGEST"; DYI_INT(5): %"PRIuLONGEST"\n", + (ulongest_t)INT_DYI(1), (ulongest_t)DYI_INT(5) )); + progressf(("Will make and transform sets of old nodes into new nodes. 
Set sizes: ")); + for(iPerset = stepPerset; + aPerset[iPerset] != 0; + iPerset = (iPerset + stepPerset) % cPerset) { + countWorld += aPerset[iPerset] * 2; /* 2: old + new */ + progressf(("%"PRIuLONGEST", ", aPerset[iPerset])); + } + progressf(("total: %"PRIuLONGEST".\n", countWorld)); + Insist(countWorld <= myrootExactCOUNT); + + keepCount = 0; + + for(iPerset = stepPerset; + aPerset[iPerset] != 0; + iPerset = (iPerset + stepPerset) % cPerset) { + ulongest_t j; + ulongest_t first; + ulongest_t skip; + ulongest_t count; + + perset = aPerset[iPerset]; + first = keepCount; + skip = 0; + count = perset; + progressf(("perset: %"PRIuLONGEST", first: %"PRIuLONGEST"\n", perset, first)); + + /* Make a set of olds, and a set of news */ + for(j = 0; j < 2 * perset; j++) { + ulongest_t slots = (sizeof(struct node_t) / sizeof(mps_word_t)) - 2; + /* make_dylan_vector: fills slots with INT_DYI(0) */ + die(make_dylan_vector(&v, ap, slots), "make_dylan_vector"); + node = (struct node_t *)v; + node->serial_dyi = INT_DYI(serial++); + node->id_dyi = INT_DYI(j % perset); + node->ver_dyi = INT_DYI(1 + (j >= perset)); + node->left = NULL; + node->right = NULL; + node->tour_dyi = INT_DYI(tourSerial); + node->tourIdHash_dyi = INT_DYI(0); + myrootExact[keepCount++ % myrootExactCOUNT] = (void*)v; + get(arena); + /*printf("Object %"PRIuLONGEST" at %p.\n", keepCount, (void*)v);*/ + } + v = 0; + + /* >=10? pick subset */ + if(perset >= 10) { + /* subset of [first..first+perset) */ + + skip = (rnd() % (2 * perset)); + if(skip > (perset - 1)) + skip = 0; + + count = 1 + rnd() % (2 * (perset - skip)); + if(skip + count > perset) + count = perset - skip; + + Insist(skip < perset); + Insist(count >= 1); + Insist(skip + count <= perset); + } + + /* >=10? 
sometimes build tree */ + if(perset >= 10 && count >= 4 && rnd() % 2 == 0) { + void **oldNodes = &myrootExact[first + skip]; + void **newNodes = &myrootExact[first + skip + perset]; + progressf(("Building tree in %"PRIuLONGEST" nodes.\n", count)); + for(j = 1; (2 * j) + 1 < count; j++) { + /* You might be tempted to lift some of these gnarly casts + into local variables, but if you do you will probably + create ambiguous references in stack slots and prevent + the transform from succeeding, as observed in Git commit + a7ebcbdf0. */ + ((struct node_t *)oldNodes[j])->left = oldNodes[2 * j]; + ((struct node_t *)oldNodes[j])->right = oldNodes[(2 * j) + 1]; + ((struct node_t *)newNodes[j])->left = newNodes[2 * j]; + ((struct node_t *)newNodes[j])->right = newNodes[(2 * j) + 1]; + } + } + + /* transform {count} olds into {count} news */ + before(myrootExact, countWorld); + /* after(myrootExact, countWorld, 1, 0, 2, 0); */ + progressf(("Transform [%"PRIuLONGEST"..%"PRIuLONGEST") to [%"PRIuLONGEST"..%"PRIuLONGEST").\n", + first + skip, first + skip + count, first + skip + perset, first + skip + count + perset)); + res = mps_arena_transform_objects_list(&transform_done, arena, + &myrootExact[first + skip], count, + &myrootExact[first + skip + perset], count); + Insist(res == MPS_RES_OK); + Insist(transform_done); + /* Olds decrease; news were in world already so don't increase. */ + after(myrootExact, countWorld, 1, -(longest_t)count, 2, 0); + } + } + + { + /* Transforming in various situations + * + * First, make two sets of 1024 nodes. 
+ */ + + perset = 1024; + Insist(2*perset < myrootExactCOUNT); + for(keepCount = 0; keepCount < 2*perset; keepCount++) { + ulongest_t slots = (sizeof(struct node_t) / sizeof(mps_word_t)) - 2; + /* make_dylan_vector: fills slots with INT_DYI(0) */ + die(make_dylan_vector(&v, ap, slots), "make_dylan_vector"); + node = (struct node_t *)v; + node->serial_dyi = INT_DYI(serial++); + node->id_dyi = INT_DYI(keepCount % perset); + node->ver_dyi = INT_DYI(1 + (keepCount >= perset)); + node->left = NULL; + node->right = NULL; + myrootExact[keepCount % myrootExactCOUNT] = (void*)v; + get(arena); + /* printf("Object %u at %p.\n", keepCount, (void*)v); */ + } + v = 0; + + + /* Functions before() and after() checksum the world, and verify + * that the expected transform occurred. + */ + before(myrootExact, perset); + after(myrootExact, perset, 1, 0, 2, 0); + + /* Don't transform node 0: its ref coincides with a segbase, so + * there are probably ambiguous refs to it on the stack. + * Don't transform last node either: this test code may leave an + * ambiguous reference to it on the stack. + */ + + /* Refs in root */ + /* ============ */ + old = 1; + new = 1 + perset; + Insist(myrootExact[old] != myrootExact[new]); + before(myrootExact, perset); + res = mps_arena_transform_objects_list(&transform_done, arena, &myrootExact[old], 1, &myrootExact[new], 1); + Insist(res == MPS_RES_OK); + Insist(transform_done); + Insist(myrootExact[old] == myrootExact[new]); + after(myrootExact, perset, 1, -1, 2, +1); + + /* Refs in root: ambiguous ref causes failure */ + /* ========================================== */ + old = 2; + new = 2 + perset; + Insist(myrootExact[old] != myrootExact[new]); + /* Make an ambiguous reference. This must make the transform fail. 
*/ + myrootAmbig[1] = myrootExact[old]; + before(myrootExact, perset); + res = mps_arena_transform_objects_list(&transform_done, arena, &myrootExact[old], 1, &myrootExact[new], 1); + Insist(res == MPS_RES_OK); + Insist(!transform_done); + Insist(myrootExact[old] != myrootExact[new]); + after(myrootExact, perset, 1, 0, 2, 0); + + /* Ref in an object */ + /* ================ */ + old = 3; + new = 3 + perset; + node = myrootExact[4]; + progressf(("node: %p\n", (void *)node)); + node->left = myrootExact[old]; + Insist(myrootExact[old] != myrootExact[new]); + before(myrootExact, perset); + res = mps_arena_transform_objects_list(&transform_done, arena, &myrootExact[old], 1, &myrootExact[new], 1); + Insist(res == MPS_RES_OK); + Insist(transform_done); + Insist(myrootExact[old] == myrootExact[new]); + after(myrootExact, perset, 1, -1, 2, +1); + } + + { + /* Tests with mps_transform_t + * + * **** USES OBJECTS CREATED IN PREVIOUS TEST GROUP **** + */ + + mps_transform_t t1; + mps_transform_t t2; + mps_bool_t applied = FALSE; + ulongest_t k, l; + mps_addr_t nullref1 = NULL; + mps_addr_t nullref2 = NULL; + + k = 9; /* start with this object (in set of 1024) */ + + /* Destroy */ + before(myrootExact, perset); + res = mps_transform_create(&t1, arena); + Insist(res == MPS_RES_OK); + mps_transform_destroy(t1); + t1 = NULL; + + /* Empty (no add) */ + before(myrootExact, perset); + res = mps_transform_create(&t1, arena); + Insist(res == MPS_RES_OK); + res = mps_transform_apply(&applied, t1); + Insist(res == MPS_RES_OK); + Insist(applied); + mps_transform_destroy(t1); + t1 = NULL; + after(myrootExact, perset, 1, 0, 2, 0); + + /* Identity-transform */ + before(myrootExact, perset); + res = mps_transform_create(&t1, arena); + Insist(res == MPS_RES_OK); + for(l = k + 4; k < l; k++) { + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k], 1); + } + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k], 10); + k += 10; + res = mps_transform_apply(&applied, t1); + 
Insist(res == MPS_RES_OK); + Insist(applied); + mps_transform_destroy(t1); + t1 = NULL; + after(myrootExact, perset, 1, 0, 2, 0); + + /* Mixed non-trivial, NULL- and identity-transforms */ + before(myrootExact, perset); + res = mps_transform_create(&t1, arena); + Insist(res == MPS_RES_OK); + { + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k + perset], 1); + k += 1; + /* NULL */ + mps_transform_add_oldnew(t1, &nullref1, &myrootExact[k + perset], 1); + k += 1; + /* identity */ + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k], 1); + k += 1; + /* NULL */ + mps_transform_add_oldnew(t1, &nullref2, &myrootExact[k + perset], 1); + k += 1; + } + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k + perset], 10); + k += 10; + res = mps_transform_apply(&applied, t1); + Insist(res == MPS_RES_OK); + Insist(applied); + mps_transform_destroy(t1); + t1 = NULL; + after(myrootExact, perset, 1, -11, 2, +11); + + /* Non-trivial transform */ + before(myrootExact, perset); + res = mps_transform_create(&t1, arena); + Insist(res == MPS_RES_OK); + for(l = k + 4; k < l; k++) { + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k + perset], 1); + } + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k + perset], 10); + k += 10; + res = mps_transform_apply(&applied, t1); + Insist(res == MPS_RES_OK); + Insist(applied); + mps_transform_destroy(t1); + t1 = NULL; + after(myrootExact, perset, 1, -14, 2, +14); + + /* Two transforms, first destroyed unused */ + before(myrootExact, perset); + res = mps_transform_create(&t1, arena); + Insist(res == MPS_RES_OK); + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k + perset], 10); + k += 10; + l = k; + res = mps_transform_create(&t2, arena); + Insist(res == MPS_RES_OK); + mps_transform_add_oldnew(t2, &myrootExact[l], &myrootExact[l + perset], 10); + l += 10; + res = mps_transform_apply(&applied, t2); + Insist(res == MPS_RES_OK); + Insist(applied); + mps_transform_destroy(t2); + t2 = 
NULL; + mps_transform_destroy(t1); + t1 = NULL; + after(myrootExact, perset, 1, -10, 2, +10); + + /* Two transforms, both live [-- not supported yet. RHSK 2010-12-16] */ + before(myrootExact, perset); + res = mps_transform_create(&t1, arena); + Insist(res == MPS_RES_OK); + mps_transform_add_oldnew(t1, &myrootExact[k], &myrootExact[k + perset], 10); + k += 10; + l = k; + res = mps_transform_create(&t2, arena); + Insist(res == MPS_RES_OK); + mps_transform_add_oldnew(t2, &myrootExact[l], &myrootExact[l + perset], 10); + l += 10; + res = mps_transform_apply(&applied, t2); + Insist(res == MPS_RES_OK); + Insist(applied); + mps_transform_destroy(t2); + t2 = NULL; + /* TODO: This test block does not destroy t1. Why is that? RB 2023-06-16 */ + k = l; + after(myrootExact, perset, 1, -10, 2, +10); + } + + /* Large number of objects */ + { + ulongest_t count; + mps_transform_t t; + mps_bool_t applied; + + /* LARGE! */ + perset = myrootExactCOUNT / 2; + + Insist(2*perset <= myrootExactCOUNT); + for(keepCount = 0; keepCount < 2*perset; keepCount++) { + ulongest_t slots = (sizeof(struct node_t) / sizeof(mps_word_t)) - 2; + /* make_dylan_vector: fills slots with INT_DYI(0) */ + die(make_dylan_vector(&v, ap, slots), "make_dylan_vector"); + node = (struct node_t *)v; + node->serial_dyi = INT_DYI(serial++); + node->id_dyi = INT_DYI(keepCount % perset); + node->ver_dyi = INT_DYI(1 + (keepCount >= perset)); + node->left = NULL; + node->right = NULL; + myrootExact[keepCount % myrootExactCOUNT] = (void*)v; + get(arena); + /* printf("Object %u at %p.\n", keepCount, (void*)v); */ + } + v = 0; + + /* Refs in root */ + /* ============ */ + /* don't transform 0: its ref coincides with a segbase, so causes ambig refs on stack */ + /* don't transform last: its ambig ref may be left on the stack */ + old = 1; + new = 1 + perset; + count = perset - 2; + Insist(myrootExact[old] != myrootExact[new]); + before(myrootExact, perset); + res = mps_transform_create(&t, arena); + Insist(res == 
MPS_RES_OK); + for(i = 0; i < count; i++) { + res = mps_transform_add_oldnew(t, &myrootExact[old + i], &myrootExact[new + i], 1); + Insist(res == MPS_RES_OK); + } + res = mps_transform_apply(&applied, t); + Insist(applied); + mps_transform_destroy(t); + Insist(myrootExact[old] == myrootExact[new]); + after(myrootExact, perset, 1, -(longest_t)count, 2, +(longest_t)count); + } + + printf(" ...made and kept: %"PRIuLONGEST" objects.\n", + keepCount); +} + + +static ulongest_t cols(size_t bytes) +{ + double M; /* Mebibytes */ + ulongest_t cM; /* hundredths of a Mebibyte */ + + M = (double)bytes / (1UL<<20); + cM = (ulongest_t)(M * 100 + 0.5); /* round to nearest */ + return cM; +} + +/* showStatsAscii -- present collection stats, 'graphically' + * + */ +static void showStatsAscii(size_t notcon, size_t con, size_t live, size_t alimit) +{ + ulongest_t n = cols(notcon); + ulongest_t c = cols(notcon + con); + ulongest_t l = cols(notcon + live); /* a fraction of con */ + ulongest_t a = cols(alimit); + ulongest_t count; + ulongest_t i; + + /* if we can show alimit within 200 cols, do so */ + count = (a < 200) ? a + 1 : c; + + for(i = 0; i < count; i++) { + printf( (i == a) ? "A" + : (i < n) ? "n" + : (i < l) ? "L" + : (i < c) ? "_" + : " " + ); + } + printf("\n"); +} + + +/* print_M -- print count of bytes as Mebibytes or Megabytes + * + * Print as a whole number, "m" for the decimal point, and + * then the decimal fraction. 
 *
 * Input: 208896
 * Output: (Mebibytes) 0m199
 * Output: (Megabytes) 0m209
 */
#if 0
#define bPerM (1UL << 20)  /* Mebibytes */
#else
#define bPerM (1000000UL)  /* Megabytes */
#endif
static void print_M(size_t bytes)
{
  size_t M;  /* M thingies */
  double Mfrac;  /* fraction of an M thingy */

  M = bytes / bPerM;
  Mfrac = (double)(bytes % bPerM);
  Mfrac = (Mfrac / bPerM);

  printf("%1"PRIuLONGEST"m%03.f", (ulongest_t)M, Mfrac * 1000);
}


/* showStatsText -- present collection stats
 *
 * prints:
 *   Coll End 0m137[->0m019 14%-live] (0m211-not )
 */
static void showStatsText(size_t notcon, size_t con, size_t live)
{
  double liveFrac = (double)live / (double)con;

  print_M(con);
  printf("[->");
  print_M(live);
  printf("% 3.f%%-live]", liveFrac * 100);
  printf(" (");
  print_M(notcon);
  printf("-not ");
  printf(")\n");
}

/* get -- get messages
 *
 * Drains the arena message queue, reporting GC begin/end statistics
 * and finalization messages; discards every message it receives.
 */
static void get(mps_arena_t arena)
{
  mps_message_type_t type;

  while (mps_message_queue_type(&type, arena)) {
    mps_message_t message;
    /* static: GC clocks persist across calls so that inter-collection
       intervals can be reported */
    static mps_clock_t mclockBegin = 0;
    static mps_clock_t mclockEnd = 0;
    mps_word_t *obj;
    mps_word_t objind;
    mps_addr_t objaddr;

    cdie(mps_message_get(&message, arena, type),
         "get");

    switch(type) {
      case mps_message_type_gc_start(): {
        mclockBegin = mps_message_clock(arena, message);
        printf(" %5"PRIuLONGEST": (%5"PRIuLONGEST")",
               mclockBegin, mclockBegin - mclockEnd);
        printf(" Coll Begin (%s)\n",
               mps_message_gc_start_why(arena, message));
        break;
      }
      case mps_message_type_gc(): {
        size_t con = mps_message_gc_condemned_size(arena, message);
        size_t notcon = mps_message_gc_not_condemned_size(arena, message);
        /* size_t other = 0; -- cannot determine; new method reqd */
        size_t live = mps_message_gc_live_size(arena, message);
        size_t alimit = mps_arena_reserved(arena);

        mclockEnd = mps_message_clock(arena, message);

        printf(" %5"PRIuLONGEST": (%5"PRIuLONGEST")",
               mclockEnd, mclockEnd - mclockBegin);
        printf(" Coll End ");
        showStatsText(notcon, con, live);
        if(rnd()==0) showStatsAscii(notcon, con, live, alimit);
        break;
      }
      case mps_message_type_finalization(): {
        mps_message_finalization_ref(&objaddr, arena, message);
        obj = objaddr;
        objind = DYLAN_INT_INT(DYLAN_VECTOR_SLOT(obj, 0));
        printf(" Finalization for object %"PRIuLONGEST" at %p\n", (ulongest_t)objind, objaddr);
        break;
      }
      default: {
        cdie(0, "message type");
        break;
      }
    }

    mps_message_discard(arena, message);
  }
}


/* .catalog: The Catalog client:
 *
 * This is an MPS client for testing the MPS. It simulates
 * converting a multi-page "Catalog" document from a page-description
 * into a bitmap.
 *
 * The intention is that this task will cause memory usage that is
 * fairly realistic (much more so than randomly allocated objects
 * with random interconnections. The patterns in common with real
 * clients are:
 *   - the program input and its task are 'fractal', with a
 *     self-similar hierarchy;
 *   - object allocation is prompted by each successive element of
 *     the input/task;
 *   - objects are often used to store a transformed version of the
 *     program input;
 *   - there may be several stages of transformation;
 *   - at each stage, the old object (holding the untransformed data)
 *     may become dead;
 *   - sometimes a tree of objects becomes dead once an object at
 *     some level of the hierarchy has been fully processed;
 *   - there is more than one hierarchy, and objects in different
 *     hierarchies interact.
 *
 * The entity-relationship diagram is:
 *   Catalog -< Page -< Article -< Polygon
 *                                    v
 *                                    |
 *   Palette --------------------< Colour
 *
 * The first hierarchy is a Catalog, containing Pages, each
 * containing Articles (bits of artwork etc), each composed of
 * Polygons. Each polygon has a single colour.
 *
 * The second hierarchy is a top-level Palette, containing Colours.
 * Colours (in this client) are expensive, large objects (perhaps
 * because of complex colour modelling or colour blending).
 *
 * The things that matter for their effect on MPS behaviour are:
 *   - when objects are allocated, and how big they are;
 *   - how the reference graph mutates over time;
 *   - how the mutator accesses objects (barrier hits).
 */

enum {
  CatalogRootIndex = 0,
  CatalogSig = 0x0000CA2A,  /* CATAlog */
  CatalogFix = 1,
  CatalogVar = 10,
  PageSig = 0x0000BA9E,  /* PAGE */
  PageFix = 1,
  PageVar = 100,
  ArtSig = 0x0000A621,  /* ARTIcle */
  ArtFix = 1,
  ArtVar = 100,
  PolySig = 0x0000B071,  /* POLYgon */
  PolyFix = 1,
  PolyVar = 100
};

/* CatalogCheck -- walk the Catalog hierarchy from the root, verifying
 * every signature and counting the population at each level.  A
 * DYLAN_INT(0) slot marks the end of each level's children. */
static void CatalogCheck(void)
{
  mps_word_t w;
  void *Catalog, *Page, *Art, *Poly;
  ulongest_t Catalogs = 0, Pages = 0, Arts = 0, Polys = 0;
  int i, j, k;

  /* retrieve Catalog from root */
  Catalog = myrootExact[CatalogRootIndex];
  if(!Catalog)
    return;
  Insist(DYLAN_VECTOR_SLOT(Catalog, 0) == DYLAN_INT(CatalogSig));
  Catalogs += 1;

  for(i = 0; i < CatalogVar; i += 1) {
    /* retrieve Page from Catalog */
    w = DYLAN_VECTOR_SLOT(Catalog, CatalogFix + i);
    /* printf("Page = 0x%8x\n", (unsigned int) w); */
    if(w == DYLAN_INT(0))
      break;
    Page = (void *)w;
    Insist(DYLAN_VECTOR_SLOT(Page, 0) == DYLAN_INT(PageSig));
    Pages += 1;

    for(j = 0; j < PageVar; j += 1) {
      /* retrieve Art from Page */
      w = DYLAN_VECTOR_SLOT(Page, PageFix + j);
      if(w == DYLAN_INT(0))
        break;
      Art = (void *)w;
      Insist(DYLAN_VECTOR_SLOT(Art, 0) == DYLAN_INT(ArtSig));
      Arts += 1;

      for(k = 0; k < ArtVar; k += 1) {
        /* retrieve Poly from Art */
        w = DYLAN_VECTOR_SLOT(Art, ArtFix + k);
        if(w == DYLAN_INT(0))
          break;
        Poly = (void *)w;
        Insist(DYLAN_VECTOR_SLOT(Poly, 0) == DYLAN_INT(PolySig));
        Polys += 1;
      }
    }
  }
  printf("Catalog ok with: Catalogs: %"PRIuLONGEST", Pages: %"PRIuLONGEST", Arts: %"PRIuLONGEST", Polys: %"PRIuLONGEST".\n",
         Catalogs, Pages, Arts, Polys);
}


/* CatalogDo -- make a Catalog and its tree of objects
 *
 * .catalog.broken: this code, when compiled with
 * moderate optimization, may have ambiguous interior pointers but
 * lack corresponding ambiguous base pointers to MPS objects. This
 * means the interior pointers are unmanaged references, and the
 * code goes wrong. The hack in poolamc.c#4 cures this, but not very
 * nicely. For further discussion, see:
 *
 */
static void CatalogDo(mps_arena_t arena, mps_ap_t ap)
{
  mps_word_t v;
  void *Catalog, *Page, *Art, *Poly;
  int i, j, k;

  die(make_dylan_vector(&v, ap, CatalogFix + CatalogVar), "Catalog");
  DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(CatalogSig);
  Catalog = (void *)v;

  /* store Catalog in root */
  myrootExact[CatalogRootIndex] = Catalog;
  get(arena);

  fflush(stdout);
  CatalogCheck();

  for(i = 0; i < CatalogVar; i += 1) {
    die(make_dylan_vector(&v, ap, PageFix + PageVar), "Page");
    DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(PageSig);
    Page = (void *)v;

    /* store Page in Catalog */
    DYLAN_VECTOR_SLOT(Catalog, CatalogFix + i) = (mps_word_t)Page;
    get(arena);

    printf("Page %d: make articles\n", i);
    fflush(stdout);

    for(j = 0; j < PageVar; j += 1) {
      die(make_dylan_vector(&v, ap, ArtFix + ArtVar), "Art");
      DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(ArtSig);
      Art = (void *)v;

      /* store Art in Page */
      DYLAN_VECTOR_SLOT(Page, PageFix + j) = (mps_word_t)Art;
      get(arena);

      for(k = 0; k < ArtVar; k += 1) {
        die(make_dylan_vector(&v, ap, PolyFix + PolyVar), "Poly");
        DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(PolySig);
        Poly = (void *)v;

        /* store Poly in Art */
        DYLAN_VECTOR_SLOT(Art, ArtFix + k) = (mps_word_t)Poly;
        /* get(arena); */
      }
    }
  }
  fflush(stdout);
  CatalogCheck();
}


/* MakeThing -- make an object of the size requested (in bytes)
 *
 * Any size is accepted. MakeThing may round it up (MakeThing always
 * makes a dylan vector, which has a minimum size of 8 bytes). Vector
 * slots, if any, are initialized to DYLAN_INT(0).
 *
 * After making the object, calls get(), to retrieve MPS messages.
 *
 * make_dylan_vector [fmtdytst.c] says:
 *   size = (slots + 2) * sizeof(mps_word_t);
 * That is: a dylan vector has two header words before the first slot.
 */
static void* MakeThing(mps_arena_t arena, mps_ap_t ap, size_t size)
{
  mps_word_t v;
  ulongest_t words;
  ulongest_t slots;

  /* round the byte count up to whole words, minimum two header words */
  words = (size + (sizeof(mps_word_t) - 1) ) / sizeof(mps_word_t);
  if(words < 2)
    words = 2;

  slots = words - 2;
  die(make_dylan_vector(&v, ap, slots), "make_dylan_vector");
  get(arena);

  return (void *)v;
}

/* BigdropSmall -- allocate 100 big objects, dropping each at once, and
 * keep a small object after each, referenced ambiguously ('A') or
 * exactly ('E') from the corresponding root array. */
static void BigdropSmall(mps_arena_t arena, mps_ap_t ap, size_t big, char small_ref)
{
  static ulongest_t keepCount = 0;
  ulongest_t i;

  mps_arena_park(arena);
  for(i = 0; i < 100; i++) {
    (void) MakeThing(arena, ap, big);
    if(small_ref == 'A') {
      myrootAmbig[keepCount++ % myrootAmbigCOUNT] = MakeThing(arena, ap, 1);
    } else if(small_ref == 'E') {
      myrootExact[keepCount++ % myrootExactCOUNT] = MakeThing(arena, ap, 1);
    } else {
      cdie(0, "BigdropSmall: small must be 'A' or 'E'.\n");
    }
  }
}


/* df -- diversity function
 *
 * Either deterministic based on "number", or 'random' (ie. call rnd).
 */

static ulongest_t df(unsigned randm, ulongest_t number)
{
  if(randm == 0) {
    return number;
  } else {
    return rnd();
  }
}

/* Make -- allocate objects until keepTotal of them have been kept,
 * keeping roughly 1 in keep1in, storing kept refs cyclically in the
 * first keepRootspace exact roots; sizemethod selects the object-size
 * distribution (0: minimal; 1, 2: mostly-minimal with rare huge
 * vectors at different periods). */
static void Make(mps_arena_t arena, mps_ap_t ap, unsigned randm, unsigned keep1in, unsigned keepTotal, unsigned keepRootspace, unsigned sizemethod)
{
  unsigned keepCount = 0;
  ulongest_t objCount = 0;

  Insist(keepRootspace <= myrootExactCOUNT);

  objCount = 0;
  while(keepCount < keepTotal) {
    mps_word_t v;
    unsigned slots = 2;  /* minimum */
    switch(sizemethod) {
      case 0: {
        /* minimum */
        slots = 2;
        break;
      }
      case 1: {
        slots = 2;
        if(df(randm, objCount) % 10000 == 0) {
          printf("*");
          slots = 300000;
        }
        break;
      }
      case 2: {
        slots = 2;
        if(df(randm, objCount) % 6661 == 0) {  /* prime */
          printf("*");
          slots = 300000;
        }
        break;
      }
      default: {
        printf("bad script command: sizemethod %u unknown.\n", sizemethod);
        cdie(FALSE, "bad script command!");
        break;
      }
    }
    die(make_dylan_vector(&v, ap, slots), "make_dylan_vector");
    DYLAN_VECTOR_SLOT(v, 0) = DYLAN_INT(objCount);
    DYLAN_VECTOR_SLOT(v, 1) = (mps_word_t)NULL;
    objCount++;
    if(df(randm, objCount) % keep1in == 0) {
      /* keep this one */
      myrootExact[df(randm, keepCount) % keepRootspace] = (void*)v;
      keepCount++;
    }
    get(arena);
  }
  printf(" ...made and kept: %u objects, storing cyclically in "
         "first %u roots "
         "(actually created %"PRIuLONGEST" objects, in accord with "
         "keep-1-in %u).\n",
         keepCount, keepRootspace, objCount, keep1in);
}


/* Rootdrop -- NULL out an entire root array, making its referents
 * unreachable from that root ('A' = ambiguous, 'E' = exact). */
static void Rootdrop(char rank_char)
{
  ulongest_t i;

  if(rank_char == 'A') {
    for(i = 0; i < myrootAmbigCOUNT; ++i) {
      myrootAmbig[i] = NULL;
    }
  } else if(rank_char == 'E') {
    for(i = 0; i < myrootExactCOUNT; ++i) {
      myrootExact[i] = NULL;
    }
  } else {
    cdie(0, "Rootdrop: rank must be 'A' or 'E'.\n");
  }
}

#if 0
/* stackwipe -- overwrite a large stretch of stack with non-pointer
 * values, to scrub stale ambiguous references (currently disabled). */
#define stackwipedepth 50000
static void stackwipe(void)
{
  unsigned iw;
  ulongest_t aw[stackwipedepth];

  /* http://xkcd.com/710/ */
  /* I don't want my friends to stop calling; I just want the */
  /* compiler to stop optimising away my code. */

  /* Do you ever get two even numbers next to each other? Hmmmm :-) */
  for(iw = 0; iw < stackwipedepth; iw++) {
    if((iw & 1) == 0) {
      aw[iw] = 1;
    } else {
      aw[iw] = 0;
    }
  }
  for(iw = 1; iw < stackwipedepth; iw++) {
    if(aw[iw - 1] + aw[iw] != 1) {
      printf("Errrr....\n");
      break;
    }
  }
}
#endif

/* StackScan -- create (on) or destroy (off) the ambiguous
 * stack-and-registers root; Insists that the calls alternate. */
static void StackScan(mps_arena_t arena, int on)
{
  if(on) {
    Insist(root_stackreg == NULL);
    die(mps_root_create_reg(&root_stackreg, arena,
                            mps_rank_ambig(), (mps_rm_t)0, stack_thr,
                            mps_stack_scan_ambig, stack_start, 0),
        "root_stackreg");
    Insist(root_stackreg != NULL);
  } else {
    Insist(root_stackreg != NULL);
    mps_root_destroy(root_stackreg);
    root_stackreg = NULL;
    Insist(root_stackreg == NULL);
  }
}


/* checksi -- check count of sscanf items is correct
 */

static void checksi(int si, int si_shouldBe, const char *script, const char *scriptAll)
{
  if(si != si_shouldBe) {
    printf("bad script command (sscanf found wrong number of params) %s (full script %s).\n", script, scriptAll);
    cdie(FALSE, "bad script command!");
  }
}

/* testscriptC -- actually runs a test script
 *
 * Interprets the script command by command (dispatching on the first
 * letter, then parsing the full command with sscanf); calls get()
 * after every command to drain the message queue.
 */
static void testscriptC(mps_arena_t arena, mps_ap_t ap, const char *script)
{
  const char *scriptAll = script;
  int si, sb;  /* sscanf items, sscanf bytes */

  while(*script != '\0') {
    switch(*script) {
      case 'C': {
        si = sscanf(script, "Collect%n",
                    &sb);
        checksi(si, 0, script, scriptAll);
        script += sb;
        printf(" Collect\n");
        /* stackwipe(); */
        mps_arena_collect(arena);
        mps_arena_release(arena);
        break;
      }
      case 'T': {
        si = sscanf(script, "Transform%n",
                    &sb);
        checksi(si, 0, script, scriptAll);
        script += sb;
        printf(" Transform\n");
        Transform(arena, ap);
        break;
      }
      case 'K': {
        si = sscanf(script, "Katalog()%n",
                    &sb);
        checksi(si, 0, script, scriptAll);
        script += sb;
        printf(" Katalog()\n");
        CatalogDo(arena, ap);
        break;
      }
      case 'B': {
        ulongest_t big = 0;
        char small_ref = ' ';
        si = sscanf(script, "BigdropSmall(big %"SCNuLONGEST", small %c)%n",
                    &big, &small_ref, &sb);
        checksi(si, 2, script, scriptAll);
        script += sb;
        printf(" BigdropSmall(big %"PRIuLONGEST", small %c)\n", big, small_ref);
        BigdropSmall(arena, ap, big, small_ref);
        break;
      }
      case 'M': {
        unsigned randm = 0;
        unsigned keep1in = 0;
        unsigned keepTotal = 0;
        unsigned keepRootspace = 0;
        unsigned sizemethod = 0;
        si = sscanf(script, "Make(random %u, keep-1-in %u, keep %u, rootspace %u, sizemethod %u)%n",
                    &randm, &keep1in, &keepTotal, &keepRootspace, &sizemethod, &sb);
        checksi(si, 5, script, scriptAll);
        script += sb;
        printf(" Make(random %u, keep-1-in %u, keep %u, rootspace %u, sizemethod %u).\n",
               randm, keep1in, keepTotal, keepRootspace, sizemethod);
        Make(arena, ap, randm, keep1in, keepTotal, keepRootspace, sizemethod);
        break;
      }
      case 'R': {
        char drop_ref = ' ';
        si = sscanf(script, "Rootdrop(rank %c)%n",
                    &drop_ref, &sb);
        checksi(si, 1, script, scriptAll);
        script += sb;
        printf(" Rootdrop(rank %c)\n", drop_ref);
        Rootdrop(drop_ref);
        break;
      }
      case 'S': {
        unsigned on = 0;
        si = sscanf(script, "StackScan(%u)%n",
                    &on, &sb);
        checksi(si, 1, script, scriptAll);
        script += sb;
        printf(" StackScan(%u)\n", on);
        StackScan(arena, on != 0);
        break;
      }
      case 'Z': {
        ulongest_t s0;
        si = sscanf(script, "ZRndStateSet(%"SCNuLONGEST")%n",
                    &s0, &sb);
        checksi(si, 1, script, scriptAll);
        script += sb;
        printf(" ZRndStateSet(%"PRIuLONGEST")\n", s0);
        rnd_state_set((unsigned long)s0);
        break;
      }
      case ' ':
      case ',':
      case '.': {
        /* separators between script commands: skip */
        script++;
        break;
      }
      default: {
        printf("unknown script command '%c' (script %s).\n",
               *script, scriptAll);
        cdie(FALSE, "unknown script command!");
        return;
      }
    }
    get(arena);
  }

}


/* testscriptB -- create pools and objects; call testscriptC
 *
 * Is called via mps_tramp, so
matches mps_tramp_t function prototype, + * and use trampDataStruct to pass parameters. + */ + +typedef struct trampDataStruct { + mps_arena_t arena; + mps_thr_t thr; + const char *script; +} trampDataStruct; + +static void *testscriptB(void *arg, size_t s) +{ + trampDataStruct trampData; + mps_arena_t arena; + mps_thr_t thr; + const char *script; + mps_fmt_t fmt; + mps_chain_t chain; + mps_pool_t amc; + int i; + mps_root_t root_table_Ambig; + mps_root_t root_table_Exact; + mps_ap_t ap; + void *stack_starts_here; /* stack scanning starts here */ + + Insist(s == sizeof(trampDataStruct)); + trampData = *(trampDataStruct*)arg; + arena = trampData.arena; + thr = trampData.thr; + script = trampData.script; + + die(mps_fmt_create_A(&fmt, arena, dylan_fmt_A()), "fmt_create"); + die(mps_chain_create(&chain, arena, genCOUNT, testChain), "chain_create"); + die(mps_pool_create(&amc, arena, mps_class_amc(), fmt, chain), + "pool_create amc"); + + for(i = 0; i < myrootAmbigCOUNT; ++i) { + myrootAmbig[i] = NULL; + } + die(mps_root_create_table(&root_table_Ambig, arena, mps_rank_ambig(), (mps_rm_t)0, + myrootAmbig, (size_t)myrootAmbigCOUNT), + "root_create - ambig"); + + for(i = 0; i < myrootExactCOUNT; ++i) { + myrootExact[i] = NULL; + } + die(mps_root_create_table(&root_table_Exact, arena, mps_rank_exact(), (mps_rm_t)0, + myrootExact, (size_t)myrootExactCOUNT), + "root_create - exact"); + + die(mps_ap_create(&ap, amc, mps_rank_exact()), "ap_create"); + + /* root_stackreg: stack & registers are ambiguous roots = mutator's workspace */ + stack_start = &stack_starts_here; + stack_thr = thr; + die(mps_root_create_reg(&root_stackreg, arena, + mps_rank_ambig(), (mps_rm_t)0, stack_thr, + mps_stack_scan_ambig, stack_start, 0), + "root_stackreg"); + + + mps_message_type_enable(arena, mps_message_type_gc_start()); + mps_message_type_enable(arena, mps_message_type_gc()); + mps_message_type_enable(arena, mps_message_type_finalization()); + + testscriptC(arena, ap, script); + + printf(" 
Destroy roots, pools, arena etc.\n\n"); + mps_root_destroy(root_stackreg); + mps_ap_destroy(ap); + mps_root_destroy(root_table_Exact); + mps_root_destroy(root_table_Ambig); + mps_pool_destroy(amc); + mps_chain_destroy(chain); + mps_fmt_destroy(fmt); + + return NULL; +} + + +/* testscriptA -- create arena, thr; call testscriptB + */ +static void testscriptA(const char *script) +{ + mps_arena_t arena; + int si, sb; /* sscanf items, sscanf bytes */ + ulongest_t arenasize = 0; + mps_thr_t thr; + trampDataStruct trampData; + + si = sscanf(script, "Arena(size %"SCNuLONGEST")%n", &arenasize, &sb); + cdie(si == 1, "bad script command: Arena(size %%"PRIuLONGEST")"); + script += sb; + printf(" Create arena, size = %"PRIuLONGEST".\n", arenasize); + + /* arena */ + die(mps_arena_create(&arena, mps_arena_class_vm(), arenasize), + "arena_create"); + + /* thr: used to stop/restart multiple threads */ + die(mps_thread_reg(&thr, arena), "thread"); + + /* call testscriptB! */ + trampData.arena = arena; + trampData.thr = thr; + trampData.script = script; + testscriptB(&trampData, sizeof trampData); + + mps_thread_dereg(thr); + mps_arena_destroy(arena); +} + + +/* main -- runs various test scripts + * + */ +int main(int argc, char *argv[]) +{ + randomize(argc, argv); + mps_lib_assert_fail_install(assert_die); + + /* 1<<19 == 524288 == 1/2 Mebibyte */ + /* 16<<20 == 16777216 == 16 Mebibyte */ + + testscriptA("Arena(size 500000000), " + "Transform" + /*", Collect, Rootdrop(rank E), Collect, Collect"*/ + "."); + + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2011-2022 Ravenbrook Limited . + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/configure b/mps/configure new file mode 100755 index 00000000000..9f06a0dc115 --- /dev/null +++ b/mps/configure @@ -0,0 +1,4860 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for Memory Pool System Kit release/1.118.0. +# +# Report bugs to . +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. 
Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. 
+BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. 
+fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. 
+BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and +$0: mps-questions@ravenbrook.com about your system, +$0: including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. 
+as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? 
-eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='Memory Pool System Kit' +PACKAGE_TARNAME='mps-kit' +PACKAGE_VERSION='release/1.118.0' +PACKAGE_STRING='Memory Pool System Kit release/1.118.0' +PACKAGE_BUGREPORT='mps-questions@ravenbrook.com' +PACKAGE_URL='https://www.ravenbrook.com/project/mps/' + +ac_unique_file="code/mps.c" +# Factoring default headers for most tests. 
+ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='LTLIBOBJS +LIBOBJS +EXTRA_TARGETS +TEST_TARGET +INSTALL_TARGET +CLEAN_TARGET +BUILD_TARGET +MPS_BUILD_NAME +MPS_ARCH_NAME +MPS_OS_NAME +MAKE +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +EGREP +GREP +CPP +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CPP' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. 
+# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. 
+ + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. + with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | 
--program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. 
+ with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. 
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures Memory Pool System Kit release/1.118.0 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. 
+ +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/mps-kit] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of Memory Pool System Kit release/1.118.0:";; + esac + cat <<\_ACEOF + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if + you have headers in a nonstandard directory + CPP C preprocessor + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to . 
+Memory Pool System Kit home page: . +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? 
+ cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +Memory Pool System Kit configure release/1.118.0 +generated by GNU Autoconf 2.69 + +Copyright (C) 2012 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES +# --------------------------------------------- +# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR +# accordingly. 
+ac_fn_c_check_decl () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + as_decl_name=`echo $2|sed 's/ *(.*//'` + as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 +$as_echo_n "checking whether $as_decl_name is declared... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +#ifndef $as_decl_name +#ifdef __cplusplus + (void) $as_decl_use; +#else + (void) $as_decl_name; +#endif +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_decl + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! 
-s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_run + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. 
+ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists, giving a warning if it cannot be compiled using +# the include files in INCLUDES and setting the cache variable VAR +# accordingly. +ac_fn_c_check_header_mongrel () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} +( $as_echo "## ------------------------------------------- ## +## Report this to mps-questions@ravenbrook.com ## +## ------------------------------------------- ##" + ) | sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_mongrel +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by Memory Pool System Kit $as_me release/1.118.0, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. 
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. 
+ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. 
## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. 
+ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. 
Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_aux_dir= +for ac_dir in tool/autoconf/build-aux "$srcdir"/tool/autoconf/build-aux; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in tool/autoconf/build-aux \"$srcdir\"/tool/autoconf/build-aux" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + + + +# Checks for programs. 
+ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... 
+ 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. 
+for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... 
" >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... 
" >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? 
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. 
But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + + +# Check for Clang/LLVM. + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. 
+if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. 
+ # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... 
" >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +ac_fn_c_check_decl "$LINENO" "__clang__" "ac_cv_have_decl___clang__" "$ac_includes_default" +if test "x$ac_cv_have_decl___clang__" = xyes; then : + CLANG=yes +else + CLANG=no +fi + + +# These flags aren't used for building the MPS, but for sample programs. +CFLAGS_GC="-ansi -pedantic -Wall -Werror -Wpointer-arith \ + -Wstrict-prototypes -Wmissing-prototypes \ + -Winline -Waggregate-return -Wnested-externs \ + -Wcast-qual -Wstrict-aliasing=2 -O -g3 -pthread" +CFLAGS_LL="$CFLAGS_GC" + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? 
"cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target platform" >&5 +$as_echo_n "checking target platform... " >&6; } +BUILD_TARGET=build-via-make +CLEAN_TARGET=clean-make-build +INSTALL_TARGET=install-make-build +TEST_TARGET=test-make-build +case $host/$CLANG in + aarch64-*-linux*/no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux ARM64" >&5 +$as_echo "Linux ARM64" >&6; } + MPS_OS_NAME=li + MPS_ARCH_NAME=a6 + MPS_BUILD_NAME=gc + PFMCFLAGS="$CFLAGS_GC" + ;; + aarch64-*-linux*/yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux ARM64" >&5 +$as_echo "Linux ARM64" >&6; } + MPS_OS_NAME=li + MPS_ARCH_NAME=a6 + MPS_BUILD_NAME=ll + PFMCFLAGS="$CFLAGS_LL" + ;; + i*86-*-linux*/no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux x86" >&5 +$as_echo "Linux x86" >&6; } + MPS_OS_NAME=li + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc + PFMCFLAGS="$CFLAGS_GC" + ;; + x86_64-*-linux*/no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux x86_64" >&5 +$as_echo "Linux x86_64" >&6; } + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc + PFMCFLAGS="$CFLAGS_GC" + ;; + x86_64-*-linux*/yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux x86_64" >&5 +$as_echo "Linux x86_64" >&6; } + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + PFMCFLAGS="$CFLAGS_LL" + ;; + aarch64-*-darwin*/*) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Mac OS X ARM64" >&5 +$as_echo "Mac OS X ARM64" >&6; } + MPS_OS_NAME=xc + MPS_ARCH_NAME=a6 + MPS_BUILD_NAME=ll + BUILD_TARGET=build-via-xcode + CLEAN_TARGET=clean-xcode-build + INSTALL_TARGET=install-xcode-build + TEST_TARGET=test-xcode-build + 
PFMCFLAGS="$CFLAGS_LL" + ;; + i*86-*-darwin*/*) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Mac OS X x86" >&5 +$as_echo "Mac OS X x86" >&6; } + MPS_OS_NAME=xc + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=ll + BUILD_TARGET=build-via-xcode + CLEAN_TARGET=clean-xcode-build + INSTALL_TARGET=install-xcode-build + TEST_TARGET=test-xcode-build + PFMCFLAGS="$CFLAGS_LL" + ;; + x86_64-apple-darwin*/*) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Mac OS X x86_64" >&5 +$as_echo "Mac OS X x86_64" >&6; } + MPS_OS_NAME=xc + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + BUILD_TARGET=build-via-xcode + CLEAN_TARGET=clean-xcode-build + INSTALL_TARGET=install-xcode-build + TEST_TARGET=test-xcode-build + PFMCFLAGS="$CFLAGS_LL" + ;; + i*86-*-freebsd*/no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86" >&5 +$as_echo "FreeBSD x86" >&6; } + MPS_OS_NAME=fr + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_GC" + ;; + amd64-*-freebsd*/yes | x86_64-*-freebsd*/yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86_64" >&5 +$as_echo "FreeBSD x86_64" >&6; } + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_GC" + ;; + amd64-*-freebsd*/no | x86_64-*-freebsd*/no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86_64" >&5 +$as_echo "FreeBSD x86_64" >&6; } + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_GC" + ;; + i*86-*-freebsd*/yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86" >&5 +$as_echo "FreeBSD x86" >&6; } + MPS_OS_NAME=fr + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=ll + # Need /usr/local/include in order to find 
sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_LL" + ;; + amd64-*-freebsd*/no | x86_64-*-freebsd*/no) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86_64" >&5 +$as_echo "FreeBSD x86_64" >&6; } + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_LL" + ;; + *) + as_fn_error $? "MPS does not support this platform out of the box. See manual/build.txt" "$LINENO" 5 +esac + +for ac_prog in gnumake gmake make +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_MAKE+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$MAKE"; then + ac_cv_prog_MAKE="$MAKE" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_MAKE="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MAKE=$ac_cv_prog_MAKE +if test -n "$MAKE"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAKE" >&5 +$as_echo "$MAKE" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$MAKE" && break +done +test -n "$MAKE" || MAKE="as_fn_error $? "Unable to find a make program." "$LINENO" 5" + +if ! $MAKE --version | grep -q "GNU" 2> /dev/null; then + as_fn_error $? 
"MPS requires GNU make to build from configure, but see manual/build.txt" "$LINENO" 5 +fi + +EXTRA_TARGETS="mpseventcnv mpseventpy mpseventtxt" +ac_fn_c_check_header_mongrel "$LINENO" "sqlite3.h" "ac_cv_header_sqlite3_h" "$ac_includes_default" +if test "x$ac_cv_header_sqlite3_h" = xyes; then : + EXTRA_TARGETS="$EXTRA_TARGETS mpseventsql" +fi + + + +# Put platform compiler flags like -ansi -pedantic into CFLAGS only +# after checking for sqlite3.h -- that header doesn't compile with +# those flags. +CFLAGS="$CFLAGS $PFMCFLAGS" + + + + + + + + + + + + +ac_config_files="$ac_config_files Makefile example/scheme/Makefile" + + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. 
+( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. 
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +# Transform confdefs.h into DEFS. +# Protect against shell expansion while executing Makefile rules. +# Protect against Makefile macro expansion. +# +# If the first sed substitution is executed (which looks for macros that +# take arguments), then branch to the quote section. Otherwise, +# look for a macro that doesn't take arguments. +ac_script=' +:mline +/\\$/{ + N + s,\\\n,, + b mline +} +t clear +:clear +s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g +t quote +s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g +t quote +b any +:quote +s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g +s/\[/\\&/g +s/\]/\\&/g +s/\$/$$/g +H +:any +${ + g + s/^\n// + s/\n/ /g + p +} +' +DEFS=`sed -n "$ac_script" confdefs.h` + + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. 
+ +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. 
+if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. 
Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? 
-eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. 
## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by Memory Pool System Kit $as_me release/1.118.0, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + +Configuration files: +$config_files + +Report bugs to . +Memory Pool System Kit home page: ." 
+ +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +Memory Pool System Kit config.status release/1.118.0 +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h | --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? 
"unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "example/scheme/Makefile") CONFIG_FILES="$CONFIG_FILES example/scheme/Makefile" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. 
+# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + + +eval set X " :F $CONFIG_FILES " +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. 
+ case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. 
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? 
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + + + + esac + +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. 
+# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + +echo 1>&2 "CONFIGURE/MAKE IS NOT THE BEST WAY TO BUILD THE MPS +-- see for alternative approaches" + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/configure.ac b/mps/configure.ac new file mode 100644 index 00000000000..e10e8322efc --- /dev/null +++ b/mps/configure.ac @@ -0,0 +1,228 @@ +# configure.ac -- autoconf configuration for the MPS -*- Autoconf -*- +# +# $Id$ +# Copyright (C) 2012-2020 Ravenbrook Limited. See end of file for license. +# +# YOU DON'T NEED AUTOCONF TO BUILD THE MPS +# This is just here for people who want or expect a configure script. +# See [Building the Memory Pool System](manual/build.txt) for how best +# to build and integrate the MPS. +# +# Generate the configure script with +# +# autoreconf -vif +# + +AC_PREREQ([2.50]) +# http://www.gnu.org/software/autoconf/manual/autoconf.html#Initializing-configure +AC_INIT([Memory Pool System Kit], + m4_esyscmd_s([sed -n '/^#define MPS_RELEASE "\(.*\)"$/{s/.*"\(.*\)"/\1/;p;}' code/version.c]), + [mps-questions@ravenbrook.com], + [mps-kit], + [https://www.ravenbrook.com/project/mps/]) +AC_CONFIG_AUX_DIR(tool/autoconf/build-aux) +AC_CONFIG_SRCDIR([code/mps.c]) + +# Checks for programs. +AC_PROG_CC +AC_LANG_C +AC_PROG_INSTALL + +# Check for Clang/LLVM. 
+AC_CHECK_DECL([__clang__],[CLANG=yes],[CLANG=no]) + +# These flags aren't used for building the MPS, but for sample programs. +CFLAGS_GC="-ansi -pedantic -Wall -Werror -Wpointer-arith \ + -Wstrict-prototypes -Wmissing-prototypes \ + -Winline -Waggregate-return -Wnested-externs \ + -Wcast-qual -Wstrict-aliasing=2 -O -g3 -pthread" +CFLAGS_LL="$CFLAGS_GC" + +AC_CANONICAL_HOST +AC_MSG_CHECKING([target platform]) +BUILD_TARGET=build-via-make +CLEAN_TARGET=clean-make-build +INSTALL_TARGET=install-make-build +TEST_TARGET=test-make-build +case $host/$CLANG in + aarch64-*-linux*/no) + AC_MSG_RESULT([Linux ARM64]) + MPS_OS_NAME=li + MPS_ARCH_NAME=a6 + MPS_BUILD_NAME=gc + PFMCFLAGS="$CFLAGS_GC" + ;; + aarch64-*-linux*/yes) + AC_MSG_RESULT([Linux ARM64]) + MPS_OS_NAME=li + MPS_ARCH_NAME=a6 + MPS_BUILD_NAME=ll + PFMCFLAGS="$CFLAGS_LL" + ;; + i*86-*-linux*/no) + AC_MSG_RESULT([Linux x86]) + MPS_OS_NAME=li + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc + PFMCFLAGS="$CFLAGS_GC" + ;; + x86_64-*-linux*/no) + AC_MSG_RESULT([Linux x86_64]) + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc + PFMCFLAGS="$CFLAGS_GC" + ;; + x86_64-*-linux*/yes) + AC_MSG_RESULT([Linux x86_64]) + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + PFMCFLAGS="$CFLAGS_LL" + ;; + aarch64-*-darwin*/*) + AC_MSG_RESULT([Mac OS X ARM64]) + MPS_OS_NAME=xc + MPS_ARCH_NAME=a6 + MPS_BUILD_NAME=ll + BUILD_TARGET=build-via-xcode + CLEAN_TARGET=clean-xcode-build + INSTALL_TARGET=install-xcode-build + TEST_TARGET=test-xcode-build + PFMCFLAGS="$CFLAGS_LL" + ;; + i*86-*-darwin*/*) + AC_MSG_RESULT([Mac OS X x86]) + MPS_OS_NAME=xc + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=ll + BUILD_TARGET=build-via-xcode + CLEAN_TARGET=clean-xcode-build + INSTALL_TARGET=install-xcode-build + TEST_TARGET=test-xcode-build + PFMCFLAGS="$CFLAGS_LL" + ;; + x86_64-apple-darwin*/*) + AC_MSG_RESULT([Mac OS X x86_64]) + MPS_OS_NAME=xc + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + BUILD_TARGET=build-via-xcode + CLEAN_TARGET=clean-xcode-build + 
INSTALL_TARGET=install-xcode-build + TEST_TARGET=test-xcode-build + PFMCFLAGS="$CFLAGS_LL" + ;; + i*86-*-freebsd*/no) + AC_MSG_RESULT([FreeBSD x86]) + MPS_OS_NAME=fr + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_GC" + ;; + amd64-*-freebsd*/yes | x86_64-*-freebsd*/yes) + AC_MSG_RESULT([FreeBSD x86_64]) + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_GC" + ;; + amd64-*-freebsd*/no | x86_64-*-freebsd*/no) + AC_MSG_RESULT([FreeBSD x86_64]) + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_GC" + ;; + i*86-*-freebsd*/yes) + AC_MSG_RESULT([FreeBSD x86]) + MPS_OS_NAME=fr + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=ll + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_LL" + ;; + amd64-*-freebsd*/no | x86_64-*-freebsd*/no) + AC_MSG_RESULT([FreeBSD x86_64]) + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll + # Need /usr/local/include in order to find sqlite3.h + CFLAGS="-I/usr/local/include" + CPP="$CC -I/usr/local/include -E" + PFMCFLAGS="$CFLAGS_LL" + ;; + *) + AC_MSG_ERROR([MPS does not support this platform out of the box. See manual/build.txt]) +esac + +AC_CHECK_PROGS([MAKE],[gnumake gmake make],[AC_MSG_ERROR([Unable to find a make program.])]) +if ! 
$MAKE --version | grep -q "GNU" 2> /dev/null; then + AC_MSG_ERROR([MPS requires GNU make to build from configure, but see manual/build.txt]) +fi + +EXTRA_TARGETS="mpseventcnv mpseventpy mpseventtxt" +AC_CHECK_HEADER([sqlite3.h], [EXTRA_TARGETS="$EXTRA_TARGETS mpseventsql"]) + +# Put platform compiler flags like -ansi -pedantic into CFLAGS only +# after checking for sqlite3.h -- that header doesn't compile with +# those flags. +CFLAGS="$CFLAGS $PFMCFLAGS" + +AC_SUBST(MPS_OS_NAME) +AC_SUBST(MPS_ARCH_NAME) +AC_SUBST(MPS_BUILD_NAME) +AC_SUBST(BUILD_TARGET) +AC_SUBST(CLEAN_TARGET) +AC_SUBST(INSTALL_TARGET) +AC_SUBST(TEST_TARGET) +AC_SUBST(EXTRA_TARGETS) +AC_SUBST(CFLAGS) +AC_SUBST(LDFLAGS) +AC_SUBST(CPPFLAGS) +AC_CONFIG_FILES(Makefile example/scheme/Makefile) + +AC_OUTPUT + +echo 1>&2 "CONFIGURE/MAKE IS NOT THE BEST WAY TO BUILD THE MPS +-- see for alternative approaches" + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2020 Ravenbrook Limited . +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/mps/contributing.rst b/mps/contributing.rst
new file mode 100644
index 00000000000..38de05c8c6c
--- /dev/null
+++ b/mps/contributing.rst
@@ -0,0 +1,43 @@
+=======================
+Contributing to the MPS
+=======================
+
+We are very happy to receive contributions to the Memory Pool System so
+that we can improve it for everyone.
+
+
+Review
+------
+
+The MPS is highly engineered and rigorously controlled in order to
+prevent defects. This approach has led to an extremely small number of
+bugs in production since its first commercial use in 1997. There are a
+fairly large number of rules, both low- and high-level, that your code
+must follow in order to be accepted. These rules are the result of
+continuous process improvement to prevent defects. Unfortunately, we do
+not have many of them published at present. We apologise if you find it
+frustrating that we do not accept your changes as they are.
+
+The style guide in guide.impl.c.format_ contains basic rules for style.
+
+.. _guide.impl.c.format: design/guide.impl.c.format.txt
+
+
+Licensing
+---------
+
+All contributions are deemed to have been made under the same license
+as the material to which the contribution is made, unless you
+expressly state otherwise in your contribution request. In nearly all
+cases this is the `BSD 2-clause license
+`_. You retain the
+copyright to such contributions.
+ + +Thank you +--------- + +Finally, thank you for making the MPS more useful to everyone. + +.. validated with rst2html -v contributing.rst > /dev/null +.. end diff --git a/mps/design/.dotted.tags b/mps/design/.dotted.tags new file mode 100644 index 00000000000..e69de29bb2d diff --git a/mps/design/abq.txt b/mps/design/abq.txt new file mode 100644 index 00000000000..5d293e17edb --- /dev/null +++ b/mps/design/abq.txt @@ -0,0 +1,146 @@ +.. mode: -*- rst -*- + +Fixed-length queues +=================== + +:Tag: design.mps.abq +:Author: Gareth Rees +:Date: 2013-05-20 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: fixed-length queues; design + + +Introduction +------------ + +_`.intro`: This is the design of the ABQ module, which implements a +fixed-length queue of small objects. + +_`.readership`: This document is intended for any MM developer. + +_`.name`: The name ABQ originally stood for "Available Block Queue" as +the module is used by the MVT pool. + + +Requirements +------------ + +_`.req.push`: Clients can efficiently push new elements onto the queue. + +_`.req.pop`: Clients can efficiently pop elements from the queue. + +_`.req.empty`: Clients can efficiently test whether the queue is empty. + +_`.req.abstract`: The ABQ module does not know anything about the +elements in the queue other than their size. + +_`.req.delete`: Clients can delete elements from the queue. (Note: not necessarily efficiently.) + +_`.req.iterate`: Clients can iterate over elements in the queue. + + +Interface +--------- + +``typedef ABQStruct *ABQ`` + +``ABQ`` is the type of a queue. It is an alias for ``ABQStruct *``. +``ABQStruct`` is defined in the header so that it can be inlined in +client structures: clients must not depend on its implementation +details. + +``void ABQInit(Arena arena, ABQ abq, void *owner, Count elements, Size elementSize)`` + +Initialize the queue ``abq``. 
The parameter ``arena`` is the arena +whose control pool should be used to allocate the memory for the +queue; ``owner`` is passed to ``MeterInit()`` for the statistics; +``elements`` is the maximum number of elements that can be stored in +the queue; and ``elementSize`` is the size of each element. + +``void ABQFinish(Arena arena, ABQ abq)`` + +Finish ``abq`` and free all resources associated with it. + +``Bool ABQPush(ABQ abq, void *element)`` + +If the queue is full, leave it unchanged and return ``FALSE``. +Otherwise, push ``element`` on to the queue and return ``TRUE``. + +``Bool ABQPop(ABQ abq, void *elementReturn)`` + +If the queue is empty, return ``FALSE``. Otherwise, copy the first +element on the queue into the memory pointed to by ``elementReturn``, +remove the element from the queue, and return ``TRUE``. + +``Bool ABQPeek(ABQ abq, void *elementReturn)`` + +If the queue is empty, return ``FALSE``. Otherwise, copy the first +element on the queue into the memory pointed to by ``elementReturn`` +and return ``TRUE``. (This is the same as ``ABQPop()`` except that +the queue is unchanged.) + +``Bool ABQIsEmpty(ABQ abq)`` + +If the queue is empty, return ``TRUE``, otherwise return ``FALSE``. + +``Bool ABQIsFull(ABQ abq)`` + +If the queue is full, return ``TRUE``, otherwise return ``FALSE``. + +``Count ABQDepth(ABQ abq)`` + +Return the number of elements in the queue. + +``typedef Bool (*ABQVisitor)(Bool *deleteReturn, void *element, void *closure)`` + +A callback function for ``ABQIterate()``. The parameter ``element`` is +an element in the queue, and ``closure`` is the value originally +passed to ``ABQIterate()``. This function must set ``*deleteReturn`` +to ``FALSE`` if ``element`` must be kept in the queue, or ``TRUE`` if +``element`` must be deleted from the queue. It must return ``TRUE`` +if the iteration must continue, or ``FALSE`` if the iteration must +stop after processing ``element``. 
+ +``void ABQIterate(ABQ abq, ABQVisitor visitor, void *closure)`` + +Call ``visitor`` for each element in the queue, passing the element +and ``closure``. See ``ABQVisitor`` for details. + + +Document History +---------------- + +- 2013-05-20 GDR_ Created. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/alloc-frame.txt b/mps/design/alloc-frame.txt new file mode 100644 index 00000000000..7da94c9ffd2 --- /dev/null +++ b/mps/design/alloc-frame.txt @@ -0,0 +1,429 @@ +.. 
mode: -*- rst -*- + +Allocation frame protocol +========================= + +:Tag: design.mps.alloc-frame +:Author: Tony Mann +:Date: 1998-10-02 +:Status: incomplete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: allocation frames; design + + +Introduction +------------ + +_`.intro`: This document explains the design of the support for +allocation frames in MPS. + +_`.readership`: This document is intended for any MM developer. + +_`.overview`: Allocation frames are used for implementing stack pools; +each stack frame corresponds to an allocation frame. Allocation frames +may also be suitable for implementing other sub-pool groupings, such +as generations and ramp allocation patterns. + +_`.overview.ambition`: We now believe this to be a design that loses +too many advantages of stack allocation for questionable gains. The +requirements are almost entirely based on unanalysed anecdote, instead +of actual clients. + +.. note:: + + We plan to supersede this with a stack pool design at some point + in the future. Pekka P. Pirinen, 2000-03-09. + + +Definitions +----------- + +_`.def.alloc-frame`: An allocation frame is a generic name for a +device which groups objects together with other objects at allocation +time, and which may have a parent/child relationship with other +allocation frames. + + +Purpose +------- + +_`.purpose.stack-allocation`: The allocation frame protocol is +intended to support efficient memory management for stack allocation, +that is, the allocation of objects which have dynamic extent. + +_`.purpose.general`: The allocation frame protocol is intended to be +sufficiently general that it will be useful in supporting other types +of nested allocation patterns too. For example, it could be used to +for EPVM-style save and restore, ramp allocation patterns or +generations. + + +Requirements +------------ + +Known requirements +.................. 
+
+_`.req.stack-alloc`: Provide an interface for clients to describe a
+stack allocation pattern, as an alternative to using the control
+stack.
+
+_`.req.efficient`: Permit an implementation which is comparable in
+efficiency to allocating on the control stack.
+
+_`.req.ap`: Support allocation via allocation points (APs).
+
+_`.req.format`: Support the allocation of formatted objects.
+
+_`.req.scan`: Ensure that objects in allocation frames can participate
+in garbage collection by being scanned.
+
+_`.req.fix`: Ensure that objects in allocation frames can participate
+in garbage collection by accepting Fix requests.
+
+_`.req.condemn`: Ensure that objects in allocation frames can
+participate in garbage collection by being condemned.
+
+_`.attr.locking`: Minimize the synchronization cost for the creation
+and destruction of frames.
+
+
+Proto-requirements
+..................
+
+_`.proto-req`: The following are possible requirements that might be
+important in the future. The design does not necessarily meet all
+these requirements, but it does consider them all. Each requirement
+either has direct support in the framework, or could be supported with
+future additions to the framework.
+
+_`.req.parallels`: The allocation frame protocol should provide a
+framework for exploiting the parallels between stack extents,
+generations and "ramps".
+
+_`.req.pool-destroy`: It should be possible to use allocation frames
+to free all objects in a pool without destroying the pool.
+
+_`.req.epvm`: It should be possible to implement EPVM-style save and
+restore operations by creating and destroying allocation frames.
+
+_`.req.subst`: It should be possible to substitute a stack pool with a
+GC-ed pool so that erroneous use of a stack pool can be detected.
+
+_`.req.format-extensions`: It should be possible for stack pools to
+utilize the same format as any other pool, including debugging formats
+that include fenceposting, etc.
+ +_`.req.mis-nest`: Should ensure "mis-nested" stacks are safe. + +_`.req.non-top-level`: Should support allocation in the non-top stack +extent. + +_`.req.copy-if-necessary`: Should ensure that stack pools can support +"copy-if-necessary" (so that low-level system code can heapify stack +objects.) + +_`.req.preserve`: When an object is in an allocation frame which is +being destroyed, it should be possible to preserve that object in the +parent frame. + +_`.req.contained`: Should allow clients to ask if an object is +"contained" in a frame. The object is contained in a frame if it is +affected when the frame is ended. + +_`.req.alloc-with-other`: Should allow clients to allocate an object +in the same frame as another object. + + +Overview +-------- + +_`.frame-classes`: The protocol supports different types of allocation +frames, which are represented as "frame classes". It's up to pools to +determine which classes of allocation frames they support. Pools which +support more than one frame class rely on the client to indicate which +class is currently of interest. The client indicates this by means of +an operation which stores the class in the buffer to which the +allocation point is attached. + +_`.frame-handles`: Allocation frames are described via abstract "frame +handles". Pools may choose what the representation of a frame handle +should be. Frame handles are static, and the client need not store +them in a GC root. + +_`.lightweight-frames`: The design includes an extension to the +allocation point protocol, which permits the creation and destruction +of allocation frames without the necessity for claiming the arena +lock. Such frames are called "lightweight frames". + + +Operations +---------- + +_`.op.intro`: Each operation has both an external (client) interface +and an internal (MPS) interface. 
The external function takes an +allocation point as a parameter, determines which buffer and pool it +belongs to, and calls the internal function with the buffer and pool +as parameters. + +_`.op.obligatory`: The following operations are supported on any +allocation point which supports allocation frames:- + +_`.operation.push`: The *FramePush* operation creates a new +allocation frame of the currently chosen frame class, makes this new +frame the current frame, and returns a handle for the frame. + +_`.operation.pop`: The *FramePop* operation takes a frame handle +as a parameter. Some pool classes might insist or assume that this is +the handle for the current frame. It finds the parent of that frame +and makes it the current frame. The operation indicates that all +children of the new current frame contain objects which are likely to +be dead. The reclaim policy is up to the pool; some classes might +insist or assume that the objects must be dead, and eagerly free them. +Note that this might introduce the possibility of leaving dangling +pointers elsewhere in the arena. If so, it's up to the pool to decide +what to do about this. + +_`.op.optional`: The following operations are supported for some +allocation frames, but not all. Pools may choose to support some or +all of these operations for certain frame classes. An unsupported +operation will return a failure value:- + +_`.operation.select`: The *FrameSelect* operation takes a frame +handle as a parameter and makes that frame the current frame. It does +not indicate that any children of the current frame contain objects +which are likely to be dead. + +_`.operation.select-addr`: The *FrameSelectOfAddr* operation takes +an address as a parameter and makes the frame of that address the +current frame. It does not indicate that any children of the current +frame contain objects which are likely to be dead. 
+
+_`.operation.in-frame`: The *FrameHasAddr* operation determines
+whether the supplied address is the address of an object allocated in
+the supplied frame, or any child of that frame.
+
+_`.operation.set`: The *SetFrameClass* operation takes a frame
+class and an allocation point as parameters, and makes that the
+current frame class for the allocation point. The next *FramePush*
+operation will create a new frame of that class.
+
+
+Interface
+---------
+
+External types
+..............
+
+_`.type.client.frame-handle`: Frame handles are defined as the abstract
+type ``mps_frame_t``.
+
+``typedef struct mps_frame_class_s *mps_frame_class_t``
+
+_`.type.client.frame-class`: Frame classes are defined as an abstract
+type.
+
+_`.type.client.frame-class.access`: Clients access frame classes by
+means of dedicated functions for each frame class.
+
+External functions
+..................
+
+_`.fn.client.push`: ``mps_ap_frame_push()`` is used by clients to
+invoke the *FramePush* operation. For lightweight frames, this
+might not invoke the corresponding internal function.
+
+_`.fn.client.pop`: ``mps_ap_frame_pop()`` is used by clients to invoke
+the *FramePop* operation. For lightweight frames, this might not
+invoke the corresponding internal function.
+
+``mps_res_t mps_ap_frame_select(mps_ap_t buf, mps_frame_t frame)``
+
+_`.fn.client.select`: This function is used by clients to
+invoke the *FrameSelect* operation.
+
+``mps_res_t mps_ap_frame_select_from_addr(mps_ap_t buf, mps_addr_t addr)``
+
+_`.fn.client.select-addr`: This function is used by clients to invoke
+the *FrameSelectOfAddr* operation.
+
+``mps_res_t mps_ap_addr_in_frame(mps_bool_t *inframe_o, mps_ap_t buf, mps_addr_t *addrref, mps_frame_t frame)``
+
+_`.fn.client.in-frame`: This function is used by clients to invoke the
+*FrameHasAddr* operation.
+ +``mps_res_t mps_ap_set_frame_class(mps_ap_t buf, mps_frame_class_t class)`` + +_`.fn.client.set`: This function is used by clients to invoke the +*SetFrameClass* operation. + +``mps_frame_class_t mps_alloc_frame_class_stack(void)`` + +_`.fn.client.stack-frame-class`: This function is used by clients to +access the frame class used for simple stack allocation. + + +Internal types +.............. + +``typedef struct AllocFrameStruct *AllocFrame`` + +_`.type.frame-handle`: Frame handles are defined as an abstract type. + +``typedef struct AllocFrameClassStruct *AllocFrameClass`` + +_`.type.frame-class`: Frame classes are defined as an abstract type. + +``typedef Res (*PoolFramePushMethod)(AllocFrame *frameReturn, Pool pool, Buffer buf)`` + +_`.fn.push`: A pool method of this type is called (if needed) to +invoke the *FramePush* operation. + +``typedef Res (*PoolFramePopMethod)(Pool pool, Buffer buf, AllocFrame frame)`` + +_`.fn.pop`: A pool method of this type is called (if needed) +to invoke the *FramePop* operation: + +``typedef Res (*PoolFrameSelectMethod)(Pool pool, Buffer buf, AllocFrame frame)`` + +_`.fn.select`: A pool method of this type is called to invoke the +*FrameSelect* operation. + +``typedef Res (*PoolFrameSelectFromAddrMethod)(Pool pool, Buffer buf, Addr addr)`` + +_`.fn.select-addr`: A pool method of this type is called to invoke the +*FrameSelectOfAddr* operation. + +``typedef Res (*PoolFrameHasAddrMethod)(Bool *inframeReturn, Pool pool, Seg seg, Addr *addrref, AllocFrame frame)`` + +_`.fn.in-frame`: A pool method of this type is called to invoke the +*FrameHasAddr* operation. + +``typedef Res (*PoolSetFrameClassMethod)(Pool pool, Buffer buf, AllocFrameClass class)`` + +_`.fn.set`: A pool method of this type is called to invoke the +*SetFrameClass* operation. + + +Lightweight frames +------------------- + +Overview +........ 
+ +_`.lw-frame.overview`: Allocation points provide direct support for +lightweight frames, and are designed to permit *FramePush* and +*FramePop* operations without the need for locking and delegation to +the pool method. The pool method will be called whenever +synchronization is required for other reasons (e.g. the buffer is +tripped). + +_`.lw-frame.model`: Lightweight frames offer direct support for a +particular model of allocation frame use, whereby the *FramePush* +operation returns the current allocation pointer as a frame handle, +and the *FramePop* operation causes the allocation pointer to be reset +to the address of the frame handle. This model should be suitable for +simple stack frames, where more advanced operations like *FrameSelect* +are not supported. It may also be suitable for more advanced +allocation frame models when they are being used simply. The use of a +complex operation always involves synchronization via locking, and the +pool may disable lightweight synchronization temporarily at this time. + + +Synchronization +............... + +_`.lw-frame.sync`: The purpose of the design is that mutator may +access the state of an AP without locking with MPS (via the external +functions). The design assumes the normal MPS restriction that an +operation on an AP may only be performed by a single mutator thread at +a time. Each of the operations on allocation frames counts as an +operation on an AP. + + +Implementation +.............. + +_`.lw-frame.push`: The external *FramePush* operation +``mps_ap_frame_push()`` performs the following operations:: + + IF ap->init != ap->alloc + FAIL + ELSE IF ap->init < ap->limit + *frame_o = ap->init; + ELSE + WITH_ARENA_LOCK + PerformInternalFramePushOperation(...) 
+ END + END + +_`.lw-frame.push.limit`: The reason for testing ``ap->init < +ap->limit`` and not ``ap->init <= ap->limit`` is that a frame pointer +at the limit of a buffer (and possibly therefore of a segment) would +be ambiguous: is it at the limit of the segment, or at the base of the +segment that's adjacent in memory? The internal operation must handle +this case, for example by refilling the buffer and setting the frame +at the beginning. + +_`.lw-frame.pop`: The external *FramePop* operation +(``mps_ap_frame_pop()``) performs the following operations:: + + IF ap->init != ap->alloc + FAIL + ELSE IF BufferBase(ap) <= frame AND frame < ap->init + ap->init = ap->alloc = frame; + ELSE + WITH_ARENA_LOCK + PerformInternalFramePopOperation(...) + END + END + +_`.lw-frame.pop.buffer`: The reason for testing that ``frame`` is in +the buffer is that if it's not, then we're popping to an address in +some other segment, and that means that some objects in the other +segment (and all objects in any segments on the stack in between) are +now dead, and the only way for the pool to mark them as being dead is +to do a heavyweight pop. + + +Document History +---------------- + +- 1998-10-02 Tony Mann. Incomplete document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/an.txt b/mps/design/an.txt new file mode 100644 index 00000000000..801e0a04245 --- /dev/null +++ b/mps/design/an.txt @@ -0,0 +1,202 @@ +.. mode: -*- rst -*- + +Generic modules +=============== + +:Tag: design.mps.an +:Author: Gareth Rees +:Date: 2014-11-02 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: generic modules; design + + +Introduction +------------ + +_`.intro`: This is the design of generic modules in the MPS. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. + +_`.overview`: Generic modules provide implementations of functional +modules using only the features of the Standard C Library. These +implementations are partially functional or non-functional, but +provide a basis for ports of the MPS to new platforms. 
+ +_`.name`: The name "ANSI" for the generic modules is historical: the C +language was originally standardized by the American National +Standards Institute, and so Standard C used to be known as "ANSI C". + + +Requirements +------------ + +_`.req.port`: The MPS must be portable to new platforms. (Otherwise we +can't meet the needs of customers using new platforms.) + +_`.req.port.rapid`: The MPS should be portable to new platforms +rapidly. + +_`.req.port.rapid.expert`: An expert MPS developer (who may be a +novice on the new platform) should be able to get a minimally useful +implementation of the MPS running on a new platform within a few +hours. + +_`.req.port.rapid.novice`: A novice MPS developer (who is an expert on +the new platform) should be able to get the MPS running on a new +platform within a few days. + + +Design +------ + +_`.sol.modules`: Features of the MPS which can benefit from +platform-specific implementations are divided into *functional +modules*, with clean interfaces to the MPS and to each other. See +`.mod`_ for a list of these modules. (This helps meet `.req.port`_ by +isolating the platform dependencies, and it helps meet +`.req.port.rapid`_ because a porter can mix and match implementations, +using existing implementations where possible.) + +_`.sol.generic`: Each functional module has a generic implementation +using only features of the Standard C Library. (This helps meet +`.req.port.rapid`_ because the MPS can be ported in stages, starting +with the generic modules and porting the modules needed to meet the +most urgent requirements. The generic implementations help meet +`.req.port.rapid.novice`_ by providing clear and illustrative +examples.) + +_`.sol.fallback`: The interfaces to the modules are designed to make +it possible to implement `.sol.generic`_. 
When a platform-specific +feature is needed to meet performance (or other attribute) +requirements, the interface also makes it possible to meet the +functional requirements while missing the attribute requirements. See +`.sol.fallback.example`_ for an example. (This helps meet +`.req.port.rapid`_ by allowing the generic implementations to meet +many or most of the functional requirements.) + +_`.sol.fallback.example`: The MPS normally uses incremental collection +to meet requirements on pause times, but this requires barriers. The +interface to the protection module is designed to make it possible to +write an implementation without barriers, via the function +``ProtSync()`` that synchronizes the mutator with the collector. + +_`.sol.test`: There are makefiles for the pseudo-platforms ``anangc``, +``ananll`` and ``ananmv`` that compile and test the generic +implementations. See design.mps.config.opt_ for the configuration +options used to implement these platforms. (This supports +`.req.port.rapid`_ by making sure that the generic implementations are +working when it is time to use them.) + +.. _design.mps.config.opt: config#.opt + + +Modules +------- + +_`.mod`: This section lists the functional modules in the MPS. + +_`.mod.lock`: Locks. See design.mps.lock_. + +_`.mod.prmc`: Mutator context. See design.mps.prmc_. + +_`.mod.prot`: Memory protection. See design.mps.prot_. + +_`.mod.sp`: Stack probe. See design.mps.sp_. + +_`.mod.ss`: Stack scanning. See design.mps.stack-scan_. + +_`.mod.th`: Thread manager. See design.mps.thread-manager_. + +_`.mod.vm`: Virtual mapping. See design.mps.vm_. + +.. _design.mps.lock: lock +.. _design.mps.prot: prot +.. _design.mps.prmc: prmc +.. _design.mps.sp: sp +.. _design.mps.stack-scan: stack-scan +.. _design.mps.thread-manager: thread-manager +.. 
_design.mps.vm: vm + + +Limitations of generic implementations +-------------------------------------- + +_`.lim`: This section summarizes the limitations of the generic +implementations of the function modules. + +_`.lim.lock`: Requires a single-threaded mutator (see +design.mps.lock.impl.an_). + +_`.lim.prmc`: Does not support single-stepping of accesses (see +design.mps.prmc.impl.an.fault_) and requires a single-threaded mutator +(see design.mps.prmc.impl.an.suspend_). + +_`.lim.prot`: Does not support incremental collection (see +design.mps.prot.impl.an.sync_) and is not compatible with +implementations of the mutator context module that support +single-stepping of accesses (see design.mps.prot.impl.an.sync.issue_). + +_`.lim.sp`: Only suitable for use with programs that do not handle +stack overflow faults, or do not call into the MPS from the handler +(see design.mps.sp.issue.an_). + +_`.lim.stack-scan`: Assumes that the stack grows downwards and that +``setjmp()`` reliably captures the registers (see design.mps.stack-scan.sol.stack.platform_). + +_`.lim.th`: Requires a single-threaded mutator (see +design.mps.thread-manager.impl.an.single_). + +_`.lim.vm`: Maps all reserved addresses into main memory (see +design.mps.vm.impl.an.reserve_), thus using more main memory than a +platform-specific implementation. + +.. _design.mps.lock.impl.an: lock#.impl.an +.. _design.mps.prmc.impl.an.fault: prmc#.impl.an.fault +.. _design.mps.prmc.impl.an.suspend: prmc#.impl.an.suspend +.. _design.mps.prot.impl.an.sync: prot#.impl.an.sync +.. _design.mps.prot.impl.an.sync.issue: prot#.impl.an.sync.issue +.. _design.mps.sp.issue.an: sp#.issue.an +.. _design.mps.stack-scan.sol.stack.platform: stack-scan#.sol.stack.platform +.. _design.mps.thread-manager.impl.an.single: thread-manager#.impl.an.single +.. _design.mps.vm.impl.an.reserve: vm#.impl.an.reserve + + + +Document History +---------------- + +- 2014-11-02 GDR_ Initial draft based on design.mps.protan. + +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/arena.txt b/mps/design/arena.txt new file mode 100644 index 00000000000..e8df0817c29 --- /dev/null +++ b/mps/design/arena.txt @@ -0,0 +1,620 @@ +.. mode: -*- rst -*- + +Arena +===== + +:Tag: design.mps.arena +:Author: Pekka P. Pirinen +:Date: 1997-08-11 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: arena; design + + +Introduction +------------ + +_`.intro`: This is the design of the arena structure. + +_`.readership`: MPS developers. 
+ + +Overview +-------- + +_`.overview`: The arena serves two purposes. It is a structure that is +the top-level state of the MPS, and as such contains a lot of fields +which are considered "global". And it provides raw memory to pools. + +An arena belongs to a particular arena class. The class is selected +when the arena is created. Classes encapsulate both policy (such as +how pool placement preferences map into actual placement) and +mechanism (such as where the memory originates: operating system +virtual memory, client provided, or via malloc). Some behaviour +(mostly serving the "top-level datastructure" purpose) is implemented +by generic arena code, and some by arena class code. + + +Definitions +----------- + +_`.def.grain`: The arena manages memory in units called *arena +grains*, whose size is returned by the macro ``ArenaGrainSize()``. +Memory allocated by ``ArenaAlloc()`` is a contiguous sequence of arena +grains, whose base address and size are multiples of the arena grain +size. + +_`.def.tract`: A tract is a data structure containing information +about a region of address space: which pool it belongs to (if any), +which segment contains it, and so on. Tracts are the hook on which the +segment module is implemented. Pools which don't use segments may use +tracts for associating their own data with ranges of address. + + + +Requirements +------------ + +.. note:: + + Where do these come from? Need to identify and document the + sources of requirements so that they are traceable to client + requirements. Most of these come from the architectural design + (design.mps.architecture) or the fix function design + (design.mps.fix_). Richard Brooksby, 1995-08-28. + + They were copied from design.mps.arena.vm_ and edited slightly. + David Jones, 1999-06-23. + + .. _design.mps.fix: fix + .. _design.mps.arena.vm: arenavm + + +Block management +................ + +_`.req.fun.block.alloc`: The arena must provide allocation of +contiguous blocks of memory. 
+ +_`.req.fun.block.free`: It must also provide freeing of contiguously +allocated blocks owned by a pool, whether or not the block was +allocated via a single request. + +_`.req.attr.block.size.min`: The arena must support management of +blocks down to the larger of (i) the grain size of the virtual mapping +interface (if a virtual memory interface is being used); and (ii) the +grain size of the memory protection interface (if protection is used). + +.. note:: + + On all the operating systems we support, these grain sizes are the + same and are equal to the operating system page size. But we want + the MPS to remain flexible enough to be ported to operating + systems where these are different. + +_`.req.attr.block.size.max`: It must also support management of blocks +up to the maximum size allowed by the combination of operating system +and architecture. This is derived from req.dylan.attr.obj.max (at +least). + +_`.req.attr.block.align.min`: The alignment of blocks shall not be less +than ``MPS_PF_ALIGN`` for the architecture. This is so that pool +classes can conveniently guarantee pool allocated blocks are aligned +to ``MPS_PF_ALIGN``. (A trivial requirement.) + + +Address translation +................... + +_`.req.fun.trans`: The arena must provide a translation from any +address to the following information: + +_`.req.fun.trans.arena`: Whether the address is managed by the arena. + +_`.req.fun.trans.pool`: Whether the address is managed by a pool +within the arena, and if it is, the pool. + +_`.req.fun.trans.arbitrary`: If the address is managed by a pool, an +arbitrary pointer value that the pool can associate with a group of +contiguous addresses at any time. + +_`.req.fun.trans.white`: If the address is managed by an automatic +pool, the set of traces for which the address is white. This is +required so that the second-stage fix protocol can reject non-white +addresses quickly. See design.mps.critical-path_. + +.. 
_design.mps.critical-path: critical-path
+
+_`.req.attr.trans.time`: The translation shall take no more than @@@@
+[something not very large -- drj 1999-06-23]
+
+
+Arena partition
+...............
+
+_`.req.fun.set`: The arena must provide a method for approximating
+sets of addresses.
+
+_`.req.fun.set.time`: The determination of membership shall take no
+more than @@@@ [something very small indeed]. (the non-obvious
+solution is refsets)
+
+
+Constraints
+...........
+
+_`.req.attr.space.overhead`: req.dylan.attr.space.struct implies that
+the arena must limit the space overhead. The arena is not the only
+part that introduces an overhead (pool classes being the next most
+obvious), so multiple parts must cooperate in order to meet the
+ultimate requirements.
+
+_`.req.attr.time.overhead`: Time overhead constraint?
+
+.. note::
+
+    How can there be a time "overhead" on a necessary component? David
+    Jones, 1999-06-23.
+
+
+Architecture
+------------
+
+Statics
+.......
+
+_`.static`: There is no higher-level data structure than an arena, so in
+order to support several arenas, we have to have some static data in
+impl.c.arena. See impl.c.arena.static.
+
+_`.static.init`: All the static data items are initialized when the
+first arena is created.
+
+_`.static.serial`: ``arenaSerial`` is a static ``Serial``, containing
+the serial number of the next arena to be created. The serial of any
+existing arena is less than this.
+
+_`.static.ring`: ``arenaRing`` is the sentinel of the ring of arenas.
+
+_`.static.ring.init`: ``arenaRingInit`` is a ``Bool`` showing whether
+the ring of arenas has been initialized.
+
+_`.static.ring.lock`: The ring of arenas has to be locked when
+traversing the ring, to prevent arenas being added or removed. This is
+achieved by using the (non-recursive) global lock facility, provided
+by the lock module.
+
+_`.static.check`: The statics are checked each time any arena is
+checked.
+
+
+Arena classes
+.............
+
+``typedef mps_arena_s *Arena``
+
+_`.class`: The ``Arena`` data structure is designed to be subclassable
+(see design.mps.protocol_). Clients can select what arena class
+they'd like when instantiating one with ``mps_arena_create_k()``. The
+arguments to ``mps_arena_create_k()`` are class-dependent.
+
+.. _design.mps.protocol: protocol
+
+_`.class.fields`: The ``grainSize`` (for allocation and freeing) and
+``zoneShift`` (for computing zone sizes and what zone an address is
+in) fields in the arena are the responsibility of each class, and
+are initialized by the ``init`` method. The responsibility for
+maintaining the ``commitLimit``, ``spareCommitted``, and
+``spareCommitLimit`` fields is shared between the (generic) arena and
+the arena class. ``commitLimit`` (see `.commit-limit`_) is changed by
+the generic arena code, but arena classes are responsible for ensuring
+the semantics. For ``spareCommitted`` and ``spareCommitLimit`` see
+`.spare-committed`_ below.
+
+_`.class.abstract`: The basic arena class (``AbstractArenaClass``) is
+abstract and must not be instantiated. It provides little useful
+behaviour, and exists primarily as the root of the tree of arena
+classes. Each concrete class must specialize each of the class method
+fields, with the exception of the describe method (which has a trivial
+implementation) and the ``extend``, ``retract`` and
+``spareCommitExceeded`` methods which have non-callable methods for
+the benefit of arena classes which don't implement these features.
+
+
+Chunks
+......
+
+_`.chunk`: Each contiguous region of address space managed by the MPS
+is represented by a *chunk*.
+
+_`.chunk.tracts`: A chunk contains a table of tracts. See
+`.tract.table`_.
+
+_`.chunk.lookup`: Looking up the chunk of an address is the first
+step in the second-stage fix operation, and so is on the critical path.
+See design.mps.critical-path_.
+ +_`.chunk.tree`: For efficient lookup, chunks are stored in a balanced +tree; ``arena->chunkTree`` points to the root of the tree. Operations +on this tree must ensure that the tree remains balanced, otherwise +performance degrades badly with many chunks. + +_`.chunk.insert`: New chunks are inserted into the tree by calling +``ArenaChunkInsert()``. This calls ``TreeInsert()``, followed by +``TreeBalance()`` to ensure that the tree is balanced. + +_`.chunk.delete`: There is no corresponding function +``ArenaChunkDelete()``. Instead, deletions from the chunk tree are +carried out by calling ``TreeToVine()``, iterating over the vine +(where deletion is possible, if care is taken) and then calling +``TreeBalance()`` on the remaining tree. The function +``TreeTraverseAndDelete()`` implements this. + +_`.chunk.delete.justify`: This is because we don't have a function +that deletes an item from a balanced tree efficiently, and because all +functions that delete chunks do so in a loop over the chunks (so the +best we can do is O(*n*) time in any case). + +_`.chunk.delete.tricky`: Deleting chunks from the chunk tree is tricky +in the virtual memory arena because ``vmChunkDestroy()`` unmaps the +memory containing the chunk, which includes the tree node. So the next +chunk must be looked up before deleting the current chunk. The function +``TreeTraverseAndDelete()`` ensures that this is done. + + +Tracts +...... + +_`.tract.table`: The arena maintains tables of tract structures such +that every address managed by the arena belongs to exactly one tract. + +_`.tract.size`: Each tract covers exactly one arena grain. This is an +implementation detail, not a requirement. 
+ +_`.tract.structure`: The tract structure definition looks like this:: + + typedef struct TractStruct { /* Tract structure */ + Pool pool; /* MUST BE FIRST */ + Seg seg; /* NULL or segment containing tract */ + Addr base; /* Base address of the tract */ + } TractStruct; + +_`.tract.field.pool`: The pool field indicates to which pool the tract +has been allocated (`.req.fun.trans.pool`_). Tracts are only valid +when they are allocated to pools. When tracts are not allocated to +pools, arena classes are free to reuse tract objects in undefined +ways. A standard technique is for arena class implementations to +internally describe the objects as a union type of ``TractStruct`` and +some private representation, and to set the pool field to ``NULL`` +when the tract is not allocated. The pool field must come first so +that the private representation can share a common prefix with +``TractStruct``. This permits arena classes to determine from their +private representation whether such an object is allocated or not, +without requiring an extra field. + +_`.tract.field.seg`: The seg field is a pointer to the segment +containing the tract, or ``NULL`` if the tract is not contained in any +segment. + +_`.tract.field.base`: The base field contains the base address of the +memory represented by the tract. + +_`.tract.limit`: The limit of the tract's memory may be determined by +adding the arena grain size to the base address. + +_`.tract.iteration`: Iteration over tracts is described in +design.mps.arena.tract-iter(0). + +``Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr)`` + +_`.tract.if.tractofaddr`: The function ``TractOfAddr()`` finds the +tract corresponding to an address in memory. (See `.req.fun.trans`_.) +If ``addr`` is an address which has been allocated to some pool, then +``TractOfAddr()`` returns ``TRUE``, and sets ``*tractReturn`` to the +tract corresponding to that address. Otherwise, it returns ``FALSE``. 
+This function is similar to ``TractOfBaseAddr()`` (see
+design.mps.arena.tract-iter.if.contig-base) but serves a more general
+purpose and is less efficient.
+
+
+Control pool
+............
+
+_`.pool`: Each arena has a "control pool",
+``arena->controlPoolStruct``, which is used for allocating MPS control
+data structures by calling ``ControlAlloc()``.
+
+
+Polling
+.......
+
+_`.poll`: ``ArenaPoll()`` is called "often" by other code (for instance,
+on buffer fill or allocation). It is the entry point for doing tracing
+work. If the polling clock exceeds a set threshold, and we're not
+already doing some tracing work (that is, ``insidePoll`` is not set),
+it calls ``TracePoll()`` on all busy traces.
+
+_`.poll.size`: The actual clock is ``arena->fillMutatorSize``. This is
+because internal allocation is only significant when copy segments are
+being allocated, and we don't want the pause times to shrink
+because of that. There is no current requirement for the trace rate to
+guard against running out of memory.
+
+.. note::
+
+    Clearly it really ought to: we have a requirement to not run out
+    of memory (see req.dylan.prot.fail-alloc, req.dylan.prot.consult),
+    and emergency tracing should not be our only story. David Jones,
+    1999-06-22.
+
+``BufferEmpty()`` is not taken into account, because the splinter will
+rarely be useable for allocation and we are wary of the clock running
+backward.
+
+_`.poll.clamp`: Polling is disabled when the arena is "clamped", in
+which case ``arena->clamped`` is ``TRUE``. Clamping the arena prevents
+background tracing work, and further new garbage collections from
+starting. Clamping and releasing are implemented by the ``ArenaClamp()``
+and ``ArenaRelease()`` methods.
+
+_`.poll.park`: The arena is "parked" by clamping it, then polling until
+there are no active traces. This finishes all the active collections
+and prevents further collection. Parking is implemented by the
+``ArenaPark()`` method.
+
+
+Commit limit
+............
+
+_`.commit-limit`: The arena supports a client configurable "commit
+limit" which is a limit on the total amount of committed memory. The
+generic arena structure contains a field to hold the value of the
+commit limit and the implementation provides two functions for
+manipulating it: ``ArenaCommitLimit()`` to read it, and
+``ArenaSetCommitLimit()`` to set it. Actually abiding by the contract of
+not committing more memory than the commit limit is left up to the
+individual arena classes.
+
+_`.commit-limit.err`: When allocation from the arena would otherwise
+succeed but cause the MPS to use more committed memory than specified
+by the commit limit ``ArenaAlloc()`` should refuse the request and
+return ``ResCOMMIT_LIMIT``.
+
+_`.commit-limit.err.multi`: In the case where an ``ArenaAlloc()`` request
+cannot be fulfilled for more than one reason including exceeding the
+commit limit then class implementations should strive to return a
+result code other than ``ResCOMMIT_LIMIT``. That is,
+``ResCOMMIT_LIMIT`` should only be returned if the *only* reason for
+failing the ``ArenaAlloc()`` request is that the commit limit would be
+exceeded. The client documentation allows implementations to be
+ambiguous with respect to which result code is returned in such a
+situation however.
+
+
+Spare committed (aka "hysteresis")
+..................................
+
+_`.spare-committed`: See ``mps_arena_spare_committed()``. The generic
+arena structure contains two fields for the spare committed memory
+fund: ``spareCommitted`` records the total number of spare committed
+bytes; ``spareCommitLimit`` records the limit (set by the user) on the
+amount of spare committed memory. ``spareCommitted`` is modified by
+the arena class but its value is used by the generic arena code.
There +are two uses: a getter function for this value is provided through the +MPS interface (``mps_arena_spare_commit_limit()``), and by the +``ArenaSetSpareCommitLimit()`` function to determine whether the +amount of spare committed memory needs to be reduced. +``spareCommitLimit`` is manipulated by generic arena code, however the +associated semantics are the responsibility of the class. It is the +class's responsibility to ensure that it doesn't use more spare +committed bytes than the value in ``spareCommitLimit``. + +_`.spare-commit-limit`: The function ``ArenaSetSpareCommitLimit()`` sets +the ``spareCommitLimit`` field. If the limit is set to a value lower +than the amount of spare committed memory (stored in +``spareCommitted``) then the class specific function +``spareCommitExceeded`` is called. + + +Pause time control +.................. + +_`.pause-time`: The generic arena structure contains the field +``pauseTime`` for the maximum time any operation in the arena may take +before returning to the mutator. This value is used by +``PolicyPollAgain()`` to decide whether to do another unit of tracing +work. The MPS interface provides getter (``mps_arena_pause_time()``) +and setter (``mps_arena_pause_time_set()``) functions. + + +Locks +..... + +_`.lock.ring`: ``ArenaAccess()`` is called when we fault on a barrier. +The first thing it does is claim the non-recursive global lock to +protect the arena ring (see design.mps.lock(0)). + +_`.lock.arena`: After the arena ring lock is claimed, ``ArenaEnter()`` is +called on one or more arenas. This claims the lock for that arena. +When the correct arena is identified or we run out of arenas, the lock +on the ring is released. + +_`.lock.avoid`: Deadlocking is avoided as described below: + +_`.lock.avoid.mps`: Firstly we require the MPS not to fault (that is, +when any of these locks are held by a thread, that thread does not +fault). 
+
+_`.lock.avoid.thread`: Secondly, we require that in a multi-threaded
+system, memory fault handlers do not suspend threads (although the
+faulting thread will, of course, wait for the fault handler to
+finish).
+
+_`.lock.avoid.conflict`: Thirdly, we avoid conflicting deadlock between
+the arena and global locks by ensuring we never claim the arena lock
+when the recursive global lock is already held, and we never claim the
+binary global lock when the arena lock is held.
+
+
+Location dependencies
+.....................
+
+_`.ld`: Location dependencies use fields in the arena to maintain a
+history of summaries of moved objects, and to keep a notion of time,
+so that the staleness of location dependency can be determined.
+
+
+Finalization
+............
+
+_`.final`: There is a pool which is optionally (and dynamically)
+instantiated to implement finalization. The fields ``finalPool`` and
+``isFinalPool`` are used.
+
+
+Implementation
+--------------
+
+
+Tract cache
+...........
+
+_`.impl.tract.cache`: When tracts are allocated to pools by ``ArenaAlloc()``,
+the first tract of the block and its base address are cached in arena
+fields ``lastTract`` and ``lastTractBase``. The function
+``TractOfBaseAddr()`` (see design.mps.arena.tract-iter.if.block-base(0))
+checks against these cached values and only calls the class method on
+a cache miss. This optimizes for the common case where a pool
+allocates a block and then iterates over all its tracts (for example,
+to attach them to a segment).
+
+_`.impl.tract.uncache`: When blocks of memory are freed by pools,
+``ArenaFree()`` checks to see if the cached value for the most
+recently allocated tract (see `.impl.tract.cache`_) is being freed. If
+so, the cache is invalid, and must be reset. The ``lastTract`` and
+``lastTractBase`` fields are set to ``NULL``.
+
+
+Control pool
+............
+
+_`.impl.pool.init`: The control pool is initialized by a call to
+``PoolInit()`` during ``ArenaCreate()``.
+
+_`.impl.pool.ready`: All the other fields in the arena are made
+checkable before calling ``PoolInit()``, so ``PoolInit()`` can call
+``ArenaCheck(arena)``. The pool itself is, of course, not checkable,
+so we have a field ``arena->poolReady``, which is false until after
+the return from ``PoolInit()``. ``ArenaCheck()`` only checks the pool
+if ``poolReady``.
+
+
+Traces
+......
+
+_`.impl.trace`: ``arena->trace[ti]`` is valid if and only if
+``TraceSetIsMember(arena->busyTraces, ti)``.
+
+_`.impl.trace.create`: Since the arena created by ``ArenaCreate()``
+has ``arena->busyTraces = TraceSetEMPTY``, none of the traces are
+meaningful.
+
+_`.impl.trace.invalid`: Invalid traces have signature ``SigInvalid``,
+which can be checked.
+
+
+Polling
+.......
+
+_`.impl.poll.fields`: There are three fields of an arena used for
+polling: ``pollThreshold``, ``insidePoll``, and ``clamped`` (see
+above). ``pollThreshold`` is the threshold for the next poll: it is
+set at the end of ``ArenaPoll()`` to the current polling time plus
+``ARENA_POLL_MAX``.
+
+
+Location dependencies
+.....................
+
+_`.impl.ld`: The ``historyStruct`` contains fields used to maintain a
+history of garbage collection and in particular object motion in order
+to implement location dependency.
+
+_`.impl.ld.epoch`: The ``epoch`` is the "current epoch". This is the number
+of "flips" of traces, in which objects might have moved, in the arena
+since it was created. From the mutator's point of view, locations
+change atomically at flip.
+
+_`.impl.ld.history`: The ``history`` is a circular buffer of
+``LDHistoryLENGTH`` elements of type ``RefSet``. These are the
+summaries of moved objects since the last ``LDHistoryLENGTH`` epochs.
+If ``e`` is one of these recent epochs, then ::
+
+    history->history[e % LDHistoryLENGTH]
+
+is a summary of (the original locations of) objects moved since epoch
+``e``.
+ +_`.impl.ld.prehistory`: The ``prehistory`` is a ``RefSet`` summarizing +the original locations of all objects ever moved. When considering +whether a really old location dependency is stale, it is compared with +this summary. + + +Roots +..... + +_`.impl.root-ring`: The arena holds a member of a ring of roots in the +arena. It holds an incremental serial which is the serial of the next +root. + + +Document History +---------------- + +- 1997-08-11 Pekka P. Pirinen. First draft, based on + design.mps.space(0) and mail.richard.1997-04-25.11-52(0). + +- 1999-04-16 Tony Mann. Updated for separation of tracts and segments. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-03-11 GDR_ Converted to reStructuredText. + +- 2014-02-17 RB_ Updated first field of tract structure. + +- 2016-04-08 RB_ All methods in the abstract arena class now have + dummy implementations, so that the class passes its own check. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2001–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/mps/design/arenavm.txt b/mps/design/arenavm.txt
new file mode 100644
index 00000000000..f04cecf7fe3
--- /dev/null
+++ b/mps/design/arenavm.txt
@@ -0,0 +1,262 @@
+.. mode: -*- rst -*-
+
+Virtual Memory Arena
+====================
+
+:Tag: design.mps.arena.vm
+:Author: David Jones
+:Date: 1996-07-16
+:Status: incomplete document
+:Revision: $Id$
+:Copyright: See `Copyright and License`_.
+:Index terms:
+   pair: virtual memory arena; design
+   pair: VM arena; design
+
+
+Introduction
+------------
+
+_`.intro`: This is the design of the Virtual Memory Arena Class of the
+Memory Pool System. The VM Arena Class is just one class available in
+the MPS. The generic arena part is described in design.mps.arena_.
+
+.. _design.mps.arena: arena
+
+
+Overview
+--------
+
+_`.overview`: VM arenas provide blocks of memory to all other parts of
+the MPS in the form of "tracts" using the virtual mapping interface
+(design.mps.vm_) to the operating system. The VM Arena Class is not
+expected to be provided on platforms that do not have virtual memory
+(for example, Macintosh System 7).
+
+.. _design.mps.vm: vm
+
+_`.overview.gc`: The VM Arena Class provides some special services on
+these blocks in order to facilitate garbage collection:
+
+_`.overview.gc.zone`: Allocation of blocks with specific zones.
This +means that the generic fix function (design.mps.fix_) can use a fast +refset test to eliminate references to addresses that are not in the +condemned set. This assumes that a pool class that uses this placement +appropriately is being used (such as the generation placement policy +used by AMC: see design.mps.poolamc_) and that the pool selects the +condemned sets to coincide with zone stripes. + +.. _design.mps.fix: fix +.. _design.mps.poolamc: poolamc + +_`.overview.gc.tract`: A fast translation from addresses to tract. +(See design.mps.arena.req.fun.trans_.) + +.. _design.mps.arena.req.fun.trans: arena#.req.fun.trans + + +Notes +----- + +_`.note.refset`: Some of this document simply assumes that RefSets +(see design.mps.collections.refsets_) have been chosen as the solution for +design.mps.arena.req.fun.set_. It's a lot simpler that way. Both to +write and understand. + +.. _design.mps.collections.refsets: collections#.refsets +.. _design.mps.arena.req.fun.set: arena#.req.fun.set + + +Requirements +------------ + +Most of the requirements are in fact on the generic arena (see +design.mps.arena.req_). However, many of those requirements can only +be met by a suitable arena class design. + +.. _design.mps.arena.req: arena#.req + +Requirements particular to this arena class: + +Placement +......... + +_`.req.fun.place`: It must be possible for pools to obtain tracts at +particular addresses. Such addresses shall be declared by the pool +specifying what refset zones the tracts should lie in and what refset +zones the tracts should not lie in. It is acceptable for the arena to +not always honour the request in terms of placement if it has run out +of suitable addresses. + +Arena partition +............... + +_`.req.fun.set`: See design.mps.arena.req.fun.set_. The +approximation to sets of address must cooperate with the placement +mechanism in the way required by `.req.fun.place`_ (above). 
+
+
+Architecture
+------------
+
+_`.arch.memory`: The underlying memory is obtained from whatever
+Virtual Memory interface (see design.mps.vm_). @@@@ Explain why this
+is used.
+
+
+Solution ideas
+--------------
+
+_`.idea.grain`: Set the arena granularity to the grain provided by the
+virtual mapping module.
+
+_`.idea.mem`: Get a single large contiguous address area from the
+virtual mapping interface and divide that up.
+
+_`.idea.table`: Maintain a table with one entry per grain in order to
+provide fast mapping (shift and add) between addresses and table
+entries.
+
+_`.idea.table.figure`: [missing figure]
+
+_`.idea.map`: Store the pointers (design.arena.req.fun.trans) in the
+table directly for every grain.
+
+_`.idea.zones`: Partition the managed address space into zones (see
+idea.zones) and provide the set approximation as a reference
+signature.
+
+_`.idea.first-fit`: Use a simple first-fit allocation policy for
+tracts within each zone (`.idea.zones`_). Store the freelist in the
+table (`.idea.table`_).
+
+_`.idea.base`: Store information about each contiguous area (allocated
+or free) in the table entry (`.idea.table`_) corresponding to the base
+address of the area.
+
+_`.idea.shadow`: Use the table (`.idea.table`_) as a "shadow" of the
+operating system's page table. Keep information such as last access,
+protection, etc. in this table, since we can't get at this information
+otherwise.
+
+_`.idea.barrier`: Use the table (`.idea.table`_) to implement the
+software barrier. Each segment can have a read and/or write barrier
+placed on it by each process. (_`.idea.barrier.bits`: Store a
+bit-pattern which remembers which process protected what.) This will
+give a fast translation from a barrier-protected address to the
+barrier handler via the process table.
+
+_`.idea.demand-table`: For a 1 GiB managed address space with a 4 KiB
+page size, the table will have 256K-entries. At, say, four words per
+entry, this is 4 MiB of table.
Although this is only an 0.4%, the
+table shouldn't be preallocated or initially it is an infinite
+overhead, and with 1 MiB active, it is a 300% overhead! The address
+space for the table should be reserved, but the pages for it mapped
+and unmapped on demand. By storing the table in a tract, the status of
+the table's pages can be determined by looking at its own entries in
+itself, and thus the translation lookup (design.arena.req.fun.trans)
+is slowed to two lookups rather than one.
+
+_`.idea.pool`: Make the Arena Manager a pool class. Arena
+initialization becomes pool creation. Tract allocation becomes
+``PoolAlloc()``. Other operations become class-specific operations on
+the "arena pool".
+
+
+Data structures
+---------------
+
+_`.tables`: There are two table data structures: a page table, and an
+alloc table.
+
+_`.table.page.map`: Each page in the VM has a corresponding page table
+entry.
+
+_`.table.page.linear`: The table is a linear array of PageStruct
+entries; there is a simple mapping between the index in the table and
+the base address in the VM. Namely:
+
+- index to base address: ``base-address = arena-base + (index * page-size)``
+- base address to index: ``index = (base-address - arena-base) / page-size``
+
+_`.table.page.partial`: The table is partially mapped on an
+"as-needed" basis using the SparseArray abstract type.
+
+_`.table.page.tract`: Each page table entry contains a tract, which is
+only valid if it is allocated to a pool. If it is not allocated to a
+pool, the fields of the tract are used for other purposes. (See
+design.mps.arena.tract.field.pool_)
+
+.. _design.mps.arena.tract.field.pool: arena#.tract.field.pool
+
+_`.table.alloc`: The alloc table is a simple bit table (implemented
+using the BT module, design.mps.bt_).
+
+.. _design.mps.bt: bt
+
+_`.table.alloc.map`: Each page in the VM has a corresponding alloc
+table entry.
+ +_`.table.alloc.semantics`: The bit in the alloc table is set iff the +corresponding page is allocated (to a pool). + + +Notes +----- + +_`.fig.page`: How the pages in the arena area are represented in the +tables. + +[missing figure] + +_`.fig.count`: How a count table can be used to partially map the page +table, as proposed in request.dylan.170049.sol.map_. + +.. _request.dylan.170049.sol.map: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170049 + +[missing figure] + + +Document History +---------------- + +- 1996-07-16 David Jones. Incomplete document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-24 GDR_ Converted to reStructuredText. + +- 2014-02-17 RB_ Updated to note use of SparseArray rather than direct + management of page table mapping. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/bootstrap.txt b/mps/design/bootstrap.txt new file mode 100644 index 00000000000..ef486e3ed3d --- /dev/null +++ b/mps/design/bootstrap.txt @@ -0,0 +1,127 @@ +.. mode: -*- rst -*- + +Bootstrapping +============= + +:Tag: design.mps.bootstrap +:Author: Gareth Rees +:Date: 2015-09-01 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: bootstrap; design + + +Introduction +------------ + +_`.intro`: This explains how the MPS gets started. + +_`.readership`: Any MPS developer. + +_`.overview`: The job of the MPS is to allocate memory to a program. +Before it can allocate memory, the MPS needs to create data structures +to represent its internal state. But before it can create those data +structures, it needs to allocate memory to store them in. This +bootstrapping problem affects the MPS at several points, which are +listed here, together with their solutions. + + +Bootstrapping problems +---------------------- + +Virtual memory descriptor +......................... + +_`.vm`: Before address space can be mapped into main memory, the +virtual memory descriptor must be initialized. But before the virtual +memory descriptor can be initialized, some address space must be +mapped into main memory in order to store it. See +`design.vm.req.bootstrap`_. 
+ +_`.vm.sol`: The virtual memory descriptor is allocated initially on +the stack, and then copied into its place in the chunk after the +memory for it has been mapped. See `design.vm.sol.bootstrap`_. + +.. _design.vm.req.bootstrap: vm#req.bootstrap +.. _design.vm.sol.bootstrap: vm#sol.bootstrap + + +Arena descriptor +................ + +_`.arena`: Before chunks of address space can be reserved and mapped, +the virtual memory arena descriptor must be initialized (so that the +chunks can be added to the arena's chunk tree). But before a virtual +memory arena descriptor can be initialized, address space must be +reserved and mapped in order to store it. + +_`.arena.sol`: A small amount of address space is reserved and mapped +directly via ``VMInit()`` and ``VMMap()`` (not via the chunk system) +in order to provide enough memory for the arena descriptor. + + +Arena's free land +................. + +_`.land`: Before the arena can allocate memory, a range of addresses +must be inserted into the arena's free land (so that the free land can +hand out memory from this range). But before addresses can be inserted +into the arena's free land, the free land's block pool must have +memory from the arena to store the nodes in the tree representing +those addresses. + +_`.land.sol`: The arena has two "back door" mechanisms and uses them +in combination. + +_`.land.sol.alloc`: First, there is a mechanism for allocating a +page of memory directly from a chunk, bypassing the free land. + +_`.land.sol.pool`: Second, the free land's block pool has an option to +prevent it extending itself by allocating memory from the arena. +Instead, it fails allocations with ``ResLIMIT``. The free land's +block pool also has a mechanism, ``MFSExtend`` to extend it with a +block of memory. When the free land fails with ``ResLIMIT`` the arena +uses `.land.sol.alloc`_ to provide it with memory. + + + +Document History +---------------- + +- 2015-09-01 GDR_ Initial draft. 
+ +- 2016-02-25 RB_ Improving description of arena free land bootstrap + and cross-referencing from source code. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ +.. _RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2015–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/bt.txt b/mps/design/bt.txt new file mode 100644 index 00000000000..08af959c5b9 --- /dev/null +++ b/mps/design/bt.txt @@ -0,0 +1,777 @@ +.. mode: -*- rst -*- + +Bit tables +========== + +:Tag: design.mps.bt +:Author: David Jones +:Date: 1997-03-04 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. 
+:Index terms: pair: bit tables; design + + +Introduction +------------ + +_`.intro`: This is the design of the Bit Tables module. A Bit Table is +a linear array of bits. A Bit Table of length *n* is indexed using an +integer from 0 up to (but not including) *n*. Each bit in a Bit Table +can hold either the value 0 (``FALSE``) or 1 (``TRUE``). A variety of +operations are provided including: get, set, and reset individual +bits; set and reset a contiguous range of bits; search for a +contiguous range of reset bits; making a "negative image" copy of a +range. + +_`.readership`: MPS developers. + + +Definitions +----------- + +_`.def.set`: **Set** + + Used as a verb meaning to assign the value 1 or ``TRUE`` to a bit. + Used descriptively to denote a bit containing the value 1. Note 1 + and ``TRUE`` are synonyms in MPS C code (see ``Bool``). + +_`.def.reset`: **Reset** + + Used as a verb meaning to assign the value 0 or ``FALSE`` to a + bit. Used descriptively to denote a bit containing the value 0. + Note 0 and ``FALSE`` are synonyms in MPS C code (see ``Bool``). + +.. note:: + + Consider using "fill/empty" or "mark/clear" instead of + "set/reset", set/reset is probably a hangover from drj's z80 + hacking days -- drj 1999-04-26 + +_`.def.bt`: **Bit Table** + + A Bit Table is a mapping from [0, *n*) to {0,1} for some *n*, + represented as a linear array of bits. + + _`.def.bt.justify`: They are called *Bit Tables* because a single + bit is used to encode whether the image of a particular integer + under the map is 0 or 1. + +_`.def.range`: **Range** + + A contiguous sequence of bits in a Bit Table. Ranges are typically + specified as a *base*--*limit* pair where the range includes the + position specified by the base, but excludes that specified by the + limit. The mathematical interval notation for half-open intervals, + [*base*, *limit*), is used. 
+ + +Requirements +------------ + +_`.req.bit`: The storage for a Bit Table of *n* bits shall take no +more than a small constant addition to the storage required for *n* +bits. _`.req.bit.why`: This is so that clients can make some +predictions about how much storage their algorithms use. A small +constant is allowed over the minimal for two reasons: inevitable +implementation overheads (such as only being able to allocate storage +in multiples of 32 bits), extra storage for robustness or speed (such +as signature and length fields). + +_`.req.create`: A means to create Bit Tables. _`.req.create.why`: +Obvious. + +_`.req.destroy`: A means to destroy Bit Tables. _`.req.destroy.why`: +Obvious. + +_`.req.ops`: The following operations shall be supported: + +* _`.req.ops.get`: **Get**. Get the value of a bit at a specified + index. + +* _`.req.ops.set`: **Set**. Set a bit at a specified index. + +* _`.req.ops.reset`: **Reset**. Reset a bit at a specified index. + +_`.req.ops.minimal.why`: Get, Set, Reset, are the minimal operations. +All possible mappings can be created and inspected using these +operations. + +* _`.req.ops.set.range`: **SetRange**. Set a range of bits. + _`.req.ops.set.range.why`: It's expected that clients will often want + to set a range of bits; providing this operation allows the + implementation of the BT module to make the operation efficient. + +* _`.req.ops.reset.range`: **ResetRange**. Reset a range of + bits. _`.req.ops.reset.range.why`: as for SetRange, see + `.req.ops.set.range.why`_. + +* _`.req.ops.test.range.set`: **IsSetRange**. Test whether a range + of bits are all set. _`.req.ops.test.range.set.why`: Mostly + for checking. For example, often clients will know that a range they + are about to reset is currently all set, they can use this operation + to assert that fact. + +* _`.req.ops.test.range.reset`: **IsResetRange**. Test whether a + range of bits are all reset. 
_`.req.ops.test.range.reset.why`:
+  As for IsSetRange, see `.req.ops.test.range.set.why`_.
+
+* _`.req.ops.find`: Find a range, which we'll denote [*i*, *j*), of at
+  least *L* reset bits that lies in a specified subrange of the entire
+  Bit Table. Various find operations are required according to the
+  (additional) properties of the required range:
+
+  * _`.req.ops.find.short.low`: **FindShortResetRange**. Of all
+    candidate ranges, find the range with least *j* (find the leftmost
+    range that has at least *L* reset bits and return just enough of
+    that). _`.req.ops.find.short.low.why`: Required by client and VM
+    arenas to allocate segments. The arenas implement definite
+    placement policies (such as lowest addressed segment first) so
+    they need the lowest (or highest) range that will do. It's not
+    currently useful to allocate segments larger than the requested
+    size, so finding a short range is sufficient.
+
+  * _`.req.ops.find.short.high`: **FindShortResetRangeHigh**. Of all
+    candidate ranges, find the range with greatest *i* (find the
+    rightmost range that has at least *L* reset bits and return just
+    enough of that). _`.req.ops.find.short.high.why`: Required by arenas
+    to implement a specific segment placement policy (highest
+    addressed segment first).
+
+  * _`.req.ops.find.long.low`: **FindLongResetRange**. Of all candidate
+    ranges, identify the ranges with least *i* and of those find the
+    one with greatest *j* (find the leftmost range that has at least
+    *L* reset bits and return all of it). _`.req.ops.find.long.low.why`:
+    Required by the mark and sweep Pool Classes (AMS, AWL, LO) for
+    allocating objects (filling a buffer). It's more efficient to fill
+    a buffer with as much memory as is conveniently possible. There's
+    no strong reason to find the lowest range but it's bound to have
+    some beneficial (small) cache effect and makes the algorithm more
+    predictable.
+
+  * _`.req.ops.find.long.high`: **FindLongResetRangeHigh**.
Provided, + but not required, see `.non-req.ops.find.long.high`_. + +* _`.req.ops.copy`: Copy a range of bits from one Bit Table to another + Bit Table. Various copy operations are required: + + * _`.req.ops.copy.simple`: Copy a range of bits from one Bit Table to + the same position in another Bit Table. + _`.req.ops.copy.simple.why`: Required to support copying of the + tables for the "low" segment during segment merging and splitting, + for pools using tables (for example, ``PoolClassAMS``). + + * _`.req.ops.copy.offset`: Copy a range of bits from one Bit Table to + an offset position in another Bit Table. + _`.req.ops.copy.offset.why`: Required to support copying of the + tables for the "high" segment during segment merging and + splitting, for pools which support this (currently none, as of + 2000-01-17). + + * _`.req.ops.copy.invert`: Copy a range of bits from one Bit Table to + the same position in another Bit Table inverting all the bits in + the target copy. _`.req.ops.copy.invert.why`: Required by colour + manipulation code in ``PoolClassAMS`` and ``PoolClassLO``. + +_`.req.speed`: Operations shall take no more than a few memory +operations per bit manipulated. _`.req.speed.why`: Any slower would be +gratuitous. + +_`.req.speed.fast`: The following operations shall be very fast: + +* _`.req.speed.fast.find.short`: FindShortResRange (the + operation used to meet `.req.ops.find.short.low`_) + FindShortResRangeHigh (the operation used to meet + `.req.ops.find.short.high`_). + + _`.req.speed.fast.find.short.why`: These two are used by the client + arena (design.mps.arena.client) and the VM arena + (design.mps.arena.vm_) for finding segments in page tables. The + operation will be used sufficiently often that its speed will + noticeably affect the overall speed of the MPS. They will be called + with a length equal to the number of pages in a segment. 
Typical + values of this length depend on the pool classes used and their + configuration, but we can expect length to be small (1 to 16) + usually. We can expect the Bit Table to be populated densely where + it is populated at all, that is set bits will tend to be clustered + together in subranges. + + .. _design.mps.arena.vm: arenavm + +* _`.req.speed.fast.find.long`: FindLongResRange (the operation + used to meet `.req.ops.find.long.low`_) + + _`.req.speed.fast.find.long.why`: Used in the allocator for + ``PoolClassAWL`` (design.mps.poolawl_), ``PoolClassAMS`` + (design.mps.poolams_), ``PoolClassEPVM`` (design.mps.poolepvm(0)). + Of these AWL and EPVM have speed requirements. For AWL the length of + range to be found will be the length of a Dylan table in words. + According to `mail.tony.1999-05-05.11-36`_, only ```` + objects are allocated in AWL (though not all ```` + objects are allocated in AWL), and the mean length of an + ```` object is 486 Words. No data for EPVM alas. + + .. _design.mps.poolawl: poolawl + .. _design.mps.poolams: poolams + .. _mail.tony.1999-05-05.11-36: https://info.ravenbrook.com/project/mps/mail/1999/05/05/11-36/0.txt + +_`.req.speed.fast.other.why`: We might expect mark and sweep pools to +make use of Bit Tables, the MPS has general requirements to support +efficient mark and sweep pools, so that imposes general speed +requirements on Bit Tables. + + +Non requirements +---------------- + +The following are not requirements but the current design could +support them with little modification or does support them. Often they +used to be requirements, but are no longer, or were added +speculatively or experimentally but aren't currently used. + +* _`.non-req.ops.test.range.same`: **RangesSame**. Test whether two + ranges that occupy the same positions in different Bit Tables are + the same. This used to be required by ``PoolClassAMS``, but is no + longer. Currently (1999-05-04) the functionality still exists. 
+ +* _`.non-req.ops.find.long.high`: **FindLongResetRangeHigh**. (see + `.req.ops.find`_) Of all candidate ranges, identify the ranges with + greatest *j* and of those find the one with least *i* (find the + rightmost range that has at least *L* reset bits and return all of + it). Provided for symmetry but only currently used by the BT tests + and ``cbstest.c``. + + +Background +---------- + +_`.background`: Originally Bit Tables were used and implemented +by ``PoolClassLO`` (design.mps.poollo_). It was +decided to lift them out into a separate module when designing the +Pool to manage Dylan Weak Tables which is also a mark and sweep pool +and will make use of Bit Tables (see design.mps.poolawl_). + +.. _design.mps.poollo: poollo +.. _design.mps.poolawl: poolawl + +_`.background.analysis`: analysis.mps.bt(0) contains +some of the analysis of the design decisions that were and were not +made in this document. + + +Clients +------- + +_`.clients`: Bit Tables are used throughout the MPS but the important +uses are in the client and VM arenas (design.mps.arena.client(0) and +design.mps.arena.vm_) a bit table is used to record whether each +page is free or not; several pool classes (``PoolClassLO``, +``PoolClassEPVM``, ``PoolClassAMS``) use bit tables to record which +locations are free and also to store colour. + + +Overview +-------- + +_`.over`: Mostly, the design is as simple as possible. The significant +complications are iteration (see `.iteration`_ below) and searching +(see `.fun.find-res-range`_ below) because both of these are required +to be fast. + + +Interface +--------- + +``typedef Word *BT`` + +_`.if.representation.abstract`: A Bit Table is represented by the type +``BT``. + +_`.if.declare`: The module declares a type ``BT`` and a prototype for +each of the functions below. The type is declared in impl.h.mpmtypes, +the prototypes are declared in impl.h.mpm. 
Some of the functions are +in fact implemented as macros in the usual way +(doc.mps.ref-man.if-conv(0).macro.std). + +_`.if.general.index`: Many of the functions specified below take +indexes. If otherwise unspecified an index must be in the interval [0, +*n*) (note, up to, but not including, *n*) where *n* is the number of +bits in the relevant Bit Table (as passed to the ``BTCreate()`` +function). + +_`.if.general.range`: Where a range is specified by two indexes (*base* +and *limit*), the index *base*, which specifies the beginning of the +range, must be in the interval [0, *n*), and the index *limit*, which +specifies the end of the range, must be in the interval [1, *n*] (note +can be *n*), and *base* must be strictly less than *limit* (empty +ranges are not allowed). Sometimes *i* and *j* are used instead of +*base* and *limit*. + +``Res BTCreate(BT *btReturn, Arena arena, Count n)`` + +_`.if.create`: Attempts to create a table of length ``n`` in the arena +control pool, putting the table in ``*btReturn``. Returns ``ResOK`` if +and only if the table is created OK. The initial values of the bits in +the table are undefined (so the client should probably call +``BTResRange()`` on the entire range before using the ``BT``). Meets +`.req.create`_. + +``void BTDestroy(BT t, Arena arena, Count n)`` + +_`.if.destroy`: Destroys the table ``t``, which must have been created +with ``BTCreate()``. The value of argument ``n`` must be same as the +value of the argument passed to ``BTCreate()``. Meets +`.req.destroy`_. + +``size_t BTSize(Count n)`` + +_`.if.size`: ``BTSize(n)`` returns the number of bytes needed for a Bit +Table of ``n`` bits. ``BTSize()`` is a macro, but ``(BTSize)(n)`` will +assert if ``n`` exceeds ``COUNT_MAX - MPS_WORD_WIDTH + 1``. This is +used by clients that allocate storage for the ``BT`` themselves. +Before ``BTCreate()`` and ``BTDestroy()`` were implemented that was the +only way to allocate a Bit Table, but is now deprecated. 
+
+``int BTGet(BT t, Index i)``
+
+_`.if.get`: ``BTGet(t, i)`` returns the ``i``-th bit of the table ``t``
+(that is, the image of ``i`` under the mapping). Meets
+`.req.ops.get`_.
+
+``void BTSet(BT t, Index i)``
+
+_`.if.set`: ``BTSet(t, i)`` sets the ``i``-th bit of the table ``t`` (to
+1). ``BTGet(t, i)`` will now return 1. Meets `.req.ops.set`_.
+
+``void BTRes(BT t, Index i)``
+
+_`.if.res`: ``BTRes(t, i)`` resets the ``i``-th bit of the table ``t``
+(to 0). ``BTGet(t, i)`` will now return 0. Meets `.req.ops.reset`_.
+
+``void BTSetRange(BT t, Index base, Index limit)``
+
+_`.if.set-range`: ``BTSetRange(t, base, limit)`` sets the range of bits
+[``base``, ``limit``) in the table ``t``. ``BTGet(t, x)`` will now
+return 1 for ``base`` ≤ ``x`` < ``limit``. Meets
+`.req.ops.set.range`_.
+
+``void BTResRange(BT t, Index base, Index limit)``
+
+_`.if.res-range`: ``BTResRange(t, base, limit)`` resets the range of
+bits [``base``, ``limit``) in the table ``t``. ``BTGet(t, x)`` will
+now return 0 for ``base`` ≤ ``x`` < ``limit``. Meets
+`.req.ops.reset.range`_.
+
+``Bool BTIsSetRange(BT bt, Index base, Index limit)``
+
+_`.if.test.range.set`: Returns ``TRUE`` if all the bits in the range
+[``base``, ``limit``) are set, ``FALSE`` otherwise. Meets
+`.req.ops.test.range.set`_.
+
+``Bool BTIsResRange(BT bt, Index base, Index limit)``
+
+_`.if.test.range.reset`: Returns ``TRUE`` if all the bits in the range
+[``base``, ``limit``) are reset, ``FALSE`` otherwise. Meets
+`.req.ops.test.range.reset`_.
+
+``Bool BTRangesSame(BT BTx, BT BTy, Index base, Index limit)``
+
+_`.if.test.range.same`: returns ``TRUE`` if ``BTGet(BTx,i)`` equals
+``BTGet(BTy,i)`` for ``i`` in [``base``, ``limit``), and ``FALSE``
+otherwise. Meets `.non-req.ops.test.range.same`_.
+
+_`.if.find.general`: There are four functions (below) to find reset
+ranges. 
All the functions have the same prototype (for symmetry):: + + Bool find(Index *baseReturn, Index *limitReturn, + BT bt, + Index searchBase, Index searchLimit, + Count length); + +where ``bt`` is the Bit Table in which to search. ``searchBase`` and +``searchLimit`` specify a subset of the Bit Table to use, the +functions will only find ranges that are subsets of [``searchBase``, +``searchLimit``) (when set, ``*baseReturn`` will never be less than +``searchBase`` and ``*limitReturn`` will never be greater than +``searchLimit``). ``searchBase`` and ``searchLimit`` specify a range +that must conform to the general range requirements for a range [*i*, +*j*), as per `.if.general.range`_ modified appropriately. ``length`` +is the number of contiguous reset bits to find; it must not be bigger +than ``searchLimit - searchBase`` (that would be silly). If a suitable +range cannot be found the function returns ``FALSE`` (0) and leaves +``*baseReturn`` and ``*limitReturn`` untouched. If a suitable range is +found then the function returns the range's base in ``*baseReturn`` +and its limit in ``*limitReturn`` and returns ``TRUE`` (1). + +``Bool BTFindShortResRange(Index *baseReturn, Index *limitReturn, BT bt, Index searchBase, Index searchLimit, Count length)`` + +_`.if.find-short-res-range`: Finds a range of reset bits in the table, +starting at ``searchBase`` and working upwards. This function is +intended to meet `.req.ops.find.short.low`_ so it will find the +leftmost range that will do, and never finds a range longer than the +requested length (the intention is that it will not waste time +looking). + +``Bool BTFindShortResRangeHigh(Index *baseReturn, Index *limitReturn, BT bt, Index searchBase, Index searchLimit, Count length)`` + +_`.if.find-short-res-range-high`: Finds a range of reset bits in the +table, starting at ``searchLimit`` and working downwards. 
This +function is intended to meet `.req.ops.find.short.high`_ so it will +find the rightmost range that will do, and never finds a range longer +than the requested length. + +``Bool BTFindLongResRange(Index *baseReturn, Index *limitReturn, BT bt, Index searchBase, Index searchLimit, Count length)`` + +_`.if.find-long-res-range`: Finds a range of reset bits in the table, +starting at ``searchBase`` and working upwards. This function is +intended to meet `.req.ops.find.long.low`_ so it will find the +leftmost range that will do and returns all of that range (which can +be longer than the requested length). + +``Bool BTFindLongResRangeHigh(Index *baseReturn, Index *limitReturn, BT bt, Index searchBase, Index searchLimit, Count length)`` + +_`.if.find-long-res-range-high`: Finds a range of reset bits in the +table, starting at ``searchLimit`` and working downwards. This +function is intended to meet `.req.ops.find.long.high`_ so it will +find the rightmost range that will do and returns all that range +(which can be longer than the requested length). + +``void BTCopyRange(BT fromBT, BT toBT, Index base, Index limit)`` + +_`.if.copy-range`: Overwrites the ``i``-th bit of ``toBT`` with the +``i``-th bit of ``fromBT``, for all ``i`` in [``base``, ``limit``). +Meets `.req.ops.copy.simple`_. + +``void BTCopyOffsetRange(BT fromBT, BT toBT, Index fromBase, Index fromLimit, Index toBase, Index toLimit)`` + +_`.if.copy-offset-range`: Overwrites the ``i``-th bit of ``toBT`` with +the ``j``-th bit of ``fromBT``, for all ``i`` in [``toBase``, +``toLimit``) and corresponding ``j`` in [``fromBase``, ``fromLimit``). +Each of these ranges must be the same size. This might be +significantly less efficient than ``BTCopyRange()``. Meets +`.req.ops.copy.offset`_. 
+ +``void BTCopyInvertRange(BT fromBT, BT toBT, Index base, Index limit)`` + +_`.if.copy-invert-range`: Overwrites the ``i``-th bit of ``toBT`` with +the inverse of the ``i``-th bit of ``fromBT``, for all ``i`` in +[``base``, ``limit``). Meets `.req.ops.copy.invert`_. + + +Detailed design +--------------- + + +Data structures +............... + +_`.datastructure`: Bit Tables will be represented as (a pointer to) an +array of ``Word``. A plain array is used instead of the more usual +design convention of implementing an abstract data type as a structure +with a signature (see guide.impl.c.adt(0)). +_`.datastructure.words.justify`: The type ``Word`` is used as it will +probably map to the object that can be most efficiently accessed on +any particular platform. _`.datastructure.non-adt.justify`: The usual +abstract data type convention was not followed because (i) The initial +design (drj) was lazy, (ii) Bit Tables are more likely to come in +convenient powers of two with the extra one or two words overhead. +However, the loss of checking is severe. Perhaps it would be better to +use the usual abstract data type style. + + +Functions +......... + +_`.fun.size`: ``BTSize()``. Since a Bit Table is an array of ``Word``, the +size of a Bit Table of *n* bits is simply the number of words that it +takes to store *n* bits times the number of bytes in a word. This is +``ceiling(n/MPS_WORD_WIDTH)*sizeof(Word).`` _`.fun.size.justify`: Since +there can be at most ``MPS_WORD_WIDTH - 1`` unused bits in the entire +table, this satisfies `.req.bit`_. + +_`.index`: The designs for the following functions use a decomposition +of a bit-index, ``i``, into two parts, ``iw``, ``ib``. + +* _`.index.word`: ``iw`` is the "word-index" which is the index into the + word array of the word that contains the bit referred to by the + bit-index. ``iw = i / MPS_WORD_WIDTH``. Since ``MPS_WORD_WIDTH`` is + a power of two, this is the same as ``iw = i >> MPS_WORD_SHIFT``. 
+  The latter expression is used in the code. _`.index.word.justify`: The
+  compiler is more likely to generate good code without the divide.
+
+* _`.index.sub-word`: ``ib`` is the "sub-word-index" which is the index
+  of the bit referred to by the bit-index in the above word. ``ib = i
+  % MPS_WORD_WIDTH``. Since ``MPS_WORD_WIDTH`` is a power of two, this
+  is the same as ``ib = i & ~((Word)-1<<MPS_WORD_SHIFT)``. The latter
+  expression is used in the code. _`.index.sub-word.justify`: The
+  compiler is more likely to generate good code without the modulus
+  operation. (On a 32-bit platform, for example, the decomposition
+  works out as ``iw = i>>5`` and ``ib = i&31``.)
+
+``ACT_ON_RANGE(base, limit, single_action, bits_action, word_action)``
+``ACT_ON_RANGE_HIGH(base, limit, single_action, bits_action, word_action)``
+
+_`.iteration`: Many of the following functions involve iteration over
+ranges in a Bit Table. This is performed on whole words rather than
+individual bits, whenever possible (to improve speed). This is
+implemented internally by the macros ``ACT_ON_RANGE()`` and
+``ACT_ON_RANGE_HIGH()`` for iterating over the range forwards and
+backwards respectively. These macros do not form part of the interface
+of the module, but are used extensively in the implementation. The
+macros are often used even when speed is not an issue because it
+simplifies the implementation and makes it more uniform. The iteration
+macros take the parameters ``base``, ``limit``, ``single_action``,
+``bits_action``, and ``word_action``:
+
+* ``base`` and ``limit`` are of type ``Index`` and define the range of
+  the iteration.
+
+* ``single_action`` is the name of a macro which will be used for
+  iterating over bits in the table individually. This macro must take
+  a single ``Index`` parameter corresponding to the index for the bit.
+  The expansion of the macro must not contain ``break`` or
+  ``continue`` because it will be called from within a loop from the
+  expansion of ``ACT_ON_RANGE()``.
+
+* ``bits_action`` is the name of a macro which will be used for
+  iterating over part-words. 
This macro must take parameters + ``wordIndex``, ``base``, ``limit`` where ``wordIndex`` is the index + into the array of words, and ``base`` and ``limit`` define a range + of bits within the indexed word. + +* ``word_action`` is the name of a macro which will be used for + iterating over whole-words. This macro must take the single + parameter ``wordIndex`` which is the index of the whole-word in the + array. The expansion of the macro must not contain ``break`` or + ``continue`` because it will be called from within a loop from the + expansion of ``ACT_ON_RANGE()``. + +_`.iteration.exit`: The expansion of the ``single_action``, +``bits_action``, and ``word_action`` macros is allowed to contain +``return`` or ``goto`` to terminate the iteration early. This is used +by the test (`.fun.test.range.set`_) and find (`.fun.find`_) +operations. + +_`.iteration.small`: If the range is sufficiently small only the +``single_action`` macro will be used, as this is more efficient in +practice. The choice of what constitutes a small range is made +entirely on the basis of experimental performance results (and +currently, 1999-04-27, a "small range" is 6 bits or fewer. See +change.mps.epcore.brisling.160181 for some justification). Otherwise +(for a bigger range) ``bits_action`` is used on the part words at +either end of the range (or the whole of the range it if it fits in a +single word), and ``word_action`` is used on the words that comprise +the inner portion of the range. + +The implementation of ``ACT_ON_RANGE()`` (and ``ACT_ON_RANGE_HIGH()``) is +simple enough. It decides which macros it should invoke and invokes +them. ``single_action`` and ``word_action`` are invoked inside loops. + +_`.fun.get`: ``BTGet()``. The bit-index will be converted in the usual +way, see `.index`_. 
The relevant ``Word`` will be read out of the Bit +Table and shifted right by the sub-``Word`` index (this brings the +relevant bit down to the least significant bit of the ``Word``), the +``Word`` will then be masked with 1, producing the answer. + +_`.fun.set`: ``BTSet()``. + +_`.fun.res`: ``BTRes()``. + +In both ``BTSet()`` and ``BTRes()`` a mask is constructed by shifting 1 +left by the sub-word-index (see `.index`_). For ``BTSet()`` the mask is +or-ed into the relevant word (thereby setting a single bit). For +``BTRes()`` the mask is inverted and and-ed into the relevant word +(thereby resetting a single bit). + +_`.fun.set-range`: ``BTSetRange()``. ``ACT_ON_RANGE()`` (see `.iteration`_ +above) is used with macros that set a single bit (using ``BTSet()``), +set a range of bits in a word, and set a whole word. + +_`.fun.res-range`: ``BTResRange()`` This is implemented similarly to +``BTSetRange()`` (`.fun.set-range`_) except using ``BTRes()`` and reverse +bit-masking logic. + +_`.fun.test.range.set`: ``BTIsSetRange()``. ``ACT_ON_RANGE()`` (see +`.iteration`_ above) is used with macros that test whether all the +relevant bits are set; if some of the relevant bits are not set then +``return FALSE`` is used to terminate the iteration early and return +from the ``BTIsSetRange()`` function. If the iteration completes then +``TRUE`` is returned. + +_`.fun.test.range.reset`: ``BTIsResRange()``. As for ``BTIsSetRange()`` +(`.fun.test.range.set`_ above) but testing whether the bits are reset. + +_`.fun.test.range.same`: ``BTRangesSame()``. As for ``BTIsSetRange()`` +(`.fun.test.range.set`_ above) but testing whether corresponding +ranges in the two Bit Tables are the same. Note there are no speed +requirements, but ``ACT_ON_RANGE()`` is used for simplicity and +uniformity. 
+
+_`.fun.find`: The four external find functions (``BTFindShortResRange()``,
+``BTFindShortResRangeHigh()``, ``BTFindLongResRange()``,
+``BTFindLongResRangeHigh()``) simply call through to one of the two
+internal functions: ``BTFindResRange()`` and ``BTFindResRangeHigh()``.
+
+``Bool BTFindResRange(Index *baseReturn, Index *limitReturn, BT bt, Index searchBase, Index searchLimit, Count minLength, Count maxLength)``
+``Bool BTFindResRangeHigh(Index *baseReturn, Index *limitReturn, BT bt, Index searchBase, Index searchLimit, Count minLength, Count maxLength)``
+
+There are two length parameters, one specifying the minimum length of
+the range to be found, the other the maximum length. For
+``BTFindShort()`` and ``BTFindShortHigh()``, ``maxLength`` is equal to
+``minLength`` when passed; for ``BTFindLong()`` and ``BTFindLongHigh()``,
+``maxLength`` is equal to the maximum possible range, namely
+``searchLimit - searchBase``.
+
+_`.fun.find-res-range`: ``BTFindResRange()``. Iterate within the search
+boundaries, identifying candidate ranges by searching for a reset bit.
+The Boyer–Moore algorithm [Boyer_Moore_1977]_ is used (it's particularly
+easy to implement when there are only two symbols, 0 and 1, in the
+alphabet). For each candidate range, iterate backwards over the bits
+from the end of the range towards the beginning. If a set bit is
+found, this candidate has failed and a new candidate range is
+selected. If when scanning for the set bit a range of reset bits was
+found before finding the set bit, then this (small) range of reset
+bits is used as the start of the next candidate. Additionally the end
+of this small range of reset bits (the end of the failed candidate
+range) is remembered so that we don't have to iterate over this range
+again. But if no reset bits were found in the candidate range, then
+iterate again (starting from the end of the failed candidate) to look
+for one. 
If during the backwards search no set bit is found, then we +have found a sufficiently large range of reset bits; now extend the +valid range as far as possible up to the maximum length by iterating +forwards up to the maximum limit looking for a set bit. The iterations +make use of the ``ACT_ON_RANGE()`` and ``ACT_ON_RANGE_HIGH()`` macros, +which can use ``goto`` to effect an early termination of the iteration +when a set/reset (as appropriate) bit is found. The macro +``ACTION_FIND_SET_BIT()`` is used in the iterations. It efficiently +finds the first (that is, with lowest index or weight) set bit in a +word or subword. + +_`.fun.find-res-range.improve`: Various other performance improvements +have been suggested in the past, including some from +request.epcore.170534_. Here is a list of potential improvements which +all sound plausible, but which have not led to performance improvements +in practice: + +.. _request.epcore.170534: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/epcore/170534 + +* _`.fun.find-res-range.improve.step.partial`: When the top index in a + candidate range fails, skip partial words as well as whole words, + using, for example, lookup tables. + +* _`.fun.find-res-range.improve.lookup`: When testing a candidate run, + examine multiple bits at once (for example, 8), using lookup tables + for (for example) index of first set bit, index of last set bit, + number of reset bits, length of maximum run of reset bits. + +_`.fun.find-res-range-high`: ``BTFindResRangeHigh()``. Exactly the same +algorithm as in ``BTFindResRange()`` (see `.fun.find-res-range`_ above), +but moving over the table in the opposite direction. + +_`.fun.copy-simple-range`: ``BTCopyRange()``. Uses ``ACT_ON_RANGE()`` (see +`.iteration`_ above) with the obvious implementation. Should be fast. + +_`.fun.copy-offset-range`: ``BTCopyOffsetRange()``. Uses a simple +iteration loop, reading bits with ``BTGet()`` and setting them with +``BTSet()``. 
Doesn't use ``ACT_ON_RANGE()`` because the two ranges will +not, in general, be similarly word-aligned. + +_`.fun.copy-invert-range`: ``BTCopyInvertRange()``. Uses ``ACT_ON_RANGE()`` +(see `.iteration`_ above) with the obvious implementation. Should be +fast---although there are no speed requirements. + + +Testing +------- + +_`.test`: The following tests are available or have been used during +development. + +_`.test.btcv`: ``btcv.c``. This is supposed to be a coverage test, +intended to execute all of the module's code in at least some minimal +way. + +_`.test.landtest`: ``landtest.c``. This is a test of the ``Land`` +module (design.mps.land_) and its concrete implementations. It +compares the functional operation of a ``Land`` with that of a ``BT`` +so is a good functional test of either module. + +.. _design.mps.land: land + +_`.test.mmqa.120`: MMQA_test_function!210.c. This is used because it has +a fair amount of segment allocation and freeing so exercises the arena +code that uses Bit Tables. + +_`.test.bttest`: ``bttest.c``. This is an interactive test that can be +used to exercise some of the ``BT`` functionality by hand. + +_`.test.dylan`: It is possible to modify Dylan so that it uses Bit +Tables more extensively. See change.mps.epcore.brisling.160181 TEST1 +and TEST2. + + +References +---------- + +.. [Boyer_Moore_1977] + "A Fast String Searching Algorithm"; + Robert S. Boyer and J. Strother Moore; + Communications of the ACM 20(10):762–772; + 1977; + . + + +Document History +---------------- + +- 1997-03-04 David Jones. Initial drafts. + +- 1999-04-29 David Jones. Prepared for review. Added full requirements + section. Made notation more consistent throughout. Documented all + functions. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-03-12 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/buffer.txt b/mps/design/buffer.txt new file mode 100644 index 00000000000..48caf93e98b --- /dev/null +++ b/mps/design/buffer.txt @@ -0,0 +1,748 @@ +.. mode: -*- rst -*- + +Allocation buffers and allocation points +======================================== + +:Tag: design.mps.buffer +:Author: Richard Brooksby +:Date: 1996-09-02 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: buffers; design + + +Introduction +------------ + +_`.scope`: This is the design of allocation buffers and allocation +points. 
+ +_`.purpose`: The purpose of this document is to record design +decisions made concerning allocation buffers and allocation points and +justify those decisions in terms of requirements. + +_`.readership`: The document is intended for reading by any MPS +developer. + + +Glossary +-------- + +trapped + + _`.def.trapped`: The buffer is in a state such that the MPS gets + to know about the next use of that buffer. + + +Source +------ + +_`.source.mail`: Much of the juicy stuff about buffers is only +floating around in mail discussions. You might like to try searching +the archives if you can't find what you want here. + +.. note:: + + Mail archives are only accessible to Ravenbrook staff. RHSK + 2006-06-09. + +_`.source.synchronize`: For a discussion of the synchronization +issues, see `mail.richard.1995-05-19.17-10`_, +`mail.ptw.1995-05-19.19-15`_, and `mail.richard.1995-05-24.10-18`_. + +.. _mail.richard.1995-05-19.17-10: https://info.ravenbrook.com/project/mps/mail/1995/05/19/17-10/0.txt +.. _mail.ptw.1995-05-19.19-15: https://info.ravenbrook.com/project/mps/mail/1995/05/19/19-15/0.txt +.. _mail.richard.1995-05-24.10-18: https://info.ravenbrook.com/project/mps/mail/1995/05/24/10-18/0.txt + +.. note:: + + I believe that the sequence for flip in PTW's message is + incorrect. The operations should be in the other order. DRJ. + +_`.source.interface`: For a description of the buffer interface in C +prototypes, see `mail.richard.1997-04-28.09-25`_. + +.. _mail.richard.1997-04-28.09-25: https://info.ravenbrook.com/project/mps/mail/1997/04/28/09-25/0.txt + +_`.source.qa`: Discussions with QA were useful in pinning down the +semantics and understanding of some obscure but important boundary +cases. 
See the thread with subject "notes on our allocation points +discussion" and messages `mail.richard.tucker.1997-05-12.09-45`_, +`mail.ptw.1997-05-12.12-46`_, `mail.richard.1997-05-12.13-15`_, +`mail.richard.1997-05-12.13-28`_, `mail.ptw.1997-05-13.15-15`_, +`mail.sheep.1997-05-14.11-52`_, `mail.rit.1997-05-15.09-19`_, +`mail.ptw.1997-05-15.21-22`_, `mail.ptw.1997-05-15.21-35`_, +`mail.rit.1997-05-16.08-02`_, `mail.rit.1997-05-16.08-42`_, +`mail.ptw.1997-05-16.12-36`_, `mail.ptw.1997-05-16.12-47`_, +`mail.richard.1997-05-19.15-46`_, `mail.richard.1997-05-19.15-56`_, +and `mail.ptw.1997-05-20.20-47`_. + +.. _mail.richard.tucker.1997-05-12.09-45: https://info.ravenbrook.com/project/mps/mail/1997/05/12/09-45/0.txt +.. _mail.ptw.1997-05-12.12-46: https://info.ravenbrook.com/project/mps/mail/1997/05/12/12-46/1.txt +.. _mail.richard.1997-05-12.13-15: https://info.ravenbrook.com/project/mps/mail/1997/05/12/13-15/0.txt +.. _mail.richard.1997-05-12.13-28: https://info.ravenbrook.com/project/mps/mail/1997/05/12/13-28/0.txt +.. _mail.ptw.1997-05-13.15-15: https://info.ravenbrook.com/project/mps/mail/1997/05/13/15-15/0.txt +.. _mail.sheep.1997-05-14.11-52: https://info.ravenbrook.com/project/mps/mail/1997/05/14/11-52/0.txt +.. _mail.rit.1997-05-15.09-19: https://info.ravenbrook.com/project/mps/mail/1997/05/15/09-19/0.txt +.. _mail.ptw.1997-05-15.21-22: https://info.ravenbrook.com/project/mps/mail/1997/05/15/21-22/0.txt +.. _mail.ptw.1997-05-15.21-35: https://info.ravenbrook.com/project/mps/mail/1997/05/15/21-35/0.txt +.. _mail.rit.1997-05-16.08-02: https://info.ravenbrook.com/project/mps/mail/1997/05/16/08-02/0.txt +.. _mail.rit.1997-05-16.08-42: https://info.ravenbrook.com/project/mps/mail/1997/05/16/08-42/0.txt +.. _mail.ptw.1997-05-16.12-36: https://info.ravenbrook.com/project/mps/mail/1997/05/16/12-36/0.txt +.. _mail.ptw.1997-05-16.12-47: https://info.ravenbrook.com/project/mps/mail/1997/05/16/12-47/0.txt +.. 
_mail.richard.1997-05-19.15-46: https://info.ravenbrook.com/project/mps/mail/1997/05/19/15-46/0.txt +.. _mail.richard.1997-05-19.15-56: https://info.ravenbrook.com/project/mps/mail/1997/05/19/15-56/0.txt +.. _mail.ptw.1997-05-20.20-47: https://info.ravenbrook.com/project/mps/mail/1997/05/20/20-47/0.txt + + + +Requirements +------------ + +_`.req.fast`: Allocation must be very fast. + +_`.req.thread-safe`: Must run safely in a multi-threaded environment. + +_`.req.no-synch`: Must avoid the use of thread-synchronization. +(`.req.fast`_) + +_`.req.manual`: Support manual memory management. + +_`.req.exact`: Support exact collectors. + +_`.req.ambig`: Support ambiguous collectors. + +_`.req.count`: Must record (approximately) the amount of allocation (in bytes). + +.. note:: + + Actually not a requirement any more, but once was put forward as a + Dylan requirement. Bits of the code still reflect this + requirement. See request.dylan.170554_. + +.. _request.dylan.170554: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170554 + + +Classes +------- + +_`.class.hierarchy`: The ``Buffer`` data structure is designed to be +subclassable (see design.mps.protocol_). + +.. _design.mps.protocol: protocol + +_`.class.hierarchy.buffer`: The basic buffer class (``BufferClass``) +supports basic allocation-point buffering, and is appropriate for +those manual pools which don't use segments (`.req.manual`_). The +``Buffer`` class doesn't support reference ranks (that is, the buffers +have ``RankSetEMPTY``). Clients may use ``BufferClass`` directly, or +create their own subclasses (see `.subclassing`_). + +_`.class.hierarchy.segbuf`: Class ``SegBufClass`` is also provided for +the use of pools which additionally need to associate buffers with +segments. ``SegBufClass`` is a subclass of ``BufferClass``. Manual +pools may find it convenient to use ``SegBufClass``, but it is +primarily intended for automatic pools (`.req.exact`_, `.req.ambig`_). 
+An instance of ``SegBufClass`` may be attached to a region of memory +that lies within a single segment. The segment is associated with the +buffer, and may be accessed with the ``BufferSeg()`` function. +``SegBufClass`` also supports references at any rank set. Hence this +class or one of its subclasses should be used by all automatic pools +(with the possible exception of leaf pools). The rank sets of buffers +and the segments they are attached to must match. Clients may use +``SegBufClass`` directly, or create their own subclasses (see +`.subclassing`_). + +_`.class.hierarchy.rankbuf`: Class ``RankBufClass`` is also provided +as a subclass of ``SegBufClass``. The only way in which this differs +from its superclass is that the rankset of a ``RankBufClass`` is set +during initialization to the singleton rank passed as an additional +parameter to ``BufferCreate()``. Instances of ``RankBufClass`` are of +the same type as instances of ``SegBufClass``, that is, ``SegBuf``. +Clients may use ``RankBufClass`` directly, or create their own +subclasses (see `.subclassing`_). + +_`.class.create`: The buffer creation functions (``BufferCreate()`` +and ``BufferCreateV()``) take a ``class`` parameter, which determines +the class of buffer to be created. + +_`.class.choice`: Pools which support buffered allocation should +specify a default class for buffers. This class will be used when a +buffer is created in the normal fashion by MPS clients (for example by +a call to ``mps_ap_create()``). Pools specify the default class by +means of the ``bufferClass`` field in the pool class object. This +should be a pointer to a function of type ``PoolBufferClassMethod``. +The normal class "Ensure" function (for example +``EnsureBufferClass()``) has the appropriate type. + +_`.subclassing`: Pools may create their own subclasses of the standard +buffer classes. This is sometimes useful if the pool needs to add an +extra field to the buffer. 
The convenience macro
+``DEFINE_BUFFER_CLASS()`` may be used to define subclasses of buffer
+classes. See design.mps.protocol.int.define-special_.
+
+.. _design.mps.protocol.int.define-special: protocol#.int.define-special
+
+_`.replay`: To work with the allocation replayer (see
+design.mps.telemetry.replayer_), the buffer class has to emit an event
+for each call to an external interface, containing all the parameters
+passed by the user. If a new event type is required to carry this
+information, the replayer (impl.c.eventrep) must then be extended to
+recreate the call.
+
+.. _design.mps.telemetry.replayer: telemetry#.replayer
+
+_`.replay.pool-buffer`: The replayer must also be updated if the
+association of buffer class to pool or the buffer class hierarchy is
+changed.
+
+_`.class.method`: Buffer classes provide the following methods (these
+should not be confused with the pool class methods related to the
+buffer protocol, described in `.method.create`_ and following
+sections):
+
+``typedef Res (*BufferInitMethod)(Buffer buffer, Pool pool, ArgList args)``
+
+_`.class.method.init`: ``init()`` is a class-specific initialization
+method called from ``BufferInit()``. It receives the keyword arguments
+passed to ``BufferInit()``. Client-defined methods must call their
+superclass method (via a next-method call) before performing any
+class-specific behaviour. _`.replay.init`: The ``init()`` method
+should emit a ``BufferInit`` event (if there aren't any extra
+parameters, `` = ""``).
+
+``typedef void (*BufferAttachMethod)(Buffer buffer, Addr base, Addr limit, Addr init, Size size)``
+
+_`.class.method.attach`: ``attach()`` is a class-specific method
+called whenever a buffer is attached to memory, via
+``BufferAttach()``. Client-defined methods must call their superclass
+method (via a next-method call) before performing any class-specific
+behaviour.
+ +``typedef void (*BufferDetachMethod)(Buffer buffer)`` + +_`.class.method.detach`: ``detach()`` is a class-specific method +called whenever a buffer is detached from memory, via +``BufferDetach()``. Client-defined methods must call their superclass +method (via a next-method call) after performing any class-specific +behaviour. + +``typedef Seg (*BufferSegMethod)(Buffer buffer)`` + +_`.class.method.seg`: ``seg()`` is a class-specific accessor method +which returns the segment attached to a buffer (or ``NULL`` if there +isn't one). It is called from ``BufferSeg()``. Clients should not need +to define their own methods for this. + +``typedef RankSet (*BufferRankSetMethod)(Buffer buffer)`` + +_`.class.method.rankSet`: ``rankSet()`` is a class-specific accessor +method which returns the rank set of a buffer. It is called from +``BufferRankSet()``. Clients should not need to define their own +methods for this. + +``typedef void (*BufferSetRankSetMethod)(Buffer buffer, RankSet rankSet)`` + +_`.class.method.setRankSet`: ``setRankSet()`` is a class-specific +setter method which sets the rank set of a buffer. It is called from +``BufferSetRankSet()``. Clients should not need to define their own +methods for this. + + +Logging +------- + +_`.logging.control`: Buffers have a separate control for whether they +are logged or not, this is because they are particularly high volume. +This is a Boolean flag (``bufferLogging``) in the ``ArenaStruct``. + + +Measurement +----------- + +_`.count`: Counting the allocation volume is done by maintaining two +fields in the buffer struct: + +_`.count.fields`: ``fillSize``, ``emptySize``. + +_`.count.monotonic`: both of these fields are monotonically +increasing. + +_`.count.fillsize`: ``fillSize`` is an accumulated total of the size +of all the fills (as a result of calling the ``PoolClass`` +``BufferFill()`` method) that happen on the buffer. 
+
+_`.count.emptysize`: ``emptySize`` is an accumulated total of the size of
+all the empties that happen on the buffer (which are notified to the
+pool using the ``PoolClass`` ``BufferEmpty()`` method).
+
+_`.count.generic`: These fields are maintained by the generic buffer
+code in ``BufferAttach()`` and ``BufferDetach()``.
+
+_`.count.other`: Similar count fields are maintained in the arena.
+They are maintained on an internal (buffers used internally by the
+MPS) and external (buffers used for mutator allocation points) basis.
+The fields are also updated by the buffer code. The fields are:
+
+- in the arena, ``fillMutatorSize``, ``fillInternalSize``,
+  ``emptyMutatorSize``, ``emptyInternalSize``, and
+  ``allocMutatorSize`` (5 fields).
+
+_`.count.alloc.how`: The amount of allocation in the buffer just
+after an empty is ``fillSize - emptySize``. At other times this
+computation will include space that the buffer has the use of (between
+base and init) but which may not get allocated in (because the
+remaining space may be too large for the next reserve so some or all
+of it may get emptied). The arena field ``allocMutatorSize`` is
+incremented by the allocated size (between base and init)
+whenever a buffer is detached. Symmetrically this field is decremented
+by the pre-allocated size (between base and init) whenever
+a buffer is attached. The overall count is asymptotically correct.
+
+_`.count.type`: All the count fields are type double.
+
+_`.count.type.justify`: This is because double is the type most likely
+to give us enough precision. Because of the lack of genuine
+requirements the type isn't so important. It's nice to have it more
+precise than long. Which double usually is.
+
+
+Notes from the whiteboard
+-------------------------
+
+Requirements
+
+- atomic update of words
+- guarantee order of reads and write to certain memory locations.
+
+Flip
+
+- limit:=0
+- record init for scanner
+
+Commit
+
+- init:=alloc
+- if(limit = 0) ...
+- L written only by MM
+- A written only by client (except during synchronized MM op)
+- I ditto
+- I read by MM during flip
+
+States
+
+- busy
+- ready
+- trapped
+- reset
+
+.. note::
+
+    There are many more states. DRJ.
+
+Misc
+
+- During buffer ops all field values can change. Might trash perfectly
+  good ("valid"?) object if pool isn't careful.
+
+
+Synchronization
+---------------
+
+Buffers provide a loose form of synchronization between the mutator
+and the collector.
+
+The crucial synchronization issues are between the operation the pool
+performs on flip and the mutator's commit operation.
+
+Commit
+
+- read init
+- write init
+- Memory Barrier
+- read ``limit``
+
+Flip
+
+- write ``limit``
+- Memory Barrier
+- read init
+
+Commit consists of two parts. The first is the update to init.
+This is a declaration that the new object just before init is now
+correctly formatted and can be scanned. The second is a check to see
+if the buffer has been "tripped". The ordering of the two parts is
+crucial.
+
+Note that the declaration that the object is correctly formatted is
+independent of whether the buffer has been tripped or not. In
+particular a pool can scan up to the init pointer (including the newly
+declared object) whether or not the pool will cause the commit to
+fail. In the case where the pool scans the object, but then causes the
+commit to fail (and presumably the allocation to occur somewhere
+else), the pool will have scanned a "dead" object, but this is just
+another example of conservatism in the general sense.
+
+Note that the read of init in the Flip sequence can in fact be
+arbitrarily delayed (as long as it is read before a buffered segment
+is scanned).
+
+On processors with Relaxed Memory Order (such as the DEC Alpha),
+Memory Barriers will need to be placed at the points indicated.
+ +:: + + * DESIGN + * + * An allocation buffer is an interface to a pool which provides + * very fast allocation, and defers the need for synchronization in + * a multi-threaded environment. + * + * Pools which contain formatted objects must be synchronized so + * that the pool can know when an object is valid. Allocation from + * such pools is done in two stages: reserve and commit. The client + * first reserves memory, then initializes it, then commits. + * Committing the memory declares that it contains a valid formatted + * object. Under certain conditions, some pools may cause the + * commit operation to fail. (See the documentation for the pool.) + * Failure to commit indicates that the whole allocation failed and + * must be restarted. When using a pool which introduces the + * possibility of commit failing, the allocation sequence could look + * something like this: + * + * do { + * res = BufferReserve(&p, buffer, size); + * if(res != ResOK) return res; // allocation fails, reason res + * initialize(p); // p now points at valid object + * } while(!BufferCommit(buffer, p, size)); + * + * Pools which do not contain formatted objects can use a one-step + * allocation as usual. Effectively any random rubbish counts as a + * "valid object" to such pools. + * + * An allocation buffer is an area of memory which is pre-allocated + * from a pool, plus a buffer descriptor, which contains, inter + * alia, four pointers: base, init, alloc, and limit. Base points + * to the base address of the area, limit to the last address plus + * one. Init points to the first uninitialized address in the + * buffer, and alloc points to the first unallocated address. + * + * L . - - - - - . ^ + * | | Higher addresses -' + * | junk | + * | | the "busy" state, after Reserve + * A |-----------| + * | uninit | + * I |-----------| + * | init | + * | | Lower addresses -. + * B `-----------' v + * + * L . - - - - - . 
^ + * | | Higher addresses -' + * | junk | + * | | the "ready" state, after Commit + * A=I |-----------| + * | | + * | | + * | init | + * | | Lower addresses -. + * B `-----------' v + * + * Access to these pointers is restricted in order to allow + * synchronization between the pool and the client. The client may + * only write to init and alloc, but in a restricted and atomic way + * detailed below. The pool may read the contents of the buffer + * descriptor at _any_ time. During calls to the fill and trip + * methods, the pool may update any or all of the fields + * in the buffer descriptor. The pool may update the limit at _any_ + * time. + * + * Access to buffers by these methods is not synchronized. If a buffer + * is to be used by more than one thread then it is the client's + * responsibility to ensure exclusive access. It is recommended that + * a buffer be used by only a single thread. + * + * [Only one thread may use a buffer at once, unless the client + * places a mutual exclusion around the buffer access in the usual + * way. In such cases it is usually better to create one buffer for + * each thread.] + * + * Here are pseudo-code descriptions of the reserve and commit + * operations. These may be implemented in-line by the client. + * Note that the client is responsible for ensuring that the size + * (and therefore the alloc and init pointers) are aligned according + * to the buffer's alignment. 
+ * + * Reserve(buf, size) ; size must be aligned to pool + * if buf->limit - buf->alloc >= size then + * buf->alloc +=size ; must be atomic update + * p = buf->init + * else + * res = BufferFill(&p, buf, size) ; buf contents may change + * + * Commit(buf, p, size) + * buf->init = buf->alloc ; must be atomic update + * if buf->limit == 0 then + * res = BufferTrip(buf, p, size) ; buf contents may change + * else + * res = True + * (returns True on successful commit) + * + * The pool must allocate the buffer descriptor and initialize it by + * calling BufferInit. The descriptor this creates will fall + * through to the fill method on the first allocation. In general, + * pools should not assign resources to the buffer until the first + * allocation, since the buffer may never be used. + * + * The pool may update the base, init, alloc, and limit fields when + * the fallback methods are called. In addition, the pool may set + * the limit to zero at any time. The effect of this is either: + * + * 1. cause the _next_ allocation in the buffer to fall through to + * the buffer fill method, and allow the buffer to be flushed + * and relocated; + * + * 2. cause the buffer trip method to be called if the client was + * between reserve and commit. + * + * A buffer may not be relocated under other circumstances because + * there is a race between updating the descriptor and the client + * allocation sequence. + + +Interface +--------- + +``Res BufferCreate(Buffer *bufferReturn, BufferClass class, Pool pool, Bool isMutator, ArgList args)`` + +_`.method.create`: Create an allocation buffer in a pool. The buffer +is created in the "ready" state. + +A buffer structure is allocated from the space control pool and +partially initialized (in particularly neither the signature nor the +serial field are initialized). The pool class's ``bufferCreate()`` +method is then called. 
This method can update (some undefined subset +of) the fields of the structure; it should return with the buffer in +the "ready" state (or fail). The remainder of the initialization then +occurs. + +If and only if successful then a valid buffer is returned. + +``void BufferDestroy(Buffer buffer)`` + +_`.method.destroy`: Free a buffer descriptor. The buffer must be in +the "ready" state, that is, not between a Reserve and Commit. +Allocation in the area of memory to which the descriptor refers must +cease after ``BufferDestroy()`` is called. + +Destroying an allocation buffer does not affect objects which have +been allocated, it just frees resources associated with the buffer +itself. + +The pool class's ``bufferDestroy()`` method is called and then the +buffer structure is uninitialized and freed. + +``Bool BufferCheck(Buffer buffer)`` + +_`.method.check`: The check method is straightforward, the non-trivial dependencies checked are: + +- The ordering constraints between base, init, alloc, and limit. +- The alignment constraints on base, init, alloc, and limit. +- That the buffer's rank is identical to the segment's rank. + +``void BufferAttach(Buffer buffer, Addr base, Addr limit, Addr init, Size size)`` + +_`.method.attach`: Set the base, init, alloc, and limit fields so that +the buffer is ready to start allocating in area of memory. The alloc +field is set to ``init + size``. + +_`.method.attach.unbusy`: ``BufferAttach()`` must only be applied to +buffers that are not busy. + +``void BufferDetach(Buffer buffer, Pool pool)`` + +_`.method.detach`: Set the seg, base, init, alloc, and limit fields to +zero, so that the next reserve request will call the fill method. + +_`.method.detach.unbusy`: ``BufferDetach()`` must only be applied to +buffers that are not busy. + +``Bool BufferIsReset(Buffer buffer)`` + +_`.method.isreset`: Returns ``TRUE`` if and only if the buffer is in the +reset state, that is, with base, init, alloc, and limit all set to +zero. 
+
+``Bool BufferIsReady(Buffer buffer)``
+
+_`.method.isready`: Returns ``TRUE`` if and only if the buffer is not
+between a reserve and commit. The result is only reliable if the
+client is not currently using the buffer, since it may update the
+alloc and init pointers asynchronously.
+
+``mps_ap_t BufferAP(Buffer buffer)``
+
+Returns the ``APStruct`` substructure of a buffer.
+
+``Buffer BufferOfAP(mps_ap_t ap)``
+
+_`.method.ofap`: Return the buffer which owns an ``APStruct``.
+
+_`.method.ofap.thread-safe`: ``BufferOfAP()`` must be thread safe (see
+impl.c.mpsi.thread-safety). This is achieved simply because the
+underlying operation involved is simply a subtraction.
+
+``Arena BufferArena(Buffer buffer)``
+
+_`.method.arena`: Returns the arena which owns a buffer.
+
+_`.method.arena.thread-safe`: ``BufferArena()`` must be thread safe
+(see impl.c.mpsi.thread-safety). This is achieved simply because the
+underlying operation is a read of shared-non-mutable data (see
+design.mps.thread-safety_).
+
+.. _design.mps.thread-safety: thread-safety
+
+``Pool BufferPool(Buffer buffer)``
+
+Returns the pool to which a buffer is attached.
+
+``Res BufferReserve(Addr *pReturn, Buffer buffer, Size size)``
+
+_`.method.reserve`: Reserves memory from an allocation buffer.
+
+This is a provided version of the reserve procedure described above.
+The size must be aligned according to the buffer alignment. If
+successful, ``ResOK`` is returned and ``*pReturn`` is updated with a
+pointer to the reserved memory. Otherwise ``*pReturn`` is not touched.
+The reserved memory is not guaranteed to have any particular contents.
+The memory must be initialized with a valid object (according to the
+pool to which the buffer belongs) and then passed to the
+``BufferCommit()`` method (see below). ``BufferReserve()`` may not be
+applied twice to a buffer without a ``BufferCommit()`` in-between. In
+other words, Reserve/Commit pairs do not nest.
+ +``Res BufferFill(Addr *pReturn, Buffer buffer, Size size)`` + +_`.method.fill`: Refills an empty buffer. If there is not enough space +in a buffer to allocate in-line, ``BufferFill()`` must be called to +"refill" the buffer. + +``Bool BufferCommit(Buffer buffer, Addr p, Size size)`` + +_`.method.commit`: Commit memory previously reserved. + +``BufferCommit()`` notifies the pool that memory which has been +previously reserved (see above) has been initialized with a valid +object (according to the pool to which the buffer belongs). The +pointer ``p`` must be the same as that returned by +``BufferReserve()``, and the size must match the size passed to +``BufferReserve()``. + +``BufferCommit()`` may not be applied twice to a buffer without a +reserve in between. In other words, objects must be reserved, +initialized, then committed only once. + +Commit returns ``TRUE`` if successful, ``FALSE`` otherwise. If commit +fails and returns ``FALSE``, the client may try to allocate again by +going back to the reserve stage, and may not use the memory at ``p`` +again for any purpose. + +Some classes of pool may cause commit to fail under rare +circumstances. + +``void BufferTrip(Buffer buffer, Addr p, Size size)`` + +_`.method.trip`: Act on a tripped buffer. The pool which owns a buffer +may asynchronously set the buffer limit to zero in order to get +control over the buffer. If this occurs after a ``BufferReserve()`` +(but before the corresponding commit), then the ``BufferCommit()`` +method calls ``BufferTrip()`` and the ``BufferCommit()`` method +returns with the return value of ``BufferTrip()``. + +_`.method.trip.precondition`: At the time trip is called, from +``BufferCommit()``, the following are true: + +- _`.method.trip.precondition.limit`: ``limit == 0`` +- _`.method.trip.precondition.init`: ``init == alloc`` +- _`.method.trip.precondition.p`: ``p + size == alloc`` + + +Diagrams +-------- + +Here are a number of diagrams showing how buffers behave. 
In general, +the horizontal axis corresponds to mutator action (reserve, commit) +and the vertical axis corresponds to collector action. I'm not sure +which of the diagrams are the same as each other, and which are best +or most complete when they are different, but they all attempt to show +essentially the same information. It's very difficult to get all the +details in. These diagrams were drawn by Richard Brooksby, Richard +Tucker, Gavin Matthews, and others in April 1997. In general, the +later diagrams are, I suspect, more correct, complete and useful than +the earlier ones. I have put them all here for the record. Richard +Tucker, 1998-02-09. + +Buffer Diagram: +Buffer States + +Buffer States (3-column) +Buffer States (4-column) +Buffer States (gavinised) +Buffer States (interleaved) +Buffer States (richardized) + +[missing diagrams] + + +Document History +---------------- + +- 1996-09-02 RB_ incomplete design + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2007-03-22 RHSK Created Guide. + +- 2013-05-24 GDR_ Converted to reStructuredText; some tidying and + modernizing (``BufferInit()`` takes keyword arguments; + ``BufferSpace()``, ``BufferSet()`` and ``BufferReset()`` are now + ``BufferArena()``, ``BufferAttach()`` and ``BufferDetach()`` + respectively; ``BufferExpose()`` and ``BufferCover()`` have been + moved to the Shield interface; see design.mps.shield_). + + .. _design.mps.shield: shield + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/cbs.txt b/mps/design/cbs.txt new file mode 100644 index 00000000000..e1ca1d98f4c --- /dev/null +++ b/mps/design/cbs.txt @@ -0,0 +1,328 @@ +.. mode: -*- rst -*- + +Coalescing block structures +=========================== + +:Tag: design.mps.cbs +:Author: Gavin Matthews +:Date: 1998-05-01 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: coalescing block structures; design + + +Introduction +------------ + +_`.intro`: This is the design for impl.c.cbs, which implements a data +structure for the management of non-intersecting memory ranges, with +eager coalescence. + +_`.readership`: This document is intended for any MM developer. + +_`.source`: design.mps.poolmvt_, design.mps.poolmvff_. + +.. _design.mps.poolmvt: poolmvt +.. 
_design.mps.poolmvff: poolmvff + +_`.overview`: The "coalescing block structure" is a set of addresses +(or a subset of address space), with provision for efficient +management of contiguous ranges, including insertion and deletion, +high level communication with the client about the size of contiguous +ranges, and detection of protocol violations. + + +Requirements +------------ + +In addition to the generic land requirements (see +design.mps.land_), the CBS must satisfy: + +.. _design.mps.land: land + +_`.req.fast`: Common operations must have a low amortized cost. + +_`.req.small`: Must have a small space overhead for the storage of +typical subsets of address space and not have abysmal overhead for the +storage of any subset of address space. + + +Interface +--------- + +_`.land`: CBS is an implementation of the *land* abstract data type, +so the interface consists of the generic functions for lands. See +design.mps.land_. + + +External types +.............. + +``typedef struct CBSStruct *CBS`` + +_`.type.cbs`: The type of coalescing block structures. A ``CBSStruct`` +is typically embedded in another structure. + + +External classes +................ + +_`.class.cbs`: ``CLASS(CBS)`` is the CBS class, a subclass of +``CLASS(Land)`` suitable for passing to ``LandInit()``. + +_`.class.fast`: ``CLASS(CBSFast)`` is subclass of ``CLASS(CBS)`` that +maintains, for each subtree, the size of the largest block in that +subtree. This enables the ``LandFindFirst()``, ``LandFindLast()``, and +``LandFindLargest()`` generic functions. + +_`.class.zoned`: ``CLASS(CBSZoned)`` is a subclass of +``CLASS(CBSFast)`` that maintains, for each subtree, the union of the +zone sets of all ranges in that subtree. This enables the +``LandFindInZones()`` generic function. + + +Keyword arguments +................. 
+ +When initializing a CBS, ``LandInit()`` takes the following optional +keyword arguments: + +* ``CBSBlockPool`` (type ``Pool``) is the pool from which the CBS + block descriptors will be allocated. If omitted, a new MFS pool is + created for this purpose. + +* ``MPS_KEY_CBS_EXTEND_BY`` (type ``Size``; default 4096) is passed as + the ``MPS_KEY_EXTEND_BY`` keyword argument to ``PoolCreate()`` if a + block descriptor pool is created. It specifies the size of segment + that the block descriptor pool will request from the arena. + +* ``MFSExtendSelf`` (type ``Bool``; default ``TRUE``) is passed to + ``PoolCreate()`` if a block descriptor pool is created. If ``TRUE``, + the block descriptor pool automatically extends itself when out of + space; if ``FALSE``, the pool returns ``ResLIMIT`` in this case. + (This feature is used by the arena to bootstrap its own CBS of free + memory. See design.mps.bootstrap.land.sol.pool_.) + + .. _design.mps.bootstrap.land.sol.pool: bootstrap#.land.sol.pool + + +Limitations +........... + +_`.limit.find`: ``CBSLandClass`` does not support the +``LandFindFirst()``, ``LandFindLast()``, and ``LandFindLargest()`` +generic functions (the subclasses do support these operations). + +_`.limit.zones`: ``CBSLandClass`` and ``CBSFastLandClass`` do not +support the ``LandFindInZones()`` generic function (the subclass +``CBSZonedLandClass`` does support this operation). + +_`.limit.iterate`: CBS does not provide an implementation for the +``LandIterateAndDelete()`` generic function. This is because +``TreeTraverse()`` does not permit modification, for speed and to +avoid perturbing the splay tree balance. + +_`.limit.flush`: CBS cannot be used as the source in a call to +``LandFlush()``. (Because of `.limit.iterate`_.) + + +Implementation +-------------- + +Splay tree +.......... + +_`.impl.splay`: The CBS is implemented using a splay tree (see +design.mps.splay_). 
Each splay tree node is embedded in a block +structure with a semi-open address range (design.mps.range_). The +splay tree is ordered by the range base address. + +.. _design.mps.splay: splay +.. _design.mps.range: range + +_`.impl.splay.fast-find`: In the ``CBSFastLandClass`` class, +``cbsFindFirst()`` and ``cbsFindLast()`` use the update/refresh +facility of splay trees to store, in each block, an accurate summary +of the maximum block size in the tree rooted at the corresponding +splay node. This allows rapid location of the first or last suitable +block, and very rapid failure if there is no suitable block. For +example, this is used in the implementation of allocation in the MVFF +pool class (design.mps.poolmvff_). + +.. _design.mps.poolmvff: poolmvff + +_`.impl.find-largest`: ``cbsFindLargest()`` simply finds out the size +of the largest block in the CBS from the root of the tree, using +``SplayRoot()``, and does ``SplayFindFirst()`` for a block of that +size. This takes time proportional to the logarithm of the size of the +free list, so it's about the best you can do without maintaining a +separate priority queue, just to do ``cbsFindLargest()``. For +example, this is used in the implementation of allocation buffers in +the MVFF pool class (design.mps.poolmvff_). + +_`.impl.splay.zones`: In the ``CBSZonedLandClass`` class, +``cbsFindInZones()`` uses the update/refresh facility of splay trees +to store, in each block, the union of the zones of the ranges in the +tree rooted at the corresponding splay node. This allows rapid +location of a block in a set of zones. For example, this is used to +allocate segments in particular zones in the arena to optimised +garbage collection (see design.mps.critical-path_). + +.. _design.mps.critical-path: critical-path + + +Low memory behaviour +.................... 
+ +_`.impl.low-mem`: When the CBS tries to allocate a new ``CBSBlock`` +structure for a new isolated range as a result of either +``LandInsert()`` or ``LandDelete()``, and there is insufficient memory +to allocate the block structure, then the range is not added to the +CBS or deleted from it, and the call to ``LandInsert()`` or +``LandDelete()`` returns ``ResMEMORY``. + + +The CBS block +............. + +_`.impl.cbs.block`: The block contains a non-empty range and a splay +tree node. + +_`.impl.cbs.block.special`: The range may be empty if the block is +halfway through being deleted. + +_`.impl.cbs.block.special.just`: This conflates values and status, but +is justified because block size is very important. + + +Testing +------- + +_`.test`: The following testing will be performed on this module: + +_`.test.land`: A generic test for land implementations. See +design.mps.land.test_. + +.. _design.mps.land.test: land#.test + +_`.test.pool`: The arena and two pools (MVT_ and MVFF_) are +implemented on top of a CBS. These are subject to testing in +development, QA, and are heavily exercised by customers. + +.. _MVT: poolmvt +.. _MVFF: poolmvff + + +Notes for future development +---------------------------- + +_`.future.not-splay`: The implementation of CBSs is based on splay +trees. It could be revised to use other data structures that meet the +requirements (especially `.req.fast`_). + +_`.future.hybrid`: It would be possible to attenuate the problem of +`.risk.overhead`_ (below) by using a single word bit set to represent +the membership in a (possibly aligned) word-width of grains. This +might be used for block sizes less than a word-width of grains, +converting them when they reach all free in the bit set. Note that +this would make coalescence slightly less eager, by up to +``(word-width - 1)``. 
+ +_`.future.iterate.and.delete`: It would be possible to provide an +implementation for the ``LandIterateAndDelete()`` generic function +using ``TreeTraverseAndDelete()``, which calls ``TreeToVine()`` first, +iterates over the vine (where deletion is straightforward), and then +rebalances the tree. Note that this is little better than using +``SplayFirst()`` and ``SplayNext()``. + +_`.future.lazy-coalesce`: It's long been observed that small blocks +are often freed and then reallocated, so that coalescing them is a +waste of time. It might be worth considering how a splay tree could +implement a lazy coalescing scheme, where blocks are coalesced with +their adjacent neighbours during the search only if they aren't big +enough. This would break `.impl.find-largest`_ and so might be best +done as a different kind of land. On the other hand, since the MPS +does not use client memory to store the tree, eager coalescing avoids +allocation. + + +Risks +----- + +_`.risk.overhead`: Clients should note that the current implementation +of CBSs has a space overhead proportional to the number of isolated +contiguous ranges. [Four words per range.] If the CBS contains every +other grain in an area, then the overhead will be large compared to +the size of that area. [Four words per two grains.] The CBS structure +is thus suitable only for managing large enough ranges. + + +Document History +---------------- + +- 1998-05-01 Gavin Matthews. This document was derived from the + outline in design.mps.poolmv2_. + +.. _design.mps.poolmv2: poolmv2 + +- 1998-07-22 Gavin Matthews. Updated in response to approval comments + in change.epcore.anchovy.160040. There is too much fragmentation in + trapping memory. + +- Gavin Matthews. Updated (as part of change.epcore.brisling.160158: + MVFF cannot be instantiated with 4-byte alignment) to document new + alignment restrictions. + +- 2002-06-07 RB_ Converted from MMInfo database design document. 
+ +- 2013-04-14 GDR_ Converted to reStructuredText. + +- 2013-05-19 GDR_ Removed the "emergency" free list allocator, the + design notes on an unimplemented "future hybrid" scheme, the + callbacks, block interface, and minimum size interface. Updated the + arguments for ``CBSIterateMethod``, ``CBSInit()``, ``CBSInsert()``, + and ``CBSDelete()``. + +- 2013-05-23 RB_ Removed references to "contingency methods" that were + talking about the deleted "emergency" free list allocator. + Documented ``fastFind`` argument to ``CBSInit()``. + +- 2014-04-01 GDR_ Moved generic material to design.mps.land_. + Documented new keyword arguments. + +- 2016-03-27 RB_ Adding cross references to usage. Updating future + with reference to ``TreeTraverseAndDelete()``. Adding future idea + about lazy coalescing. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 1998–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/check.txt b/mps/design/check.txt new file mode 100644 index 00000000000..c7b29406be8 --- /dev/null +++ b/mps/design/check.txt @@ -0,0 +1,176 @@ +.. mode: -*- rst -*- + +Checking +======== + +:Tag: design.mps.check +:Author: Gavin Matthews +:Date: 1996-08-05 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: checking; design + + +Introduction +------------ + +_`.intro`: This documents the design of structure checking within the +MPS. + +_`.readership`: MPS developers. + + +Implementation +-------------- + +_`.level`: There are three levels of checking: + +#. _`.level.sig`: The lowest level checks only that the structure has + a valid ``Signature`` (see design.mps.sig_). + + .. _design.mps.sig: sig + +#. _`.level.shallow`: Shallow checking checks all local fields + (including signature) and also checks the signatures of any parent + or child structures. + +#. _`.level.deep`: Deep checking checks all local fields + (including signatures), the signatures of any parent structures, + and does full recursive checking on any child structures. + +_`.level.control`: Control over the levels of checking is via the +definition of at most one of the macros ``TARGET_CHECK_SHALLOW`` +(which if defined gives `.level.shallow`_), ``TARGET_CHECK_DEEP`` +(which if defined gives `.level.deep`_). If neither macro is defined +then `.level.sig`_ is used. 
These macros are not intended to be
+manipulated directly by developers, they should use the interface in
+impl.h.target.
+
+_`.order`: Because deep checking (`.level.deep`_) uses unchecked
+recursion, it is important that child relationships are acyclic
+(`.macro.down`_).
+
+_`.fun`: Every abstract data type which is a structure pointer should
+have a function ``TypeCheck()`` which takes a pointer of type
+``Type`` and returns a ``Bool``. It should check all fields in
+order, using one of the macros in `.macro`_, or document why not.
+
+_`.fun.omit`: The only fields which should be omitted from a check
+function are those for which there is no meaningful check (for
+example, an unlimited unsigned integer with no relation to other
+fields).
+
+_`.fun.return`: Although the function returns a ``Bool``, if the assert
+handler returns (or there is no assert handler), then this is taken to
+mean "ignore and continue", and the check function hence returns
+``TRUE``.
+
+_`.macro`: Checking is implemented by invoking four macros in
+impl.h.assert:
+
+``CHECKS(type, val)``
+
+_`.macro.sig`: ``CHECKS(type, val)`` checks the signature only, and
+should be called precisely on ``type`` and the received object
+pointer.
+
+``CHECKL(cond)``
+
+_`.macro.local`: ``CHECKL(cond)`` checks a local field (depending on
+level; see `.level`_), and should be called on each local field that
+is not an abstract data type structure pointer itself (apart from
+the signature), with an appropriate normally-true test condition.
+
+``CHECKU(type, val)``
+
+_`.macro.up`: ``CHECKU(type, val)`` checks a parent abstract data
+type structure pointer, performing at most signature checks
+(depending on level; see `.level`_). It should be called with the
+parent type and pointer.
+
+``CHECKD(type, val)``
+
+_`.macro.down`: ``CHECKD(type, val)`` checks a child abstract data
+type structure pointer, possibly invoking ``TypeCheck()`` (depending
+on level; see `.level`_). It should be called with the child type
+and pointer.
+ +_`.full-type`: Use ``CHECKS()``, ``CHECKD()`` or ``CHECKU()`` on all +types that satisfy these three requirements: + +_`.full-type.pointer`: The type is a pointer type. + +_`.full-type.check`: The type provides a function ``Bool TypeCheck(Type +type)`` where ``Type`` is substituted for the name of the type (for +example, ``PoolCheck()``). + +_`.full-type.sig`: The expression ``obj->sig`` is a valid value of +type ``Sig`` whenever ``obj`` is a valid value of type ``Type``. + +_`.partial-type`: Where the type satisfies `.full-type.pointer`_ and +`.full-type.check`_ but not `.full-type.sig`_ because the type lacks a +signature in order to save space (this applies to small structures +that are embedded many times in other structures, for example +``Ring``), use ``CHECKD_NOSIG()``. + +_`.hidden-type`: Where the type satisfies `.full-type.pointer`_ and +`.full-type.check`_ but not `.full-type.sig`_ because the structure +has a signature but the structure definition is not visible at point +of checking (for example ``Root``), use ``CHECKD_NOSIG()`` and +reference this tag. The structure could be considered for addition to +``mpmst.h``. + + +Common assertions +----------------- + +_`.common`: Some assertions are commonly triggered by mistakes in the +client program. These are listed in the section "Common assertions and +their causes" in the MPS Reference, together with an explanation of +their likely cause, and advice for fixing the problem. To assist with +keeping the MPS Reference up to date, these assertions are marked with +a cross-reference to this tag. When you update the assertion, you must +also update the MPS Reference. + + +Document History +---------------- + +- 1996-08-05 Gavin Matthews Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-03-12 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/clock.txt b/mps/design/clock.txt new file mode 100644 index 00000000000..19da3c66c3a --- /dev/null +++ b/mps/design/clock.txt @@ -0,0 +1,119 @@ +.. mode: -*- rst -*- + +Fast high-resolution clock +========================== + +:Tag: design.mps.clock +:Author: Gareth Rees +:Date: 2016-03-06 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. 
+:Index terms: pair: clock; design + + +Introduction +------------ + +_`.intro`: This is the design of the clock module, which implements a +fast high-resolution clock for use by the telemetry system. + +_`.readership`: This document is intended for any MPS developer. + + +Requirements +------------ + +_`.req.monotonic`: Successive calls to ``EVENT_CLOCK()`` must yield +values that are monotonically increasing. (So that comparing the +timestamp on two events never gives false positives.) + +_`.req.fast`: ``EVENT_CLOCK()`` should take very little time; it +should not require a system call. (So that programs that use the MPS +remain usable when telemetry is turned on.) + +_`.req.high-resolution`: Successive calls to ``EVENT_CLOCK()`` should +yield values that are strictly monotonically increasing (so that +sorting the telemetry stream puts the events in the order they +happened). + + +Interface +--------- + +``EventClock`` + +_`.if.type`: The type of timestamps. It must be an unsigned 64-bit +integral type, for example a ``typedef`` for ``uint64_t`` or +``unsigned __int64``. + +``EVENT_CLOCK_MAKE(lvalue, low, high)`` + +_`.if.make`: Construct an ``EventClock`` timestamp from its two +halves. The first parameter is an lvalue with type ``EventClock``, and +the second and third parameters are 32-bit unsigned integers. The +macro must assign a timestamp to ``lvalue`` with the value ``(high +<< 32) + low``. + +``EVENT_CLOCK(lvalue)`` + +_`.if.get`: Assign an ``EventClock`` timestamp for the current time to +``lvalue``, which is an lvalue with type ``EventClock``. + +``EVENT_CLOCK_PRINT(stream, clock)`` + +_`.if.print`: Write the value of ``clock`` to the standard C output +file handle ``stream`` as 16 hexadecimal digits (with leading zeros, +and capital letters A to F). + +``EVENT_CLOCK_WRITE(stream, clock)`` + +_`.if.write`: Write the value of ``clock`` to the output stream +``stream`` as 16 hexadecimal digits (with leading zeros, and capital +letters A to F). 
The macro should be implemented using ``WriteF()``. + + +Implementation +-------------- + +_`.impl.tsc`: On IA-32 and x86-64, the `Time Stamp Counter +`_ returned by the +RDTSC instruction is a suitable clock for single-core CPUs, but on +multiple-core CPUs, different cores may have different values or tick at different speeds, and so it may fail to meet `.req.monotonic`_. + + +Document History +---------------- + +- 2016-03-06 GDR_ Created. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2016–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/collection.txt b/mps/design/collection.txt new file mode 100644 index 00000000000..212d2f69968 --- /dev/null +++ b/mps/design/collection.txt @@ -0,0 +1,445 @@ +.. mode: -*- rst -*- + +Collection framework +==================== + +:Tag: design.mps.collection +:Author: Pekka P. Pirinen +:Date: 1998-01-01 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: collection framework; design + + +Introduction +------------ + +_`.intro`: This document describes the Collection Framework. It's a +framework for implementing garbage collection techniques and +integrating them into a system of collectors that all cooperate in +recycling garbage. + + +Overview +-------- + +_`.framework`: MPS provides a framework that allows the integration of +many different types of GC strategies and provides many of the basic +services that those strategies use. + +_`.framework.cover`: The framework subsumes most major GC strategies +and allows many efficient techniques, like in-line allocation or +software barriers. + +_`.framework.overhead`: The overhead due to cooperation is low. + +.. note:: + + But not non-existent. Can we say something useful about it? + +_`.framework.benefits`: The ability to combine collectors contributes +significantly to the flexibility of the system. The reduction in code +duplication contributes to reliability and integrity. The services of +the framework make it easier to write new MM strategies and +collectors. + +_`.framework.mpm`: The Collection Framework is merely a part of the +structure of the MPM. See design.mps.architecture and design.mps.arch +for the big picture. + +.. note:: + + Those two documents should be combined into one. Pekka P. Pirinen, + 1998-01-15. + +Other notable components that the MPM manages to integrate into a +single framework are manually-managed memory and finalization services +(see design.mps.finalize_). + +.. _design.mps.finalize: finalize + +.. 
note:: + + A document describing the design of manually-managed memory is + missing. Pekka P. Pirinen, 1998-01-15. + +_`.see-also`: This document assumes basic familiarity with the ideas +of pool (see design.mps.arch.pools) and segment (see +design.mps.seg.over). + + +Collection abstractions +----------------------- + +Colours, scanning and fixing +............................ + +_`.state`: The framework knows about the three colours of the +tri-state abstraction and free blocks. Recording the state of each +object is the responsibility of the pool, but the framework gets told +about changes in the states and keeps track of colours in each +segment. Specifically, it records whether a segment might contain +white, grey and black objects with respect to each active trace (see +`.tracer`_) + +.. note:: + + Black not currently implemented. Pekka P. Pirinen, 1998-01-04. + +(A segment might contain objects of all colours at once, or none.) +This information is approximate, because when an object changes +colour, or dies, it usually is too expensive to determine if it was +the last object of its former colour. + +_`.state.transitions`: The possible state transitions are as follows:: + + free ---alloc--> black (or grey) or white or none + none --condemn-> white + none --refine--> grey + grey ---scan---> black + white ----fix---> grey (or black) + black --revert--> grey + white --reclaim-> free + black --reclaim-> none + +_`.none-is-black`: Outside of a trace, objects don't really have +colour, but technically, the colour is black. Objects are only +allocated grey or white during a trace, and by the time the trace has +finished, they are either dead or black, like the other surviving +objects. We might then reuse the colour field for another trace, so +it's convenient to set the colour to black when allocating outside a +trace. 
This means that refining the foundation +(analysis.tracer.phase.condemn.refine), actually turns black segments +grey, rather than vice versa, but the principle is the same. + +_`.scan-fix`: "Scanning" an object means applying the "fix" function +to all references in that object. Fixing is the generic name for the +operation that takes a reference to a white object and makes it +non-white (usually grey, but black is a possibility, and so is +changing the reference as we do for weak references). Typical examples +of fix methods are copying the object into to-space or setting its +mark bit. + +_`.cooperation`: The separation of scanning and fixing is what allows +different GC techniques to cooperate. The scanning is done by a method +on the pool that the scanned object resides in, and the fixing is done +by a method on the pool that the reference points to. + +_`.scan-all`: Pools provide a method to scan all the grey objects in a +segment. + + +Reference sets +.............. + +_`.refsets`: The cost of scanning can be significantly reduced by +storing remembered sets. We have chosen a very compact and efficient +implementation, called reference sets, or refsets for short (see +idea.remember). + +.. note:: + + design.mps.refset is empty! Perhaps some of this should go there. + Pekka P. Pirinen, 1998-02-19. + +This makes the cost of maintaining them low, so we maintain them for +all references out of all scannable segments. + +_`.refsets.approx`: You might describe refsets as summaries of all +references out of an area of memory, so they are only approximations +of remembered sets. When a refset indicates that an interesting +reference might be present in a segment, we still have to scan the +segment to find it. + +_`.refsets.scan`: The refset information is collected during scanning. 
+The scan state protocol provides a way for the pool and the format
+scan methods to cooperate in this, and to pass this information to the
+tracer module which checks it and updates the segment (see
+design.mps.scan_).
+
+.. _design.mps.scan: scan
+
+.. note::
+
+   Actually, there's very little doc there. Pekka P. Pirinen,
+   1998-02-17.
+
+_`.refsets.maintain`: The MPS tries to maintain the refset information
+when it moves or changes an object.
+
+_`.refsets.pollution`: Ambiguous references and pointers outside the
+arena will introduce spurious zones into the refsets. We put up with
+this to keep the scanning costs down. Consistency checks on refsets
+have to take this into account.
+
+_`.refsets.write-barrier`: A write-barrier is needed to keep the
+mutator from invalidating the refsets when writing to a segment. We
+need one on any scannable segment whose refset is not a superset of
+the mutator's (and that the mutator can see). If we know what the
+mutator is writing and whether it's a reference, we can just add that
+reference to the refset (figuring out whether anything can be removed
+from the refset is too expensive). If we don't know or if we cannot
+afford to keep the barrier up, the framework can union the mutator's
+refset to the segment's refset.
+
+_`.refset.mutator`: The mutator's refset could be computed during root
+scanning in the usual way, and then kept up to date by using a
+read-barrier. It's not a problem that the mutator can create new
+pointers out of nothing behind the read-barrier, as they won't be real
+references. However, this is probably not cost-effective, since it
+would cause lots of barrier hits. We'd need a read-barrier on every
+scannable segment whose refset is not a subset of the mutator's (and
+that the mutator can see). So instead we approximate the mutator's
+refset with the universal refset.
+
+
+The tracer
+----------
+
+_`.tracer`: The tracer is an engine for implementing multiple garbage
+collection processes. Each process (called a "trace") proceeds
+independently of the others through five phases as described in
+analysis.tracer. The following sections describe how the action of
+each phase fits into the framework. See design.mps.trace_ for details.
+
+.. _design.mps.trace: trace
+
+.. note::
+
+   No, there's not much there, either. Possibly some of this section
+   should go there. Pekka P. Pirinen, 1998-02-18.
+
+_`.combine`: The tracer can also combine several traces for some
+actions, like scanning a segment or a root. The methods the tracer
+calls to do the work get an argument that tells them which traces they
+are expected to act for.
+
+.. note::
+
+   Extend this.
+
+_`.trace.begin`: Traces are started by external request, usually from
+a client function or an action (see design.mps.action).
+
+_`.trace.progress`: The tracer gets time slices from the arena to work
+on a given trace.
+
+.. note::
+
+   This is just a provisional arrangement, in lieu of real progress
+   control. Pekka P. Pirinen, 1998-02-18.
+
+In each slice, it selects a small amount of work to do, based on the
+state of the trace, and does it, using facilities provided by the
+pools.
+
+_`.trace.scan`: A typical unit of work is to scan a single segment.
+The tracer can choose to do this for multiple traces at once, provided
+the segment is grey for more than one trace.
+
+_`.trace.barrier`: Barrier hits might also cause a need to scan a
+segment (see `.hw-barriers.hit`_). Again, the tracer can choose to
+combine traces, when it does this.
+
+_`.mutator-colour`: The framework keeps track of the colour of the
+mutator separately for each trace.
+
+
+The condemn phase
+.................
+
+_`.phase.condemn`: The agent that creates the trace (see
+`.trace.begin`_) determines the condemned set and colours it white.
+The tracer then examines the refsets on all scannable segments, and if +it can deduce some segment cannot refer to the white set, it's +immediately coloured black, otherwise the pool is asked to grey any +objects in the segment that might need to be scanned (in copying +pools, this is typically the whole segment). + +_`.phase.condemn.zones`: To get the maximum benefit from the refsets, +we try to arrange that the zones are a minimal superset (for example, +generations uniquely occupy zones) and a maximal subset (there's +nothing else in the zone) of the condemned set. This needs to be +arranged at allocation time (or when copying during collection, which +is much like allocation) + +.. note:: + + Soon, this will be handled by segment loci, see design.mps.locus_. + + .. _design.mps.locus: locus + +_`.phase.condemn.mutator`: At this point, the mutator might reference +any objects, that is, it is grey. Allocation can be in any colour, +most commonly white. + +.. note:: + + More could be said about this. + + +The grey mutator phase +...................... + +_`.phase.grey-mutator`: Grey segments are chosen according to some +sort of progress control and scanned by the pool to make them black. +Eventually, the tracer will decide to flip or it runs out of grey +segments, and proceeds to the next phase. + +.. note:: + + Currently, this phase has not been implemented; all traces flip + immediately after condemn. Pekka P. Pirinen, 1998-02-18. + +_`.phase.grey-mutator.copy`: At this stage, we don't want to copy +condemned objects, because we would need an additional barrier to keep +the mutator's view of the heap consistent (see +analysis.async-gc.copied.pointers-and-new-copy). + +_`.phase.grey-mutator.ambig`: This is a good time to get all ambiguous +scanning out of the way, because we usually can't do any after the +flip and because it doesn't cause any copying. + +.. note:: + + Write a detailed explanation of this some day. + + +The flip phase +.............. 
+ +_`.phase.flip`: The roots (see design.mps.root_) are scanned. This has +to be an atomic action as far as the mutator is concerned, so all +threads are suspended for the duration. + +.. _design.mps.root: root + +_`.phase.flip.mutator`: After this, the mutator is black: if we use a +strong barrier (analysis.async-gc.strong), this means it cannot refer +to white objects. Allocation will be in black (could be grey as well, +but there's no point to it). + + +The black mutator phase +....................... + +_`.phase.black-mutator`: Grey segments are chosen according to some +sort of progress control and scanned by the pool to make them black. +Eventually, the tracer runs out of segments that are grey for this +trace, and proceeds to the next phase. + +_`.phase.black-mutator.copy`: At this stage white objects can be +relocated, because the mutator cannot see them (as long as a strong +barrier is used, as we must do for a copying collection, see +analysis.async-gc.copied.pointers). + + +The reclaim phase +................. + +_`.phase.reclaim`: The tracer finds the remaining white segments and +asks the pool to reclaim any white objects in them. + +_`.phase.reclaim.barrier`: Once a trace has started reclaiming +objects, the others shouldn't try to scan any objects that are white +for it, because they might have dangling pointers in them. + +.. note:: + + Needs cross-reference to document that is yet to be written. + + Currently, we reclaim atomically, but it could be incremental, or + even overlapped with a new trace on the same condemned set. + Pekka P. Pirinen, 1997-12-31. + + +Barriers +-------- + +.. note:: + + An introduction and a discussion of general principles should go + here. This is a completely undesigned area. + + +Hardware barriers +................. + +_`.hw-barriers`: Hardware barrier services cannot, by their very +nature, be independently provided to each trace. 
A segment is either +protected or not, and we have to set the protection on a segment if +any trace needs a hardware barrier on it. + +_`.hw-barriers.supported`: The framework currently supports +segment-oriented Appel-Ellis-Li barriers +(analysis.async-gc.barrier.appel-ellis-li), and write-barriers for +keeping the refsets up-to-date. It would not be hard to add Steele +barriers (analysis.async-gc.barrier.steele.scalable). + +_`.hw-barriers.hit`: When a barrier hit happens, the arena determines +which segment it was on. The segment colour info is used to determine +whether it had trace barriers on it, and if so, the appropriate +barrier action is performed, using the methods of the owning pool. If +the segment was write-protected, its refset is unioned with the refset +of the mutator. + +.. note:: In practice this is ``RefSetUNIV``. + +_`.hw-barriers.hit.multiple`: Fortunately, if we get a barrier hit on +a segment with multiple trace barriers on it, we can scan it for all +the traces that it had a barrier for. + +.. note:: Needs link to unwritten section under `.combine`_. + + +Software barriers +................. + +.. note:: + + Write something about software barriers. + + +Document History +---------------- + +- 1998-01-01 Pekka P. Pirinen. Initial draft based on the current + implementation of the MPS, analysis.async-gc, [that note on the + independence of collections] and analysis.tracer. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-03-22 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/config.txt b/mps/design/config.txt new file mode 100644 index 00000000000..7f6337c9e54 --- /dev/null +++ b/mps/design/config.txt @@ -0,0 +1,690 @@ +.. mode: -*- rst -*- + +MPS Configuration +================= + +:Tag: design.mps.config +:Author: Richard Brooksby +:Date: 1997-02-19 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: configuration; design + + +Introduction +------------ + +_`.intro`: This document describes how the `Memory Pool System +`_ source code is configured so +that it can target different architectures, operating systems, build +environments, varieties, and products. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. + + + +Requirements +------------ + +_`.req.import`: The MPS must be simple to include in third-party projects. 
+ +_`.req.arch`: Allow architecture specific configurations of the MPS, so +that we can vary the MPS according to the target architecture. + +_`.req.os`: Allow operating system specific configurations of the MPS, +so that we can vary the MPS according to the target OS. + +_`.req.builder`: Allow build environment specific configurations of the +MPS, so that we can vary the MPS according to the compiler, etc. + +_`.req.var`: Allow configurations with different amounts of +instrumentation (assertions, metering, etc.). + +_`.req.impact`: The configuration system should have a minimal effect on +maintainability of the implementation. + +_`.req.port`: The system should be easy to port across platforms. + +_`.req.maint`: Maintenance of the configuration and build system should +not consume much developer time. + + +Retired requirements +.................... + +_`.req.prod`: Allow product specific configurations of the MPS, so that +we can build variants of the MPS for use in different products. This +requirement has been retired on 2012-09-03 as part of work on the +variety-reform_ branch. Client-specific customisation of the MPS will +be handled in source control, while the MPS source remains generic, to +reduce costs and increase reliability. See [RB_2012-09-13]_. + +.. _variety-reform: /project/mps/branch/2012-08-15/variety-reform + + +Definitions +----------- + +_`.def.platform`: A *platform* is a combination of an architecture +(`.def.arch`_), an operating system (`.def.os`_), and a builder +(`.def.builder`_). The set of supported platforms is maintained in the +`Platforms section of "Building the Memory Pool System" +`_. + +.. _platforms: ../manual/html/guide/build.html#platforms + +_`.def.arch`: An *architecture* is a processor type with associated calling +conventions and other binary interface stuff these days often called the +`ABI `_. +Most importantly for the MPS it determines the layout of the register +file, thread context, and thread stack. 
+ +_`.def.os`: An *operating system* is the interface to external resources. +Most importantly for the MPS it determines the low level interface to +virtual memory (if any) and threading. + +_`.def.builder`: A *builder* is the tools (C compiler, etc.) used to make +the target (`.def.target`_). The MPS minimises use of compiler-specific +extensions, but this is handy for suppressing warnings, inlining hints, +etc. + +_`.def.var`: A *variety* determines things like the amount of debugging, +internal consistency checking, annotation, etc. In modern IDEs this is +called a "build configuration" and the usual default is to have two: +"debug" and "release". The MPS predates this convention, but the concept +is the same. + +_`.def.prod`: A *product* is the intended product into which the MPS will +fit, e.g. ScriptWorks, Dylan, etc. We no longer maintain this concept +as a dimension of configuration since `.req.prod`_ has been retired. + +_`.def.target`: The *target* is the result of the build. + +_`.def.option`: An *option* is a feature of the MPS that is not +selected via the *platform* and *variety*. See `.opt`_. + + + +Overview +-------- + +_`.import.source`: The MPS can be simply included in client products as +source code. Since `version 1.110`_ we made it possible to simply +include the file ``mps.c`` in a client's build process, without +requiring a separate build of the MPS or linking a library. This is +described `section 2.3.1, "Compiling for production" of the MPS manual +`_. + +.. _`version 1.110`: https://www.ravenbrook.com/project/mps/version/1.110/ +.. _compiling: ../manual/html/guide/build.html#compiling-for-production + +_`.no-gen`: No generated code or external tools are required. On most +platforms the only tool is the C compiler. On 64-bit Windows we require +the assembler since Microsoft withdrew in-line assembler from their C +compiler. 
+ +_`.no-spaghetti`: Several of the MPS team have worked on some extremely +messy code bases which used a great number of ``#ifdef`` statements. +These quickly became very expensive to maintain and develop. The +general rule in the MPS is "no ``#ifdefs``". Instead, platform-specific +code is kept in separate source files and selected by carefully controlled +``#ifdefs``, such as in mps.c_. + +.. _mps.c: ../code/mps.c + +_`.min-dep`: Dependency on a particular configuration should be +minimized and localized when developing code. This is enshrined in the +general rules for implementation [ref?] that are enforced by MPS +development procedures including code review and inspection. + + +The build system +---------------- + +Abstract build function +....................... + +_`.build.fun`: The MPS implementation assumes only a simple "build +function" that takes a set of sources, possibly in several languages, +compiles them with a set of predefined preprocessor symbols, and links +the result with a set of libraries to form the target:: + + target := build(<defs>, <srcs>, <libs>) + +_`.build.sep`: Separate compilation and linkage can be seen as a +memoization of this function, and is not strictly necessary for the +build. Indeed, since `version 1.110`_ we found that modern compilers +are quite happy to compile the whole MPS in one go (see `.import.source`_). + +_`.build.cc`: A consequence of this approach is that it should always +be possible to build a complete target with a single UNIX command line +calling the compiler driver (usually "cc" or "gcc"), for example:: + + cc -o main -DCONFIG_VAR_COOL foo.c bar.c baz.s -lz + +_`.build.defs`: The "defs" are the set of preprocessor macros which are to be +predefined when compiling the module sources:: + + CONFIG_VAR_ + +_`.var.codes`: The variety codes are as follows: + +_`.var.hot`: ``HOT`` + + Intended for release in products. Optimised, reduced internal + checking, especially on the critical path [RB_2012-09-07]_. 
+ +_`.var.cool`: ``COOL`` + + Intended for use during development. Moderately thorough internal + consistency checking. Reduced optimisation to allow for + single-stepping. + +_`.var.rash`: ``RASH`` + + No internal checking at all. Slight performance improvement over + `.var.hot`_ at the cost of early detection of memory management + bugs. We do not advise use of this variety, as memory management + bugs tend to be extremely expensive to deal with. + +_`.default.hot`: If no ``CONFIG_VAR`` is present, ``HOT`` is assumed in +config.h_. + +_`.build.srcs`: The "srcs" are the set of sources that must be +compiled in order to build the target. The set of sources may vary +depending on the configuration. For example, different sets of sources +may be required to build different architectures. + +.. note:: + + This is a dependency between the makefile (or whatever) and the + module configuration in config.h_. + +_`.build.libs`: The "libs" are the set of libraries to which the +compiled sources must be linked in order to build the target. For +example, when building a test program, it might include the ANSI C +library and an operating system interface library. + + +File Structure +.............. + +_`.file.dir`: The MPS source code is arranged in a single directory +called "code" containing all the sources for the whole family of +targets. + +_`.file.base`: The names of sources must be unique in the first eight +characters in order to conform to FAT filesystem naming restrictions. +(Do not scoff -- this has been an important requirement as recently as +2012!) + +_`.file.ext`: The extension may be up to three characters and directly +indicates the source language. + +_`.file.platform`: Platform-specific files include the platform code +in their name. See `.mod.impls`_. + + +Modules and naming +.................. + +_`.mod.unique`: Each module has an identifier which is unique within the MPS. 
+ +_`.mod.impls`: Each module has one or more implementations which may be +in any language supported by the relevant build environment. + +_`.mod.primary`: The primary implementation of a module is written in +target-independent ANSI C in a source file with the same name as the +module. + +_`.mod.an`: Where there are platform-specific implementations and an +inferior portable ANSI C fallback implementation, "an" is used in +place of the platform code. + +_`.mod.secondary`: The names of other implementations should begin +with the same prefix (the module id or a shortened version of it) and +be suffixed with one or more target parameter codes (defined below). In +particular, the names of assembly language sources must include the +target parameter code for the relevant architecture. + +_`.mod.example`: For example, the stack scanner is defined in ss.h_ +(which is platform-independent). It has some platform-independent C in +ss.c_ and, for example, ssw3i6mv.c_ is specific to Windows on the x64 +architecture built with Microsoft Visual C. + +.. _ss.c: ../code/ss.c +.. _ss.h: ../code/ss.h +.. _ssw3i6mv.c: ../code/ssw3i6mv.c + + +Build system rationale +...................... + +_`.build.rat`: This simple design makes it possible to build the MPS +using many different tools. Microsoft Visual C and other graphical +development tools do not support much in the way of generated sources, +staged building, or other such stuff. The Visual C and Xcode "project" +files correspond closely to a closure of the build function +(`.build.fun`_). The simplicity of the build function has also made it +easy to set up builds using NMAKE (DOS), MPW (Macintosh), and to get the +MPS up and running on other platforms such as FreeBSD and Linux in very +little time. The cost of maintaining the build systems on these various +platforms is also reduced to a minimum, allowing the MPS developers to +concentrate on primary development. The source code is kept simple and +straightforward. 
When looking at MPS sources you can tell exactly what +is going to be generated with very little context. The sources are not +munged beyond the standard ANSI C preprocessor. + +_`.build.port`: The portability requirement (`.req.port`_) implies that +the build system must use only standard tools that will be available on +all conceivable target platforms. Experience of development +environments on the Macintosh (Metrowerks Codewarrior) and Windows NT +(Visual C++) indicates that we cannot assume much sophistication in the +use of file structure by development environments. The best that we can +hope for is the ability to combine a fixed list of source files, +libraries, and predefined preprocessor symbols into a single target. + +_`.build.maint`: The maintainability requirement (`.req.maint`_) implies +that we don't spend time trying to develop a set of tools to support +anything more complicated than the simple build function described +above. The effort in constructing and maintaining a portable system of +this kind is considerable. Such efforts failed in the Electronic +Publishing division of Harlequin. + + +Warnings and errors +................... + +_`.warning.free`: A consequence of `.import.source`_ is that the MPS +needs to compile in the context of the client's build system, with +*whatever compilation and warning options* the client has enabled in +that system, and this might include options causing warnings to be +treated as errors. Accordingly, the MPS should compile without +warnings when enabling the compiler options most likely to be employed +by clients. + +_`.warning.impl`: In order to ensure that the MPS meets the +requirement in `.warning.free`_, during development and testing of the +MPS we compile with a large selection of warning options for each +supported compiler, and with warnings treated as errors so that +developers do not get into the habit of ignoring warning messages. 
+These are enabled in the compiler makefile fragments for each +compiler, for example ll.gmk_ for Clang/LLVM. + +.. _ll.gmk: ../code/ll.gmk + +_`.warning.benefit`: The implementation in `.warning.impl`_ also helps +us keep the code free of subtle compiler issues that break memory +managers, and free of constructs which might be accidentally +mis-interpreted by other developers. + +_`.warning.silence`: When code needs to be modified, for example by +adding a cast, to silence a warning that has been analyzed and turned +out to be harmless, it is best practice to introduce a macro that +expresses the intention, and cross-reference this paragraph from the +macro's comment. If the macro is general-purpose then misc.h_ is a +good place to put it. + +.. _misc.h: ../code/misc.h + + +Implementation +-------------- + +_`.impl`: The two implementation files config.h_ and mpstd.h_ can be +seen as preprocessor programs which "accept" build parameters and "emit" +configuration parameters (`.fig.impl`_). The build parameters are +defined either by the builder (in the case of target detection) or by +the build function (in the case of selecting the variety). + +_`.fig.impl`: + +=========================== ============== =========================================== +Build parameters Source file Configuration parameters +=========================== ============== =========================================== +``CONFIG_VAR_HOT`` ⟶ ``config.h`` ⟶ ``MPS_ASSERT_STRING``, etc. +``_WIN32`` ⟶ ``mpstd.h`` ⟶ ``MPS_OS_W3``, etc. +=========================== ============== =========================================== + +_`.impl.dep`: No source code, other than the directives in config.h_ +and mpstd.h_, should depend on any build parameters. That is, +identifiers beginning "CONFIG\_" should only appear in impl.h.config. +Code may depend on configuration parameters in certain, limited ways, as +defined below (`.conf`_). + +.. _config.h: ../code/config.h +.. 
_mpstd.h: ../code/mpstd.h + + +Target platform detection +......................... + +_`.pf`: The target platform is "detected" by the preprocessor directives in +mpstd.h_. + +_`.pf.form`: This file consists of sets of directives of the form:: + + #elif  + #define MPS_PF_ + #define MPS_PF_STRING "" + #define MPS_OS_ + #define MPS_ARCH_ + #define MPS_BUILD_ + #define MPS_T_WORD  + #define MPS_T_ULONGEST  + #define MPS_WORD_WIDTH  + #define MPS_WORD_SHIFT  + #define MPS_PF_ALIGN  + +_`.pf.detect`: The conjunction of builder predefinitions is a constant +expression which detects the target platform. It is a logical AND of +expressions which look for preprocessor symbols defined by the build +environment to indicate the target. These must be accompanied by a +reference to the build tool documentation from which the symbols came. +For example:: + + /* "Predefined Macros" from "Visual Studio 2010" on MSDN + * . */ + + #elif defined(_MSC_VER) && defined(_WIN32) && defined(_M_IX86) + +_`.pf.codes`: The declarations of the platform, operating system, +architecture, and builder codes define preprocessor macros corresponding +to the target detected (`.pf.detect`_). For example:: + + #define MPS_PF_W3I3MV + #define MPS_OS_W3 + #define MPS_ARCH_I3 + #define MPS_BUILD_MV + +_`.pf.word`: The declaration of ``MPS_T_WORD`` defines the unsigned +integral type which corresponds, on the detected target, to the +machine word. It is used to define the MPS Word type +(design.mps.type.word_). For example:: + + #define MPS_T_WORD unsigned long + +.. _design.mps.type.word: type#.word + +We avoid using ``typedef`` here because mpstd.h_ could potentially +be included in assembly language source code. + +_`.pf.word-width`: The declaration of ``MPS_WORD_WIDTH`` defines the +number of bits in the type defined by ``MPS_T_WORD`` (`.pf.word`_) on the +target. 
For example:: + + #define MPS_WORD_WIDTH 32 + +_`.pf.word-shift`: The declaration of ``MPS_WORD_SHIFT`` defines the log +to the base 2 of ``MPS_WORD_WIDTH``. For example:: + + #define MPS_WORD_SHIFT 5 + +_`.pf.pf-align`: The declaration of ``MPS_PF_ALIGN`` defines the minimum +alignment which must be used for a memory block to permit any normal +processor memory access. In other words, it is the maximum alignment +required by the processor for normal memory access. For example:: + + #define MPS_PF_ALIGN 4 + +_`.pf.ulongest`: The declaration of ``MPS_T_ULONGEST`` defines the +longest available unsigned integer type on the platform. This is +usually just ``unsigned long`` but under Microsoft C on 64-bit Windows +``unsigned long`` is just 32-bits (curse them!) For example:: + + #define MPS_T_ULONGEST unsigned __int64 + +_`.pf.pf-string`: The declaration of ``MPS_PF_STRING`` defines a +string that is used to identify the target platform in version.c_. For +example:: + + #define MPS_PF_STRING "w3i6mv" + +.. _version.c: ../code/version.c + + +Target varieties +................ + +_`.var`: The target variety is handled by preprocessor directives in +impl.h.config. + +_`.var.form`: The file contains sets of directives of the form:: + + #if defined(CONFIG_VAR_COOL) + #define CONFIG_ASSERT + #define CONFIG_ASSERT_ALL + #define CONFIG_STATS + +_`.var.detect`: The configured variety is one of the variety +preprocessor definitions passed to the build function +(`.build.defs`_), for example, ``CONFIG_VAR_COOL``. These are +decoupled in order to keep the number of supported varieties small, +controlling each feature (for example, assertions) by a single +preprocessor definition, and maintaining flexibility about which +features are enabled in each variety. + +_`.var.symbols`: The directives should define whatever symbols are +necessary to control features. These symbols parameterize other parts +of the code, such as the declaration of assertions, etc. 
The symbols +should all begin with the prefix ``CONFIG_``. + + +Source code configuration +------------------------- + +_`.conf`: This section describes how the configuration may affect the +source code of the MPS. + +_`.conf.limit`: The form of dependency allowed is carefully limited to +ensure that code remains maintainable and portable (`.req.impact`_). + +_`.conf.min`: The dependency of code on configuration parameters should +be kept to a minimum in order to keep the system maintainable +(`.req.impact`_). + + +Configuration Parameters +........................ + +_`.conf.params`: The compilation of a module is parameterized by:: + + MPS_ARCH_ + MPS_OS_ + MPS_BUILD_ + MPS_PF_ + + +Abstract and Concrete Module Interfaces +....................................... + +_`.abs.caller`: Basic principle: the caller mustn't be affected by +configuration of a module. This reduces complexity and dependency of +configuration. All callers use the same abstract interface. Caller +code does not change. + +_`.abs.interface`: Abstract interface includes: + +- method definitions (logical function prototypes which may be macro methods) +- names of types +- names of constants +- names of structures and fields which form part of the interface, and + possibly their types, depending on the protocol defined +- the protocols + +_`.abs.rule`: The abstract interface to a module may not be altered by a +configuration parameter. However, the concrete interface may vary. + +For example, this isn't allowed, because there is a change in the interface:: + + #if defined(PROT_FOO) + void ProtSpong(Foo foo, Bar bar); + #else + int ProtSpong(Bar bar, Foo foo); + #endif + +This example shows how:: + + #ifdef PROTECTION + void ProtSync(Space space); + /* more decls. */ + #else /* PROTECTION not */ + #define ProtSync(space) NOOP + /* more decls. 
*/ + #endif /* PROTECTION */ + +or:: + + #if defined(PROT_FOO) + typedef struct ProtStruct { + int foo; + } ProtStruct; + #define ProtSpong(prot) X((prot)->foo) + #elif defined(PROT_BAR) + typedef struct ProtStruct { + float bar; + } ProtStruct; + #define ProtSpong(prot) Y((prot)->bar) + #else + #error "No PROT_* configured." + #endif + +Configuration parameters may not be used to vary implementations in C files. +For example, this sort of thing:: + + int map(void *base, size_t size) + { + #if defined(MPS_OS_W3) + VirtualAlloc(foo, bar, base, size); + #elif defined(MPS_OS_SU) + mmap(base, size, frob); + #else + #error "No implementation of map." + #endif + } + +This violates `.no-spaghetti`_. + + +Configuration options +--------------------- + +_`.opt`: Options select features of the MPS that are not selected by the *platform* and the *variety*. + +_`.opt.support`: The features selected by options are not supported or +documented in the public interface. This is to keep the complexity of +the MPS manageable: at present the number of supported configurations +is *platforms* × *varieties* (at time of writing, 9 × 3 = 27). Each +supported option would double (or worse) the number of supported +configurations. + +_`.opt.ansi`: ``CONFIG_PF_ANSI`` tells ``mps.c`` to exclude the +sources for the auto-detected platform, and use the generic ("ANSI") +platform instead. + +_`.opt.thread`: ``CONFIG_THREAD_SINGLE`` causes the MPS to be built +for single-threaded execution only, where locks are not needed and so +the generic ("ANSI") lock module ``lockan.c`` can be used instead of +the platform-specific lock module. + +_`.opt.poll`: ``CONFIG_POLL_NONE`` causes the MPS to be built without +support for polling. This means that garbage collections will only +happen if requested explicitly via ``mps_arena_collect()`` or +``mps_arena_step()``, but it also means that protection is not needed, +and so shield operations can be replaced with no-ops in ``mpm.h``. 
+ +_`.opt.signal.suspend`: ``CONFIG_PTHREADEXT_SIGSUSPEND`` names the +signal used to suspend a thread, on platforms using the POSIX thread +extensions module. See design.pthreadext.impl.signals_. + +.. _design.pthreadext.impl.signals: pthreadext#impl.signals + +_`.opt.signal.resume`: ``CONFIG_PTHREADEXT_SIGRESUME`` names the +signal used to resume a thread, on platforms using the POSIX thread +extensions module. See design.pthreadext.impl.signals_. + + +To document +----------- +- What about constants in config.h? +- Update files to refer to this design document. +- Explain the role of ``mps.c`` +- Reference to ``build.txt`` +- Procedures for adding an architecture, etc. +- Reduce duplication in this document (especially after + `Configuration Parameters`_ which looks like it's been pasted in from + elsewhere.) + + +References +---------- + +.. [RB_2012-09-07] "The critical path through the MPS"; Richard Brooksby; + Ravenbrook Limited; 2012-09-07; + . + +.. [RB_2012-09-13] "The Configura CET custom mainline"; Richard + Brooksby; Ravenbrook Limited; 2013-09-13; + . + + +Document History +---------------- + +- 1997-02-19 RB_ Initial draft based on discussions of configuration at + meeting.general.1997-02-05. + +- 1997-02-20? RB_ Various improvements and clarifications to the draft + discussed between RB_ and NB_ at meeting.general.1997-02-19. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2012-09-03 RB_ Updated for variety-reform_ branch, to remove untrue + things, though the document could do with a rewrite. + +- 2013-05-11 RB_ Converted to reStructuredText. Clarified various + sections, brought some up-to-date, and removed obsolete junk. + +- 2013-05-24 GDR_ Updated variety section to match the current style. + It's not true any more that some choice must be made for each + feature (for example, we now default to the hot variety if none is + selected). + +- 2013-06-06 GDR_ Removed reference to obsolete DIAG variety. 
+ +- 2021-01-10 GDR_ Added section on warnings and errors. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _NB: https://www.ravenbrook.com/consultants/nb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited <https://www.ravenbrook.com/>`_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/critical-path.txt b/mps/design/critical-path.txt new file mode 100644 index 00000000000..d3b09dad66a --- /dev/null +++ b/mps/design/critical-path.txt @@ -0,0 +1,396 @@ +.. 
mode: -*- rst -*- + +The critical path through the MPS +================================= + +:Author: Richard Brooksby +:Organization: Ravenbrook Limited +:Date: 2012-09-07 +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: + single: critical path + single: path; critical + single: Memory Pool System; critical path + + +Introduction +------------ +The critical path is a key concept in the design of the `Memory Pool +System `_. Code on the critical +path is usually executed more than any other code in the process. A +change of just one instruction on the critical path can make as much as +a 1% difference in overall run-time. A lot of the design of the MPS is +arranged around making the critical path as short and fast as possible. +This document describes the critical path and explains some of that +design, with reference to more detailed documents. + + +What makes the critical path critical +------------------------------------- +In order to determine which objects can be recycled, the garbage +collector has to frequently examine a very large number of pointers in +the program's objects. It does this by scanning_ memory, both +allocated objects and roots (such as the thread stacks). + +This means that the scanning functions must loop over pretty much *every +word in memory* sooner or later. The MPS takes great pains to avoid +scanning memory which does not need scanning, but to get good +performance, scanning must be highly optimised. + +What's more, the scanning functions apply an operation called "fix" to +every pointer (or potential pointer) that they find in the objects in +memory. Fixing also attempts to eliminate uninteresting pointers as +fast as possible, but it has to do some work on every object that is +being considered for recycling, and that can be a large proportion of +the objects in existence. The path through fixing must also be highly +optimised, especially in the early stages. 
+ + +How the MPS avoids scanning and fixing +-------------------------------------- +This is just a brief overview of how the MPS is designed to reduce +unnecessary scanning and fixing. + +Firstly, the MPS must occasionally decide which objects to try to +recycle. It does this using various facts it knows about the objects, +primarily their age and whether they've survived previous attempts at +recycling them. It then "`condemns`_" a large number of objects +at once, and each of these objects must be "preserved" by fixing +references to them. + +When the MPS condemns objects it chooses sets of objects in a small set +of "zones" in memory (preferably a single zone). The zone of an object +can be determined extremely quickly from its address, without looking at +the object or any other data structure. + +The MPS arranges that objects which will probably die at the same time +are in the same zones. + +The MPS allocates in "segments". Each segment is of the order of one +"tract" of memory (generally the same as the operating system page +size, usually 4 KiB or 8 KiB) but may be larger if there are large +objects inside. The MPS maintains a "summary" of the zones pointed to +by all the pointers in a segment from previous scans. + +So, once the MPS has decided what to condemn, it can quickly eliminate +all segments which definitely do not point to anything in those zones. +This avoids a large amount of scanning. It is an implementation of a +`remembered set`_, though it is unlike that in most other garbage +collectors. + +In addition, the fix operation can quickly ignore pointers to the wrong +zones. This is called the "zone check" and is a BIBOP_ technique. + +Even if a pointer passes the zone check, it may still not point to a +segment containing condemned objects. The next stage of the fix +operation is to look up the segment pointed to by the pointer and see if +it was condemned. This is a fast lookup. 
+ +After that, each pool class must decide whether the pointer is to a +condemned object and do something to preserve it. This code is still +critical. The MPS will have tried to condemn objects that are dead, +but those objects are still likely to be in segments with other +objects that must be preserved. The segment class fix method must +quickly distinguish between them. + +Furthermore, many objects will be preserved at least once in their +lifetime, so even the code that preserves an object needs to be highly +efficient. (Programs in languages like ML might not preserve 95% of +their objects even once, but many other programs will preserve nearly +all of theirs many times.) + + +Where to find the critical path +------------------------------- +Very briefly, the critical path consists of five stages: + +#. The scanner, which iterates over pointers in objects. The MPS has + several internal scanners, but the most important ones will be + format scanners in client code registered through + ``mps_fmt_create_k()``. + + .. note:: + + There needs to be a chapter in the manual explaining how to + write a good scanner. Then that could be linked from here. + +#. The first-stage fix, which filters out pointers inline in the + scanner. This is implemented in the ``MPS_FIX1()`` macro in + mps.h_. + + .. _mps.h: ../code/mps.h + +#. The second-stage fix, which filters out pointers using general + information about segments. This is ``_mps_fix2()`` in trace.c_. + + .. _trace.c: ../code/trace.c + +#. The third-stage fix, which filters out pointers using + segment-specific information. Implemented in segment class + functions called ``amcSegFix()``, ``loSegFix()``, etc. in pool*.c. + +#. 
Preserving the object, which might entail: + + - marking_ it to prevent it being recycled; and/or + + - copying_ it and updating the original pointer (or just + updating the pointer, if the object has previously been + copied); and/or + + - adding it to a queue of objects to be scanned later, if it + contains pointers. + + +The format scanner +------------------ +The critical path starts when a format scan method is called. That is +a call from the MPS to a client function of type ``mps_fmt_scan_t`` +registered with ``mps_fmt_create_k()``. + +Here is an example of part of a format scanner for scanning contiguous +runs of pointers, from fmtdy.c_, the scanner for the `Open Dylan`_ +runtime:: + + static mps_res_t dylan_scan_contig(mps_ss_t mps_ss, + mps_addr_t *base, mps_addr_t *limit) + { + mps_res_t res; + mps_addr_t *p; /* reference cursor */ + mps_addr_t r; /* reference to be fixed */ + + MPS_SCAN_BEGIN(mps_ss) { + p = base; + loop: if(p >= limit) goto out; + r = *p++; + if(((mps_word_t)r&3) != 0) /* pointers tagged with 0 */ + goto loop; /* not a pointer */ + if(!MPS_FIX1(mps_ss, r)) goto loop; + res = MPS_FIX2(mps_ss, p-1); + if(res == MPS_RES_OK) goto loop; + return res; + out: assert(p == limit); + } MPS_SCAN_END(mps_ss); + + return MPS_RES_OK; + } + +.. _fmtdy.c: ../code/fmtdy.c + +(To help with understanding optimisation of this code, it's written in +a pseudo-assembler style, with one line roughly corresponding to each +instruction of an idealized intermediate code.) + +The MPS C interface provides macros to try to help optimise this code. +The ``mps_ss`` object is a "scan state" and contains data that is used +to eliminate uninteresting pointers now, and record information which +will be used to reduce scanning in future by maintaining the +remembered set. + +The macros ``MPS_SCAN_BEGIN()`` and ``MPS_SCAN_END()`` load key data +from the scan state into local variables, and hopefully into processor +registers. 
This avoids aliasing values that we know won't change when +calls are made to ``_mps_fix2()`` later, and so allows the compiler to +keep the scan loop small and avoid unnecessary memory references. + +This scanner knows that words not ending in 0b00 aren't pointers to +objects, so it eliminates them straight away. This is a kind of +`reference tag`_ chosen by the client for its object representation. + +Next, the pointer is tested using ``MPS_FIX1()``. This performs fast +tests on the pointer without using any other memory. In particular, it +does the "zone check" described in section 3. If a pointer fails these +tests, it isn't interesting and can be skipped. It is very important +to proceed to the next pointer as fast as possible in this case. + +Having passed these tests, we need to fix the pointer using other data +in memory, and possibly call the MPS to preserve the object. This is +what ``MPS_FIX2()`` does. The important distinction here is that +``MPS_FIX2()`` can fail and return an error code, which must be +propagated without ado by returning from the scanner. Separating +``MPS_FIX1()`` from ``MPS_FIX2()`` helps keep the error handling code +away from the tight loop with the zone check. + +``MPS_FIX*``, the macro/inline part of the fix operation, are referred +to as "fix stage 1" or "the first stage fix" in other documents and +comments. + +If these inline checks pass, ``_mps_fix2()`` is called. If the MPS has +been built as a separate object file or library, this is where the +function call out of the scan loop happens. Since version 1.110 of the +MPS, we encourage clients to compile the MPS in the same translation +unit as their format code, so that the compiler can be intelligent +about inlining parts of ``_mps_fix2()`` in the format scanner. The +instructions for doing this are in `Building the Memory Pool System +`_, part of the manual. + +.. 
_build.txt: ../manual/build.txt + + +The second stage fix in the MPM +------------------------------- +If a pointer gets past the first-stage fix filters, it is passed to +``_mps_fix2()``, the "second stage fix". The second stage can filter +out yet more pointers using information about segments before it has +to consult the pool class. + +The first test is to determine if the address points to a *chunk* (a +contiguous region of address space managed by the arena). Addresses +that do not point to any chunk (for example, ambiguous references that +are not in fact pointers) are rejected immediately. See +``ChunkOfAddr()``. + +When there are many chunks (that is, when the arena has been extended +many times), this test can consume the majority of the garbage +collection time. This is the reason that it's important to give a good +estimate of the amount of address space you will ever occupy with +objects when you initialize the arena. + +The second test applied is the "tract test". The MPS looks up the +tract containing the address in the tract table, which is a simple +linear table indexed by the address shifted---a kind of flat page +table. See ``TractOfAddr()``. + +If the pointer is in a tract allocated with garbage collected objects, +then the table also contains a pointer to a "segment", which contains +a bitfield representing the "white set"---the set of garbage +collection traces for which the tract is "interesting". If a segment +isn't interesting, then we know that it contains no condemned objects, +and we can filter out the pointer. + +The MPM can't know anything about the internal layout of the segment, +so at this point we dispatch to the third stage fix. + +This dispatch is slightly subtle. We have a cache of the function to +dispatch to in the scan state, which has recently been looked at and +is with luck still in the processor cache. 
The reason there is a +dispatch at all is to allow for a fast changeover to emergency garbage +collection, or overriding of garbage collection with extra operations. +Those are beyond the scope of this document. Normally, ``ss->fix`` +points at ``SegFix()``. + +``SegFix()`` is passed the segment, which is fetched from the tract +table entry, and that should be in the cache. ``SegFix()`` itself +dispatches to the segment class. + + +The third stage fix in the segment class +---------------------------------------- +The final stage of fixing is entirely dependent on the segment class. +The MPM can't, in general, know how the objects within a segment are +arranged, so this is segment class specific code. + +Furthermore, the segment class must make decisions based on the +"reference rank" of the pointer. If a pointer is ambiguous +(``RankAMBIG``) then it can't be changed, so even a copying segment +class can't move an object. On the other hand, if the pointer is weak +(``RankWEAK``) then the segment fix method shouldn't preserve the +object at all, even if it's condemned. + +The exact details of the logic that the segment fix must implement in +order to co-operate with the MPM and other pools are beyond the scope +of this document, which is about the critical path. Since it is on +the critical path, it's important that whatever the segment fix does is +simple and fast and returns to scanning as soon as possible. + +The first step, though, is to further filter out pointers which aren't +to objects, if that's its policy. Then, it may preserve the object, +according to its policy, and possibly ensure that the object gets +scanned at some point in the future, if it contains more pointers. + +If the object is moved to preserve it (for instance, if the pool class +implements a copying collector), or was already moved when fixing a +previous reference to it, the reference being fixed must be updated +(this is the origin of the term "fix"). 
+ +As a simple example, ``loSegFix()`` is the segment fix method for +segments belonging to the LO (Leaf Object) pool class. It implements a +marking garbage collector, and does not have to worry about scanning +preserved objects because it is used to store objects that don't +contain pointers. (It is used in compiler run-time systems to store +binary data such as character strings, thus avoiding any scanning, +decoding, or remembered set overhead for them.) + +``loSegFix()`` filters any ambiguous pointers that aren't aligned, +since they can't point to objects it allocated. Otherwise it subtracts +the segment base address and shifts the result to get an index into a +mark bit table. If the object wasn't marked and the pointer is weak, +then it sets the pointer to zero, since the object is about to be +recycled. Otherwise, the mark bit is set, which preserves the object +from recycling when ``loSegReclaim()`` is called later on. +``loSegFix()`` illustrates about the minimum and most efficient thing +a segment fix method can do. + + +Other considerations +-------------------- +So far this document has described the ways in which the garbage +collector is designed around optimising the critical path. There are a +few other things that the MPS does that are important. + +Firstly, inlining is very important. The first stage fix is inlined +into the format scanner by being implemented in macros in mps.h_. And +to get even better inlining, `we recommend `_ that the +whole MPS is compiled in a single translation unit with the client +format and that strong global optimisation is applied. + +Secondly, we are very careful with code annotations on the critical +path. Assertions, statistics, and telemetry are all disabled on the +critical path in "hot" (production) builds. (In fact, it's because the +critical path is critical that we can afford to leave annotations +switched on elsewhere.) 
+ +Last, but by no means least, we pay a lot of brainpower and measurement +to the critical path, and are very very careful about changing it. Code +review around the critical path is especially vigilant. + +And we write long documents about it. + + +References +---------- +.. _scanning: https://www.memorymanagement.org/glossary/s.html#scan +.. _marking: https://www.memorymanagement.org/glossary/m.html#marking +.. _copying: https://www.memorymanagement.org/glossary/c.html#copying.garbage.collection +.. _condemns: https://www.memorymanagement.org/glossary/c.html#condemned.set +.. _BIBOP: https://www.memorymanagement.org/glossary/b.html#bibop +.. _remembered set: https://www.memorymanagement.org/glossary/r.html#remembered.set +.. _reference tag: https://www.memorymanagement.org/glossary/t.html#tag +.. _Open Dylan: https://opendylan.org/ + + +Document History +---------------- +- 2012-09-07 RB_ First draft. +- 2013-05-10 RB_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2012–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/diag.txt b/mps/design/diag.txt new file mode 100644 index 00000000000..7fc5864d5d7 --- /dev/null +++ b/mps/design/diag.txt @@ -0,0 +1,264 @@ +.. mode: -*- rst -*- + +Diagnostic feedback +=================== + +:Tag: design.mps.diag +:Author: Richard Kistruck +:Date: 2007-06-28 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: diagnostic feedback; design + + +Introduction +------------ + +_`.intro`: This document describes how to use the diagnostic feedback +mechanism in the Memory Pool System. + +_`.sources`: Initially abased on [RHSK_2007-04-13]_ and [RHSK_2007-04-18]_. + + +Overview +-------- + +Diagnostic feedback is information created by the MPS diagnostic +system for the purpose of helping MPS programmers and client +programmers. + +Such a piece of information is called "a diagnostic". (See also +`.parts`_.) + +A diagnostic is not intended to be visible to end users, or readable +by them. + +A diagnostic is not intended to be stable from one release to the +next: it may be modified or removed at any time. + + +Requirements +------------ + +MPS diagnostic feedback code must do these things: + +- calculate, store, and propagate data; +- collate, synthesise, and format it into a human-useful diagnostic; +- control (for example, filter) output of diagnostics; +- use a channel to get the diagnostic out. 
+ + +Usage +----- + +To get diagnostic output from the MPS, you must use a variety with +diagnostics compiled-in. Currently, that means variety.cool. See +``config.h``. + +There are two mechanism for getting diagnostic output: + +#. Automatically via the telemetry system. See design.mps.telemetry_, + and the "Telemetry" chapter in the manual. + + .. _design.mps.telemetry: telemetry + +#. Manually via the debugger. In the debugger, set break points at the + places where you want to inspect data structures (or wait for the + debugger to be entered via an ``abort()`` call or unhandled + segmentation fault). Then at the debugger command prompt, run + ``Describe()`` commands of your choice. For example:: + + (gdb) run + Starting program: mv2test + Reading symbols for shared libraries +............................. done + cbs.c:94: MPS ASSERTION FAILED: !cbs->inCBS + + Program received signal SIGABRT, Aborted. + 0x00007fff83e42d46 in __kill () + (gdb) frame 12 + #12 0x000000010000b1fc in MVTFree (pool=0x103ffe160, base=0x101dfd000, size=5024) at poolmv2.c:711 + 711 Res res = CBSInsert(MVTCBS(mvt), base, limit); + (gdb) p MVTDescribe(mvt, mps_lib_get_stdout(), 0) + MVT 0000000103FFE160 { + minSize: 8 + meanSize: 42 + maxSize: 8192 + fragLimit: 30 + reuseSize: 16384 + fillSize: 8192 + availLimit: 90931 + abqOverflow: FALSE + splinter: TRUE + splinterBase: 0000000106192FF0 + splinterLimit: 0000000106193000 + size: 303104 + allocated: 262928 + available: 40176 + unavailable: 0 + # ... etc ... + } + + +How to write a diagnostic +------------------------- + +Compile away in non-diag varieties; no side effects +................................................... + +Wrap code with the ``STATISTIC`` and ``METER`` macros, to make sure +that non-diagnostic varieties do not execute diagnostic-generating +code. + +Diagnostic-generating code must have no side effects. + + +Writing good paragraph text +........................... + +Make your diagnostics easy to understand! 
Other people will read your +diagnostics! Make them clear and helpful. Do not make them terse and +cryptic. If you use symbols, print a key in the diagnostic. + + +How the MPS diagnostic system works +----------------------------------- + +Parts of the MPS diagnostic system +.................................. + +_`.parts`: The following facilities are considered part of the MPS +diagnostic system: + +- the ``Describe()`` methods. +- the ``STATISTIC`` macros (see ``mpm.h``); +- the ``METER`` macros and meter subsystem. + + +Statistics +.......... + +_`.stat`: The statistic system collects information about the +behaviour and performance of the MPS that may be useful for MPS +developers and customers, but which is not needed by the MPS itself +for internal decision-making. + +_`.stat.remove`: The space needed for these statistics, and the code +for maintaining them, can therefore be removed (compiled out) in some +varieties. + +_`.stat.config`: Statistics are compiled in if ``CONFIG_STATS`` is +defined (in the cool variety) and compiled out if +``CONFIG_STATS_NONE`` is defined (in the hot and rash varieties). + +``STATISTIC_DECL(decl)`` + +_`.stat.decl`: The ``STATISTIC_DECL`` macro is used to wrap the +declaration of storage for a statistic. Note that the expansion +supplies a terminating semi-colon and so it must not be followed by a +semi-colon in use. This is so that it can be used in structure +declarations. + +``STATISTIC(gather)`` + +_`.stat.gather`: The ``STATISTIC`` macro is used to gather statistics. +The argument is a statement and the expansion followed by a semicolon +is syntactically a statement. The macro expends to ``NOOP`` in +non-statistical varieties. (Note that it can't use ``DISCARD_STAT`` to +check the syntax of the statement because it is expected to use fields +that have been compiled away by ``STATISTIC_DECL``, and these will +cause compilation errors.) 
+ +_`.stat.gather.effect`: The argument to the ``STATISTIC`` macro is not +executed in non-statistical varieties and must have no side effects, +except for updates to fields that are declared in ``STATISTIC_DECL``, +and telemetry output containing the values of such fields. + +``STATISTIC_WRITE(format, arg)`` + +_`.stat.write`: The ``STATISTIC_WRITE`` macro is used in ``WriteF()`` +argument lists to output the values of statistics. + + +Related systems +............... + +The MPS diagnostic system is separate from the following other MPS +systems: + +- The telemetry-log-events system. This emits much more data, in a + less human-readable form, requires MPS-aware external tools, and is + more stable from release to release). In non-diagnostic telemetry + varieties, the telemetry-log-events system emits events that log all + normal MPS actions. In diagnostic telemetry varieties, it may emit + additional events containing diagnostic information. Additionally, + the telemetry-log-events stream might in future be available as a + channel for emitting human-readable text diagnostics. See also + design.mps.telemetry_. + +- The MPS message system. This is present in all varieties, and + manages asynchronous communication from the MPS to the client + program). However, the MPS message system might in future also be + available as a channel for emitting diagnostics. See also + design.mps.message_. + + .. _design.mps.message: message + + + +References +---------- + +.. [RHSK_2007-04-13] Richard Kistruck. 2007-04-13. "`diagnostic feedback from the MPS `_". + +.. [RHSK_2007-04-18] Richard Kistruck. 2007-04-18. "`Diverse types of diagnostic feedback `_". + + +Document History +---------------- + +- 2007-06-28 Richard Kistruck Create. Telemetry-log-events system is a possible channel. + +- 2007-06-29 Richard Kistruck Feedback (not output), each "a + diagnostic". Parts of the system; related systems. Link to initial + design email. 
+ +- 2007-08-14 Richard Kistruck (Diag Filtering). Expand section: How to + see some MPS diagnostic output, with what a diagnostic is, and how + to filter it. New section: How to write a diagnostic. Various minor + updates and corrections. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +- 2013-06-05 GDR_ DIAG part of the system has been removed. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/doc.txt b/mps/design/doc.txt new file mode 100644 index 00000000000..0dd1b038893 --- /dev/null +++ b/mps/design/doc.txt @@ -0,0 +1,361 @@ +.. 
mode: -*- rst -*- + +Documentation +============= + +:Tag: design.mps.doc +:Author: Gareth Rees +:Date: 2018-09-18 +:Status: draft design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: documentation; design + + +Introduction +------------ + +_`.intro`: This is the design of the documentation system for the +Memory Pool System. + +_`.readership`: This document is intended for any MPS developer. + + +Types +----- + +_`.type`: The MPS has multiple types of documentation, suitable for +different audiences. + +_`.type.comment`: Comments in the code provide information that is +required in order for developers to make correct edits to nearby code. +(Audience: MPS developers editing nearby code.) + +_`.type.design`: Design documentation lists requirements and explains +how the code meets the requirements. (Audience: MPS developers working +on a subsystem.) + +_`.type.devguide`: Developer guides provide general guidance for +developers, not specific to any particular subsystem. (Audience: MPS +developers generally.) + +_`.type.procedure`: Procedures list the steps for carrying out +development tasks. (Audience: MPS developers who need to carry out +particular tasks reliably.) + +_`.type.tutorial`: Tutorials describe how to use the MPS to meet +client program requirements. (Audience: beginner client program +developers.) + +_`.type.reference`: Reference documentation specifies the public +features of the MPS. (Audience: expert client program developers.) + +_`.type.mmref`: The Memory Management Reference describes general +principles of memory management, with cross-references to the MPS +documentation. (Audience: the world.) + + +Requirements +------------ + +_`.req.source`: Derived from [RB_2013-05-09]_. + +_`.req.easy`: It must be easy to read and write documentation using +standard text editors. Barriers to documentation must be low. 
+ +_`.req.presentation`: It must be possible to process documentation +into presentation formats, for example web pages. + +_`.req.single-source`: Documents must have a single source. Processing +into other formats must be automatic and not depend on hand editing or +maintaining parallel versions. + +_`.req.durable`: The format of documents should be supported for the +foreseeable future. It must not require continual updating to keep up +with changes to processing software. + +_`.req.design.ref`: It must be easy to reference points made in design +documents from the code. + +_`.req.design.standalone`: Design documents must stand alone: they +must not require particular software to make them readable or +complete. + + +Implementation +-------------- + +_`.impl.rst`: Documents are written in reStructuredText_ (RST). + +.. _reStructuredText: http://docutils.sourceforge.net/rst.html + +_`.impl.design`: Design documents are written in plain RST (with no +custom directives) to meet `.req.design.standalone`_. + +_`.impl.design.pelican`: Design documents are converted to HTML using +|RstReader|_ as part of `Charlotte +`__. + +.. |RstReader| replace:: ``pelican.readers.RstReader`` +.. _RstReader: https://fossies.org/dox/pelican-3.7.1/classpelican_1_1readers_1_1RstReader.html + +_`.impl.design.github`: Design documents are also `rendered as HTML by GitHub `__. + +_`.impl.manual`: The manual is written in RST using Sphinx_ extensions +and custom manual extensions (see `.ext`_). + +.. _Sphinx: https://www.sphinx-doc.org/en/master/ + +_`.impl.manual.sphinx`: The manual is converted to HTML using the +Sphinx_ documentation generator. + +_`.impl.manual.design`: Design documents are automatically processed +for inclusion in the manual using a set of formatting conventions (see +`.fmt`_). + + +Manual extensions +----------------- + +_`.ext`: These are reStructuredText directives and roles used by the +MPS manual. See manual/source/extensions/mps/__init__.py. 
+ +_`.ext.aka`: The ``aka`` directive generates an "Also known as" +section. This should be used in a glossary entry, and should contain a +comma-separated, alphabetically ordered, list of glossary entries (in +italics) that are synonyms for this glossary entry. + +_`.ext.bibref`: The ``bibref`` directive generates a "Related +publication" or "Related publications" section. This should be used in +a glossary entry, and should contain a comma-separated, alphabetically +ordered, list of ``:ref:`` roles referring to entries in the +bibliography. + +_`.ext.deprecated`: The ``deprecated`` directive generates a +"Deprecated" section. It should be used in a description of a public +interface in the MPS Reference, and describe the first version in +which the interface was deprecated, and the interface that should be +used instead. There may be an initial "starting with version 1.115" +paragraph, but this is unnecessary if the directive is used in the +"Deprecated interfaces" chapter. + +_`.ext.historical`: The ``historical`` directive generates a +"Historical note" section. This should be used in a glossary entry, +and should contain material of historical interest, for example the +origin of the term, or ways in which it was formerly used. + +_`.ext.link`: The ``link`` directive generates a "Related link" or +"Related links" section. This should be used in a glossary entry, and +should contain a comma-separated list of references to URLs. + +_`.ext.note`: The ``note`` directive generates a "Note" or "Notes" +section. This should consist of a paragraph or a numbered list +containing especially important information about an interface that a +user should be aware of when using it. + +_`.ext.opposite`: The ``opposite`` directive generates an "Opposite +term" or "Opposite terms" section. This should be used in a glossary +entry, and should contain a comma-separated, alphabetically ordered, +list of ``:term:`` roles referring to glossary entries with opposite +meaning. 
+ +_`.ext.relevance`: The ``relevance`` directive generates a "Relevance +to memory management" section. This should be used in a glossary +entry, and should contain an explanation of how the term relates to +memory management, if this is not obvious. + +_`.ext.see`: The ``see`` directive generates a "See" section. This +should be used in a glossary entry, and should contain a single +``:term:`` role referring to the entry for which the currente entry is +a synonym. + +_`.ext.seealso`: The ``seealso`` directive generates a "See also" +section. This should be used in a glossary entry, and should contain a +comma-separated, alphabetically ordered, list of ``:term:`` roles +referring to glossary entries that relate to the entry but are neither +synonyms for it (`.ext.aka`_), nor opposites (`.ext.opposite`_), nor +similar (`.ext.similar`_). + +_`.ext.similar`: The ``similar`` directive generates a "Similar term" +or "Similar terms" section. This should be used in a glossary entry, +and should contain a comma-separated, alphabetically ordered, list of +``:term:`` roles referring to glossary entries with similar meaning to +the entry but which are not synonyms for it (`.ext.aka`_). + +_`.ext.specific`: The ``mps:specific`` directive generates an "In the +MPS" section. This should be used in a glossary entry, and should +contain an explanation of how the glossary entry pertains to the MPS. +If the term is idiosyncratic to the MPS, for example "spare committed +memory" then the entire glossary entry should consist of a single +``mps:specific`` directive to make it clear that the term is not in +general use. + + +Design formatting conventions +----------------------------- + +_`.fmt`: This section lists formatting conventions used in the design +documentation that are used to generate extended markup when the +design document is converted for use in the MPS manual. See +manual/source/extensions/mps/designs.py. 
+ +_`.fmt.function-decl`: A paragraph consisting of a function +declaration on a single line formatted as code, for example:: + + ``void LandFinish(Land land)`` + +is translated into a ``c:function`` directive:: + + .. c:function:: void LandFinish(Land land) + + +_`.fmt.macro-decl`: A paragraph consisting of a macro declaration on a +single line formatted as code, for example:: + + ``RING_FOR(node, ring, next)`` + +is translated into a ``c:macro`` directive:: + + .. c:macro:: RING_FOR(node, ring, next) + +_`.fmt.macro`: Macros are identified by having names consisting of +capital letters, numbers, and underscore, or appearing in the list of +exceptions given by the ``MACROS`` global in designs.py. + +_`.fmt.type-def`: A paragraph consisting of a type definition on a +single line formatted as code, for example:: + + ``typedef LandStruct *Land`` + +is translated into a ``c:type`` directive:: + + .. c:type:: LandStruct *Land + +_`.fmt.function-ref`: A word formatted as code and suffixed by ``()``, +for example:: + + This saves a separate call to ``LandDelete()``, and uses the + knowledge of exactly where we found the range. + +is translated into a ``:c:func:`` role:: + + This saves a separate call to :c:func:`LandDelete`, and uses the + knowledge of exactly where we found the range. + +_`.fmt.type-ref`: The name of an MPS type formatted as code, for +example:: + + The function must return a ``Bool`` indicating whether to continue + with the iteration. + +is translated into a ``:c:type:`` role:: + + The function must return a :c:type:`Bool` indicating whether to + continue with the iteration. + +The list of MPS types thus converted is given by the ``TYPES`` global +in designs.py, plus any word matching ``mps_[a-z_]+_[stu]``, plus any +word ending ``Class``, ``Function``, ``Method``, ``Struct``, or +``Union``. + +_`.fmt.tag`: A paragraph starting with an MPS tag, for example:: + + _`.type.land`: The type of a generic land instance. 
+ +is translated into an ``:mps:tag:`` role:: + + :mps:tag:`type.land` The type of a generic land instance. + +_`.fmt.ref`: Cross-references to tags, for example:: + + A *node* is used in the typical data structure sense to mean an + element of a tree (see also `.type.tree`_). + +is translated into an ``:mps:ref:`` role:: + + A *node* is used in the typical data structure sense to mean an + element of a tree (see also :mps:ref:`.type.tree`). + +_`.fmt.history`: The section "Document History" is removed. + +_`.fmt.copyright`: The section "Copyright and License" is removed. + +_`.fmt.sections`: Section numbers are removed. + +_`.fmt.metadata`: Metadata roles are removed, except for: + +_`.fmt.metadata.tag`: ``:Tag:``, which is translated into an +``mps:prefix`` directive; and + +_`.fmt.metadata.index`: ``:Index Terms:``, which is is translated into +an ``index`` directive. + +_`.fmt.citation`: Citations are translated from design style:: + + [Citation] "Title"; Author; Date; . + +into manual style:: + + [Citation] Author. Date. "`Title `__". + +_`.fmt.link.relative`: Project-relative links must be specified using +`named hyperlink targets`_ whose targets start with ``../``, for +example:: + + ``#ifdefs``, such as in mps.c_. + + .. _mps.c: ../code/mps.c + +The target is adjusted to reflect the different location of the manual +sources relative to the design sources. + +.. _named hyperlink targets: https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#hyperlink-targets + + + +References +---------- + +.. [RB_2013-05-09] + "MPS design document format and process"; + Richard Brooksby; Ravenbrook Limited; 2013-05-09; + . + + +Document History +---------------- + +- 2018-09-18 GDR_ Created based on [RB_2013-05-09]_. +- 2023-02-15 RB_ Updating for migration to GitHub. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ +.. _RB: mailto:rb@ravenbrook.com + + +Copyright and License +--------------------- + +Copyright © 2018–2023 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/exec-env.txt b/mps/design/exec-env.txt new file mode 100644 index 00000000000..2550f3fbde8 --- /dev/null +++ b/mps/design/exec-env.txt @@ -0,0 +1,175 @@ +.. mode: -*- rst -*- + +Execution environment +===================== + +:Tag: design.mps.exec-env +:Author: Richard Brooksby +:Date: 1996-08-30 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: execution; environment + + +Introduction +------------ + +_`.intro`: This document describes how the MPS is designed to work in +different execution environments (see standard.ansic section 5.1.2). 
+
+
+Discussion
+----------
+
+_`.std`: These are the relevant statements from the International
+Standard ISO/IEC 9899:1990 "Programming languages — C", with tags
+added:
+
+    4. Compliance
+
+    […]
+
+    _`.std.com.hosted`: A "conforming hosted implementation" shall
+    accept any strictly conforming program. _`.std.com.free`: A
+    "conforming freestanding implementation" shall accept any strictly
+    conforming program in which the use of the features specified in
+    the library clause (clause 7) is confined to the contents of the
+    standard headers ``<float.h>``, ``<limits.h>``, ``<stdarg.h>``,
+    and ``<stddef.h>``. A conforming implementation may have
+    extensions (including additional library functions), provided they
+    do not alter the behaviour of any strictly conforming program.
+
+    […]
+
+    5.1.2 Execution environments
+
+    _`.std.def`: Two execution environments are defined:
+    "freestanding" and "hosted". […]
+
+    _`.std.init`: All objects in static storage shall be "initialized"
+    (set to their initial values) before program startup. The manner
+    and timing of such initialization are otherwise unspecified. […]
+
+    _`.std.term`: "Program termination" returns control to the execution
+    environment. […]
+
+    5.1.2.1 Freestanding environment
+
+    _`.std.free.lib`: Any library facilities available to a
+    freestanding environment are implementation-defined.
+
+    _`.std.free.term`: The effect of program termination in a
+    free-standing environment is implementation-defined.
+
+
+Interpretation
+--------------
+
+_`.int.free`: We interpret the "freestanding environment" as being the
+sort of environment you'd expect in an embedded system. The classic
+example is a washing machine. There are no library facilities
+available, only language facilities.
+
+_`.int.free.lib`: We assume that the headers ``<float.h>``,
+``<limits.h>``, ``<stdarg.h>`` and ``<stddef.h>`` are available in the
+freestanding environment, because they define only language features
+and not library calls.
We assume that we may not make use of +definitions in any other headers in freestanding parts of the system. + +_`.int.free.term`: We may not terminate the program in a freestanding +environment, and therefore we may not call ``abort()``. We can't call +``abort()`` anyway, because it's not defined in the headers listed +above (`.int.free.lib`_). + +_`.int.free.term.own`: We can add an interface for asserting, that is, +reporting an error and not returning, for use in debugging builds +only. This is because the environment can implement this in a way that +does not return to the MPS, but doesn't terminate, either. We need +this if debugging builds are to run in a (possibly simulated or +emulated) freestanding environment at all. + + +Requirements +------------ + +_`.req`: It should be possible to make use of the MPS in a +freestanding environment such as an embedded controller. + +_`.req.conf`: There can be configurations of the MPS that are not +freestanding (such as using a VM arena). + + +Architecture +------------ + +_`.arch`: Like Gaul, the MPS is divided into three parts: the *core*, +the *platform*, and the *plinth*. + +_`.arch.core`: The *core* consists of the Memory Pool Manager (the +core data structures and algorithms) and the built-in Pool Classes. +The core must be freestanding. + +_`.arch.platform`: The *platform* provides the core with interfaces to +features of the operating system and processor (locks, memory +protection, mutator context, stack probing, stack and register +scanning, thread management, and virtual memory). The platform is +specialized to a particular environment and so can safely use whatever +features are available in that environment. + +_`.arch.plinth`: The *plinth* provides the core with interfaces to +features of the user environment (time, assertions, and logging). See +design.mps.io_ and design.mps.lib_. + +.. _design.mps.io: io +.. 
_design.mps.lib: lib + +_`.arch.distinction`: The distinction between *plinth* and *platform* +is that end users will need to customize the features provided by the +plinth for most programs that use the MPS (and so the interface needs +to be simple, documented and supported), whereas implementing the +platform interface is a specialized task that will typically be done +once for each platform and then maintained alongside the core. + + +Document History +---------------- + +- 1996-08-30 RB_ Created to clarify concepts needed for + design.mps.io_. + +- 2015-02-06 GDR_ Converted to reStructuredText; bring the + architecture description up to date by describing the platform + interface. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 1996–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/failover.txt b/mps/design/failover.txt new file mode 100644 index 00000000000..b1d4c7591bf --- /dev/null +++ b/mps/design/failover.txt @@ -0,0 +1,133 @@ +.. mode: -*- rst -*- + +Fail-over allocator +=================== + +:Tag: design.mps.failover +:Author: Gareth Rees +:Date: 2014-04-01 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: fail-over allocator; design + + +Introduction +------------ + +_`.intro`: This is the design of the fail-over allocator, a data +structure for the management of address ranges. + +_`.readership`: This document is intended for any MPS developer. + +_`.source`: design.mps.land_, design.mps.poolmvt_, design.mps.poolmvff_. + +_`.overview`: The fail-over allocator combines two *land* instances. +It stores address ranges in one of the lands (the *primary*) unless +insertion fails, in which case it falls back to the other (the +*secondary*). The purpose is to be able to combine two lands with +different properties: with a CBS_ for the primary and a +Freelist_ for the secondary, operations are fast so long as there +is memory to allocate new nodes in the CBS_, but operations can +continue using the Freelist_ when memory is low. + +.. _CBS: cbs +.. _Freelist: freelist +.. _design.mps.land: land +.. _design.mps.poolmvt: poolmvt +.. 
_design.mps.poolmvff: poolmvff + + +Interface +--------- + +_`.land`: The fail-over allocator is an implementation of the *land* +abstract data type, so the interface consists of the generic functions +for lands. See design.mps.land_. + + +Types +..... + +``typedef struct FailoverStruct *Failover`` + +_`.type.failover`: The type of fail-over allocator structures. A +``FailoverStruct`` is typically embedded in another structure. + + +Classes +....... + +_`.class`: ``CLASS(Failover)`` is the fail-over allocator class, a +subclass of ``CLASS(Land)`` suitable for passing to ``LandInit()``. + + +Keyword arguments +................. + +When initializing a fail-over allocator, ``LandInit()`` requires these +two keyword arguments: + +* ``FailoverPrimary`` (type ``Land``) is the primary land. + +* ``FailoverSecondary`` (type ``Land``) is the secondary land. + + +Implementation +-------------- + +_`.impl.assume`: The implementation assumes that the primary is fast +but space-hungry (a CBS_) and the secondary is slow but space-frugal +(a Freelist_). This assumption is used in the following places: + +_`.impl.assume.flush`: The fail-over allocator attempts to flush the +secondary to the primary before any operation, in order to benefit +from the speed of the primary wherever possible. In the normal case +where the secondary is empty this is cheap. + +_`.impl.assume.delete`: When deletion of a range on the primary fails +due to lack of memory, we assume that this can only happen when there +are splinters on both sides of the deleted range, one of which needs +to be allocated a new node (this is the case for CBS_), and that +therefore the following procedure will be effective: first, delete the +enclosing range from the primary (leaving no splinters and thus +requiring no allocation), and re-insert the splinters (failing over to +the secondary if necessary). + + + +Document History +---------------- + +- 2014-04-03 GDR_ Created. + +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/finalize.txt b/mps/design/finalize.txt new file mode 100644 index 00000000000..d7f6816f2c4 --- /dev/null +++ b/mps/design/finalize.txt @@ -0,0 +1,183 @@ +.. mode: -*- rst -*- + +Finalization +============ + +:Tag: design.mps.finalize +:Author: David Jones +:Date: 1997-02-14 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: finalization; design + + +Overview +-------- + +_`.overview`: Finalization is implemented internally using the MRG +pool class (design.mps.poolmrg_). 
Objects can be registered for +finalization by calling ``mps_finalize()``. Notification of +finalization is given to the client via the messaging interface +(design.mps.message_). The MRG pool class implements a ``Message`` +subclass which implements the finalization messages. + +.. _design.mps.poolmrg: poolmrg +.. _design.mps.message: message + + +Requirements +------------ + +_`.req`: Historically only Dylan had requirements for finalization, +see req.dylan.fun.final_. Now (2003-02-19) Configura have requirements +for finalization. Happily they are very similar. + +.. _req.dylan.fun.final: https://info.ravenbrook.com/project/mps/import/2001-09-27/mminfo/doc/req/dylan + + +Implementation +-------------- + +_`.impl.over`: Registering an object for finalization corresponds to +allocating a reference of rank FINAL to that object. This reference is +allocated in a guardian object in a pool belonging to the MRG pool +class (see design.mps.poolmrg_). + +.. _design.mps.poolmrg: poolmrg + +_`.impl.arena.struct`: A single pool belonging to the MRG pool class +and used for managing final references is kept in the arena and +referred to as the "final pool". + +_`.impl.arena.lazy`: The final pool is lazily created. It is not +created until the first object is registered for finalization. + +_`.impl.arena.flag`: There is a flag in the Arena that indicates +whether the final pool has been created yet or not. + +_`.impl.scan`: An object is determined to be finalizable if it is +fixed at rank FINAL for a trace, and was not fixed at any lower rank +for that trace. See design.mps.poolmrg.scan.wasold_. + +.. _design.mps.poolmrg.scan.wasold: poolmrg#.scan.wasold + +_`.impl.message`: When an object is determined to be finalizable, a +message for that object is posted to the arena's message queue. + +_`.impl.arena-destroy.empty`: ``ArenaDestroy()`` empties the message +queue by calling ``MessageEmpty()``. 
+ +_`.impl.arena-destroy.final-pool`: If the final pool has been created +then ``ArenaDestroy()`` destroys the final pool. + +_`.impl.access`: ``mps_message_finalization_ref()`` needs to access +the finalization message to retrieve the reference and then write it +to where the client asks. This must be done carefully, in order to +avoid invalidating collection invariants such as the segment summary. + +_`.impl.invariants`: We protect the invariants by using +``ArenaRead()`` and ``ArenaWrite()`` to read and write the reference +via the software barrier. + + +External interface +------------------ + +_`.if.register`: ``mps_finalize()`` registers an object for +finalization. + +_`.if.deregister`: ``mps_definalize()`` deregisters an object for +finalization. It is an error to definalize an object that has not been +registered for finalization. + +_`.if.get-ref`: ``mps_message_finalization_ref()`` returns the reference +to the finalized object stored in the finalization message. + +_`.if.multiple`: The external interface allows an object to be +registered multiple times, but does not specify the number of +finalization messages that will be posted for that object. + + +Internal interface +------------------ + +``Res ArenaFinalize(Arena arena, Ref addr)`` + +_`.int.finalize.create`: Creates the final pool if it has not been +created yet. + +_`.int.finalize.alloc`: Allocates a guardian in the final pool. + +_`.int.finalize.alloc.multiple`: A consequence of this implementation +is that if an object is finalized multiple times, then multiple +guardians are created in the final pool, and so multiple messages will +be posted to the message queue when the object is determined to be +finalizable. But this behaviour is not guaranteed by the +documentation, leaving us free to change the implementation. + +_`.int.finalize.write`: Writes a reference to the object into the +guardian object. + +_`.int.finalize.all`: That's all. 
+ +_`.int.finalize.error`: If either the creation of the pool or the +allocation of the object fails then the error is returned to the +caller. + +_`.int.finalize.error.no-unwind`: This function does not need to do +any unwinding in the error cases because the creation of the pool is +not something that needs to be undone. + +``Res ArenaDefinalize(Arena arena, Ref obj)`` + +_`.int.definalize.fail`: If the final pool has not been created, +return ``ResFAIL`` immediately. + +_`.int.definalize.search`: Otherwise, search for a guardian in the +final pool that refers to the object and which has not yet been +finalized. If one is found, delete it and return ``ResOK``. Otherwise +no guardians in the final pool refer to the object, so return +``ResFAIL``. + + +Document History +---------------- + +- 1997-02-14 David Jones. Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-04-13 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/fix.txt b/mps/design/fix.txt new file mode 100644 index 00000000000..5809824157c --- /dev/null +++ b/mps/design/fix.txt @@ -0,0 +1,98 @@ +.. mode: -*- rst -*- + +The generic fix function +======================== + +:Tag: design.mps.fix +:Author: Richard Brooksby +:Date: 1995-08-25 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: fix function; design + + +Introduction +------------- + +_`.intro`: Fix is the interface through which the existence of +references are communicated from the MPS client to the MPS. The +interface also allows the value of such references to be changed (this +is necessary in order to implement a moving memory manager). + + +Was-marked protocol +------------------- + +_`.was-marked`: The ``ScanState`` has a ``Bool wasMarked`` +field. This is used for finalization. + +_`.was-marked.not`: If a segment's fix method discovers that the +object referred to by the ref (the one that it is supposed to be +fixing) has not previously been marked (that is, this is the first +reference to this object that has been fixed), and that the object was +white (that is, in condemned space), it should (but need not) set the +``wasMarked`` field to ``FALSE`` in the passed ``ScanState``. + +_`.was-marked.otherwise`: Otherwise, the fix method must +leave the ``wasMarked`` field unchanged. 
+
+_`.was-marked.finalizable`: The MRG pool (design.mps.poolmrg_)
+uses the value of the ``wasMarked`` field to determine whether an
+object is finalizable.
+
+.. _design.mps.poolmrg: poolmrg
+
+
+Implementation
+---------------
+
+_`.fix.nailed`: In a copying collection, a non-ambiguous fix to a
+broken heart should be snapped out *even if* there is a ``RankAMBIG``
+ref to the same object (that is, if the broken heart is nailed); the
+``RankAMBIG`` reference must either be stale (no longer in existence)
+or bogus.
+
+
+Document History
+----------------
+
+- 1995-08-25 RB_ Incomplete design.
+
+- 2002-06-07 RB_ Converted from MMInfo database design document.
+
+- 2013-04-14 GDR_ Converted to reStructuredText.
+
+- 2018-06-18 GDR_ Simplify the ``wasMarked`` protocol.
+
+.. _RB: https://www.ravenbrook.com/consultants/rb/
+.. _GDR: https://www.ravenbrook.com/consultants/gdr/
+
+
+Copyright and License
+---------------------
+
+Copyright © 2013–2020 `Ravenbrook Limited <https://www.ravenbrook.com/>`_.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/freelist.txt b/mps/design/freelist.txt new file mode 100644 index 00000000000..efb510cc28d --- /dev/null +++ b/mps/design/freelist.txt @@ -0,0 +1,194 @@ +.. mode: -*- rst -*- + +Free list allocator +=================== + +:Tag: design.mps.freelist +:Author: Gareth Rees +:Date: 2013-05-18 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: free list allocator; design + + +Introduction +------------ + +_`.intro`: This is the design of the free list allocator. + +_`.readership`: Any MPS developer. + + +Overview +-------- + +_`.overview`: The free list allocator is an "emergency" allocator. It +is intended for use as a fallback allocation strategy in low memory +situations, when memory is not available for the control structures +needed by other allocators. In these situations the free list allocator +ensures that memory is not lost, but with several disadvantages: + +#. operations on the free list take time proportional to the number of + free blocks; +#. the data structures are stored in client memory and so are + vulnerable to corruption; +#. the data structures have poor locality (and thus potentially poor + cache performance). + +When memory becomes available again to allocate control structures, +the free lists can be "flushed" back into the more efficient data +structures. 
+ + +Requirements +------------ + +In addition to the generic land requirements (see design.mps.land_), +free lists must satisfy: + +.. _design.mps.land: land + +_`.req.zero-overhead`: Must have zero space overhead for the storage +of any set of free blocks, so that it can be used to manage memory +when no memory can be allocated for control structures. + + +Interface +--------- + +_`.land`: Free lists are an implementation of the *land* abstract data +type, so the interface consists of the generic functions for lands. +See design.mps.land_. + + +Types +..... + +``typedef struct FreelistStruct *Freelist`` + +_`.type.freelist`: The type of free lists. A ``FreelistStruct`` is +typically embedded in another structure. + + +Classes +....... + +_`.class`: ``CLASS(Freelist)`` is the free list class, a subclass of +``CLASS(Land)`` suitable for passing to ``LandInit()``. + + +Keyword arguments +................. + +When initializing a free list, ``LandInit()`` takes no keyword +arguments. Pass ``mps_args_none``. + + +Implementation +-------------- + +_`.impl.list`: The isolated contiguous free address ranges are kept on +an address-ordered singly linked free list. (As in traditional +``malloc()`` implementations.) + +_`.impl.block`: If the free address range is large enough to contain +an inline block descriptor consisting of two pointers, then the two +pointers stored are to the next free range in address order (or +``freelistEND`` if there are no more ranges), and to the limit of the +current free address range, in that order. + +_`.impl.grain`: Otherwise, the free address range must be large enough +to contain a single pointer. The pointer stored is to the next free +range in address order, or ``freelistEND`` if there are no more +ranges. + +_`.impl.tag`: Grains and blocks are distinguished by a one-bit tag in +the low bit of the first word (the one containing the pointer to the +next range). Grains have this bit set; blocks have this bit reset. 
+ +_`.impl.invariant`: The ranges stored in the free list are *isolated*: +no two ranges are adjacent or overlapping. + +_`.impl.merge`: When a free address range is added to the free list, +it is merged with adjacent ranges so as to maintain +`.impl.invariant`_. + +_`.impl.rule.break`: The use of ``freelistEND`` to mark the end of the +list violates the rule that exceptional values should not be used to +distinguish exceptional situations. This infraction allows the +implementation to meet `.req.zero-overhead`_. (There are other ways to +do this, such as using another tag to indicate the last block in the +list, but these would be more complicated.) + + +Testing +------- + +_`.test`: The following testing will be performed on this module: + +_`.test.land`: A generic test for land implementations. See +design.mps.land.test_. + +.. _design.mps.land.test: land#.test + +_`.test.pool`: Two pools (MVT_ and MVFF_) use free lists as a fallback +when low on memory. These are subject to testing in development, QA, +and are heavily exercised by customers. + +.. _MVT: poolmvt +.. _MVFF: poolmvff + + + +Opportunities for improvement +----------------------------- + +_`.improve.length`: When iterating over the list, we could check that +the number of elements visited in the course of the iteration does not +exceed the recorded size of the list. + +_`.improve.maxsize`: We could maintain the maximum size of any range +on the list, and use that to make an early exit from +``freelistFindLargest()``. It's not clear that this would actually be +an improvement. + + + +Document History +---------------- + +- 2013-05-18 GDR_ Initial draft based on CBS "emergency block" design. + +- 2014-04-01 GDR_ Moved generic material to design.mps.land_. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/guide.developer.txt b/mps/design/guide.developer.txt new file mode 100644 index 00000000000..b9407ace7ca --- /dev/null +++ b/mps/design/guide.developer.txt @@ -0,0 +1,88 @@ +.. mode: -*- rst -*- + +New developer guide +=================== + +:Tag: guide.developer +:Status: draft documentation +:Author: Gareth Rees +:Organization: Ravenbrook Limited +:Date: 2018-09-18 +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: developer; guide + + +Introduction +------------ + +_`.intro`: This is an introduction to the Memory Pool System (MPS) for +new developers. + +_`.source`: This is based on [APT_2018-09-17]_. 
+ + +What to read first +------------------ + +* `manual/build.txt`_ -- how to build the MPS. +* `manual/guide`_ -- tutorial for the public interface. +* `manual/topic`_ -- reference manual for the public interface. +* `manual/code-index`_ -- description and purpose of each file of source code. +* design.mps.config_ -- build configuration. +* design.mps.tests_ -- how to run test cases. +* design.mps.doc_ -- how to write and edit documentation. + +.. _manual/build.txt: https://www.ravenbrook.com/project/mps/master/manual/build.txt +.. _manual/guide: https://www.ravenbrook.com/project/mps/master/manual/html/guide/index.html +.. _manual/topic: https://www.ravenbrook.com/project/mps/master/manual/html/topic/index.html +.. _manual/code-index: https://www.ravenbrook.com/project/mps/master/manual/html/code-index.html +.. _design.mps.config: config +.. _design.mps.tests: tests +.. _design.mps.doc: doc + + +References +---------- + +.. [APT_2018-09-17] + "Procedure for new developers"; + Alistair Turnbull; Ravenbrook Limited; 2018-09-17; + + + +Document History +---------------- + +- 2018-09-18 GDR_ Created based on [APT_2018-09-17]_. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2015–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/guide.hex.trans.txt b/mps/design/guide.hex.trans.txt new file mode 100644 index 00000000000..411bf37aaaf --- /dev/null +++ b/mps/design/guide.hex.trans.txt @@ -0,0 +1,170 @@ +.. mode: -*- rst -*- + +Transliterating the alphabet into hexadecimal +============================================= + +:Tag: guide.hex.trans +:Status: incomplete documentation +:Author: Gavin Matthews +:Organization: Harlequin +:Date: 1997-04-11 +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: hexadecimal; transliterating + + +Introduction +------------ + +_`.scope`: This document explains how to represent the alphabet as +hexadecimal digits. + +_`.readership`: This document is intended for anyone devising +arbitrary constants which may appear in hex-dumps. + +_`.sources`: This transliteration was supplied by Richard Kistruck +[RHSK-1997-04-07]_ based on magic number encodings for object signatures +used by Richard Brooksby [RB-1996-02-12]_, the existence of which was +inspired by the structure marking used in the Multics operating system +[THVV-1995]_. 
+ + +Transliteration +--------------- + +_`.forward`: The chosen transliteration is as follows:: + + ABCDEFGHIJKLMNOPQRSTUVWXYZ + ABCDEF9811C7340BC6520F3812 + +_`.backward`: The backwards transliteration is as follows:: + + 0 OU + 1 IJY + 2 TZ + 3 MW + 4 N + 5 S + 6 R + 7 L + 8 HX + 9 G + A A + B BP + C CKQ + D D + E E + F FV + +_`.pad`: If padding is required (to fill a hex constant length), you +should use 9's, because G is rare and can usually be inferred from +context. + +_`.punc`: There is no formal scheme for spaces, or punctuation. It is +suggested that you use 9 (as `.pad`_). + + +Justification +-------------- + +_`.letters`: The hexadecimal letters (A-F) are all formed by +similarity of sound. B and P sound similar, as do F and V, and C, K, & +Q can all sound similar. + +_`.numbers`: The numbers (0-9) are all formed by similarity of shape +(but see `.trans.t`_). Nevertheless, 1=IJY retains some similarity of +sound. + +_`.trans.t`: T is an exception to `.numbers`_, but is such a common +letter that it deserves it. + + +Notes +----- + +_`.change`: This transliteration differs from the old transliteration +used for signatures (see design.mps.sig_), as follows: J:6->1; +L:1->7; N:9->4; R:4->6; W:8->3; X:5->8; Y:E->I. + +.. _design.mps.sig: sig + +_`.problem.mw`: There is a known problem that M and W are both common, +map to the same digit (3), and are hard to distinguish in context. 
+
+_`.find.c`: It is possible to find all 8-digit hexadecimal constants
+and how many times they're used in C files, using the following Perl
+script::
+
+    perl5 -n -e 'BEGIN { %C=(); } if(/0x([0-9A-Fa-f]{8})/) { $C{$1} =
+[] if(
+    !defined($C{$1})); push(@{$C{$1}}, $ARGV); } END { foreach $H (sort(keys(%C)))
+    { printf "%3d %s %s\n", scalar(@{$C{$H}}), $H, join(", ", @{$C{$H}}); } }' *.c
+    *.h
+
+_`.comment`: It is a good idea to add a comment to any constant
+declaration indicating the English version and which letters were
+selected (by capitalisation), e.g.::
+
+    #define SpaceSig ((Sig)0x5195BACE) /* SIGnature SPACE */
+
+
+References
+----------
+
+.. [RB-1996-02-12]
+   "Signature magic numbers" (e-mail message);
+   `Richard Brooksby`_;
+   Harlequin;
+   1996-12-02 12:05:30Z.
+
+.. _`Richard Brooksby`: mailto:rb@ravenbrook.com
+
+.. [RHSK-1997-04-07]
+   "Alpha-to-Hex v1.0 beta";
+   Richard Kistruck;
+   Ravenbrook;
+   1997-04-07 14:42:02+0100;
+   .
+
+.. [THVV-1995]
+   "Structure Marking";
+   Tom Van Vleck;
+   multicians.org_;
+   .
+
+.. _multicians.org: http://www.multicians.org/
+
+
+Document History
+----------------
+2013-05-10 RB_ Converted to reStructuredText and imported to MPS design.
+
+.. _RB: https://www.ravenbrook.com/consultants/rb/
+
+
+Copyright and License
+---------------------
+
+Copyright © 2013–2020 `Ravenbrook Limited `_.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/guide.impl.c.format.txt b/mps/design/guide.impl.c.format.txt new file mode 100644 index 00000000000..07e317f66d5 --- /dev/null +++ b/mps/design/guide.impl.c.format.txt @@ -0,0 +1,405 @@ +.. mode: -*- rst -*- + +C Style -- formatting +===================== + +:Tag: guide.impl.c.format +:Author: Richard Brooksby +:Date: 1995-08-07 +:Status: complete guide +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: C language; formatting guide + pair: C language formatting; guide + + +Introduction +------------ + +_`.scope`: This document describes the Ravenbrook conventions for the +general format of C source code in the MPS. + +_`.readership`: This document is intended for anyone working on or with the +C source code. + + +General formatting conventions +------------------------------ + +Line width +.......... + +_`.width`: Lines should be no wider than 72 characters. _`.width.why`: Many +people use 80 column terminal windows so that multiple windows can be +placed side by side. Restricting lines to 72 characters allows line +numbering to be used (in vi for example) and also allows diffs to be +displayed without overflowing the terminal. + +White space +........... 
+ +_`.space.notab`: No tab characters should appear in the source files. +Ordinary spaces should be used to indent and format the sources. + +_`.space.notab.why`: Tab characters are displayed differently on different +platforms, and sometimes translated back and forth, destroying layout +information. + +_`.space.punct`: There should always be whitespace after commas and +semicolons and similar punctuation. + +_`.space.op`: Put white space around operators in expressions, except when +removing it would make the expression clearer by binding certain +sub-expressions more tightly. For example:: + + foo = x + y*z; + +_`.space.control`: One space between a control-flow keyword +(``switch``, ``while``, ``for``, ``if``) and the following opening +parenthesis. + +_`.space.control.why`: This distinguishes control statements lexically +from function calls, making it easier to distinguish them visually and +when searching with tools like ``grep``. + +_`.space.function.not`: No space between a function name and the opening +parenthesis beginning its argument list. + +Sections and paragraphs +....................... + +_`.section`: Source files can be thought of as breaking down into +"sections" and "paragraphs". A section might be the leader comment of a +file, the imports, or a set of declarations which are related. + +_`.section.space`: Precede sections by two blank lines (except the first +one in the file, which should be the leader comment in any case). + +_`.section.comment`: Each section should start with a banner comment (see +`.comment.banner`_) describing what the section contains. + +_`.para`: Within sections, code often breaks down into natural units called +"paragraphs". A paragraph might be a set of strongly related +declarations (Init and Finish, for example), or a few lines of code +which it makes sense to consider together (the assignment of fields into +a structure, for example). + +_`.para.space`: Precede paragraphs by a single blank line. 
+ +Statements +.......... + +_`.statement.one`: Generally only have at most one statement per line. In +particular the following are deprecated:: + + if (thing) return; + + a=0; b=0; + + case 0: f = inRampMode ? AMCGen0RampmodeFrequency : AMCGen0Frequency; + +_`.statement.one.why`: Debuggers can often only place breakpoints on lines, +not expressions or statements within a line. The ``if (thing) return;`` is +a particularly important case, if thing is a reasonably rare return +condition then you might want to breakpoint it in a debugger session. +Annoying because ``if (thing) return;`` is quite compact and pleasing +otherwise. + +Indentation +........... + +_`.indent`: Indent the body of a block by two spaces. For formatting +purposes, the "body of a block" means: + +- statements between braces, +- a single statement following a lone ``if``; +- statements in a switch body; see .switch. + +(_`.indent.logical`: The aim is to group what we think of as logical +blocks, even though they may not exactly match how "block" is used in +the definition of C syntax). + +Some examples:: + + if (res != ResOK) { + SegFinish(&span->segStruct); + PoolFreeP(MV->spanPool, span, sizeof(SpanStruct)); + return res; + } + + if (res != ResOK) + goto error; + + if (j == block->base) { + if (j+step == block->limit) { + if (block->thing) + putc('@', stream); + } + } else if (j+step == block->limit) { + putc(']', stream); + pop_bracket(); + } else { + putc('.', stream); + } + + switch (c) { + case 'A': + c = 'A'; + p += 1; + break; + } + +_`.indent.goto-label`: Place each goto-label on a line of its own, +outdented to the same level as the surrounding block. Then indent the +non-label part of the statement normally. :: + + result foo(void) + { + statement(); + if (error) + goto foo; + statement(); + return OK; + + foo: + unwind(); + return ERROR; + } + +_`.indent.case-label`: Outdent case- and default-labels in a switch +statement in the same way as `.indent.goto-label`_. See `.switch`_. 
+ +_`.indent.cont`: If an expression or statement won't fit on a single line, +indent the continuation lines by two spaces, apart from the following +exception: + +_`.indent.cont.parens`: if you break a statement inside a parameter list or +other parenthesized expression, indent so that the continuation lines up +just after the open parenthesis. For example:: + + res = ChunkInit(chunk, arena, alignedBase, + AddrAlignDown(limit, ArenaGrainSize(arena)), + AddrOffset(base, limit), boot); + +_`.indent.cont.expr`: Note that when breaking an expression it is clearer +to place the operator at the start of the continuation line:: + + CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages)) + <= PageIndexBase(chunk, chunk->allocBase)); + +This is particularly useful in long conditional expressions that use && +and ||. For example:: + + if (BufferRankSet(buffer) != RankSetEMPTY + && (buffer->mode & BufferModeFLIPPED) == 0 + && !BufferIsReset(buffer)) + +_`.indent.hint`: Usually, it is possible to determine the correct +indentation for a line by looking to see if the previous line ends with +a semicolon. If it does, indent to the same amount, otherwise indent by +two more spaces. The main exceptions are lines starting with a close +brace, goto-labels, and line-breaks between parentheses. + +Positioning of braces +..................... + +_`.brace.otb`: Use the "One True Brace" (or OTB) style. This places the +open brace after the control word or expression, separated by a space, +and when there is an else, places that after the close brace. For +example:: + + if (buffer->mode & BufferModeFLIPPED) { + return buffer->initAtFlip; + } else { + return buffer->ap_s.init; + } + +The same applies to ``struct``, ``enum``, and ``union``. + +_`.brace.otb.function.not`: OTB is never used for function definitions. + +_`.brace.always`: Braces are always required after ``if``, ``else``, ``switch``, +``while``, ``do``, and ``for``. 
+
+_`.brace.always.except`: Except that a lone ``if`` with no ``else`` is allowed
+to drop its braces when its body is a single simple statement. Typically
+this will be a ``goto`` or an assignment. For example::
+
+    if (res != ResOK)
+      goto failStart;
+
+Note in particular that an ``if`` with an ``else`` must have braces on both
+paths.
+
+Switch statements
+.................
+
+_`.switch`: format switch statements like this::
+
+    switch (SplaySplay(splay, oldKey, splay->compare)) {
+    default:
+      NOTREACHED;
+      /* fall through */
+    case CompareLESS:
+      return SplayTreeRoot(splay);
+
+    case CompareGREATER:
+    case CompareEQUAL:
+      return SplayTreeSuccessor(splay);
+    }
+
+The component rules that result in this style are:
+
+_`.switch.break`: The last line of every case-clause body must be an
+unconditional jump statement (usually ``break``, but may be ``goto``,
+``continue``, or ``return``), or if a fall-through is intended, the
+comment ``/* fall through */``. (Note: if the unconditional jump
+should never be taken, because of previous conditional jumps, use
+``NOTREACHED`` on the line before it.) This rule is to prevent
+accidental fall-throughs, even if someone makes an editing mistake that
+causes a conditional jump to be missed. This rule is automatically
+checked by GCC and Clang with the ``-Wimplicit-fallthrough`` option.
+
+_`.switch.default`: It is usually a good idea to have a
+default-clause, even if all it contains is ``NOTREACHED`` and
+``break`` or ``/* fall through */``. Remember that ``NOTREACHED``
+doesn't stop the process in all build varieties.
+
+
+Comments
+........
+
+_`.comment`: There are three types of comments: banners, paragraph
+comments, and column comments.
+
+_`.comment.banner`: Banner comments come at the start of sections. 
A banner +comment consists of a heading usually composed of a symbol, an em-dash +(--) and a short explanation, followed by English text which is +formatted using conventional text documentation guidelines (see +guide.text). The open and close comment tokens (``/*`` and ``*/``) are +placed at the top and bottom of a column of asterisks. The text is +separated from the asterisks by one space. Place a blank line between +the banner comment and the section it comments. For example:: + + /* BlockStruct -- Block descriptor + * + * The pool maintains a descriptor structure for each + * contiguous allocated block of memory it manages. + * The descriptor is on a simple linked-list of such + * descriptors, which is in ascending order of address. + */ + + typedef struct BlockStruct { + +_`.comment.para`: Paragraph comments come at the start of paragraphs +in the code. A paragraph comment consists of formatted English text. +For example:: + + /* If the freed area is in the base sentinel then insert + the new descriptor after it, otherwise insert before. */ + if (isBase) { + +_`.comment.para.precede`: Paragraph comments, even one-liners, precede the +code to which they apply. + +_`.comment.column`: Column comments appear in a column to the right of +the code. They should be used sparingly, since they clutter the code and +make it hard to edit. Use them on variable declarations and structure, +union, or enum declarations. They should start at least at column 32 +(counting from 0, that is, on a tab-stop), and should be terse +descriptive text. Abandon English sentence structure if this makes the +comment clearer. Don't write more than one line. 
Here's an example:: + + typedef struct MVFFStruct { /* MVFF pool outer structure */ + PoolStruct poolStruct; /* generic structure */ + LocusPrefStruct locusPrefStruct; /* the preferences for allocation */ + Size extendBy; /* size to extend pool by */ + Size avgSize; /* client estimate of allocation size */ + double spare; /* spare space fraction, see MVFFReduce */ + MFSStruct cbsBlockPoolStruct; /* stores blocks for CBSs */ + CBSStruct totalCBSStruct; /* all memory allocated from the arena */ + CBSStruct freeCBSStruct; /* free memory (primary) */ + FreelistStruct flStruct; /* free memory (secondary, for emergencies) */ + FailoverStruct foStruct; /* free memory (fail-over mechanism) */ + Bool firstFit; /* as opposed to last fit */ + Bool slotHigh; /* prefers high part of large block */ + Sig sig; /* */ + } MVFFStruct; + + +Macros +...... + +_`.macro.careful`: Macros in C are a real horror bag, be extra careful. +There's lots that could go here, but proper coverage probably deserves a +separate document. Which isn't written yet. + +_`.macro.general`: Do try and follow the other formatting conventions for +code in macro definitions. + +_`.macro.backslash`: Backslashes used for continuation lines in macro +definitions should be put on the right somewhere where they will be less +in the way. Example:: + + #define RAMP_RELATION(X) \ + X(RampOUTSIDE, "outside ramp") \ + X(RampBEGIN, "begin ramp") \ + X(RampRAMPING, "ramping") \ + X(RampFINISH, "finish ramp") \ + X(RampCOLLECTING, "collecting ramp") + + +Document History +---------------- + +- 2007-06-04 DRJ_ Adopted from Harlequin MMinfo version and edited. + +- 2007-06-04 DRJ_ Changed .width from 80 to 72. Banned space between + ``if`` and ``(``. Required braces on almost everything. Clarified that + paragraph comments precede the code. + +- 2007-06-13 RHSK_ Removed .brace.block, because MPS source always + uses .brace.otb. Remove .indent.elseif because it is obvious (ahem) and + showing an example is sufficient. 
New rules for .switch.*: current MPS + practice is a mess, so lay down a neat new law. + +- 2007-06-27 RHSK_ Added `.space.function.not`_. + +- 2007-07-17 DRJ_ Added .macro.\* + +- 2012-09-26 RB_ Converted to Markdown and reversed inconsistent + switch "law". + +.. _DRJ: https://www.ravenbrook.com/consultants/drj +.. _RHSK: https://www.ravenbrook.com/consultants/rhsk +.. _RB: https://www.ravenbrook.com/consultants/rb + + +Copyright and License +--------------------- + +Copyright © 2002–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/guide.impl.c.naming.txt b/mps/design/guide.impl.c.naming.txt new file mode 100644 index 00000000000..12cf264ff07 --- /dev/null +++ b/mps/design/guide.impl.c.naming.txt @@ -0,0 +1,128 @@ +.. 
mode: -*- rst -*- + +C Style -- naming +================= + +:Tag: guide.impl.c.naming +:Author: Gareth Rees +:Date: 2014-10-07 +:Status: incomplete guide +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: C language; naming guide + pair: C language naming; guide + + +Introduction +------------ + +_`.scope`: This document describes the conventions for naming in C +source code that's internal in the MPS. See design.mps.interface-c_ +for the corresponding conventions for the public interface. + +.. _design.mps.interface-c: interface-c + +_`.readership`: This document is intended for anyone working on or +with the C source code. + + +Capitalization +-------------- + +_`.capital.macro`: Statement-like macros have names consisting of +uppercase words separated by underscores, for example +``ARG_DEFINE_KEY``. + +_`.capital.constant`: Constants have names consisting of a type (named +according to `.capital.program`_ or `.capital.other`_), concatenated +with an identifier in uppercase with underscores, for example +``BufferFramePOP_PENDING``. + +_`.capital.program`: Other names with program scope consist of +concatenated title-case words, for example ``BufferFramePush``. + +_`.capital.other`: Other names (including function parameters, names +with block scope, and names with file scope) consist of concatenated +words, the first of which is lowercase and the remainder are +uppercase. For example, ``poolReturn``. + + +Prefixes +-------- + +_`.prefix.program`: Any name with program scope must start with the +name of the module to which it belongs. For example, names belonging +to the buffer module must start with ``buffer`` or ``Buffer`` or +``BUFFER``. Justification: the C language lacks a namespace facility +so the only way to avoid name clashes is for each name to be globally +unique. + +_`.prefix.file`: Any name with file scope should start with the name +of the module to which it belongs. 
Justification: makes it easy to +tell which module a function belongs to; makes it easy to set +breakpoints in the debugger. + + +Suffixes +-------- + +_`.suffix.struct`: The type of a structure must be the same as the +structure tag, and must consist of the type of the pointer to the +structure concatenated with ``Struct``. For example, ``ArenaStruct``. + +_`.suffix.union`: The type of a union must be the same as the union +tag, and must consist of the type of the pointer to the union +concatenated with ``Union``. For example, ``PageUnion``. + +_`.suffix.class`: The type of a class (see design.mps.protocol_) +must end with ``Class``. For example, ``ArenaClass``. + +.. _design.mps.protocol: protocol + +_`.suffix.method`: The type of a method in a class must end with +``Method``. For example, ``PoolFixMethod``. + +_`.suffix.visitor`: The type of a visitor function must end with +``Visitor``. For example, ``TreeVisitor``. + +_`.suffix.function`: The type of other functions must end with +``Function``. For example, ``TreeKeyFunction``. + + +Document History +---------------- + +- 2014-10-07 GDR_ Created based on job003693_. + +.. _job003693: https://www.ravenbrook.com/project/mps/issue/job003693/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr + + +Copyright and License +--------------------- + +Copyright © 2002–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/guide.review.txt b/mps/design/guide.review.txt new file mode 100644 index 00000000000..27655838aa7 --- /dev/null +++ b/mps/design/guide.review.txt @@ -0,0 +1,82 @@ +.. mode: -*- rst -*- + +Review checklist +================ + +:Tag: guide.review +:Status: incomplete documentation +:Author: Gareth Rees +:Organization: Ravenbrook Limited +:Date: 2015-08-10 +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: review; checklist + + +Introduction +------------ + +_`.scope`: This document contains a list of checks to apply when +reviewing code or other documents in the Memory Pool System. + +_`.readership`: This document is intended for reviewers. + +_`.example`: The "example" links are issues caused by a failure to +apply the checklist item. + +_`.diff`: Some items in the checklist are particularly susceptible to +being ignored if one reviews only via the version control diff. These +items refer to this tag. + + +Checklist +--------- + +_`.test`: If a new feature has been added to the code, is there a test +case? Example: job003923_. + +.. 
_job003923: https://www.ravenbrook.com/project/mps/issue/job003923/ + +_`.unwind`: If code has been updated in a function that unwinds its +state in failure cases, have the failure cases been updated to +correspond? Example: job003922_. See `.diff`_. + +.. _job003922: https://www.ravenbrook.com/project/mps/issue/job003922/ + + + +Document History +---------------- + +2015-08-10 GDR_ Created. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2015–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/index.txt b/mps/design/index.txt new file mode 100644 index 00000000000..fcd59cf5873 --- /dev/null +++ b/mps/design/index.txt @@ -0,0 +1,265 @@ +.. 
mode: -*- rst -*- + +MPS Design Documents +==================== +:Author: Richard Brooksby +:Organization: Ravenbrook Limited +:Date: 2002-05-23 +:Revision: $Id$ +:Readership: developers +:Confidentiality: public +:Type: index +:Copyright: See `Copyright and License`_. + + +Introduction +------------ + +This is the catalogue of design documents of the Memory Pool System +product. + +.. warning:: + + This is not a complete set of design documents for the MPS. We have + many hundreds of documents from before the MPS was acquired by + Ravenbrook on 2001-08-10, many of which contain confidential + information [RB_2002-06-18]_. We are sorting through these and will + include more as time goes on. We have tried to select the key + documents for inclusion in the open source release, by including + those documents referenced by the source code. + + If there is a document you think might exist and want to see it, + please write to mps-questions@ravenbrook.com and we will try to dig + it up. + + +Designs +------- + +.. 
class:: index + +====================== ================================================ +abq_ Fixed-length queues +alloc-frame_ Allocation frame protocol +an_ Generic modules +arena_ Arena +arenavm_ Virtual memory arena +bootstrap_ Bootstrapping +bt_ Bit tables +buffer_ Allocation buffers and allocation points +cbs_ Coalescing block structures +check_ Checking +clock_ Fast high-resolution clock +collection_ Collection framework +config_ MPS configuration +critical-path_ The critical path through the MPS +diag_ Diagnostic feedback +doc_ Documentation +exec-env_ Execution environment +failover_ Fail-over allocator +finalize_ Finalization +fix_ The generic fix function +freelist_ Free list allocator +guide.developer_ Guide for new developers +guide.hex.trans_ Transliterating the alphabet into hexadecimal +guide.impl.c.format_ Coding standard: conventions for the general format of C source code in the MPS +guide.impl.c.naming_ Coding standard: conventions for internal names +guide.review_ Review checklist +interface-c_ C interface +io_ I/O subsystem +keyword-arguments_ Keyword arguments +land_ Lands (collections of address ranges) +lib_ Library interface +lock_ Lock module +locus_ Locus manager +message_ Client message protocol +message-gc_ GC messages +monitor_ Monitor +nailboard_ Nailboards for ambiguously referenced segments +object-debug_ Debugging features for client objects +pool_ Pool classes +poolamc_ Automatic Mostly-Copying pool class +poolams_ Automatic Mark-and-Sweep pool class +poolawl_ Automatic Weak Linked pool class +poollo_ Leaf Object pool class +poolmfs_ Manual Fixed Small pool class +poolmrg_ Manual Rank Guardian pool class +poolmvt_ Manual Variable Temporal pool class +poolmvff_ Manual Variable First-Fit pool class +prmc_ Mutator context +prot_ Memory protection +protix_ POSIX implementation of protection module +protocol_ Protocol inheritance +pthreadext_ POSIX thread extensions +range_ Ranges of addresses +ring_ Ring data structure +root_ Root 
manager +scan_ The generic scanner +seg_ Segment data structure +shield_ Shield +sig_ Signatures in the MPS +sp_ Stack probe +splay_ Splay trees +stack-scan_ Stack and register scanning +strategy_ Collection strategy +telemetry_ Telemetry +tests_ Tests +testthr_ Multi-threaded testing +thread-manager_ Thread manager +thread-safety_ Thread safety in the MPS +trace_ Tracer +transform_ Transforms +type_ General MPS types +version-library_ Library version mechanism +vm_ Virtual mapping +walk_ Walking formatted objects +write-barrier_ Write Barrier +writef_ The WriteF function +====================== ================================================ + +.. _abq: abq +.. _alloc-frame: alloc-frame +.. _an: an +.. _arena: arena +.. _arenavm: arenavm +.. _bootstrap: bootstrap +.. _bt: bt +.. _buffer: buffer +.. _cbs: cbs +.. _check: check +.. _clock: clock +.. _collection: collection +.. _config: config +.. _critical-path: critical-path +.. _diag: diag +.. _doc: doc +.. _exec-env: exec-env +.. _failover: failover +.. _finalize: finalize +.. _fix: fix +.. _freelist: freelist +.. _guide.developer: guide.developer +.. _guide.hex.trans: guide.hex.trans +.. _guide.impl.c.format: guide.impl.c.format +.. _guide.impl.c.naming: guide.impl.c.naming +.. _guide.review: guide.review +.. _interface-c: interface-c +.. _io: io +.. _keyword-arguments: keyword-arguments +.. _land: land +.. _lib: lib +.. _lock: lock +.. _locus: locus +.. _message: message +.. _message-gc: message-gc +.. _monitor: monitor +.. _nailboard: nailboard +.. _object-debug: object-debug +.. _pool: pool +.. _poolamc: poolamc +.. _poolams: poolams +.. _poolawl: poolawl +.. _poollo: poollo +.. _poolmfs: poolmfs +.. _poolmrg: poolmrg +.. _poolmvt: poolmvt +.. _poolmvff: poolmvff +.. _prmc: prmc +.. _prot: prot +.. _protix: protix +.. _protocol: protocol +.. _pthreadext: pthreadext +.. _range: range +.. _ring: ring +.. _root: root +.. _scan: scan +.. _seg: seg +.. _shield: shield +.. _sig: sig +.. _sp: sp +.. 
_splay: splay +.. _stack-scan: stack-scan +.. _strategy: strategy +.. _telemetry: telemetry +.. _tests: tests +.. _testthr: testthr +.. _thread-manager: thread-manager +.. _thread-safety: thread-safety +.. _trace: trace +.. _transform: transform +.. _type: type +.. _version-library: version-library +.. _vm: vm +.. _walk: walk +.. _write-barrier: write-barrier +.. _writef: writef + + +References +---------- + +.. [RB_2002-06-18] + "The Obsolete Memory Management Information System"; + Richard Brooksby; + Ravenbrook Limited; + 2002-06-18; + . + + +Document History +---------------- + +- 2002-05-23 RB_ Created empty catalogue based on P4DTI design document catalogue. +- 2002-06-07 RB_ Added a bunch of design documents referenced by the source code. +- 2002-06-21 NB_ Remove P4DTI reference, which doesn't fit here. Maybe one day we'll have a corporate design document procedure. +- 2002-06-24 RB_ Added fix, object-debug, thread-manager, and thread-safety. +- 2007-02-08 RHSK Added message-gc and shield. +- 2007-06-12 RHSK Added cstyle. +- 2007-06-28 RHSK Added diag. +- 2008-12-04 RHSK Added tests. +- 2008-12-10 RHSK Correct description of message-gc: gc begin or end. +- 2012-09-14 RB_ Added link to critical-path +- 2013-05-10 RB_ Fixed link to sig and added guide.hex.trans +- 2013-05-22 GDR_ Add link to keyword-arguments. +- 2013-05-25 RB_ Replacing "cstyle" with reworked "guide.impl.c.format". +- 2013-06-07 RB_ Converting to reST_. Linking to [RB_2002-06-18]_. +- 2014-01-29 RB_ The arena no longer manages generation zonesets. +- 2014-01-17 GDR_ Add abq, nailboard, range. +- 2016-03-22 RB_ Add write-barier. +- 2016-03-27 RB_ Goodbye pool MV *sniff*. +- 2020-08-31 GDR_ Add walk. +- 2023-06-16 RB_ Add transform. + +.. _RB: https://www.ravenbrook.com/consultants/rb +.. _NB: https://www.ravenbrook.com/consultants/nb +.. _GDR: https://www.ravenbrook.com/consultants/gdr +.. 
_reST: http://docutils.sourceforge.net/rst.html + + +Copyright and License +--------------------- + +Copyright © 2002–2023 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/interface-c.txt b/mps/design/interface-c.txt new file mode 100644 index 00000000000..7525d3743ee --- /dev/null +++ b/mps/design/interface-c.txt @@ -0,0 +1,431 @@ +.. mode: -*- rst -*- + +C interface design +================== + +:Tag: design.mps.interface.c +:Author: Richard Brooksby +:Date: 1996-07-29 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. 
+:Index terms: pair: C interface; design + + +Introduction +------------ + +_`.scope`: This document is the design for the Memory Pool System +(MPS) interface to the C Language, impl.h.mps. + +_`.bg`: See `mail.richard.1996-07-24.10-57`_. + +.. _mail.richard.1996-07-24.10-57: https://info.ravenbrook.com/project/mps/mail/1996/07/24/10-57/0.txt + + +Analysis +-------- + +Goals +..... + +_`.goal.c`: The file impl.h.mps is the C external interface to the +MPS. It is the default interface between client code written in C and +the MPS. + +_`.goal.cpp`: impl.h.mps is not specifically designed to be +an interface to C++, but should be usable from C++. + + +Requirements +............ + +_`.req`: The interface must provide an interface from client code +written in C to the functionality of the MPS required by the product +(see req.product), and Open Dylan (req.dylan). + +_`.req.separation`: The external interface may not include internal +MPS header files (such as ``pool.h``). + +_`.req.flexibility`: It is essential that the interface cope well with +change, in order to avoid restricting possible future MPS +developments. This means that the interface must be "open ended" in +its definitions. This accounts for some of the apparently tortuous +methods of doing things (such as the keyword argument mechanism; see +design.mps.keyword-arguments_). The requirement is that the MPS should +be able to add new functionality, or alter the implementation of +existing functionality, without affecting existing client code. A +stronger requirement is that the MPS should be able to change without +*recompiling* client code. This is not always possible. + +.. _design.mps.keyword-arguments: keyword-arguments + +_`.req.name.iso`: The interface shall not conflict in terms of +naming with any interfaces specified by ISO C and all reasonable +future versions. 
+ +_`.req.name.general`: The interface shall use a documented and +reasonably small portion of the namespace so that clients can use the +MPS C interface in combination with other interfaces without name +conflicts. + + +Architecture +------------ + +_`.fig.arch`: The architecture of the MPS Interface + +[missing figure] + +Just behind ``mps.h`` is the file ``mpsi.c``, the "MPS interface +layer" which does the job of converting types and checking parameters +before calling through to the MPS proper, using internal MPS methods. + + +Naming conventions +------------------ + +_`.naming`: The external interface names should adhere to the +documented interface conventions; these are found in the “`Interface +conventions `_” chapter of the Reference Manual. They are +paraphrased/recreated here. + +.. _interface.html: ../topic/interface.html + +_`.naming.file`: All files in the external interface have names +starting with ``mps``. + +_`.naming.unixy`: The external interface does not follow the same +naming conventions as the internal code. The interface is designed to +resemble a more conventional C, Unix, or Posix naming convention. + +_`.naming.case`: Identifiers are in lower case, except +non-function-like macros, which are in upper case. + +_`.naming.global`: All documented identifiers begin ``mps_`` or +``MPS_``. + +_`.naming.all`: All identifiers defined by the MPS begin ``mps_`` or +``MPS_`` or ``_mps_``. + +_`.naming.type`: Types are suffixed ``_t``, except for structure and union types. + +_`.naming.struct`: Structure types and tags are suffixed ``_s``. + +_`.naming.union`: Unions types and tags are suffixed ``_u``. + +_`.naming.scope`: The naming conventions apply to all identifiers (see +ISO C §6.1.2); this includes names of functions, variables, types +(through typedef), structure and union tags, enumeration members, +structure and union members, macros, macro parameters, labels. 
+ +_`.naming.scope.labels`: labels (for ``goto`` statements) should be +rare, only in special block macros and probably not even then. + +_`.naming.scope.other`: The naming convention would also extend to +enumeration types and parameters in functions prototypes but both of +those are prohibited from having names in an interface file. + + +Type conventions +---------------- + +_`.type.gen`: The interface defines memory addresses as ``void *`` and +sizes as ``size_t`` for compatibility with standard C (in particular, +with ``malloc()``). These types must be binary compatible with the +internal types ``Addr`` and ``Size`` respectively. Note that this +restricts the definitions of the internal types ``Addr`` and ``Size`` +when the MPS is interfaced with C, but does not restrict the MPS in +general. + +_`.type.opaque`: Opaque types are defined as pointers to structures +which are never defined. These types are cast to the corresponding +internal types in ``mpsi.c``. + +_`.type.trans`: Some transparent structures are defined. The client is +expected to read these, or poke about in them, under documented +restrictions. The most important is the allocation point structure +(``mps_ap_s``) which is part of allocation buffers. The transparent +structures must be binary compatible with corresponding internal +structures. For example, the fields of ``mps_ap_s`` must correspond +with ``APStruct`` internally. This is checked by ``mpsi.c`` in +``mps_check()``. + +_`.type.pseudo`: Some pseudo-opaque structures are defined. These only +exist so that code can be inlined using macros. The client code +shouldn't mess with them. The most important case of this is the scan +state (``mps_ss_s``) which is accessed by the in-line scanning macros, +``MPS_SCAN_*`` and ``MPS_FIX*``. + +_`.type.enum`: There are no enumeration types in the interface. Note +that enum specifiers (to declare integer constants) are fine as long +as no type is declared. See guide.impl.c.misc.enum.type. 
+ +_`.type.fun`: Whenever function types or derived function types (such +as pointer to function) are declared a prototype should be used and +the parameters to the function should not be named. This includes the +case where you are declaring the prototype for an interface function. + +_`.type.fun.example`: So use:: + + extern mps_res_t mps_alloc(mps_addr_t *, mps_pool_t, size_t, ...); + +rather than:: + + extern mps_res_t mps_alloc(mps_addr_t *addr_return, mps_pool_t pool , size_t size, ...); + +and:: + + typedef mps_addr_t (*mps_fmt_class_t)(mps_addr_t); + +rather than:: + + typedef mps_addr_t (*mps_fmt_class_t)(mps_addr_t object); + +See guide.impl.c.misc.prototype.parameters. + + +Checking +-------- + +_`.check.testt`: Before any use of a parameter ``foo`` belonging to a +pointer type ``Foo``, it is checked using ``TESTT(Foo, foo)``. The +macro ``TESTT()`` in impl.h.check performs simple thread-safe checking +of ``foo``, so it can be called outside of ``ArenaEnter()`` and +``ArenaLeave()``. + +_`.check.avert`: With the arena lock held, ``foo`` is checked using +``AVERT(Foo, foo)``. This macro has different definitions depending on +how the MPS is compiled (see design.mps.config.def.var_). It may +expand to ``TESTT()``, or it may call the full checking function for +the type. + +.. _design.mps.config.def.var: config#.def.var + +_`.check.types`: We use definitions of types in both our external +interface and our internal code, and we want to make sure that they +are compatible. (The external interface changes less often and hides +more information.) This checking uses the following macros, originally +from `mail.richard.1996-08-07.09-49`_. + +.. _mail.richard.1996-08-07.09-49: https://info.ravenbrook.com/project/mps/mail/1996/08/07/09-49/0.txt + +``COMPATLVALUE(lvalue1, lvalue2)`` + +_`.check.types.compat.lvalue`: This macro checks the assignment +compatibility of two lvalues. It uses ``sizeof`` to ensure that the +assignments have no effect. 
:: + + #define COMPATLVALUE(lv1, lv2) \ + ((void)sizeof((lv1) = (lv2)), (void)sizeof((lv2) = (lv1)), TRUE) + +``COMPATTYPE(type1, type2)`` + +_`.check.types.compat.type`: This macro checks that two types are +assignment-compatible and equal in size. The hack here is that it +generates an lvalue for each type by casting zero to a pointer to the +type. The use of ``sizeof`` avoids the undefined behaviour that +would otherwise result from dereferencing a null pointer. :: + + #define COMPATTYPE(t1, t2) \ + (sizeof(t1) == sizeof(t2) && \ + COMPATLVALUE(*((t1 *)0), *((t2 *)0))) + +``COMPATFIELDAPPROX(structure1, field1, structure2, field2)`` + +_`.check.types.compat.field.approx`: This macro checks that the offset +and size of two fields in two structure types are the same. :: + + #define COMPATFIELDAPPROX(s1, f1, s2, f2) \ + (sizeof(((s1 *)0)->f1) == sizeof(((s2 *)0)->f2) && \ + offsetof(s1, f1) == offsetof(s2, f2)) + +``COMPATFIELD(structure1, field1, structure2, field2)`` + +_`.check.types.compat.field`: This macro checks the offset, size, and +assignment-compatibility of two fields in two structure types. :: + + #define COMPATFIELD(s1, f1, s2, f2) \ + (COMPATFIELDAPPROX(s1, f1, s2, f2) && \ + COMPATLVALUE(((s1 *)0)->f1, ((s2 *)0)->f2)) + + +Binary compatibility issues +--------------------------- + +As in, "Enumeration types are not allowed" (see +`mail.richard.1995-09-08.09-28`_). + +.. _mail.richard.1995-09-08.09-28: https://info.ravenbrook.com/project/mps/mail/1995/09/08/09-28/0.txt + +_`.compat`: There are two main aspects to run-time compatibility: +binary interface and protocol. + +_`.compat.binary`: The binary interface is all the information needed +to correctly use the library, and includes external symbol linkage, +calling conventions, type representation compatibility, structure +layouts, etc. 
+ +_`.compat.binary.unneeded`: Binary compatibility is not required by +the open source MPS: we expect (and indeed, recommend) that a client +program is compiled against the MPS sources. Nonetheless we try to +maintain binary compatibility in case the capability is required in +future. + +_`.compat.binary.dependencies`: The binary interface is determined +completely by the header file and the target. The header file +specifies the external names and the types, and the target platform +specifies calling conventions and type representation. There is +therefore a many-to-one mapping between the header file version and +the binary interface. + +_`.compat.protocol`: The protocol is how the library is actually used +by the client code -- whether this is called before that -- and +determines the semantic correctness of the client with respect to the +library. + +_`.compat.protocol.dependencies`: The protocol is determined by the +implementation of the library. + + +Constraints +----------- + +_`.cons`: The MPS C Interface constrains the MPS in order to provide +useful memory management services to a C or C++ program. + +_`.cons.addr`: The interface constrains the MPS address type, Addr +(design.mps.type.addr_), to being the same as C's generic pointer type, +``void *``, so that the MPS can manage C objects in the natural way. + +.. _design.mps.type.addr: type#.addr + +_`.pun.addr`: We pun the type of ``mps_addr_t`` (which is ``void *``) +into ``Addr`` (an incomplete type, see design.mps.type.addr_). This +happens in the call to the scan state's fix function, for example. + +_`.cons.size`: The interface constrains the MPS size type, ``Size`` +(design.mps.type.size_), to being the same as C's size type, +``size_t``, so that the MPS can manage C objects in the natural way. + +.. _design.mps.type.size: type#.size + +_`.pun.size`: We pun the type of ``size_t`` in mps.h into ``Size`` in +the MPM, as an argument to the format methods. We assume this works. 
+ +_`.cons.word`: The MPS assumes that ``Word`` (design.mps.type.word_) +and ``Addr`` (design.mps.type.addr_) are the same size, and the +interface constrains ``Word`` to being the same size as C's generic +pointer type, ``void *``. + +.. _design.mps.type.word: type#.word + + +Implementation +-------------- + +_`.impl`: The external interface consists of the following header +files: + +_`.impl.mps`: ``mps.h`` is the main external interface, containing of +type and function declarations needed by all clients of the MPS. + +_`.impl.mpstd`: ``mpstd.h`` is the MPS target detection header. It +decodes preprocessor symbols which are predefined by build +environments in order to determine the target platform (see +design.mps.config_), and then defines uniform symbols, such as +``MPS_ARCH_I3``, for use externally and internally by the MPS. +``mpstd.h`` is not included by any of the other external headers, as +it relies on exact set of preprocessor constants defined by compilers. + +.. _design.mps.config: config + +_`.impl.mpsio`: ``mpsio.h`` is the interface to the MPS I/O subsystem, +part of the plinth. See design.mps.io_. + +.. _design.mps.io: io + +_`.impl.mpslib`: ``mpslib.h`` is the interface to the MPS Library +Interface, part of the plinth. See design.mps.lib_. + +.. _design.mps.lib: lib + +_`.impl.mpsa`: Interfaces to arena classes are in files with names +starting ``mpsa``: for example, the interface to the Virtual Memory +arena class is in ``mpsavm.h``. + +_`.impl.mpsc`: Interfaces to pool classes are in files with names +starting ``mpsc``: for example, the interface to the MVFF pool class +is in ``mpscmvff.h``. + + +Notes +----- + +_`.fmt.extend`: ``mps_fmt_A_t`` is so called because new pool classes +might require new format methods, but these methods cannot be added to +the format structure without breaking binary compatibility. Therefore +these new pool classes would use new format structures named +``mps_fmt_B_t`` and so on. 
+ +_`.thread-safety`: Most calls through this interface lock the arena +and therefore make the MPM single-threaded. In order to do this they +must recover the arena from their parameters. Methods such as +``FormatArena()`` and ``ThreadArena()`` must therefore be callable +when the arena is *not* locked. These methods are tagged with the tag +of this note. + +_`.lock-free`: Certain functions inside the MPM are thread-safe and do +not need to be serialized by using locks. They are marked with the tag +of this note. + +_`.form`: Almost all functions in this implementation simply cast +their arguments to the equivalent internal types, and cast results +back to the external type, where necessary. Only exceptions are noted +in comments. + + +Document History +---------------- + +- 1996-07-29 RB_ Incomplete document. The first draft of this document + was generated in response to review.impl.h.mps.10 which revealed the + lack of a detailed design document and also the lack of conventions + for external interfaces. The aim of the draft was to record this + information, even if it isn't terribly well structured. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/io.txt b/mps/design/io.txt new file mode 100644 index 00000000000..bfedb2b4aa0 --- /dev/null +++ b/mps/design/io.txt @@ -0,0 +1,457 @@ +.. mode: -*- rst -*- + +I/O subsystem +============= + +:Tag: design.mps.io +:Author: Richard Brooksby +:Date: 1996-08-30 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: I/O subsystem; design + + +Introduction +------------ + +_`.intro`: This document is the design of the MPS I/O Subsystem, a +part of the plinth. + +_`.readership`: This document is intended for MPS developers. + + +Background +---------- + +_`.bg`: This design is partly based on the design of the Internet User +Datagram Protocol (UDP). Mainly I used this to make sure I hadn't left +out anything which we might need. + + +Purpose +------- + +_`.purpose`: The purpose of the MPS I/O Subsystem is to provide a +means to measure, debug, control, and test a memory manager build +using the MPS. + +_`.purpose.measure`: Measurement consists of emitting data which can +be collected and analysed in order to improve the attributes of +application program, quite possibly by adjusting parameters of the +memory manager (see overview.mps.usage). 
+ +_`.purpose.control`: Control means adjusting the behaviour of the MM +dynamically. For example, one might want to adjust a parameter in +order to observe the effect, then transfer that adjustment to the +client application later. + +_`.purpose.test`: Test output can be used to ensure that the memory +manager is behaving as expected in response to certain inputs. + + +Requirements +------------ + +General +....... + +_`.req.fun.non-hosted`: The MPM must be a host-independent system. + +_`.req.attr.host`: It should be easy for the client to set up the MPM +for a particular host (such as a washing machine). + +Functional +.......... + +_`.req.fun.measure`: The subsystem must allow the MPS to transmit +quantitative measurement data to an external tool so that the system +can be tuned. + +_`.req.fun.debug`: The subsystem must allow the MPS to transmit +qualitative information about its operation to an external tool so +that the system can be debugged. + +_`.req.fun.control`: The subsystem must allow the MPS to receive +control information from an external tool so that the system can be +adjusted while it is running. + +_`.req.dc.env.no-net`: The subsystem should operate in environments +where there is no networking available. + +_`.req.dc.env.no-fs`: The subsystem should operate in environments +where there is no filesystem available. + + +Architecture +------------ + +_`.arch.diagram`: I/O Architecture Diagram + +[missing diagram] + +_`.arch.int`: The I/O Interface is a C function call interface by +which the MPM sends and receives "messages" to and from the hosted I/O +module. + +_`.arch.module`: The modules are part of the MPS but not part of the +freestanding core system (see design.mps.exec-env_). The I/O module is +responsible for transmitting those messages to the external tools, and +for receiving messages from external tools and passing them to the +MPM. + +.. 
_design.mps.exec-env: exec-env + +_`.arch.module.example`: For example, the "file implementation" might +just send/write telemetry messages into a file so that they can be +received/read later by an off-line measurement tool. + +_`.arch.external`: The I/O Interface is part of interface to the +freestanding core system (see design.mps.exec-env_). This is so that +the MPS can be deployed in a freestanding environment, with a special +I/O module. For example, if the MPS is used in a washing machine the +I/O module could communicate by writing output to the seven-segment +display. + + +Example configurations +...................... + +_`.example.telnet`: This shows the I/O subsystem communicating with a +telnet client over a TCP/IP connection. In this case, the I/O +subsystem is translating the I/O Interface into an interactive text +protocol so that the user of the telnet client can talk to the MM. + +[missing diagram] + +_`.example.file`: This shows the I/O subsystem dumping measurement +data into a file which is later read and analysed. In this case the +I/O subsystem is simply writing out binary in a format which can be +decoded. + +[missing diagram] + +_`.example.serial`: This shows the I/O subsystem communicating with a +graphical analysis tool over a serial link. This could be useful for a +developer who has two machines in close proximity and no networking +support. + +_`.example.local`: In this example the application is talking directly +to the I/O subsystem. This is useful when the application is a +reflective development environment (such as MLWorks) which wants to +observe its own behaviour. + +[missing diagram] + + +Interface +--------- + +_`.if.msg`: The I/O interface is oriented around opaque binary +"messages" which the I/O module must pass between the MPM and external +tools. The I/O module need not understand or interpret the contents of +those messages. 
+ +_`.if.msg.opaque`: The messages are opaque in order to minimize the +dependency of the I/O module on the message internals. It should be +possible for clients to implement their own I/O modules for unusual +environments. We do not want to reveal the internal structure of our +data to the clients. Nor do we want to burden them with the details of +our protocols. We'd also like their code to be independent of ours, so +that we can expand or change the protocols without requiring them to +modify their modules. + +_`.if.msg.dgram`: Neither the MPM nor the external tools should assume +that the messages will be delivered in finite time, exactly once, or +in order. This will allow the I/O modules to be implemented using +unreliable transport layers such as the Internet User Datagram Protocol +(UDP). It will also give the I/O module the freedom to drop +information rather than block on a congested network, or stop the +memory manager when the disk is full, or similar events which really +shouldn't cause the memory manager to stop working. The protocols we +need to implement at the high level can be design to be robust against +lossage without much difficulty. + + +I/O module state +................ + +_`.if.state`: The I/O module may have some internal state to preserve. +The I/O Interface defines a type for this state, ``mps_io_t``, a +pointer to an incomplete structure ``mps_io_s``. The I/O module is at +liberty to define this structure. + + +Message types +............. + +_`.if.type`: The I/O module must be able to deliver messages of +several different types. It will probably choose to send them to +different destinations based on their type: telemetry to the +measurement tool, debugging output to the debugger, etc. :: + + typedef int mps_io_type_t; + enum { + MPS_IO_TYPE_TELEMETRY, + MPS_IO_TYPE_DEBUG + }; + + +Limits +...... 
+ +_`.if.message-max`: The interface will define an unsigned integral +constant ``MPS_IO_MESSAGE_MAX`` which will be the maximum size of +messages that the MPM will pass to ``mps_io_send()`` (`.if.send`_) and +the maximum size it will expect to receive from ``mps_io_receive()``. + + +Interface set-up and tear-down +.............................. + +_`.if.create`: The MPM will call ``mps_io_create()`` to set up the I/O +module. On success, this function should return ``MPS_RES_OK``. It may +also initialize a "state" value which will be passed to subsequent +calls through the interface. + +_`.if.destroy`: The MPM will call ``mps_io_destroy()`` to tear down +the I/O module, after which it guarantees that the state value will +not be used again. The ``state`` parameter is the state previously +returned by ``mps_io_create()`` (`.if.create`_). + +Message send and receive +........................ + +``extern mps_res_t mps_io_send(mps_io_t state, mps_io_type_t type, void *message, size_t size)`` + +_`.if.send`: The MPM will call ``mps_io_send()`` when it wishes to +send a message to a destination. The ``state`` parameter is the state +previously returned by ``mps_io_create()`` (`.if.create`_). The +``type`` parameter is the type (`.if.type`_) of the message. The +``message`` parameter is a pointer to a buffer containing the message, +and ``size`` is the length of that message, in bytes. The I/O module +must make an effort to deliver the message to the destination, but is +not expected to guarantee delivery. The function should return +``MPS_RES_IO`` only if a serious error occurs that should cause the +MPM to return with an error to the client application. Failure to +deliver the message does not count. + +.. note:: + + Should there be a timeout parameter? What are the timing + constraints? ``mps_io_send()`` shouldn't block. 
+ +``extern mps_res_t mps_io_receive(mps_io_t state, void **buffer_o, size_t *size_o)`` + +_`.if.receive`: The MPM will call ``mps_io_receive()`` when it wants +to see if a message has been sent to it. The ``state`` parameter is +the state previously returned by ``mps_io_create()`` (`.if.create`_). +The ``buffer_o`` parameter is a pointer to a value which should be +updated with a pointer to a buffer containing the message received. +The ``size_o`` parameter is a pointer to a value which should be +updated with the length of the message received. If there is no +message ready for receipt, the length returned should be zero. + +.. note:: + + Should we be able to receive truncated messages? How can this be + done neatly? + + +I/O module implementations +-------------------------- + +Routeing +........ + +The I/O module must decide where to send the various messages. A +file-based implementation could put them in different files based on +their types. A network-based implementation must decide how to address +the messages. In either case, any configuration must either be +statically compiled into the module, or else read from some external +source such as a configuration file. + + +Notes +----- + +The external tools should be able to reconstruct stuff from partial +info. For example, you come across a fragment of an old log containing +just a few old messages. What can you do with it? + +Here's some completely untested code which might do the job for UDP. 
+:: + + #include "mpsio.h" + + #include <assert.h> + #include <errno.h> + #include <fcntl.h> + #include <sys/socket.h> + #include <netinet/in.h> + #include <unistd.h> + + typedef struct mps_io_s { + int sock; + struct sockaddr_in mine; + struct sockaddr_in telemetry; + struct sockaddr_in debugging; + } mps_io_s; + + static mps_bool_t inited = 0; + static mps_io_s state; + + + mps_res_t mps_io_create(mps_io_t *mps_io_o) + { + int sock, r; + + if(inited) + return MPS_RES_LIMIT; + + state.mine = /* setup somehow from config */; + state.telemetry = /* setup something from config */; + state.debugging = /* setup something from config */; + + /* Make a socket through which to communicate. */ + sock = socket(AF_INET, SOCK_DGRAM, 0); + if(sock == -1) return MPS_RES_IO; + + /* Set socket to non-blocking mode. */ + r = fcntl(sock, F_SETFL, O_NDELAY); + if(r == -1) return MPS_RES_IO; + + /* Bind the socket to some UDP port so that we can receive messages. */ + r = bind(sock, (struct sockaddr *)&state.mine, sizeof(state.mine)); + if(r == -1) return MPS_RES_IO; + + state.sock = sock; + + inited = 1; + + *mps_io_o = &state; + return MPS_RES_OK; + } + + + void mps_io_destroy(mps_io_t mps_io) + { + assert(mps_io == &state); + assert(inited); + + (void)close(state.sock); + + inited = 0; + } + + + mps_res_t mps_io_send(mps_io_t mps_io, mps_io_type_t type, + void *message, size_t size) + { + struct sockaddr *toaddr; + + assert(mps_io == &state); + assert(inited); + + switch(type) { + case MPS_IO_TYPE_TELEMETRY: + toaddr = (struct sockaddr *)&state.telemetry; + break; + + case MPS_IO_TYPE_DEBUG: + toaddr = (struct sockaddr *)&state.debugging; + break; + + default: + assert(0); + return MPS_RES_UNIMPL; + } + + (void)sendto(state.sock, message, size, 0, toaddr, sizeof(*toaddr)); + + return MPS_RES_OK; + } + + + mps_res_t mps_io_receive(mps_io_t mps_io, + void **message_o, size_t *size_o) + { + int r; + static char buffer[MPS_IO_MESSAGE_MAX]; + + assert(mps_io == &state); + assert(inited); + + r = recvfrom(state.sock, buffer, sizeof(buffer), 0, NULL, NULL); + if(r
== -1) + switch(errno) { + /* Ignore interrupted system calls, and failures due to lack */ + /* of resources (they might go away.) */ + case EINTR: case ENOMEM: case ENOSR: + r = 0; + break; + + default: + return MPS_RES_IO; + } + + *message_o = buffer; + *size_o = r; + return MPS_RES_OK; + } + + +Attachments +----------- + +"O Architecture Diagram" +"O Configuration Diagrams" + + +Document History +---------------- + +- 1996-08-30 RB_ Document created from paper notes. + +- 1997-06-10 RB_ Updated with `mail.richard.1997-05-30.16-13`_ and + subsequent discussion in the Pool Hall at Longstanton. (See also + `mail.drj.1997-06-05.15-20`_.) + + .. _mail.richard.1997-05-30.16-13: https://info.ravenbrook.com/project/mps/mail/1997/05/30/16-13/0.txt + .. _mail.drj.1997-06-05.15-20: https://info.ravenbrook.com/project/mps/mail/1997/06/05/15-20/0.txt + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/mps/design/keyword-arguments.txt b/mps/design/keyword-arguments.txt
new file mode 100644
index 00000000000..711e2343ca8
--- /dev/null
+++ b/mps/design/keyword-arguments.txt
@@ -0,0 +1,191 @@
+.. mode: -*- rst -*-
+
+Keyword arguments in the MPS
+============================
+
+:Author: Richard Brooksby
+:Organization: Ravenbrook Limited
+:Date: 2013-05-09
+:Revision: $Id$
+:Copyright: See `Copyright and License`_.
+:Index terms: pair: keyword arguments; design
+
+
+Introduction
+------------
+Up to version 1.111, the `Memory Pool System
+`_ used varargs to pass arguments
+to arena and pool classes, because the general MPS interface can't
+specify what arguments those classes might need in C prototypes. This
+mechanism was error-prone and did not allow for any optional arguments,
+meaning that the client had to specify or predict esoteric tuning
+parameters.
+
+Starting with version 1.112, the MPS uses an idiom for keyword arguments.
+
+The keyword argument design was originally proposed in [RB_2012-05-24]_.
+
+
+Overview
+--------
+The basic design is not specific to the MPS. The keyword argument list is
+passed as an array of argument structures which look like this::
+
+    typedef struct mps_key_s *mps_key_t;
+    typedef struct mps_arg_s {
+        mps_key_t key;
+        union {
+            int i;
+            char c;
+            void *p;
+            size_t size;
+            /* etc. 
*/ + } val; + } mps_arg_s; + +The argument list is assembled and passed like this:: + + mps_arg_s args[3]; + args[0].key = MPS_KEY_MIN_SIZE; + args[0].val.size = 32; + args[1].key = MPS_KEY_MAX_SIZE; + args[1].val.size = 1024; + args[2].key = MPS_KEY_ARGS_END; + mps_pool_create_k(&pool, some_pool_class(), args); + +This can be written quite concisely in C99:: + + mps_pool_create_k(&pool, some_pool_class(), + (mps_arg_s []){{MPS_KEY_MIN_SIZE, {.size = 32}}, + {MPS_KEY_MAX_SIZE, {.size = 1024}}, + {MPS_KEY_ARGS_END}}); + +The arguments that are recognised and used by the function are removed +from the array (and the subsequent arguments moved up) so that if they +are all consumed the array has ``MPS_KEY_ARGS_END`` in slot zero on +return. This can be checked by the caller. + +- It's not a static error to pass excess arguments. This makes it easy to + substitute one pool or arena class for another (which might ignore some + arguments). The caller can check that ``args[0].key`` is + ``MPS_KEY_ARGS_END`` if desired. + +- NULL is not a valid argument list. This is in line with general MPS + design principles to avoid accidental omissions. For convenience, we + provide ``mps_args_none`` as a static empty argument list. + +- NULL is not a valid argument key. This is in line with general MPS + design principles to avoid accidental omissions. Every key points to + a structure with a signature that can be checked. This makes it virtually + impossible to get an argument list with bad keys or that is unterminated + past MPS checking. + + +Internals +--------- +Internally, keys are static constant structures which are signed and contain +a checking method for the argument, like this:: + + typedef struct mps_arg_s *Arg; + typedef struct mps_key_s { + Sig sig; /* Always KeySig */ + const char *name; + Bool check(Arg arg); + } KeyStruct; + +They are mostly declared in the modules that consume them, except for a few +common keys. 
Declarations look like:: + + const KeyStruct _mps_key_extend_by = {KeySig, "extend_by", ArgCheckSize}; + +but ``arg.h`` provides a macro for this:: + + ARG_DEFINE_KEY(extend_by, Size); + +We define keys as static structures (rather than, say, an enum) because: + +- The set of keys can be extended indefinitely. +- The set of keys can be extended by independently linked modules. +- The structure contents allow strong checking of argument lists. + +In the MPS C Interface, we declare keys like this:: + + extern const struct mps_key_s _mps_key_extend_by; + #define MPS_KEY_EXTEND_BY (&_mps_key_extend_by) + +The underscore on the symbol requests that client code doesn't reference +it, but instead uses the macro. This gives us adaptability to change the +design and replace keys with, say, magic numbers. + + +The varargs legacy +------------------ +For backward compatibility, varargs to arena and pool creation are +converted into keyword arguments by position, using a method in the +arena or pool class. For example:: + + static void MVVarargs(ArgStruct args[], va_list varargs) + { + args[0].key = MPS_KEY_EXTEND_BY; + args[0].val.size = va_arg(varargs, Size); + args[1].key = MPS_KEY_MEAN_SIZE; + args[1].val.size = va_arg(varargs, Size); + args[2].key = MPS_KEY_MAX_SIZE; + args[2].val.size = va_arg(varargs, Size); + args[3].key = MPS_KEY_ARGS_END; + AVER(ArgListCheck(args)); + } + +This leaves the main body of code, and any future code, free to just +handle keyword arguments only. + +Varargs methods must be thread-safe as they are called without taking +the arena lock. + +The use of varargs is deprecated in the manual and the interface and these +methods can be deleted at some point in the future. + + +References +---------- + +.. [RB_2012-05-24] + "Keyword and optional arguments"; Richard Brooksby; + Ravenbrook Limited; 2012-05-24; + . + + +Document History +---------------- +- 2013-05-09 RB_ Created based on [RB_2012-05-24]_. + +.. 
_RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/land.txt b/mps/design/land.txt new file mode 100644 index 00000000000..a5d01bf1833 --- /dev/null +++ b/mps/design/land.txt @@ -0,0 +1,390 @@ +.. mode: -*- rst -*- + +Lands +===== + +:Tag: design.mps.land +:Author: Gareth Rees +:Date: 2014-04-01 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: lands; design + + +Introduction +------------ + +_`.intro`: This is the design of the *land* abstract data type, which +represents a collection of contiguous address ranges. 
+ +_`.readership`: This document is intended for any MPS developer. + +_`.source`: design.mps.cbs_, design.mps.freelist_. + +_`.overview`: Collections of address ranges are used in several places +in the MPS: the arena stores a set of mapped address ranges; pools +store sets of address ranges which have been acquired from the arena +and sets of address ranges that are available for allocation. The +*land* abstract data type makes it easy to try out different +implementations with different performance characteristics and other +attributes. + +_`.name`: The name is inspired by *rangeland* meaning *group of +ranges* (where *ranges* is used in the sense *grazing areas*). + + +Definitions +----------- + +_`.def.range`: A (contiguous) *range* of addresses is a semi-open +interval on address space. + +_`.def.isolated`: A contiguous range is *isolated* with respect to +some property it has, if adjacent elements do not have that property. + + +Requirements +------------ + +_`.req.set`: Must maintain a set of addresses. + +_`.req.add`: Must be able to add address ranges to the set. + +_`.req.remove`: Must be able to remove address ranges from the set. + +_`.req.size`: Must report concisely to the client when isolated +contiguous ranges of at least a certain size appear and disappear. + +_`.req.iterate`: Must support the iteration of all isolated +contiguous ranges. + +_`.req.protocol`: Must detect protocol violations. + +_`.req.debug`: Must support debugging of client code. + +_`.req.align`: Must support an alignment (the alignment of all +addresses specifying ranges) of down to ``sizeof(void *)`` without +losing memory. + + +Interface +--------- + +Types +..... + +``typedef LandStruct *Land`` + +_`.type.land`: The type of a generic land instance. + +``typedef Bool (*LandVisitor)(Land land, Range range, void *closure)`` + +_`.type.visitor`: Type ``LandVisitor`` is a callback function that may +be passed to ``LandIterate()``. 
It is called for every isolated +contiguous range in address order. The function must return a ``Bool`` +indicating whether to continue with the iteration. + +``typedef Bool (*LandDeleteVisitor)(Bool *deleteReturn, Land land, Range range, void *closure)`` + +_`.type.deletevisitor`: Type ``LandDeleteVisitor`` is a callback function that may +be passed to ``LandIterateAndDelete()``. It is called for every isolated +contiguous range in address order. The function must return a ``Bool`` +indicating whether to continue with the iteration. It may additionally +update ``*deleteReturn`` to ``TRUE`` if the range must be deleted from +the land, or ``FALSE`` if the range must be kept. (The default is to +keep the range.) + + +Generic functions +................. + +``Res LandInit(Land land, LandClass class, Arena arena, Align alignment, void *owner, ArgList args)`` + +_`.function.init`: ``LandInit()`` initializes the land structure for +the given class. The land will perform allocation (if necessary -- not +all land classes need to allocate) in the supplied arena. The +``alignment`` parameter is the alignment of the address ranges that +will be stored and retrieved from the land. The parameter ``owner`` is +output as a parameter to the ``LandInit`` event. The newly initialized +land contains no ranges. + +``Res LandCreate(Land *landReturn, Arena arena, LandClass class, Align alignment, void *owner, ArgList args)`` + +_`.function.create`: ``LandCreate()`` allocates memory for a land +structure of the given class in ``arena``, and then passes all +parameters to ``LandInit()``. + +``void LandFinish(Land land)`` + +_`.function.finish`: ``LandFinish()`` finishes the land structure and +discards any other resources associated with the land. + +``void LandSize(Land land)`` + +_`.function.size`: ``LandSize()`` returns the total size of the ranges +stored in the land. 
+
+``Res LandInsert(Range rangeReturn, Land land, Range range)``
+
+_`.function.insert`: If any part of ``range`` is already in the land,
+then leave the land unchanged and return ``ResFAIL``. Otherwise,
+attempt to insert ``range`` into the land. If the insertion succeeds,
+then update ``rangeReturn`` to describe the contiguous isolated range
+containing the inserted range (this may differ from ``range`` if there
+was coalescence on either side) and return ``ResOK``. If the insertion
+fails, return a result code indicating allocation failure.
+
+_`.function.insert.fail`: Insertion of a valid range (that is, one
+that does not overlap with any range in the land) can only fail if the
+new range is isolated and the allocation of the necessary data
+structure to represent it failed.
+
+_`.function.insert.alias`: It is acceptable for ``rangeReturn`` and
+``range`` to share storage.
+
+``Res LandInsertSteal(Range rangeReturn, Land land, Range rangeIO)``
+
+_`.function.insert-steal`: If any part of ``rangeIO`` is already in
+the land, then leave the land unchanged and return ``ResFAIL``.
+Otherwise, insert ``rangeIO`` into the land, update ``rangeReturn`` to
+describe the contiguous isolated range containing the inserted range
+(this may differ from ``range`` if there was coalescence on either
+side), and return ``ResOK``.
+
+_`.function.insert-steal.steal`: If insertion requires allocation for
+the land's internal data structures, steal some of the memory in
+``rangeIO``, use it to satisfy the allocation, update ``rangeIO`` so
+that it describes the remaining part of the range, and insert the
+remainder into the land as described above.
+
+_`.function.insert-steal.allocated`: In order for stealing to work,
+the inserted range must be allocated from the arena to some pool or
+pools.
+
+_`.function.insert-steal.empty`: After stealing memory, ``rangeIO``
+might be empty, in which case ``rangeReturn`` will be a copy of
+``rangeIO``. 
+ +_`.function.insert-steal.alias.not`: It is not acceptable for +``rangeReturn`` and ``rangeIO`` to share storage. + +``Res LandDelete(Range rangeReturn, Land land, Range range)`` + +_`.function.delete`: If any part of the range is not in the land, +then leave the land unchanged and return ``ResFAIL``. Otherwise, update +``rangeReturn`` to describe the contiguous isolated range that +contains ``range`` (this may differ from ``range`` if there are +fragments on either side) and attempt to delete the range from the +land. If the deletion succeeds, return ``ResOK``. If the deletion +fails, return a result code indicating allocation failure. + +_`.function.delete.fail`: Deletion of a valid range (that is, one +that is wholly contained in the land) can only fail if there are +fragments on both sides and the allocation of the necessary data +structures to represent them fails. + +_`.function.delete.return`: ``LandDelete()`` returns the contiguous +isolated range that contains ``range`` even if the deletion fails. +This is so that the caller can try deleting the whole block (which is +guaranteed to succeed) and managing the fragments using a fallback +strategy. + +_`.function.delete.alias`: It is acceptable for ``rangeReturn`` and +``range`` to share storage. + +``Res LandDeleteSteal(Range rangeReturn, Land land, Range range)`` + +_`.function.delete-steal`: If any part of the range is not in the +land, then leave the land unchanged and return ``ResFAIL``. Otherwise, +update ``rangeReturn`` to describe the contiguous isolated range that +contains ``range`` (this may differ from ``range`` if there are +fragments on either side), delete the range from the land, and return +``ResOK``. + +_`.function.delete-steal.steal`: If deletion requires allocation for +the land's internal data structures, steal some of the memory in the +contiguous isolated range that contains ``range``, and use it to +satisfy the allocation. 
+ +_`.function.delete-steal.allocated`: In order for stealing to work, +the addresses stored in the land must be allocated from the arena to +some pool or pools. + +_`.function.delete-steal.alias`: It is acceptable for ``rangeReturn`` +and ``range`` to share storage. + +``Bool LandIterate(Land land, LandVisitor visitor, void *closure)`` + +_`.function.iterate`: ``LandIterate()`` is the function used to +iterate all isolated contiguous ranges in a land. It receives a +visitor function to invoke on every range, and a closure pointer +to pass on to the visitor function. If the visitor +function returns ``FALSE``, then iteration is terminated and +``LandIterate()`` returns ``FALSE``. If all iterator method calls +return ``TRUE``, then ``LandIterate()`` returns ``TRUE`` + +``Bool LandIterateAndDelete(Land land, LandDeleteVisitor visitor, void *closure)`` + +_`.function.iterate.and.delete`: As ``LandIterate()``, but the visitor +function additionally returns a Boolean indicating whether the range +should be deleted from the land. + +_`.function.iterate.and.delete.justify`: The reason for having both +``LandIterate()`` and ``LandIterateAndDelete()`` is that it may be +possible to use a more efficient algorithm, or to preserve more +properties of the data structure, when it is known that the land will +not be modified during the iteration. For example, in the CBS +implementation, ``LandIterate()`` uses ``TreeTraverse()`` which +preserves the tree structure, whereas ``LandIterateAndDelete()`` uses +``TreeTraverseAndDelete()`` which flattens the tree structure, losing +information about recently accessed nodes. + +``Bool LandFindFirst(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete)`` + +_`.function.find.first`: Locate the first block (in address order) +within the land of at least the specified size, update ``rangeReturn`` +to describe that range, and return ``TRUE``. If there is no such +block, it returns ``FALSE``. 
+ +In addition, optionally delete the top, bottom, or all of the found +range, depending on the ``findDelete`` argument. This saves a separate +call to ``LandDelete()``, and uses the knowledge of exactly where we +found the range. The value of ``findDelete`` must come from this +enumeration:: + + enum { + FindDeleteNONE, /* don't delete after finding */ + FindDeleteLOW, /* delete size bytes from low end of block */ + FindDeleteHIGH, /* delete size bytes from high end of block */ + FindDeleteENTIRE /* delete entire range */ + }; + +The original contiguous isolated range in which the range was found is +returned via the ``oldRangeReturn`` argument. (If ``findDelete`` is +``FindDeleteNONE`` or ``FindDeleteENTIRE``, then this will be +identical to the range returned via the ``rangeReturn`` argument.) + +``Bool LandFindLast(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete)`` + +_`.function.find.last`: Like ``LandFindFirst()``, except that it +finds the last block in address order. + +``Bool LandFindLargest(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete)`` + +_`.function.find.largest`: Locate the largest block within the +land, and if that block is at least as big as ``size``, return its +range via the ``rangeReturn`` argument, and return ``TRUE``. If there +are no blocks in the land at least as large as ``size``, return +``FALSE``. Pass 0 for ``size`` if you want the largest block +unconditionally. + +Like ``LandFindFirst()``, optionally delete the range (specifying +``FindDeleteLOW`` or ``FindDeleteHIGH`` has the same effect as +``FindDeleteENTIRE``), and return the original contiguous isolated +range in which the range was found via the ``oldRangeReturn`` +argument. 
+
+``Res LandFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high)``
+
+_`.function.find.zones`: Locate a block at least as big as ``size``
+that lies entirely within the ``zoneSet``, return its range via the
+``rangeReturn`` argument, set ``*foundReturn`` to ``TRUE``, and return
+``ResOK``. (The first such block, if ``high`` is ``FALSE``, or the
+last, if ``high`` is ``TRUE``.) If there is no such block, set
+``*foundReturn`` to ``FALSE``, and return ``ResOK``.
+
+Delete the range as for ``LandFindFirst()`` and ``LandFindLast()``
+(with the effect of ``FindDeleteLOW`` if ``high`` is ``FALSE`` and the
+effect of ``FindDeleteHIGH`` if ``high`` is ``TRUE``), and return the
+original contiguous isolated range in which the range was found via
+the ``oldRangeReturn`` argument.
+
+_`.function.find.zones.fail`: It's possible that the range can't be
+deleted from the land because that would require allocation, in which
+case the result code indicates the cause of the failure.
+
+``Res LandDescribe(Land land, mps_lib_FILE *stream)``
+
+_`.function.describe`: ``LandDescribe()`` prints a textual
+representation of the land to the given stream. It is provided for
+debugging purposes only.
+
+``void LandFlush(Land dest, Land src)``
+
+_`.function.flush`: Delete ranges of addresses from ``src`` and insert
+them into ``dest``, so long as ``LandInsert()`` remains successful.
+
+
+Implementations
+---------------
+
+There are three land implementations:
+
+#. CBS (Coalescing Block Structure) stores ranges in a splay tree. It
+   has fast (logarithmic in the number of ranges) insertion, deletion
+   and searching, but has substantial space overhead. See
+   design.mps.cbs_.
+
+#. Freelist stores ranges in an address-ordered free list, as in
+   traditional ``malloc()`` implementations. Insertion, deletion, and
+   searching are slow (proportional to the number of ranges) but it
+   does not need to allocate. 
See design.mps.freelist_. + +#. Failover combines two lands, using one (the *primary*) until it + fails, and then falls back to the other (the *secondary*). See + design.mps.failover_. + +.. _design.mps.cbs: cbs +.. _design.mps.freelist: freelist +.. _design.mps.failover: failover + + +Testing +------- + +_`.test`: There is a stress test for implementations of this interface +in impl.c.landtest. This allocates a large block of memory and then +simulates the allocation and deallocation of ranges within this block +using both a ``Land`` and a ``BT``. It makes both valid and invalid +requests, and compares the ``Land`` response to the correct behaviour +as determined by the ``BT``. It iterates the ranges in the ``Land``, +comparing them to the ``BT``. It invokes the ``LandDescribe()`` +generic function, but makes no automatic test of the resulting output. + + +Document History +---------------- + +- 2014-04-01 GDR_ Created based on design.mps.cbs_. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/lib.txt b/mps/design/lib.txt new file mode 100644 index 00000000000..01395c27441 --- /dev/null +++ b/mps/design/lib.txt @@ -0,0 +1,121 @@ +.. mode: -*- rst -*- + +Library interface +================= + +:Tag: design.mps.lib +:Author: Richard Brooksby +:Date: 1996-09-03 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: library interface; design + + +Introduction +------------ + +_`.intro`: This document is the design of the MPS Library Interface, a +part of the plinth. + +_`.readership`: Any MPS developer. Any clients that are prepared to +read this in order to get documentation. + + +Goals +----- + +_`.goal`: The goals of the MPS library interface are: + +_`.goal.host`: To control the dependency of the MPS on the hosted ISO +C library so that the core MPS remains freestanding (see +design.mps.exec-env_). + +.. _design.mps.exec-env: exec-env + +_`.goal.free`: To allow the core MPS convenient access to ISO C +functionality that is provided on freestanding platforms (see +design.mps.exec-env_). + + +Description +----------- + +Overview +........ + +_`.overview.access`: The core MPS needs to access functionality that +could be provided by an ISO C hosted environment. + +_`.overview.hosted`: The core MPS must not make direct use of any +facilities in the hosted environment (design.mps.exec-env_). 
However, +it is sensible to make use of them when the MPS is deployed in a +hosted environment. + +_`.overview.hosted.indirect`: The core MPS does not make any direct +use of hosted ISO C library facilities. Instead, it indirects through +the MPS Library Interface, impl.h.mpslib. + +_`.overview.provision.client`: In a freestanding environment the +client is expected to provide functions meeting this interface to the +MPS. + +_`.overview.provision.hosted`: In a hosted environment, +impl.c.mpsliban may be used. It just maps impl.h.mpslib directly onto +the ISO C library equivalents. + + +Implementation +-------------- + +_`.impl`: The MPS Library Interface comprises a header file +impl.h.mpslib and some documentation. + +_`.impl.decl`: The header file defines the interface to definitions +which parallel those parts of the non-freestanding ISO headers which +are used by the MPS. + +_`.impl.include`: The header file also includes the freestanding +header ````. + + +Document History +---------------- + +- 1996-09-03 RB_ Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/lock.txt b/mps/design/lock.txt new file mode 100644 index 00000000000..b823b96675f --- /dev/null +++ b/mps/design/lock.txt @@ -0,0 +1,327 @@ +.. mode: -*- rst -*- + +Lock module +=========== + +:Tag: design.mps.lock +:Author: David Moore +:Date: 1995-11-21 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: locking; design + + +Introduction +------------ + +_`.intro`: This is the design of the lock module. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. + + +Background +---------- + +_`.need`: In an environment where multiple threads are accessing +shared data, threads need to cooperate to maintain consistency. Locks +provide a simple mechanism for doing this. + +_`.ownership`: A lock is an object which may be "owned" by a single +thread at a time. By claiming ownership of a lock before executing +some piece of code, a thread can guarantee that no other thread owns +the lock during execution of that code. If some other thread holds a +claim on a lock, the thread trying to claim the lock will suspend +until the lock is released by the owning thread. 
+ +_`.data`: A simple way of using this behaviour is to associate a lock +with a shared data structure. By claiming that lock around accesses to +the data, a consistent view of the structure can be seen by the +accessing thread. More generally any set of operations which are +required to be mutually exclusive may be performed so by using locks. + + +Requirements +------------ + +_`.req.thread-safety`: Support the locking needs of +design.mps.thread-safety_. + +.. _design.mps.thread-safety: thread-safety + +_`.req.binary`: Provide *binary* locks: that is, locks that can be +claimed, and until released, other attempts to claim them block. (This +is needed to implement the arena lock.) + +_`.req.recursive`: Provide *recursive* locks: that is, locks that can +be claimed again by the thread currently holding them, without +blocking or deadlocking. (This is needed to implement the global +recursive lock.) + +_`.req.held`: Provide a means to test if a lock is held. (This is +needed for debugging a dynamic function table callback on Windows on +x86-64. See ``mps_arena_busy()`` for a detailed description of this +use case. Note that in this use case the program is running +single-threaded and so there is no need for this feature to be +thread-safe.) + +_`.req.global`: Provide *global* locks: that is locks that need not be +allocated or initialized by the user. + +_`.req.global.binary`: Provide a global binary lock. (This is required +to protect the data structure allowing multiple arenas to coordinate +handling of protection faults: see +design.mps.thread-safety.sol.global.mutable_.) + +.. _design.mps.thread-safety.sol.global.mutable: thread-safety#.sol.global.mutable + +_`.req.global.recursive`: Provide a global recursive lock. (This is +required to protect protocol class initialization: see +design.mps.thread-safety.sol.global.once_.) + +.. 
_design.mps.thread-safety.sol.global.once: thread-safety#.sol.global.once
+
+_`.req.deadlock.not`: There is no requirement to provide protection
+against deadlock. (Clients are able to avoid deadlock using
+traditional strategies such as ordering of locks; see
+design.mps.thread-safety.sol.deadlock_.)
+
+.. _design.mps.thread-safety.sol.deadlock: thread-safety#.sol.deadlock
+
+
+Interface
+---------
+
+``typedef LockStruct *Lock``
+
+An opaque type representing a lock. Clients that need to allocate
+space for a lock should dynamically allocate space for the structure,
+calling ``LockSize()`` to determine the size.
+
+``size_t LockSize(void)``
+
+Return the size of a ``LockStruct`` for allocation purposes.
+
+``void LockInit(Lock lock)``
+
+Initialize the lock. This must be called before any use of the lock.
+After initialization, the lock is not owned by any thread.
+
+``void LockFinish(Lock lock)``
+
+Finish the lock. The lock must not be owned by any thread.
+
+``void LockClaim(Lock lock)``
+
+Wait, if necessary, until the lock is not owned by any thread. Then
+claim ownership of the lock by the current thread.
+
+``void LockRelease(Lock lock)``
+
+Releases ownership of a lock that is currently owned.
+
+``void LockClaimRecursive(Lock lock)``
+
+Remembers the previous state of the lock with respect to the current
+thread and claims the lock (if not already held).
+
+``void LockReleaseRecursive(Lock lock)``
+
+Restores the previous state of the lock remembered by the
+corresponding ``LockClaimRecursive()`` call.
+
+``Bool LockIsHeld(Lock lock)``
+
+Return true if the lock is held by any thread, false otherwise. Note
+that this function need not be thread-safe (see `.req.held`_).
+
+``void LockInitGlobal(void)``
+
+Initialize (or re-initialize) the global locks. This should only be
+called in the following circumstances: the first time either of the
+global locks is claimed; and in the child process after a ``fork()``.
+See design.mps.thread-safety.sol.fork.lock_.
+ +.. _design.mps.thread-safety.sol.fork.lock: thread-safety#.sol.fork.lock + +``void LockClaimGlobal(void)`` + +Claims ownership of the binary global lock which was previously not +held by current thread. + +``void LockReleaseGlobal(void)`` + +Releases ownership of the binary global lock that is currently owned. + +``void LockClaimGlobalRecursive(void)`` + +Remembers the previous state of the recursive global lock with respect +to the current thread and claims the lock (if not already held). + +``void LockReleaseGlobalRecursive(void)`` + +Restores the previous state of the recursive global lock remembered by +the corresponding ``LockClaimGlobalRecursive()`` call. + +``void LockSetup(void)`` + +One-time initialization function, intended for calling +``pthread_atfork()`` on the appropriate platforms: see design.mps.thread-safety.sol.fork.lock_. + + +Implementation +-------------- + +_`.impl.recursive`: For recursive claims, the list of previous states +can be implemented by keeping a count of the number of claims made by +the current thread so far. In the multi-threaded implementations this +is handled by the operating system interface, but a count is still +kept and used to check correctness. + +_`.impl.recursive.limit`: The implementation imposes a limit on the +number of recursive claims (see issue.lock-claim-limit_). On Windows, +the critical section object contains the field ``LONG +RecursionCount``. In typical POSIX Threads implementations, +``pthread_mutex_t`` uses an ``int`` for the count of recursive claims. + +.. _issue.lock-claim-limit: https://info.ravenbrook.com/project/mps/import/2001-09-27/mminfo/issue/lock-claim-limit + +_`.impl.global`: The binary and recursive global locks are typically +implemented using the same mechanism as normal locks. (But an +operating system-specific mechanism is used, if possible, to ensure +that the global locks are initialized just once.) 
+ +_`.impl.an`: Single-threaded generic implementation ``lockan.c``: + +- single-threaded; +- no need for locking; +- locking structure contains count; +- provides checking in debug version; +- otherwise does nothing except keep count of claims. + +_`.impl.w3`: Windows implementation ``lockw3.c``: + +- supports Windows threads; +- uses critical section objects [cso]_; +- locking structure contains a critical section object; +- recursive and non-recursive calls use the same Windows function; +- also performs checking. + +_`.impl.ix`: POSIX implementation ``lockix.c``: + +- supports [POSIXThreads]_; +- locking structure contains a mutex, initialized to check for + recursive locking; +- locking structure contains a count of the number of active claims; +- non-recursive locking calls ``pthread_mutex_lock()`` and expects + success; +- recursive locking calls ``pthread_mutex_lock()`` and expects either + success or ``EDEADLK`` (indicating a recursive claim); +- also performs checking. + + +Example +------- + +_`.example.init`: An example of allocating and initializing a lock:: + + #include "lock.h" + + static Lock lock; + + void init() + { + mps_addr_t p; + if (mps_alloc(&p, pool, LockSize()) != MPS_RES_OK) + exit(1); + lock = p; + LockInit(lock); + } + +_`.example.binary`: An example of using a binary lock:: + + void binaryUse() + { + /* lock must not be owned by this thread, or else this deadlocks. */ + LockClaim(lock); + /* lock is now owned by this thread. */ + /* cannot call binaryUse() at this point. */ + /* only one thread at a time may be at this point. */ + LockRelease(lock); + /* lock not owned by this thread. */ + } + +_`.example.recursive`: An example of using a recursive lock:: + + void recursiveUse() + { + /* lock may or may not already be owned by this thread. */ + LockClaimRecursive(lock); + /* lock is now owned by this thread. */ + /* cannot call binaryUse() at this point. */ + /* can call recursiveUse() at this point. 
*/ + /* only one thread at a time may be at this point. */ + LockReleaseRecursive(lock); + /* lock is still owned by this thread if it was before. */ + } + + +References +---------- + +.. [cso] + Microsoft Developer Network; + "Critical Section Objects"; + + +.. [POSIXThreads] + The Open Group; + "The Single UNIX Specification, Version 2---Threads"; + + + +Document History +---------------- + +- 1995-11-21 David Moore. Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-04-14 GDR_ Converted to reStructuredText. + +- 2014-10-21 GDR_ Brought up to date. + +- 2018-06-14 GDR_ Added ``LockInitGlobal()``. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/locus.txt b/mps/design/locus.txt new file mode 100644 index 00000000000..7ab02c427b6 --- /dev/null +++ b/mps/design/locus.txt @@ -0,0 +1,726 @@ +.. mode: -*- rst -*- + +Locus manager +============= + +:Tag: design.mps.locus +:Author: Gavin Matthews +:Date: 1998-02-27 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: locus manager; design + + +Introduction +------------ + +_`.intro`: The locus manager coordinates between the pools and takes +the burden of having to be clever about tract/group placement away +from the pools, preserving trace differentiability and contiguity +where appropriate. + +_`.source`: `mail.gavinm.1998-02-05.17-52`_, +`mail.ptw.1998-02-05.19-53`_, `mail.pekka.1998-02-09.13-58`_, and +`mail.gavinm.1998-02-09.14-05`_. + +.. _mail.gavinm.1998-02-05.17-52: https://info.ravenbrook.com/project/mps/mail/1998/02/05/17-52/0.txt +.. _mail.ptw.1998-02-05.19-53: https://info.ravenbrook.com/project/mps/mail/1998/02/05/19-53/0.txt +.. _mail.pekka.1998-02-09.13-58: https://info.ravenbrook.com/project/mps/mail/1998/02/09/13-58/0.txt +.. _mail.gavinm.1998-02-09.14-05: https://info.ravenbrook.com/project/mps/mail/1998/02/09/14-05/0.txt + +_`.readership`: Any MPS developer. + + +Overview +-------- + +The MPS manages three main resources: + +#. storage; +#. address space; +#. time. + +The locus manager manages address space at the arena level. + +.. 
note:: + + Tucker was right: see `mail.ptw.1998-11-02.14-25`_. Richard + Kistruck, 2007-04-24. + + .. _mail.ptw.1998-11-02.14-25: https://info.ravenbrook.com/project/mps/mail/1998/11/02/14-25/0.txt + +When a pool wants some address space, it expresses some preferences to +the locus manager. The locus manager and the arena (working together) +try to honour these preferences, and decide what address space the +pool gets. + +Preferences are expressed by the ``LocusPref`` argument to +``SegAlloc()``. Note that, when they call ``SegAlloc()``, pools are +asking for address space and writeable storage simultaneously, in a +single call. There is currently no way for pools to reserve address +space without requesting storage. + + +Why is it important to manage address space? +............................................ + +#. Trace differentiability + + Carefully chosen addresses are used by reference tracing systems + (ie. automatic pools), to categorise objects into clumps; and to + summarise and cheaply find references between clumps. + + Different clumps will become worth collecting at different times + (the classic example, of course, is generations in a generational + collector). For these partial collections to be efficient, it must + be cheap to keep these clumps differentiable, cheap to condemn + (Whiten) a particular clump, and cheap to find a good conservative + approximation to all inward references to a clump (both initially + to construct the Grey set, and to make scanning the Grey set + efficient). + + This is what the MPS zone mechanism is all about. + + The locus manager manages the mapping from clumps to zones. + + To specify a clump, pools can pass ``LocusPrefZONESET`` and a set + of zones to ``LocusPrefExpress()``. + +#. Prevent address space fragmentation (within the arena) + + Address space is not infinite. + + In some use cases, the MPS is required to remain efficient when + using very nearly all available address space and storage. 
For + example, with the client-arena class, where the only address space + available is that of the storage available. + + Even with the VM arena class, typical storage sizes (as of 2007) + can make 32-bit address space constrained: the client may need + several gigabytes, which leaves little spare address space. + + Address space fragmentation incurs failure when there is no way to + allocate a big block of address space. The big block may be + requested via the MPS (by the client), or by something else in the + same process, such as a third-party graphics library, image + library, etc. + + Address space fragmentation incurs cost when: + + - desired large-block requests (such as for buffering) are denied, + causing them to be re-requested as a smaller block, or as several + smaller blocks; + + - possible operating-system costs in maintaining a fragmented + mapping? + +#. Prevent storage fragmentation (within tracts and segments) + + Storage is not infinite: it is allocated in multiples of a + fixed-size tract. Small lonely objects, each retaining a whole + tract, cause storage fragmentation. + + Non-moving pools manage this fragmentation with placement + strategies that use: + + - co-located death (in space and time); + - segment merging and splitting. + + These pool-level strategies always care about contiguity of object + storage. They also often care about the *ordering* of addresses, + because pool code uses an address-ordered search when choosing + where to place a new object. For these two reasons, the address + chosen (by the locus manager and arena) for new tracts is + important. + + Certain specialised pools, and/or some client programs that use + them, have carefully tuned segment sizes, positioning, and search + order. Be careful: seemingly inconsequential changes can + catastrophically break this tuning. + + Pools can specify a preference for High and Low ends of address + space, which implies a search-order. 
Pools could also specify + clumping, using ``LocusPrefZONESET``. + + +Discovering the layout +...................... + +The locus manager is not given advance notice of how much address +space will be required with what preferences. Instead, the locus +manager starts with an empty layout, and adapts it as more requests +come in over time. It is attempting to discover a suitable layout by +successive refinement. This is ambitious. + + +Definitions +----------- + +_`.note.cohort`: We use the word "cohort" in its usual sense here, but +we're particularly interested in cohorts that have properties relevant +to tract placement. It is such cohorts that the pools will try to +organize using the services of the locus manager. Typical properties +would be trace differentiability or (en masse) death-time +predictability. Typical cohorts would be instances of a +non-generational pool, or generations of a collection strategy. + +_`.def.trace.differentiability`: Objects (and hence tracts) that are +collected, may or may not have "trace differentiability" from each +other, depending on their placement in the different zones. Objects +(or pointers to them) can also have trace differentiability (or not) +from non-pointers in ambiguous references; in practice, we will be +worried about low integers, that may appear to be in zones 0 or -1. + + +Requirements +------------ + +_`.req.cohort`: Tract allocations must specify the cohort they +allocate in. These kind of cohorts will be called loci, and they will +have such attributes as are implied by the other requirements. +Critical. + +_`.req.counter.objects`: As a counter-requirement, pools are expected +to manage objects. Objects the size of a tract allocation request +(segment-sized) are exceptional. Critical. +_`.req.counter.objects.just`: This means the locus manager is not +meant to solve the problems of allocating large objects, and it isn't +required to know what goes on in pools. 
+ +_`.req.contiguity`: Must support a high level of contiguity within +cohorts when requested. This means minimizing the number of times a +cohort is made aware of discontiguity. Essential (as we've effectively +renegotiated this in SW, down to a vague hope that certain critical +cohorts are not too badly fragmented). _`.req.contiguity.just`: TSBA. + +_`.req.contiguity.specific`: It should be possible to request another +allocation next to a specific tract on either side (or an extension in +that direction, as the case may be). Such a request can fail, if +there's no space there. Nice. It would also be nice to have one for +"next to the largest free block". + +_`.req.differentiable`: Must support the trace differentiability of +segments that may be condemned separately. Due to the limited number +of zones, it must be possible to place several cohorts into the same +zone. Essential. + +_`.req.differentiable.integer`: It must be possible to place +collectable allocations so that they are trace-differentiable from +small integers. Essential. + +_`.req.disjoint`: Must support the disjointness of pages that have +different VM properties (such as mutable/immutable, +read-only/read-write, and different lifetimes). Optional. + +.. note:: + + I expect the implementation will simply work at page or larger + granularity, so the problem will not arise, but Tucker insisted on + stating this as a requirement. Pekka P. Pirinen, 1998-10-28. + +_`.req.low-memory`: The architecture of the locus manager must not +prevent the design of efficient applications that often use all +available memory. Critical. _`.req.low-memory.expl`: This basically +says it must be designed to perform well in low-memory conditions, but +that there can be configurations where it doesn't do as well, as long +as this is documented for the application programmer. 
Note that it +doesn't say all applications are efficient, only that if you manage to +design an otherwise efficient application, the locus manager will not +sink it. + +_`.req.address`: Must conserve address space in VM arenas to a +reasonable extent. Critical. + +_`.req.inter-pool`: Must support the association of sets of tracts in +different pools into one cohort. Nice. + +_`.req.ep-style`: Must support the existing EP-style of allocation +whereby allocation is from one end of address space either upwards or +downwards (or a close approximation thereto with the same behavior). +_`.req.ep-style.just`: We cannot risk disrupting a policy with +well-known properties when this technology is introduced. + +_`.req.attributes`: There should be a way to inform the locus manager +about various attributes of cohorts that might be useful for +placement: deathtime, expected total size, and so on. Optional. It's a +given that the cohorts must then have these attributes, within the +limits set in the contract of the appropriate interface. +_`.req.attributes.action`: The locus manager should use the attributes +to guide its placement decisions. Nice. + +_`.req.blacklisting`: There should be a way of maintaining at least +one blacklist for pages (or some other small unit), that can +not/should not be allocated to collectable pools. Optional. + +.. note:: + + How to do blacklist breaking for ambiguous refs? + +_`.req.hysteresis`: There should be a way to indicate which cohorts +fluctuate in size and by how much, to guide the arena hysteresis to +hold on to suitable pages. Optional. + + +Analysis +-------- + +_`.analysis.sw`: Almost any placement policy would be an improvement on +the current SW one. + +_`.analysis.cause-and-effect`: The locus manager doesn't usually need to +know *why* things need to be differentiable, disjoint, contiguous, and +so on. Abstracting the reason away from the interface makes it more +generic, more likely to have serendipitous new uses. 
Attributes
+described by a quantity (deathtime, size, etc.) are an exception to
+this, because we can't devise a common measure.
+
+_`.analysis.stable`: The strategy must be stable: it must avoid repeated
+recomputation, especially the kind that switches between alternatives
+with a short period (repeated "bites" out the same region or
+flip-flopping between two regions).
+
+_`.analysis.fragmentation`: There's some call to avoid fragmentation in
+cohorts that don't need strict contiguity, but this is not a separate
+requirement, since fragmentation is a global condition, and can only
+be ameliorated if there's a global strategy that clumps allocations
+together.
+
+_`.analysis.deathtime`: Cohorts with good death-time clumping of their
+objects could use some locality of tract allocation, because it
+increases the chances of creating large holes in the address space
+(for other allocation to use). OTOH, many cohorts will not do multiple
+frees in short succession, or at least cannot reasonably be predicted
+to do so. This locality is not contiguity, nor is it low
+fragmentation, it's just the requirement to place the new tracts next
+to the tract where the last object was allocated in the cohort. Note
+that the placement of objects is under the control of the pool, and
+the locus manager will not know it, therefore this requirement should
+be pursued by requesting allocation next to a particular tract (which
+we already have a requirement for).
+
+_`.analysis.asymmetrical`: The strategy has to be asymmetrical with
+respect to cohorts growing and shrinking. The reason for this asymmetry
+is that it can choose where to grow, but it cannot choose where to
+shrink (except in a small way by growing with good locality).
+ + +Interface +--------- + +_`.interface.locus`: A cohort will typically reside on multiple tracts +(and the pools will avoid putting objects of other cohorts on them), +so there should be an interface to describe the properties of the +cohort, and associate each allocation request with the cohort. We +shall call such an object, created to represent a cohort, a locus (pl. +loci). + +_`.interface.locus.pool`: Loci will usually be created by the pool +that uses it. Some of the locus attributes will be inherited from +client-specified pool attributes [this means there will be additional +pool attributes]. + +_`.interface.detail`: This describes interface in overview; for +details, see implementation section and code, or user doc. + + +Loci +.... + +``Res LocusCreate(Locus *locusReturn, LocusAttrs attrs, ZoneGroup zg, LocusAllocDesc adesc)`` + +_`.function.create`: A function to create a locus: ``adesc`` contains +the information about the allocation sequences in the locus, ``zg`` is +used for zone differentiability, and ``attrs`` encodes the following: + +- _`.locus.contiguity`: A locus can be contiguous. This means + performing as required in `.req.contiguity`_, non-contiguous + allocations can be freely placed anywhere (but efficiency dictates + that similar allocations are placed close together and apart from + others). + +- _`.locus.blacklist`: Allocations in the locus will avoid blacklisted + pages (for collectable segments). + +- _`.locus.zero`: Allocations in the locus are zero-filled. + +.. note:: + + Other attributes will be added, I'm sure. + +_`.interface.zone-group`: The locus can be made a member of a zone +group. Passing ``ZoneGroupNONE`` means it's not a member of any group +(allocations will be placed without regard to zone, except to keep +them out of stripes likely to be needed for some group). + +.. note:: + + I propose no mechanism for managing zone groups at this time, + since it's only used internally for one purpose. Pekka P. 
Pirinen, + 2000-01-17. + +_`.interface.size`: An allocation descriptor (``LocusAllocDesc``) +contains various descriptions of how the locus will develop over time +(inconsistent specifications are forbidden, of course): + +- _`.interface.size.typical-alloc`: Size of a typical allocation in + this locus, in bytes. This will mainly affect the grouping of + non-contiguous loci. + +- _`.interface.size.large-alloc`: Typical large allocation that the + manager should try to allow for (this allows some relief from + `.req.counter.objects`_), in bytes. This will mainly affect the size + of gaps that will be allotted adjoining this locus. + +- _`.interface.size.direction`: Direction of growth: up/down/none. + Only useful if the locus is contiguous. + +- _`.interface.size.lifetime`: Some measure of the lifetime of tracts + (not objects) in the cohort. + + .. note:: + + Don't know the details yet, probably only useful for placing + similar cohorts next to each other, so the details don't + actually matter. Pekka P. Pirinen, 2000-01-17. + +- _`.interface.size.deathtime`: Some measure of the deathtime of + tracts (not objects) in the cohort. + + .. note:: + + Ditto. Pekka P. Pirinen, 2000-01-17. + +_`.function.init`: ``LocusInit()`` is like ``LocusCreate()``, but +without the allocation. This is the usual interface, since most loci +are embedded in a pool or something. + +_`.function.alloc`: ``ArenaAlloc()`` to take a locus argument. +``ArenaAllocHere()`` is like it, plus it takes a tract and a +specification to place the new allocation immediately above/below a +given tract; if that is not possible, it returns ``ResFAIL`` (this +will make it useful for reallocation functionality). + +``void ArenaSetTotalLoci(Arena arena, Size nLoci, Size nZoneGroups)`` + +_`.function.set-total`: A function to tell the arena the expected +number of (non-miscible client) loci, and of zone groups. + + +Peaks +..... 
+ +``mps_res_t mps_peak_create(mps_peak_t*, mps_arena_t)`` + +_`.function.peak.create`: A function to create a peak. A newly-created +peak is open, and will not be used to guide the strategy of the locus +manager. + +``mps_res_t mps_peak_describe_pool(mps_peak_t, mps_pool_t, mps_size_desc_t)`` + +_`.function.peak.add`: A function to add a description of the state of +one pool into the peak. Calling this function again for the same peak and pool instance will replace +the earlier description. + +_`.function.peak.add.size`: The size descriptor contains a total size +in bytes or percent of arena size. + +.. note:: + + Is this right? Pekka P. Pirinen, 2000-01-17. + +_`.function.peak.add.remove`: Specifying a ``NULL`` size will remove +the pool from the peak. The client is not allowed to destroy a pool +that is mentioned in any peak; it must be first removed from the peak, +or the peak must be destroyed. This is to ensure that the client +adjusts the peaks in a manner that makes sense to the application; the +locus manager can't know how to do that. + +``mps_res_t mps_peak_close(mps_peak_t)`` + +_`.function.peak.close`: A function to indicate that all the +significant pools have been added to the peak, and it can now be used +to guide the locus manager. For any pool not described in the peak, +the locus manager will take its current size at any given moment as +the best prediction of its size at the peak. + +_`.function.peak.close.after`: It is legal to add more descriptions to +the peak after closing, but this will reopen the peak, and it will +have to be closed before the locus manager will use it again. The +locus manager uses the previous closed state of the peak, while this +is going on. + +``void mps_peak_destroy(mps_peak_t)`` + +_`.function.peak.destroy`: A function to destroy a peak. 
+
+_`.interface.ep-style`: This satisfies `.req.ep-style`_ by allowing SW
+to specify zero size for most pools (which will cause them to be placed
+next to other loci with the same growth direction).
+
+.. note::
+
+    Not sure this is good enough, but we'll try it first. Pekka P.
+    Pirinen, 2000-01-17.
+
+
+Architecture
+------------
+
+Data objects
+............
+
+_`.arch.locus`: To represent the cohorts, we have locus objects.
+Usually a locus is embedded in a pool instance, but generations are
+separate loci.
+
+_`.arch.locus.attr`: contiguity, blacklist, zg, current region, @@@@
+
+_`.arch.locus.attr.exceptional`: The client can define a typical large
+allocation for the locus. Requests substantially larger than that are
+deemed exceptional.
+
+_`.arch.zone-group`: To satisfy `.req.differentiable`_, we offer zone
+groups. Each locus can be a member of a zone group, and the locus
+manager will attempt to place allocations in this locus in different
+zones from all the other zone groups. A zone-group is represented as
+@@@@.
+
+_`.arch.page-table`: A page table is maintained by the arena, as usual
+to track association between tracts, pools and segments, and mapping
+status for VM arenas.
+
+_`.arch.region`: All of the address space is divided into disjoint
+regions, represented by region objects. These objects store their
+current limits, and high and low watermarks of currently allocated
+tracts (we hope there's usually a gap of empty space between regions).
+The limits are actually quite porous and flexible.
+
+_`.arch.region.assoc`: Each region is associated with one contiguous
+locus or any number of non-contiguous loci (or none). We call the
+first kind of region "contiguous". _`.arch.locus.assoc`: Each locus
+remembers all regions where it has tracts currently, excepting the
+badly-placed allocations (see below). It is not our intention that any
+locus would have very many, or that loci that share regions would have
+any reason to stop doing so.
+ +_`.arch.region.more`: Various quantities used by the placement +computation are also stored in the regions and the loci. Regions are +created (and destroyed) by the placement recomputation. Regions are +located in stripes (if it's a zoned region), but they can extend into +neighboring stripes if an exceptionally large tract allocation is +requested (to allow for large objects). + +_`.arch.chunk`: Arenas may allocate more address space in additional +chunks, which may be disjoint from the existing chunks. Inter-chunk +space will be represented by dummy regions. There are also sentinel +regions at both ends of the address space. See +design.mps.arena.chunk_. + +.. _design.mps.arena.chunk: arena#.chunk + + +Overview of strategy +.................... + +_`.arch.strategy.delay`: The general strategy is to delay placement +decisions until they have to be made, but no later. + +_`.arch.strategy.delay.until`: Hence, the locus manager only makes +placement decisions when an allocation is requested (frees and other +operations might set a flag to cause the next allocation to redecide). +This also allows the client to change the peak and pool configuration +in complicated ways without causing a lot of recomputation, by doing +all the changes without allocating in the middle (unless the control +pool needs more space because of the changes). + +_`.arch.strategy.normal`: While we want the placement to be +sophisticated, we do not believe it is worth the effort to consider +all the data at each allocation. Hence, allocations are usually just +placed in one of the regions used previously (see `.arch.alloc`_) +without reconsidering the issues. + +_`.arch.strategy.normal.limit`: However, the manager sets +precautionary limits on the regions to ensure that the placement +decisions are revisited when an irrevocable placement is about to be +made. 
+ +_`.arch.strategy.create`: The manager doesn't create new regions until +they are needed for allocation (but it might compute where they could +be placed to accommodate a peak). + + +Allocation +.......... + +_`.arch.alloc`: Normally, each allocation to a locus is placed in its +current region. New regions are only sought when necessary to fulfill +an allocation request or when there is reason to think the situation +has changed significantly (see `.arch.significant`_). + +_`.arch.alloc.same`: An allocation is first attempted next to the +previous allocation in the same locus, respecting growth direction. If +that is not possible, a good place in the current region is sought. +_`.arch.alloc.same.hole`: At the moment, for finding a good place +within a region, we just use the current algorithm, limited to the +region. In future, the placement within regions will be more clever. + +_`.arch.alloc.extend`: If there's no adequate hole in the current +region and the request is not exceptional, the neighboring regions are +examined to see if the region could be extended at one border. (This +will basically only be done if the neighbor has shrunk since the last +placement recomputation, because the limit was set on sophisticated +criteria, and should not be changed without justification.) +_`.arch.alloc.extend.here`: When an allocation is requested next to a +specific tract (``ArenaAllocHere()``), we try to extend a little +harder (at least for ``change_size``, perhaps not for locality). + +_`.arch.alloc.other`: If no way can be found to allocate in the +current region, other regions used for this locus are considered in +the same way, to see if space can be found there. [Or probably look at +other regions before trying to extend anything?] 
+ +_`.arch.alloc.recompute`: When no region of this locus has enough +space for the request, or when otherwise required, region placement is +recomputed to find a new region for the request (which might be the +same region, after extension). + +_`.arch.alloc.current`: This region where the allocation was placed +then becomes the current region for this locus, except when the +request was exceptional, or when the region chosen was "bad" (see +@@@@). + +_`.arch.significant`: Significant changes to the parameters affecting +placement are deemed to have happened at certain client calls and when +the total allocation has changed substantially since the last +recomputation. Such conditions set a flag that causes the next +allocation to recompute even if its current region is not full +(possibly second-guess the decision to recompute after some +investigation of the current state?). + + +Deallocation +............ + +_`.arch.free`: Deallocation simply updates the counters in the region +and the locus. For some loci, it will make the region of the +deallocation the current region. _`.arch.free.remove`: If a region +becomes entirely empty, it is deleted (and the neighbors limits might +be adjusted). + +.. note:: + + This is quite tricky to get right. + + +Region placement recomputation +.............................. + +_`.arch.gap`: When doing placement computations, we view the arena as +a sequence of alternating region cores and gaps (which can be small, +even zero-sized). Initially, we'll take the core of a region to be the +area between the high and low watermark, but in the future we might be +more flexible about that. + +.. note:: + + Edge determination is actually a worthwhile direction to explore. + +_`.arch.reach`: The gap between two cores could potentially end up +being allocated to either region, if they grow in that direction, or +one or neither, if they don't. 
The set of states that the region +assignment could reach by assigning the gaps to their neighbors is +called the reach of the current configuration. + +_`.arch.placement.object`: The object of the recomputation is to find +a configuration of regions that is not too far from the current +configuration and that keeps all the peaks inside its reach; if that +is not possible, keep the nearest ones in the reach and then minimize +the total distance from the rest. + +_`.arch.placement.hypothetical`: The configurations that are +considered will include hypothetical placements for new regions for +loci that cannot fit in their existing regions at the peak. This is +necessary to avoid choosing a bad alternative. + +_`.arch.placement.interesting`: The computation will only consider new +regions of loci that are deemed interesting, that is, far from their +peak state. This will reduce the computational burden and avoid +jittering near a peak. + +.. note:: + + Details missing. + + +Implementation +-------------- + +[missing] + + +Notes +----- + +_`.idea.change`: Even after the first segment, be prepared to change +your mind, if by the second segment a lot of new loci have been +created. + +_`.distance`: If the current state is far from a peak, there's time to +reassign regions and for free space to appear (in fact, under the +steady arena assumption, enough free space *will* appear). + +_`.clear-pool`: Need to have a function to deallocate all objects in a +pool, so that ``PoolDestroy()`` won't have to be used for that +purpose. + + +Document History +---------------- + +- 1998-02-27 Gavin Matthews. Incomplete design. Originally written as + part of change.dylan.box-turtle.170569. Much developed since. + +- 1998-10-28 Pekka P. Pirinen. Wrote the real requirements after some + discussion. + +- 1998-12-15 Pekka P. Pirinen. Deleted Gavin's design and wrote a new one. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2007-04-24 Richard Kistruck. 
Added Guide: Manage arena address
+  space, why, discover layout.
+
+- 2013-05-23 GDR_ Converted to reStructuredText.
+
+.. _RB: https://www.ravenbrook.com/consultants/rb/
+.. _GDR: https://www.ravenbrook.com/consultants/gdr/
+
+
+Copyright and License
+---------------------
+
+Copyright © 2013–2020 `Ravenbrook Limited `_.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/mps/design/message-gc.txt b/mps/design/message-gc.txt
new file mode 100644
index 00000000000..33b8dafc8b4
--- /dev/null
+++ b/mps/design/message-gc.txt
@@ -0,0 +1,343 @@
+.. mode: -*- rst -*-
+
+GC messages
+===========
+
+:Tag: design.mps.message-gc
+:Author: Richard Kistruck
+:Date: 2008-12-19
+:Status: incomplete design
+:Revision: $Id$
+:Copyright: See `Copyright and License`_. 
+:Index terms: pair: garbage collection messages; design + + +Introduction +------------ + +_`.intro`: This document describes the design of the MPS garbage +collection messages. For a guide to the MPS message system in general, +see design.mps.message. + +_`.readership`: Any MPS developer. + + +Overview +-------- + +The MPS provides two types of GC messages: + +- ``mps_message_type_gc_start()``; +- ``mps_message_type_gc()``. + +They are called "trace start" and "trace end" messages in this +document and in most MPS source code. + + +Introduction +------------ + +The MPS posts a trace start message (``mps_message_type_gc_start()``) +near the start of every trace (but after calculating the condemned +set, so we can report how large it is). + +The MPS posts a trace end message (``mps_message_type_gc()``) near the +end of every trace. + +These messages are extremely flexible: they can hold arbitrary +additional data simply by writing new accessor functions. If there is +more data to report at either of these two events, then there is a +good argument for adding it into these existing messages. + +.. note:: + + In previous versions of this design document, there was a partial + unimplemented design for an ``mps_message_type_gc_generation()`` + message. This would not have been a good design, because managing + and collating multiple messages is much more complex for both MPS + and client than using a single message. Richard Kistruck, + 2008-12-19. + + +Purpose +------- + +_`.purpose`: The purpose of these messages is to allow the client +program to be aware of GC activity, in order to: + +- adjust its own behaviour programmatically; +- show or report GC activity in a custom way, such as an in-client + display, in a log file, etc. + +The main message content should be intelligible and helpful to +client-developers (with help from MPS staff if necessary). There may +be extra content that is only meaningful to MPS staff, to help us +diagnose client problems. 
+ +While there is some overlap with the Diagnostic Feedback system +(design.mps.diag_) and the Telemetry system (design.mps.telemetry_), the +main contrasts are that these GC messages are present in release +builds, are stable from release to release, and are designed to be +parsed by the client program. + +.. _design.mps.telemetry: telemetry +.. _design.mps.diag: diag + + +Names and parts +--------------- + +Here's a helpful list of the names used in the GC message system: + +Implementation is mostly in the source file ``traceanc.c`` (trace +ancillary). + +============================= ============================== ====================== +Internal name "trace start" "trace end" +Internal type ``TraceStartMessage`` ``TraceMessage`` +``ArenaStruct`` member ``tsMessage[]`` ``tMessage`` +Message type ``MessageTypeGCSTART`` ``MessageTypeGC`` +External name ``mps_message_type_gc_start`` ``mps_message_type_gc`` +============================= ============================== ====================== + +.. note:: + + The names of these messages are unconventional; they should + properly be called "gc (or trace) *begin*" and "gc (or trace) + *end*". But it's much too late to change them now. Richard + Kistruck, 2008-12-15. + +Collectively, the trace-start and trace-end messages are called the +"trace id messages", and they are managed by the functions +``TraceIdMessagesCheck()``, ``TraceIdMessagesCreate()``, and ``TraceIdMessagesDestroy()``. + +The currently supported message-field accessor methods are: +``mps_message_gc_start_why()``, ``mps_message_gc_live_size()``, +``mps_message_gc_condemned_size()``, and +``mps_message_gc_not_condemned_size()``. These are documented in the +Reference Manual. + + +Lifecycle +--------- + +_`.lifecycle`: for each trace id, pre-allocate a pair of start/end +messages by calling ``ControlAlloc()``. Then, when a trace runs using +that trace id, fill in and post these messages. 
As soon as the trace +has posted both messages, immediately pre-allocate a new pair of +messages, which wait in readiness for the next trace to use that +trace id. + + +Requirements +............ + +_`.req.no-start-alloc`: Should avoid attempting to allocate memory at +trace start time. _`.req.no-start-alloc.why`: There may be no free +memory at trace start time. Client would still like to hear about +collections in those circumstances. + +_`.req.queue`: Must support a client that enables, but does not +promptly retrieve, GC messages. Messages that have not yet been +retrieved must remain queued, and the client must be able to retrieve +them later without loss. It is not acceptable to stop issuing GC +messages for subsequent collections merely because messages from +previous collections have not yet been retrieved. _`.req.queue.why`: +This is because there is simply no reasonable way for a client to +guarantee that it always promptly collects GC messages. + +_`.req.match`: Start and end messages should always match up: never +post one of the messages but fail to post the matching one. + +_`.req.match.why`: This makes client code much simpler -- it does not +have to handle mismatching messages. + +_`.req.errors-not-direct`: Errors (such as a ``ControlAlloc()`` +failure) cannot be reported directly to the client, because +collections often happen automatically, without an explicit client +call to the MPS interface. + +_`.req.multi-trace`: Up to ``TraceLIMIT`` traces may be running, and +emitting start/end messages, simultaneously. + +_`.req.early`: Nice to tell client as much as possible about the +collection in the start message, if we can. + +_`.req.similar`: Start and end messages are conceptually similar -- it +is quite okay, and may be helpful to the client, for the same datum +(for example: the reason why the collection occurred) to be present in +both the start and end message. + + +Storage +....... 
+
+For each trace-id (`.req.multi-trace`_) a pair (`.req.match`_) of
+start/end messages is dynamically allocated (`.req.queue`_) in advance
+(`.req.no-start-alloc`_). Messages are allocated in the control pool
+using ``ControlAlloc()``.
+
+.. note::
+
+    Previous implementations of the trace start message used static
+    allocation. This does not satisfy `.req.queue`_. See also
+    job001570_. Richard Kistruck, 2008-12-15.
+
+    .. _job001570: https://www.ravenbrook.com/project/mps/issue/job001570/
+
+Pointers to these messages are stored in ``tsMessage[ti]`` and
+``tMessage[ti]`` arrays in the ``ArenaStruct``.
+
+.. note::
+
+    We must not keep the pre-allocated messages, or pointers to them,
+    in ``TraceStruct``: the memory for these structures is statically
+    allocated, but the values in them are re-initialised by
+    ``TraceCreate()`` each time the trace id is used, so the
+    ``TraceStruct()`` is invalid (that is: to be treated as random
+    uninitialised memory) when not being used by a trace. See also
+    job001989_. Richard Kistruck, 2008-12-15.
+
+    .. _job001989: https://www.ravenbrook.com/project/mps/issue/job001989/
+
+
+Creating and Posting
+....................
+
+In ``ArenaCreate()`` we use ``TRACE_SET_ITER`` to initialise the
+``tsMessage[ti]`` and ``tMessage[ti]`` pointers to ``NULL``, and then
+(when the control pool is ready) ``TRACE_SET_ITER`` calling
+``TraceIdMessagesCreate()``. This performs the initial pre-allocation
+of the trace start/end messages for each trace id. Allocation failure
+is not tolerated here: it makes ``ArenaCreate()`` fail with an error
+code, because the arena is deemed to be unreasonably small.
+
+When a trace is running using trace id ``ti``, it finds a
+pre-allocated message via ``tsMessage[ti]`` or ``tMessage[ti]`` in the
+``ArenaStruct()``, fills in and posts the message, and nulls-out the
+pointer. (If the pointer was null, no message is sent; see below.) 
The +message is now reachable only from the arena message queue (but the +control pool also knows about it). + +When the trace completes, it calls ``TraceIdMessagesCreate()`` for its +trace id. This performs the ongoing pre-allocation of the trace +start/end messages for the next use of this trace id. The expectation +is that, after a trace has completed, some memory will have been +reclaimed, and the ``ControlAlloc()`` will succeed. + +But allocation failure here is permitted: if it happens, both the +start and the end messages are freed (if present). This means that, +for the next collection using this trace id, neither a start nor an +end message will be sent (`.req.match`_). There is no direct way to +report this failure to the client (`.req.errors-not-direct`_), so we +just increment the ``droppedMessages`` counter in the ``ArenaStruct``. +This counter is available via the ``MessagesDropped`` telemetry event. + + +Getting and discarding +...................... + +If the client has not enabled that message type, the message is +discarded immediately when posted, calling ``ControlFree()`` and +reclaiming the memory. + +If the client has enabled but never gets the message, it remains on +the message queue until ``ArenaDestroy()``. Theoretically these +messages could accumulate forever until they exhaust memory. This is +intentional: the client should not enable a message type and then +never get it! + +Otherwise, when the client gets a message, it is dropped from the +arena message queue: now only the client (and the control pool) hold +references to it. The client must call ``mps_message_discard()`` once +it has finished using the message. This calls ``ControlFree()`` and +reclaims the memory. + +If the client simply drops its reference, the memory will not be +reclaimed until ``ArenaDestroy()``. This is intentional: the control +pool is not garbage-collected. + + +Final clearup +............. 
+ +Final clearup is performed at ``ArenaDestroy()``, as follows: + +- Unused and unsent pre-allocated messages (one per trace id) are + freed with ``TRACE_SET_ITER`` calling ``TraceIdMessagesDestroy()`` + which calls the message Delete functions (and thereby + ``ControlFree()``) on anything left in ``tsMessage[ti]`` and + ``tMessage[ti]``. + +- Unretrieved messages are freed by emptying the arena message queue + with ``MessageEmpty()``. + +- Retrieved but undiscarded messages are freed by destroying the + control pool. + + +Testing +------- + +The main test is "``zmess.c``". See notes there. + +Various other tests, including ``amcss.c``, also collect and report +``mps_message_type_gc()`` and ``mps_message_type_gc_start()``. + + +Coverage +........ + +Current tests do not check: + +- The less common why-codes (reasons why a trace starts). These should + be added to ``zmess.c``. + + +Document History +---------------- + +- 2003-02-17 David Jones. Created. + +- 2006-12-07 Richard Kistruck. Remove + ``mps_message_type_gc_generation()`` (not implemented). + +- 2007-02-08 Richard Kistruck. Move historically interesting + requirements for ``mps_message_type_gc_generation()`` (not + implemented) to the end of the doc. + +- 2008-12-19 Richard Kistruck. Complete re-write, for new GC message + lifecycle. See job001989. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/message.txt b/mps/design/message.txt new file mode 100644 index 00000000000..2e93e695165 --- /dev/null +++ b/mps/design/message.txt @@ -0,0 +1,430 @@ +.. mode: -*- rst -*- + +Client message protocol +======================= + +:Tag: design.mps.message +:Author: David Jones +:Date: 1997-02-13 +:Status: complete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: messages; design + single: client message protocol + + +Introduction +------------ + +_`.intro`: The client message protocol provides a means by which +clients can receive messages from the MPS. The motivating use case is +finalization notification (see design.mps.finalize_), but the +mechanism is also used for feedback about collections. + +.. _design.mps.finalize: finalize + +_`.contents`: This document describes the design of the external and +internal interfaces and concludes with a sketch of an example design +of an internal client. 
The example is that of implementing
+finalization using the MRG pool.
+
+_`.readership`: Any MPS developer.
+
+
+Requirements
+------------
+
+_`.req.synchronous`: The message protocol must be synchronous with the
+client program: that is, the client program must be able to choose
+when to collect and act on messages. Justification: [Boehm_2002]_
+shows that asynchronous finalization is impossible to implement
+correctly.
+
+_`.req.reliable`: Posting a message must be reliable: that is, it must
+not fail for a dynamic reason such as running out of memory to store
+the message. Justification: messages can't be used to implement
+finalization unless the messages can be delivered reliably.
+
+_`.req.extensible.types`: The message mechanism must be extensible
+with new types of message in future versions of the MPS, without
+breaking client programs that do not receive those types of message.
+
+_`.req.resources`: It follows from `.req.extensible.types`_ that
+messages must not use resources unless the client program has
+requested them (otherwise resources would leak in client programs that
+have not been updated to handle new types of message).
+
+_`.req.extensible.fields`: It must be possible to add new fields to
+existing types of message in future versions of the MPS, without
+breaking client programs that do not receive those types of message.
+
+
+Design
+------
+
+_`.sol.synchronous`: Messages are stored on a ring belonging to the
+arena. An interface is provided that allows the client program to
+collect messages from the ring at a time of its choosing.
+
+_`.sol.reliable`: The memory needed for the message is allocated at an
+earlier point in time, when it is possible to communicate an allocation
+failure via a result code. 
In particular, space for a finalization
+message is allocated when the client program calls ``mps_finalize()``,
+and space for trace messages is allocated in the arena (there can be
+at most one instance of each message per trace, and the maximum number
+of traces is known statically).
+
+_`.sol.resources`: Messages are not posted unless they belong to a
+type that has been enabled by the client program calling
+``mps_message_type_enable()``. This means that message types that are
+not understood by the client program are not posted and use no
+resources.
+
+_`.sol.extensible.fields`: Message fields are retrieved by calling
+accessor functions.
+
+
+External interface
+------------------
+
+
+Functions
+.........
+
+_`.if.fun`: The following functions are provided:
+
+_`.if.fun.poll`: ``mps_message_poll()`` sees whether there are any
+messages pending. Returns 1 only if there is a message on the queue of
+arena. Returns 0 otherwise.
+
+_`.if.fun.enable`: ``mps_message_type_enable()`` enables the flow of
+messages of a certain type. The queue of messages of an arena will
+contain only messages whose types have been enabled. Initially all
+message types are disabled. Effectively this function allows the
+client to declare to the MPS what message types the client
+understands.
+
+_`.if.fun.disable`: ``mps_message_type_disable()`` disables the flow
+of messages of a certain type. The antidote to
+``mps_message_type_enable()``. Disables the specified message type.
+Flushes any existing messages of that type on the queue, and stops any
+further generation of messages of that type. This permits clients to
+dynamically decline interest in a message type, which may help to
+avoid a memory leak or bloated queue when the messages are only
+required temporarily.
+
+_`.if.fun.get`: ``mps_message_get()`` begins a message "transaction". 
+If there is a message of the specified type on the queue then the +first such message will be removed from the queue and a handle to it +will be returned to the client via the ``messageReturn`` argument; in +this case the function will return ``TRUE``. Otherwise it will return +``FALSE``. Having obtained a handle on a message in this way, the +client can use the type-specific accessors to find out about the +message. When the client is done with the message the client should +call ``mps_message_discard()``; failure to do so will result in a +resource leak. + +_`.if.fun.discard`: ``mps_message_discard()`` ends a message +"transaction". It indicates to the MPS that the client is done with +this message and its resources may be reclaimed. + +_`.if.fun.type.any`: ``mps_message_queue_type()`` determines the type +of a message in the queue. Returns ``TRUE`` only if there is a message +on the queue of arena, and in this case updates the ``typeReturn`` +argument to be the type of a message in the queue. Otherwise returns +``FALSE``. + +_`.if.fun.type`: ``mps_message_type()`` determines the type of a +message (that has already been got). Only legal when inside a message +transaction (that is, after ``mps_message_get()`` and before +``mps_message_discard()``). Note that the type will be the same as the +type that the client passed in the call to ``mps_message_get()``. + + +Types of messages +................. + +_`.type`: The type governs the "shape" and meaning of the message. + +_`.type.int`: A message type is an integer belonging to the +``MessageType`` enumeration. + +_`.type.semantics`: A type indicates the semantics of the message. + +_`.type.semantics.interpret`: The semantics of a message are +interpreted by the client by calling various accessor methods on the +message. + +_`.type.accessor`: The type of a message governs which accessor +methods are legal to apply to the message. + +_`.type.finalization`: There is a finalization type, +``MessageTypeFINALIZATION``. 
+ +_`.type.finalization.semantics`: A finalization message indicates that +an object has been discovered to be finalizable (see +design.mps.poolmrg.def.final.object_ for a definition of finalizable). + +.. _design.mps.poolmrg.def.final.object: poolmrg#.def.final.object + +_`.type.finalization.ref`: The accessor function +``mps_message_finalization_ref()`` retrieves the reference to the +object which is finalizable. + +_`.type.finalization.ref.scan`: Note that the reference returned +must be stored in scanned memory. + + + +Internal interface +------------------ + +Types +..... + +``typedef struct MessageStruct *Message`` + +_`.message.type`: ``Message`` is the type of messages. + +_`.message.instance`: Messages are instances of Message Classes. + +_`.message.concrete`: Concretely a message is represented by a +``MessageStruct``. A ``MessageStruct`` has the usual signature field +(see design.mps.sig_). A ``MessageStruct`` has a type field which +defines its type, a ring node, which is used to attach the message to +the queue of pending messages, a class field, which identifies a +``MessageClass`` object. + +.. _design.mps.sig: sig + +_`.message.intent`: The intention is that a ``MessageStruct`` will be +embedded in some richer object which contains information relevant to +that specific type of message. + +_`.message.struct`: The structure is declared as follows:: + + typedef struct mps_message_s { + Sig sig; /* */ + Arena arena; /* owning arena */ + MessageClass klass; /* Message Class Structure */ + Clock postedClock; /* mps_clock() at post time, or 0 */ + RingStruct queueRing; /* Message queue ring */ + } MessageStruct; + +``typedef struct MessageClassStruct *MessageClass`` + +_`.class`: A message class is an encapsulation of methods. It +encapsulates methods that are applicable to all types of messages +(generic) and methods that are applicable to messages only of a +certain type (type-specific). 
+ +_`.class.concrete`: Concretely a message class is represented by a +``MessageClassStruct`` (a struct). Clients of the Message module are +expected to allocate storage for and initialise the +``MessageClassStruct``. It is expected that such storage will be +allocated and initialised statically. + +_`.class.one-type`: A message class implements exactly one message +type. The identifier for this type is stored in the ``type`` field of +the ``MessageClassStruct``. Note that the converse is not true: a +single message type may be implemented by two (or more) different +message classes (for example: for two pool classes that require +different implementations for that message type). + +_`.class.methods.generic`: The generic methods are as follows: + +* ``delete`` -- used when the message is destroyed (by the client + calling ``mps_message_discard()``). The class implementation should + finish the message (by calling ``MessageFinish()``) and storage for + the message should be reclaimed (if applicable). + +_`.class.methods.specific`: The type specific methods are: + +_`.class.methods.specific.finalization`: Specific to +``MessageTypeFINALIZATION``: + +* ``finalizationRef`` -- returns a reference to the finalizable object + represented by this message. + +_`.class.methods.specific.gc`: Specific to ``MessageTypeGC``: + +* ``gcLiveSize`` -- returns the number of bytes (of objects) that were + condemned by the trace but survived. + +* ``gcCondemnedSize`` -- returns the number of bytes condemned by the + trace. + +* ``gcNotCondemnedSize`` -- returns the number of bytes (of + objects) that are collectable but were not condemned by the trace. + +_`.class.methods.specific.gcstart`: Specific to ``MessageTypeGCSTART``: + +* ``gcStartWhy`` -- returns an English-language description of the + reason why the trace was started. + +_`.class.sig.double`: The ``MessageClassStruct`` has a signature field +at both ends. 
This is so that if the ``MessageClassStruct`` changes
+size (by adding extra methods for example) then any static
+initializers will generate errors from the compiler (there will be a
+type error caused by initialising a non-signature type field with a
+signature) unless the static initializers are changed as well.
+
+_`.class.struct`: The structure is declared as follows::
+
+    typedef struct MessageClassStruct {
+      Sig sig;                      /* */
+      const char *name;             /* Human readable Class name */
+
+      MessageType type;             /* Message Type */
+
+      /* generic methods */
+      MessageDeleteMethod delete;   /* terminates a message */
+
+      /* methods specific to MessageTypeFINALIZATION */
+      MessageFinalizationRefMethod finalizationRef;
+
+      /* methods specific to MessageTypeGC */
+      MessageGCLiveSizeMethod gcLiveSize;
+      MessageGCCondemnedSizeMethod gcCondemnedSize;
+      MessageGCNotCondemnedSizeMethod gcNotCondemnedSize;
+
+      /* methods specific to MessageTypeGCSTART */
+      MessageGCStartWhyMethod gcStartWhy;
+
+      Sig endSig;                   /* */
+    } MessageClassStruct;
+
+_`.space.queue`: The arena structure is augmented with a structure for
+managing the queue of pending messages. This is a ring in the
+``ArenaStruct``::
+
+    struct ArenaStruct
+    {
+      ...
+      RingStruct messageRing;
+      ...
+    }
+
+
+Functions
+.........
+
+``void MessageInit(Arena arena, Message message, MessageClass klass, MessageType type)``
+
+_`.fun.init`: Initializes the ``MessageStruct`` pointed to by
+``message``. The caller of this function is expected to manage the
+store for the ``MessageStruct``.
+
+``void MessageFinish(Message message)``
+
+_`.fun.finish`: Finishes the ``MessageStruct`` pointed to by
+``message``. The caller of this function is expected to manage the
+store for the ``MessageStruct``.
+
+``void MessagePost(Arena arena, Message message)``
+
+_`.fun.post`: Places a message on the queue of an arena. 
+ +_`.fun.post.precondition`: Prior to calling the function, the +``queueRing`` field of the message must be a singleton +(design.mps.ring.def.singleton_). After the call to the function the +message will be available for MPS client to access. After the call to +the function the message fields must not be manipulated except from +the message's class's method functions (that is, you mustn't poke +about with the ``queueRing`` field in particular). + +.. _design.mps.ring.def.singleton: ring#.def.singleton + +``void MessageEmpty(Arena arena)`` + +_`.fun.empty`: Empties the message queue. This function has the same +effect as discarding all the messages on the queue. After calling this +function there will be no messages on the queue. + +_`.fun.empty.internal-only`: This functionality is not exposed to +clients. We might want to expose this functionality to our clients in +the future. + + +Message life cycle +------------------ + +_`.life.alloc`: Space for the message structure is allocated at the +earliest point in time when the MPS knows that the message might be +needed. + +_`.life.init`: The message structure is initialized by calling +``MessageInit()``. + +_`.life.post`: The message is posted on the arena's message queue by +calling ``MessagePost()``. + +_`.life.get`: The client program retrieves the message by calling ``mps_message_get()``. + +_`.life.discard`: The client program indicates that it is finished +with the message by calling ``mps_message_discard()``. + +_`.life.reuse`: The MPS may reuse the message structure, in which case +the lifecycle continues from `.life.post`_. + +_`.life.delete`: When the MPS no longer needs the message structure, +its ``delete`` method is called. + + +References +---------- + +.. [Boehm_2002] Hans-J. Boehm. 2002. "`Destructors, Finalizers, and + Synchronization + `_". HP + Labs technical report HPL-2002-335. + + +Document History +---------------- + +- 1997-02-13 David Jones. incomplete document. 
+ +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2006-10-25 Richard Kistruck. Created guide. + +- 2006-12-11 Richard Kistruck. More on lifecycle; unmention evil hack + in initial design. + +- 2008-12-19 Richard Kistruck. Simplify and clarify lifecycle. Remove + description of and deprecate re-use of messages. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/monitor.png b/mps/design/monitor.png new file mode 100644 index 00000000000..09e68cabaa7 Binary files /dev/null and b/mps/design/monitor.png differ diff --git a/mps/design/monitor.txt b/mps/design/monitor.txt new file mode 100644 index 00000000000..0d4bbeee76c --- /dev/null +++ b/mps/design/monitor.txt @@ -0,0 +1,192 @@ +.. mode: -*- rst -*- + +Monitor +======= + +:Tag: design.mps.monitor +:Author: Gareth Rees +:Date: 2018-09-15 +:Status: draft design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: monitor; design + + +Introduction +------------ + +_`.intro`: This is the design of the MPS monitor, a graphical user +interface for inspecting the behaviour of the MPS in a client program +by collating the program's telemetry output. + +_`.readership`: This document is intended for any MPS user. + +_`.source`: This is based on [GDR_2018-06-27]_. + + +Requirements +------------ + +It should be possible to analyze the behaviour of the +MPS in a client program: + +_`.req.state.running`: that is currently running (job003960_); or + +.. _job003960: https://www.ravenbrook.com/project/mps/issue/job003960/ + +_`.req.state.stopped`: that has finished running. + +It should be possible to see: + +_`.req.memory.total`: the total memory in use by the client program +[GR_2004-12-02]_; + +_`.req.memory.pool`: the memory in use by each pool (job003960_); + +_`.req.trace`: when traces take place (job003960_); + +_`.req.trace.generation`: which generations get collected by each +trace (job003960_); + +_`.req.time-fraction`: the fraction of runtime spent in collections; + +_`.req.barriers`: the rate of barrier hits, to indicate how the barriers +are working (job003921_). + +.. _job003921: https://www.ravenbrook.com/project/mps/issue/job003921/ + + + +Installation and usage +---------------------- + +These are placeholder instructions, to be revised when we figure out +the best way to automate them. + +#. 
Build the ``mpseventpy`` program:: + + cd code + nmake /f w3i6mv.nmk VARIETY=cool mpseventpy.exe # Windows + make -f xci6ll.gmk VARIETY=cool mpseventpy # macOS + make -f lii6ll.gmk VARIETY=cool mpseventpy # Linux + +#. Run ``mpseventpy`` program and redirect the output to ``tool/mpsevent.py``:: + + w3i6mv/cool/mpseventpy.exe > ../tool/mpsevent.py # Windows + xci6ll/cool/mpseventpy > ../tool/mpsevent.py # macOS + lii6ll/cool/mpseventpy > ../tool/mpsevent.py # Linux + +#. Install Python 3.6 (or later). On Windows, there are installers_ + named like ``python-3.6.6-amd64.exe``. On other platforms, you + probably want to use your package manager, for example:: + + sudo port install python36 # macPorts + sudo apt install python3.6 # Linux + + .. _installers: https://www.python.org/ftp/python/3.6.5/python-3.6.5-amd64.exe + +#. On Windows, you’ll want to edit the system environment variables to + put Python 3.6 on the path. + +#. Install Matplotlib and PyQt5. On Windows, the easiest way to do + this is to launch a command prompt (possibly as administrator, if + you installed Python somewhere like ``C:/Program Files``) and + then:: + + python -m ensurepip + python -m pip install matplotlib pyqt5 + + On other platforms, you’ll want to use the package manager, for example:: + + sudo port install py36-matplotlib py36-pyqt5 # macPorts + sudo apt install python3-matplotlib python3-pyqt5 # Linux + +#. Now, from the ``tool`` subdirectory, you should be able to run the + monitor:: + + cd tool + ./monitor [FILENAME] + + where FILENAME defaults to mpsio.log. 
   So for example, you could
   compile the ``amcss`` smoke test::

      cd code
      nmake /f w3i6mv.nmk VARIETY=cool amcss.exe   # Windows
      make -f xci6ll.gmk VARIETY=cool amcss        # macOS
      make -f lii6ll.gmk VARIETY=cool amcss        # Linux

   and then run ``amcss`` generating telemetry output::

      cd tool
      MPS_TELEMETRY_FILENAME=mpsio.log MPS_TELEMETRY_CONTROL="arena pool user" ../code/w3i6mv/cool/amcss.exe > /dev/null   # Windows
      MPS_TELEMETRY_FILENAME=mpsio.log MPS_TELEMETRY_CONTROL="arena pool user" ../code/xci6ll/cool/amcss > /dev/null       # macOS
      MPS_TELEMETRY_FILENAME=mpsio.log MPS_TELEMETRY_CONTROL="arena pool user" ../code/lii6ll/cool/amcss > /dev/null       # Linux

   and then launch the monitor on the file you just created::

      cd tool
      ./monitor

   which should show you something like this (the exact graphs will
   depend on the random choices made by ``amcss``):

   .. figure:: monitor.png
      :align: center
      :alt: Screenshot of the MPS monitor showing a run of the amcss smoke test.

#. The monitor is capable of monitoring an application in real-time.
   The pause button on the toolbar pauses the updating of the display
   (but not the application). The zoom and pan tools automatically
   pause the updating too, so after zooming you’ll need to unpause in
   order to resume updating the display.


References
----------

.. [GDR_2018-06-27]
   "Setting up and running the monitor"; Gareth Rees;
   Ravenbrook Limited; 2018-06-27;
   .

.. [GR_2004-12-02]
   "RE: MPS, working set, and address space"; Göran Rydqvist;
   Configura Sverige AB; 2004-12-02;
   .


Document History
----------------

- 2018-09-14 GDR_ Created based on [GDR_2018-06-27]_.

.. _GDR: https://www.ravenbrook.com/consultants/gdr/


Copyright and License
---------------------

Copyright © 2018–2020 `Ravenbrook Limited `_.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1.
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/nailboard-1.svg b/mps/design/nailboard-1.svg new file mode 100644 index 00000000000..cb260e2f614 --- /dev/null +++ b/mps/design/nailboard-1.svg @@ -0,0 +1,567 @@ + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + + + + + base + limit + + + + ibase + ilimit + + diff --git a/mps/design/nailboard-2.svg b/mps/design/nailboard-2.svg new file mode 100644 index 00000000000..feb69c25cdd --- /dev/null +++ b/mps/design/nailboard-2.svg @@ -0,0 +1,564 @@ + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + + + + base + limit + + + + ibase + ilimit + + diff --git a/mps/design/nailboard-3.svg b/mps/design/nailboard-3.svg new file mode 100644 index 00000000000..6d53a805071 --- /dev/null +++ b/mps/design/nailboard-3.svg @@ -0,0 +1,628 @@ + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + 0 + 0 + + + + + + ibase + ilimit + + + + + leftsplinter + rightsplinter + + + + base + limit + + diff --git a/mps/design/nailboard.txt b/mps/design/nailboard.txt new file mode 100644 index 00000000000..60b402f6fe4 --- /dev/null +++ b/mps/design/nailboard.txt @@ -0,0 +1,240 @@ +.. mode: -*- rst -*- + +Nailboards for ambiguously referenced segments +============================================== + +:Tag: design.mps.nailboard +:Author: Gareth Rees +:Date: 2014-01-15 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: nailboard; design + + +Introduction +------------ + +_`.intro`: This is the design of the nailboard module. + +_`.readership`: Any MPS developer. 
+ +_`.overview`: A nailboard represents a set of addresses to which +ambiguous references have been found. It is implemented as a +specialized bit table that maps addresses within a range to *nails*. +The mapping has granularity, so that all addresses within a word, say, +will map to the same nail. + +_`.purpose`: Nailboards are used by the AMC pool class to record +ambiguous references to grains within a segment. See +design.mps.poolamc.nailboard_. + +.. _design.mps.poolamc.nailboard: poolamc#.nailboard + + +Requirements +------------ + +_`.req.granularity`: A nailboard must be able to set nails for +addresses down to the grain size of the segment. (Because individual +objects may be this small, and we must be able to preserve or reclaim +individual objects.) + +_`.req.set`: A nailboard must be able to set a nail corresponding to +any aligned address in the range covered. (Because ambiguous +references may have arbitrary values.) + +_`.req.reset.not`: A nailboard is *not* required to be able to reset a +nail. (Because resetting a nail would correspond to proving that there +is *no* ambiguous reference to that address, but that can only be +established when the trace is complete.) + +_`.req.range`: A nailboard must be able to determine if any nail is +set in a contiguous range. (Because we must preserve the whole object +if there is any ambiguous reference to it.) + +_`.req.range.cost`: Determining if any nail is set in a continuous +range must be cheap. That is, it must take time that is no more than +logarithmic in the size of the range. (Because scanning overhead must +be proportional to the number of objects, not to their size.) + + +Implementation +-------------- + +_`.impl.table`: The nailboard consists of a header structure and one +or more bit tables. Each bit table covers the whole range of +addresses, but at a different level of detail. + +_`.impl.table.level0`: The level 0 bit table has one bit for each +aligned address in the range. 
+ +_`.impl.align`: The alignment of the nailboard need not be the same as +the pool alignment. This is because nailboards are per-segment, and +the pool may know the minimum size of an object in a particular +segment. + +_`.impl.table.k`: The level *k* bit table has one bit for each ``scale`` +bits in the level *k*\−1 bit table (this bit is set if any bit in the +corresponding word in the level *k*\−1 table is set). + +_`.impl.scale`: Here ``scale`` is an arbitrary scale factor that must +be a power of 2. It could in future be supplied as a parameter when +creating a nailboard, but in the current implementation it is always +``MPS_WORD_WIDTH``. + +_`.impl.table.last`: The last bit table is always shorter than one +word. This is slightly wasteful in some cases (for example, a +nailboard with 64 nails and ``scale`` 64 will have two levels, the +second level having just one bit), but allows the code to support +small nailboards without special cases in the code (consider the case +of a nailboard with just one nail). + +_`.impl.size`: The size of the level *i* bit table is the ceiling of + + (``limit`` − ``base``) / (``align`` × ``scale``\ :superscript:`i`\ ) + +where ``base`` and ``limit`` are the bounds of the address range being +represented in the nailboard and ``align`` is the alignment. + +_`.impl.address`: The address *a* may be looked up in the level *i* +bit table at the bit + + (*a* − ``base``) / (``align`` × ``scale``\ :superscript:`i`\ ) + +and since ``align`` and ``scale`` are powers of 2, that's + + (*a* − ``base``) >> (log\ :subscript:`2`\ ``align`` + *i* log\ :subscript:`2`\ ``scale``) + +_`.impl.set`: Setting a nail for an address *a* in a nailboard is on +the critical path: it is called for every fix of an ambiguous +reference to an address in an AMC pool. When setting a nail, we set +the corresponding bit in every level of the nailboard. 
+ +_`.impl.isresrange`: Testing a range of addresses to see if any nails +are set is also on the critical path: it is called for every object in +any AMC segment with a nailboard when the segment is scanned and when +it is reclaimed. + +_`.impl.isresrange.strategy`: The strategy for testing to see if any +nails are set in a range is to handle the cases that are expected to +be common first. In particular, we expect that there will only be few +nails in a nailboard, so most calls to ``NailboardIsResRange()`` will +return ``TRUE``. + +_`.impl.isresrange.alignment`: When testing a range against a level of +a nailboard, the base and limit of the range will typically not align +exactly to the bits of that level. Therefore we test against a +slightly larger range, as shown in the diagram: + +.. figure:: nailboard-1.svg + :align: center + :alt: Diagram: Testing a range against a level of a nailboard. + + Testing a range against a level of a nailboard. + +_`.impl.isresrange.empty`: If all bits in the range [``ibase``, +``ilimit``) are reset, as shown above, then there are no nails in the +range of addresses [``base``, ``limit``). This provides an early exit +with result ``TRUE``. + +_`.impl.isresrange.level0`: If the "empty" early exit is not taken, +and we are looking at the level 0 bit table, then the range is not +empty. This provides an early exit with result ``FALSE``. + +_`.impl.isresrange.inner`: If any bit in the range [``ibase``\+1, +``ilimit``\−1) is set, as shown below, then there is a nail in the +range of addresses [``base``, ``limit``). This provides an early exit +with result ``FALSE``. + +.. figure:: nailboard-2.svg + :align: center + :alt: Diagram: a nail is set in this range. + + A nail is set in this range. + +_`.impl.isresrange.splinter`: If none of the three early exits is +taken, then we are in a situation like the one shown below, with one +or two *splinters*. 
In this situation we know that there is a nail, +but it is not clear whether the nail is inside the splinter or not. We +handle this situation by moving up to the previous level and looking +at the range of addresses covered by the splinter. + +.. figure:: nailboard-3.svg + :align: center + :alt: Diagram: it is not clear if a nail is set in this range. + + It is not clear if a nail is set in this range. + +_`.impl.isresrange.splinter.recurse`: When looking at a splinter, we +might reach the same situation: namely, that the interior of the +splinter is empty, but the edge of the splinter is set. We handle this +by reducing the size of the splinter and moving up to the previous +level. + +_`.impl.isresrange.splinter.one-sided`: This splinter-of-a-splinter is +one-sided: that is, we don't need to look at the right splinter of a +left splinter or vice versa, because we know that it is empty. + + +Future +------ + +_`.future.tune`: The implementation makes heavy use of +``BTIsResRange()``, but this function is not well tuned for scanning +small arrays (which we expect to be the common case for nailboards). +Performance might be improved by special-casing the small levels. + +_`.future.limit`: In C and C++, a pointer to "one past the last +element of an array object" (the limit of the object in our +terminology) is a valid pointer and can be used in pointer arithmetic. +See §6.5.6.8–9 of [C1999a]_. So in theory a programmer could have such +a pointer as the only reference keeping an object alive, and still +expect to be able to subtract from it to get back to the object. The +current nailboard implementation does not support this use case. + + +References +---------- + +.. [C1999a] + International Standard ISO/IEC 9899:1999; + "Programming languages — C"; + + + +Document History +---------------- + +- 2014-01-15 GDR_ Initial draft. + +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/object-debug.txt b/mps/design/object-debug.txt new file mode 100644 index 00000000000..c062a7e2461 --- /dev/null +++ b/mps/design/object-debug.txt @@ -0,0 +1,457 @@ +.. mode: -*- rst -*- + +Debugging features for client objects +===================================== + +:Tag: design.mps.object-debug +:Author: Pekka P. Pirinen +:Date: 1998-09-10 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. 
:Index terms: pair: debugging; design


Introduction
------------

_`.intro`: This is the design for all the various debugging features
that MPS clients (and sometimes MPS developers) can use to discover
what is happening to their objects and the memory space.

_`.readership`: MPS developers.


Overview
--------

_`.over.fenceposts`: In its current state, this document mostly talks
about fenceposts, straying a little into tagging where these features
have an effect on each other.

.. note::

    There exist other documents that list other required features, and
    propose interfaces and implementations. These will eventually be
    folded into this one. Pekka P. Pirinen, 1998-09-10.


Requirements
------------

_`.req.fencepost`: Try to detect overwrites and underwrites of
allocated blocks by adding fenceposts (source req.product.??? VC++,
req.epcore.fun.debug.support). [TODO: Locate the relevant product
requirement. RB 2023-02-23]

_`.req.fencepost.size`: The fenceposts should be at least 4 bytes on
either side or 8 bytes if on one side only, with an adjustable content
(although VC++ only has 4 bytes with pattern 0xFDFDFDFD, having
unwisely combined the implementation with other debug features).

_`.req.fencepost.check`: There should be a function to check all the
fenceposts (source req.epcore.fun.debug.support).

_`.req.free-block`: Try to detect attempts to write and read free
blocks.

_`.req.walk`: There should be a way to map ("walk") a user function
over all allocated objects (except PS VM objects), possibly only in a
separate debugging variety/mode (source req.epcore.fun.debug.support).

_`.req.tag`: There should be a way to store at least a word of user
data (a "tag", borrowing the SW term) with every object in debugging
mode, to be used in memory dumps (source req.product.??? VC++).
[TODO: Locate the relevant product requirement.
RB 2023-02-23]

_`.req.tag.walk`: The walking function (as required by `.req.walk`_)
should have access to this data (source req.epcore.fun.debug.support).

_`.req.dump.aver`: It must be possible to perform a memory dump after
an ``AVER()`` has fired. Naturally, if the information required for
the dump has been corrupted, it will fail, as softly as possible
(source @@@@).

_`.req.portable`: Client code that uses these features must be easily
portable to all the supported platforms. (Source: job003749_.)

.. _job003749: https://www.ravenbrook.com/project/mps/issue/job003749/

.. note::

    There are more requirements, especially about memory dumps and
    allocation locations. Pekka P. Pirinen, 1998-09-10.


Solution ideas
--------------

_`.note.assumptions`: I've tried not to assume anything about the
coincidence of manual/automatic, formatted/unformatted, and
ap/mps_alloc. I think those questions deserve to be decided on their
own merits, instead of being constrained by a debug feature.

_`.fence.content.repeat`: The content of a fencepost could be
specified as a byte/word which is used repeatedly to fill the
fencepost.

_`.fence.content.template`: The content could be given as a template
which is of the right size and is simply copied onto the fencepost.

_`.fence.content.template.repeat`: The content could be given as a
template which is copied repeatedly until the fencepost is full. (This
would avoid the need to specify different templates on different
architectures, and so help meet `.req.portable`_.)

_`.fence.walk`: `.req.fencepost.check`_ requires the ability to find
all the allocated objects. In formatted pools, this is not a problem.
In unformatted pools, we could use the walker. It's a feasible
strategy to bet that any pool that might have to support fenceposting
will also have a walking requirement.

_`.fence.tag`: Fenceposting also needs to keep track of which objects
have fenceposts, unless we manage to do them all. It would be easiest
to put this in the tags.

_`.fence.check.object`: A function to check the fenceposts on a given
object would be nice.

_`.fence.ap`: AP's could support fenceposting transparently by having
a mode where ``mps_reserve()`` always goes out-of-line and fills in the
fenceposts (the pool's ``BufferFill()`` method isn't involved). This
would leave the MPS with more freedom of implementation, especially
when combined with some of the other ideas. We think doing a function
call for every allocation is not too bad for debugging.

_`.fence.outside-ap`: We could also let the client insert their own
fenceposts outside the MPS allocation mechanism. Even if fenceposting
were done like this, we'd still want it to be an MPS feature, so we'd
offer sample C macros for adding the size of the fencepost and filling
in the fencepost pattern. Possibly something like this (while we could
still store the parameters in the pool or allocation point, there
seems little point in doing so in this case, and having them as
explicit parameters to the macros allows the client to specify
constants to gain efficiency)::

    #define mps_add_fencepost(size, fp_size)
    #define mps_fill_fenceposts(obj, size, fp_size, fp_pattern)

The client would need to supply their own fencepost checking function,
obviously, but again we could offer one that matches the sample
macros.

_`.fence.tail-only`: In automatic pools, the presence of a fencepost
at the head of the allocated block results in the object reference
being an internal pointer. This means that the format or the pool
would need to know about fenceposting and convert between references
and pointers. This would slow down the critical path when fenceposting
is used.
This can be ameliorated by putting a fencepost at the tail of +the block only: this obviates the internal pointer problem and could +provide almost the same degree of checking (provided the size was +twice as large), especially in copying pools, where there are normally +no gaps between allocated blocks. In addition to the inescapable +effects on allocation and freeing (including copying and reclaim +thereunder), only scanning would have to know about fenceposts. + +_`.fence.tail-only.under`: Walking over all the objects in the pool +would be necessary to detect underwrites, as one couldn't be sure that +there is a fencepost before any given object (or where it's located +exactly). If the pool were doing the checking, it could be sure: it +would know about alignments and it could put fenceposts in padding +objects (free blocks will have them because they were once allocated) +so there'd be one on either side of any object (except at the head of +a segment, which is not a major problem, and could be fixed by adding +a padding object at the beginning of every segment). This requires +some cleverness to avoid splinters smaller than the fencepost size, +but it can be done. + +_`.fence.wrapper`: On formatted pools, fenceposting could be +implemented by "wrapping" the client-supplied format at creation time. +The wrapper can handle the conversion from the fenceposted object and +back. This will be invisible to the client and gives the added benefit +that the wrapper can validate fenceposts on every format operation, +should it desire. That is, the pool would see the fenceposts as part +of the client object, but the client would only see its object; the +format wrapper would translate between the two. Note that hiding the +fenceposts from scan methods, which are required to take a contiguous +range of objects, is a bit complicated. 
+ +_`.fence.client-format`: The MPS would supply such a wrapper, but +clients could also be allowed to write their own fenceposted formats +(provided they coordinate with allocation, see below). This would make +scanning fenceposted segments more efficient. + +_`.fence.wrapper.variable`: Furthermore, you could create different +classes of fencepost within a pool, because the fencepost itself could +have a variable format. For instance, you might choose to have the +fencepost be minimal (one to two words) for small objects, and more +detailed/complex for large objects (imagining that large objects are +likely vector-ish and subject to overruns). You could get really fancy +and have the fencepost class keyed to the object class (for example, +different allocation points create different classes of fenceposting). + +_`.fence.wrapper.alloc`: Even with a wrapped format, allocation and +freeing would still have know about the fenceposts. If allocation +points are used, either MPS-side (`.fence.ap`_) or client-side +(`.fence.outside-ap`_) fenceposting could be used, with the obvious +modifications. + +_`.fence.wrapper.alloc.format`: We could add three format methods, to +adjust the pointer and the size for alloc and free, to put down the +fenceposts during alloc, and to check them; to avoid slowing down all +allocation, this would require some MOPping to make the format class +affect the choice of the alloc and free methods (see +`mail.pekka.1998-06-11.18-18`_). + +.. _mail.pekka.1998-06-11.18-18: https://info.ravenbrook.com/project/mps/mail/1998/06/11/18-18/0.txt + +_`.fence.wrapper.alloc.size`: We could just communicate the size of +the fenceposts between the format and the allocation routines, but +then you couldn't use variable fenceposts +(`.fence.wrapper.variable`_). + +.. note:: + + All this applies to copying and reclaim in a straight-forward + manner, I think. + +_`.fence.pool.wrapper`: Pools can be wrapped as well. 
This could be a +natural way to represent/implement the fenceposting changes to the +Alloc and Free methods. [@@@@alignment] + +_`.fence.pool.new-class`: We could simply offer a debugging version of +each pool class (e.g., ``mps_pool_class_mv_debug()``). As we have seen, +debugging features have synergies which make it advantageous to have a +coordinated implementation, so splitting them up would not just +complicate the client interface, it would also be an implementation +problem; we can turn features on or off with pool init parameters. + +_`.fence.pool.abstract`: We could simply use pool init parameters only +to control all debugging features (optargs would be useful here). +While there might be subclasses and wrappers internally, the client +would only see a single pool class; in the internal view, this would +be an abstract class, and the parameters would determine which +concrete class actually gets instantiated. + +_`.tag.out-of-line`: It would be nice if tags were stored out-of-line, +so they can be used to study allocation patterns and fragmentation +behaviours. Such an implementation of tagging could also easily be +shared among several pools. + + +Architecture +------------ + +_`.pool`: The implementation is at the pool level, because pools +manage allocated objects. A lot of the code will be generic, +naturally, but the data structures and the control interfaces attach +to pools. In particular, clients will be able to use tagging and +fenceposting separately on each pool. + +_`.fence.size`: Having fenceposts of adjustable size and pattern is +useful. Restricting the size to an integral multiple of the [pool or +format?] alignment would simplify the implementation but breaks +`.req.portable`_. + +_`.fence.template`: We use templates (`.fence.content.template`_) to +fill in the fenceposts, but we do not give any guarantees about the +location of the fenceposts. This leaves us the opportunity to do +tail-only fenceposting, if we choose. 

_`.fence.slop`: [see impl.c.dbgpool.FenceAlloc @@@@]

_`.fence.check.free`: We check the fenceposts when freeing an object.

_`.unified-walk`: Combine the walking and tagging requirements
(`.req.tag.walk`_ and @@@@) into a generic facility for walking and
tagging objects with just one interface and one name: tagging. Also
combine the existing formatted object walker into this metaphor, but
allowing the format and tag parameters of the step function to be
optional.

.. note::

    This part has not been implemented yet. Pekka P. Pirinen,
    1998-09-10.

_`.init`: It simplifies the implementation of both tagging and
fenceposting if they are always on, so that we don't have to keep
track of which objects have been fenceposted and which have not, and
don't have to have three kinds of tags: for user data, for
fenceposting, and for both. So we determine this at pool init time
(and let fenceposting turn on tagging, if necessary).

_`.pool-parameters`: Fencepost templates and tag formats are passed in
as pool parameters.

_`.modularity`: While a combined generic implementation of tags and
fenceposts is provided, it is structured so that each part of it could
be implemented by a pool-specific mechanism with a minimum of new
protocol.

.. note::

    This will be improved, when we figure out formatted pools -- they
    don't need tags for fenceposting.

_`.out-of-space`: If there's no room for tags, we just fail to
allocate the tag. We free the block allocated for the object and fail
the allocation, so that the client gets a chance to do whatever
low-memory actions they might want to do.

This breaks the one-to-one relationship between tags and objects, so
some checks cannot be made, but we do count the "lost" tags.

.. note::

    Need to hash out how to do fenceposting in formatted pools.
+ + +Client interface +---------------- + +_`.interface.fenceposting.check`: +``mps_pool_check_fenceposts()`` is a function to check all +fenceposts in a pool (``AVER()`` if a problem is found) + +.. note:: + + From here on, these are tentative and incomplete. + +``mps_res_t mps_fmt_fencepost_wrap(mps_fmt_t *format_return, mps_arena_t arena, mps_fmt_t format, ...)`` + +_`.interface.fenceposting.format`: A function to wrap a format +(class) to provide fenceposting. + +``typedef void (*mps_fmt_adjust_fencepost_t)(size_t *size_io)`` + +_`.interface.fenceposting.adjust`: A format method to adjust size of a +block about to be allocated to allow for fenceposts. + +``typedef void (*mps_fmt_put_fencepost_t)(mps_addr_t * addr_io, size_t size)`` + +_`.interface.fenceposting.add`: A format method to add a fencepost +around a block about to be allocated. The ``NULL`` method adds a tail +fencepost. + +``typedef mps_bool_t (*mps_fmt_check_fenceposts_t)(mps_addr_t)`` + +_`.interface.fenceposting.checker`: A format method to check the +fenceposts around an object. The ``NULL`` method checks tails. + +``mps_res_t mps_alloc_dbg(mps_addr_t *, mps_pool_t, size_t, ...)`` +``mps_res_t mps_alloc_dbg_v(mps_addr_t *, mps_pool_t, size_t, va_list)`` + +_`.interface.tags.alloc`: Two functions to extend the existing +``mps_alloc()`` (request.???.??? proposes to remove the varargs) +[TODO: Locate the relevant Harlequin request. RB 2023-02-23] + +``typedef void (*mps_objects_step_t)(mps_addr_t addr, size_t size, mps_fmt_t format, mps_pool_t pool, void *tag_data, void *p)`` + +_`.interface.tags.walker.type`: Type of walker function for +``mps_pool_walk()`` and ``mps_arena_walk()``. 
+ +_`.interface.tags.walker`: Functions to walk all the allocated +objects in an arena (only client pools in this case), +``format`` and ``tag_data`` can be ``NULL`` (``tag_data`` really wants +to be ``void *``, not ``mps_addr_t``, because it's stored +together with the internal tag data in an MPS internal pool) + + +Examples +-------- + +_`.example.debug-alloc`: :: + + #define MPS_ALLOC_DBG(res_io, addr_io, pool, size) + MPS_BEGIN + static mps_tag_A_s _ts = { __FILE__, __LINE__ }; + + *res_io = mps_alloc(addr_io, pool, size, _ts_) + MPS_END + + +Implementation +-------------- + +_`.new-pool`: The client interface to control fenceposting +consists of the new classes ``mps_pool_class_mv_debug()``, +``mps_pool_class_epdl_debug()``, and +``mps_pool_class_epdr_debug()``, and their new init parameter of +type ``mps_pool_debug_option_s``. + +.. note:: + + This is a temporary solution, to get it out without writing lots + of new interface. Pekka P. Pirinen, 1998-09-10. + +_`.new-pool.impl`: The debug pools are implemented using the "class +wrapper" ``EnsureDebugClass()``, which produces a subclass with +modified ``init``, ``finish``, ``alloc``, and ``free`` methods. These +methods are implemented in the generic debug class code +(``impl.c.dbgpool``), and are basically wrappers around the superclass +methods (invoked through the ``pool->class->super`` field). To find +the data stored in the class for the debugging features, they use the +``debugMixin`` method provided by the subclass. So to make a debug +subclass, three things should be provided: a structure definition of +the instance containing a ``PoolDebugMixinStruct``, a pool class +function that uses ``EnsureDebugClass()``, and a ``debugMixin`` method +that locates the ``PoolDebugMixinStruct`` within an instance. + +_`.tags.splay`: The tags are stored in a splay tree of tags +allocated from a subsidiary MFS pool. 
The client needs to specify the +(maximum) size of the client data in a tag, so that the pool can be +created. + +.. note:: + + Lots more should be said, eventually. Pekka P. Pirinen, + 1998-09-10. + + +Document History +---------------- + +- 1998-09-10 Pekka Pirinen The first draft merely records all the + various ideas about fenceposting that came up in discussions in + June, July and September 1998. This includes the format wrapping + idea from `mail.ptw.1998-06-19.21-13`_. + + .. _mail.ptw.1998-06-19.21-13: https://info.ravenbrook.com/project/mps/mail/1998/06/19/21-13/0.txt + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-04-14 GDR_ Converted to reStructuredText. + +- 2014-04-09 GDR_ Added newly discovered requirement `.req.portable`_. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/pool.txt b/mps/design/pool.txt new file mode 100644 index 00000000000..5dc7b02736f --- /dev/null +++ b/mps/design/pool.txt @@ -0,0 +1,249 @@ +.. mode: -*- rst -*- + +Pool classes +============ + +:Tag: design.mps.pool +:Author: Richard Brooksby +:Date: 1997-08-19 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: pool classes; design + + +Introduction +------------- + +_`.intro`: This document describes the interface and protocols between +the MPM and the pool classes. + + +Classes and structures +---------------------- + +_`.class`: Each pool belongs to a *pool class*. + +_`.class.name`: Each pool class has a short, pithy, cryptic name for +the pool class. It should start with ``"A"`` (for "automatic") if +memory is managed by the garbage collector, and ``"M"`` (for "manual") +if memory is managed by alloc/free. For example, "AMC", "MVFF". + +_`.class.protocol`: Pool classes use the *protocol* mechanisms (see +design.mps.protocol_) to implement class initialization and +inheritance. + +.. _design.mps.protocol: protocol + +_`.class.structure`: Each pool class has an associated *class +structure*, which is a C object of type ``PoolClass``. This is +initialized and accessed via the ``CLASS()`` macro, for example +``CLASS(MRGPool)`` initializes and accesses the class structure for +the MRG pool class. 
+ +_`.struct.outer`: The *outer structure* of a pool belonging to the ABC +pool class is a C object of type ``ABCPoolStruct``, which is a typedef +for ``struct PoolABCStruct``. + +_`.struct.outer.sig`: See `design.mps.sig.field.end.outer`_. + +.. _design.mps.sig.field.end.outer: sig.txt#field-end-outer + +_`.struct.generic`: The *generic structure* of a pool is a C object of +type ``PoolStruct`` (found embedded in the outer structure), which is +a typedef for ``struct PoolStruct``. + + +Fields +------ + +_`.field`: These fields are provided by pool classes as part of the +``PoolClass`` object (see `.class.structure`_). They form part of the +interface which allows the MPM to treat pools in a uniform manner. + +_`.field.name`: The ``name`` field must be the pool class name +(`.class.name`_). + +_`.field.size`: The ``size`` field is the size of the pool instance +structure. For the ``PoolABC`` class this can reasonably be expected +to be ``sizeof(PoolABCStruct)``. + +_`.field.attr`: The ``attr`` field must be a bitset of pool class +attributes. See design.mps.type.attr_. + +.. _design.mps.type.attr: type#.attr + +_`.field.alignShift`: The ``alignShift`` field is the ``SizeLog2`` of +the pool's alignment. It is computed and initialised when a pool is +created. Mark-and-sweep pool classes use it to compute the number of +grains in a segment, which is the number of bits need in the segment's +mark and alloc bit tables. + +_`.field.format`: The ``format`` field is used to refer to the object +format. The object format is passed to the pool during pool creation. + + +Methods +------- + +_`.method`: These methods are provided by pool classes as part of the +``PoolClass`` object (see `.class.structure`_). They form part of the +interface which allows the MPM to treat pools in a uniform manner. 
+ +_`.method.unused`: If a pool class is not required to provide a +certain method, the class should assign the appropriate ``PoolNo`` +method (which asserts) for that method to ensure that erroneous calls +are detected. It is not acceptable to use ``NULL``. + +_`.method.trivial`: If a pool class if required to provide a certain +method, but the class provides no special behaviour in this case, it +should assign the appropriate ``PoolTriv`` method, which does nothing. + +_`.method.inst`: Pool classes may implement the generic instance +methods (see design.mps.protocol.inst.method_). In particular: + +.. _design.mps.protocol.inst.method: inst#method + +- _`.method.inst.finish`: The ``finish`` method + (design.mps.protocol.inst.method.finish_) must finish the outer + structure and then call its superclass method via the + ``NextMethod()`` macro (thus calling ``PoolAbsFinish()`` which + finishes the generic structure). + + .. _design.mps.protocol.inst.method.finish: inst#method.finish + +- _`.method.inst.describe`: The ``describe`` method + (design.mps.protocol.inst.method.describe_) should print a + description of the pool. Each line should begin with two spaces. + Classes are not required to provide this method. + + .. _design.mps.protocol.inst.method.describe: inst#method.describe + +``typedef void (*PoolVarargsMethod)(ArgStruct args[], va_list varargs)`` + +_`.method.varargs`: The ``varargs`` field decodes the variable +arguments to the deprecated function ``mps_pool_create()`` and +converts them to a list of keyword arguments (see +design.mps.keyword-arguments_). + +.. _design.mps.keyword-arguments: keyword-arguments + +``typedef Res (*PoolInitMethod)(Pool pool, Arena arena, PoolClass klass, ArgList args)`` + +_`.method.init`: The ``init`` method must call its superclass method +via the ``NextMethod()`` macro (thus calling ``PoolAbsInit()`` which +initializes the generic structure), and then initialize the outer +structure. 
It is called via the generic function ``PoolInit()``. + +``typedef Res (*PoolAllocMethod)(Addr *pReturn, Pool pool, Size size)`` + +_`.method.alloc`: The ``alloc`` method manually allocates a block of +at least ``size`` bytes. It should update ``*pReturn`` with a pointer +to a fresh (that is, not overlapping with any other live object) +object of the required size. Failure to allocate must be indicated by +returning an appropriate error code, and in such a case, ``*pReturn`` +must not be updated. Pool classes are not required to provide this +method. It is called via the generic function ``PoolAlloc()``. + +_`.method.alloc.size.align`: A pool class may allow an unaligned +``size`` (rounding it up to the pool's alignment). + +``typedef void (*PoolFreeMethod)(Pool pool, Addr old, Size size)`` + +_`.method.free`: The ``free`` method manually frees a block. The +parameters are required to correspond to a previous allocation request +(possibly via a buffer, not necessarily via ``PoolAlloc()``). It is an +assertion by the client that the indicated object is no longer +required and the resources associated with it can be recycled. Pool +classes are not required to provide this method. It is called via the +generic function ``PoolFree()``. + +_`.method.free.size.align`: A pool class may allow an unaligned +``size`` (rounding it up to the pool's alignment). + +``typedef BufferClass (*PoolBufferClassMethod)(void)`` + +_`.method.bufferClass`: The ``bufferClass`` method returns the class +of buffers used by the pool. Pool classes are not required to provide +this method. It is called via the generic function +``PoolDefaultBufferClass()``. + +``typedef Res (*PoolBufferFillMethod)(Addr *baseReturn, Addr *limitReturn, Pool pool, Buffer buffer, Size size)`` + +_`.method.bufferFill`: The ``bufferFill`` method should allocate a +region of least ``size`` bytes of memory for attaching to ``buffer``. +The buffer is in the "reset" state (see design.mps.buffer.reset_). 
If +successful, it must update ``*baseReturn`` and ``*limitReturn`` to the +base and limit of the allocated region and return ``ResOK``. Otherwise +it must leave ``*baseReturn`` and ``*limitReturn`` unchanged and +return a non-OK result code. Pool classes are not required to provide +this method. This method is called by ``BufferFill()``. + +.. _design.mps.buffer.reset: buffer#.reset + +``typedef void (*PoolBufferEmptyMethod)(Pool pool, Buffer buffer)`` + +_`.method.bufferEmpty`: The ``bufferEmpty`` method indicates that the +client program has finished with the unused part of the buffer (the +part between init and limit). The buffer is in the "ready" state (see +design.mps.buffer.ready_). This method must be provided if and only if +``bufferFill`` is provided. This method is called by the generic +function ``BufferDetach()``. + +.. _design.mps.buffer.ready: buffer#.ready + +``typedef Size (*PoolSizeMethod)(Pool pool)`` + +_`.method.totalSize`: The ``totalSize`` method must return the total +memory allocated from the arena and managed by the pool. This method +is called by the generic function ``PoolTotalSize()``. + +_`.method.freeSize`: The ``freeSize`` method must return the free +memory allocated from the arena and managed by the pool, but not in +use by the client program. This method is called by the generic +function ``PoolFreeSize()``. + + +Document history +---------------- + +- 1997-08-19 RB_ Initial draft. David Jones added comments about how + accurate this document is. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-03-12 GDR_ Converted to reStructuredText. + +- 2014-06-08 GDR_ Bring method descriptions up to date. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/poolamc.txt b/mps/design/poolamc.txt new file mode 100644 index 00000000000..8dd306a4352 --- /dev/null +++ b/mps/design/poolamc.txt @@ -0,0 +1,844 @@ +.. mode: -*- rst -*- + +AMC pool class +============== + +:Tag: design.mps.poolamc +:Author: Richard Brooksby +:Date: 1995-08-25 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: AMC pool class; design + single: pool class; AMC design + + +Guide Introduction +~~~~~~~~~~~~~~~~~~ + +.. The intro and readership tags were found to be duplicated by + changelist 182116 / commit e9841d23a but not referenced. 
But that + was just a consequence of two documents being smushed together by + RHSK in changelist 168424 / commit b0433b3e9: a guide and a design. + It would be good to sort that out. See also + . RB 2023-01-14 + +_`.guide.intro`: This document contains a guide (`.guide`_) to the MPS AMC +pool class, followed by the historical initial design +(`.initial-design`_). + +_`.guide.readership`: Any MPS developer. + + +Guide +~~~~~ + +_`.guide`: The AMC pool class is a general-purpose automatic +(collecting) pool class. It is intended for most client objects. AMC +is "Automatic, Mostly Copying": it preserves objects by copying, +except when an ambiguous reference 'nails' the object in place. It is +generational. Chain: specify capacity and mortality of generations 0 +to *N* − 1. Survivors from generation *N* − 1 get promoted into an +arena-wide "top" generation (often anachronistically called the +"dynamic" generation, which was the term on the Lisp Machine). + + +Segment states +-------------- + +_`.seg.state`: AMC segments are in one of three states: "mobile", +"boarded", or "stuck". + +_`.seg.state.mobile`: Segments are normally **mobile**: all objects on +the seg are un-nailed, and thus may be preserved by copying. + +_`.seg.state.boarded`: An ambiguous reference to any address within an +segment makes that segment **boarded**: a nailboard is allocated to +record ambiguous references ("nails"), but un-nailed objects on the +segment are still preserved by copying. + +_`.seg.state.stuck`: Stuck segments only occur in emergency tracing: a +discovery fix to an object in a mobile segment is recorded in the only +non-allocating way available: by making the entire segment **stuck**. + + +Pads +---- + +(See job001809_ and job001811_, and mps/branch/2009-03-31/padding.) + +.. _job001809: https://www.ravenbrook.com/project/mps/issue/job001809/ +.. _job001811: https://www.ravenbrook.com/project/mps/issue/job001811/ + +_`.pad`: A pad is logically a trivial client object. 
Pads are created +by the MPS asking the client's format code to create them, to fill up +a space in a segment. Thereafter, the pad appears to the MPS as a +normal client object (that is: the MPS cannot distinguish a pad from a +client object). + +_`.pad.reason`: AMC creates pads for three reasons: buffer empty +fragment (BEF), large segment padding (LSP), and non-mobile reclaim +(NMR). (Large segment pads were new with job001811_.) + +_`.pad.reason.bef`: Buffer empty fragment (BEF) pads are made by +``amcSegBufferEmpty()`` whenever it detaches a non-empty buffer from +an AMC segment. Buffer detachment is most often caused because the +buffer is too small for the current buffer reserve request (which may +be either a client requested or a forwarding allocation). Detachment +may happen for other reasons, such as trace flip. + +_`.pad.reason.lsp`: Large segment padding (LSP) pads are made by +``AMCBufferFill()`` when the requested fill size is "large" (see `The +LSP payoff calculation`_ below). ``AMCBufferFill()`` fills the buffer +to exactly the size requested by the current buffer reserve operation; +that is: it does not round up to the whole segment size. This prevents +subsequent small objects being placed in the same segment as a single +very large object. If the buffer fill size is less than the segment +size, ``AMCBufferFill()`` fills any remainder with a large segment +pad. + +_`.pad.reason.nmr`: Non-mobile reclaim (NMR) pads are made by +``amcSegReclaimNailed()``, when performing reclaim on a non-mobile (that +is, either boarded or stuck) segment: + +The more common NMR scenario is reclaim of a boarded segment after a +non-emergency trace. Ambiguous references into the segment are +recorded as nails. Subsequent exact references to a nailed object do +nothing further, but exact refs that do not match a nail cause +preserve-by-copy and leave a forwarding object. Unreachable objects +are not touched during the scan+fix part of the trace. 
On reclaim, +only nailed objects need to be preserved; others (namely forwarding +pointers and unreachable objects) are replaced by an NMR pad. (Note +that a BEF or LSP pad appears to be an unreachable object, and is +therefore overwritten by an NMR pad). + +The less common NMR scenario is after emergency tracing. Boarded +segments still occur; they may have nailed objects from ambiguous +references, forwarding objects from pre-emergency exact fixes, nailed +objects from mid-emergency exact fixes, and unpreserved objects; +reclaim is as in the non-emergency case. Stuck segments may have +forwarding objects from pre-emergency exact fixes, objects from +mid-emergency fixes, and unreachable objects -- but the latter two are +not distinguishable because there is no nailboard. On reclaim, all +objects except forwarding pointers are preserved; each forwarding +object is replaced by an NMR pad. + +If ``amcSegReclaimNailed()`` finds no objects to be preserved then it +calls ``SegFree()`` (new with job001809_). + + +Placement pads are okay +----------------------- + +Placement pads are the BEF and LSP pads created in "to-space" when +placing objects into segments. This wasted space is an expected +space-cost of AMC's naive (but time-efficient) approach to placement +of objects into segments. This is normally not a severe problem. (The +worst case is a client that always requests ``amc->extendBy + 1`` byte +objects: this has an overhead of nearly ``ArenaGrainSize() / amc->extendBy``). + + +Retained pads could be a problem +-------------------------------- + +Retained pads are the NMR pads stuck in "from-space": non-mobile +segments that were condemned but have preserved-in-place objects +cannot be freed by ``amcSegReclaimNailed()``. The space around the +preserved objects is filled with NMR pads. + +In the worst case, retained pads could waste an enormous amount of +space! 
A small (one-byte) object could retain a multi-page segment for +as long as the ambiguous reference persists; that is: indefinitely. +Imagine a 256-page (1 MiB) segment containing a very large object +followed by a handful of small objects. An ambiguous reference to one +of the small objects will unfortunately cause the entire 256-page +segment to be retained, mostly as an NMR pad; this is a massive +overhead of wasted space. + +AMC mitigates this worst-case behaviour, by treating large segments +specially. + + +Small, medium, and large segments +--------------------------------- + +AMC categorises segments as **small** (up to ``amc->extendBy``), **medium** +(larger than small but smaller than large), or **large** (``amc->largeSize`` or +more):: + + size = SegSize(seg); + if(size < amc->extendBy) { + /* small */ + } else if(size < amc->largeSize) { + /* medium */ + } else { + /* large */ + } + +``amc->extendBy`` defaults to 4096 (rounded up to the arena +alignment), and is settable by using ``MPS_KEY_EXTEND_BY`` keyword +argument. ``amc->largeSize`` is currently 32768 -- see `The LSP payoff +calculation`_ below. + +AMC might treat "Large" segments specially, in two ways: + +- _`.large.single-reserve`: A large segment is only used for a single + (large) buffer reserve request; the remainder of the segment (if + any) is immediately padded with an LSP pad. + +- _`.large.lsp-no-retain`: Nails to such an LSP pad do not cause + ``amcSegReclaimNailed()`` to retain the segment. + +`.large.single-reserve`_ is implemented. See job001811_. + +`.large.lsp-no-retain`_ is **not** currently implemented. + +The point of `.large.lsp-no-retain`_ would be to avoid retention of +the (large) segment when there is a spurious ambiguous reference to +the LSP pad at the end of the segment. 
Such an ambiguous reference +might happen naturally and repeatably if the preceding large object is +an array, the array is accessed by an ambiguous element pointer (for +example, on the stack), and the element pointer ends up pointing just +off the end of the large object (as is normal for sequential element +access in C) and remains with that value for a while. (Such an +ambiguous reference could also occur by chance, for example, by +coincidence with an ``int`` or ``float``, or when the stack grows to +include old unerased values). + +Implementing `.large.lsp-no-retain`_ is a little tricky. A pad is +indistinguishable from a client object, so AMC has no direct way to +detect, and safely ignore, the final LSP object in the seg. If AMC +could *guarantee* that the single buffer reserve +(`.large.single-reserve`_) is only used for a single *object*, then +``amcSegReclaimNailed()`` could honour a nail at the start of a large +seg and ignore all others; this would be extremely simple to +implement. But AMC cannot guarantee this, because in the MPS +Allocation Point Protocol the client is permitted to make a large +buffer reserve and then fill it with many small objects. In such a +case, AMC must honour all nails (if the buffer reserve request was an +exact multiple of the arena grain size), or all nails except to the +last object (if there was a remainder filled with an LSP pad). Because +an LSP pad cannot be distinguished from a client object, and the +requested allocation size is not recorded, AMC cannot distinguish +these two conditions at reclaim time. Therefore AMC must record +whether or not the last object in the seg is a pad, in order to ignore +nails to it. This could be done by adding a flag to ``AMCSegStruct``. +(This can be done without increasing the structure size, by making the +``Bool new`` field smaller than its current 32 bits.) 
+ + +The LSP payoff calculation +-------------------------- + +The LSP fix for job001811_ treats large segments differently. Without +it, after allocating a very large object (in a new very large +multi-page segment), MPS would happily place subsequent small objects +in any remaining space at the end of the segment. This would risk +pathological fragmentation: if these small objects were systematically +preserved by ambiguous refs, enormous NMR pads would be retained along +with them. + +The payoff calculation is a bit like deciding whether or not to +purchase insurance. For single-page and medium-sized segments, we go +ahead and use the remaining space for subsequent small objects. This +is equivalent to choosing **not** to purchase insurance. If the small +objects were to be preserved by ambiguous refs, the retained NMR pads +would be big, but not massive. We expect such ambiguous refs to be +uncommon, so we choose to live with this slight risk of bad +fragmentation. The benefit is that the remaining space is used. + +For large segments, we decide that the risk of using the remainder is +just too great, and the benefit too small, so we throw it away as an +LSP pad. This is equivalent to purchasing insurance: we choose to pay +a known small cost every time, to avoid risking an occasional +disaster. + +To decide what size of segment counts as "large", we must decide how +much uninsured risk we can tolerate, versus how much insurance cost we +can tolerate. The likelihood of ambiguous references retaining objects +is entirely dependent on client behaviour. However, as a sufficient +"one size fits all" policy, I (RHSK 2009-09-14) have judged that +segments smaller than eight pages long do not need to be treated as +large: the insurance cost to "play safe" would be considerable +(wasting up to one page of remainder per seven pages of allocation), +and the fragmentation overhead risk is not that great (at most eight +times worse than the unavoidable minimum). 
So ``AMC_LARGE_SIZE_DEFAULT`` is +defined as 32768 in config.h. As long as the assumption that most segments +are not ambiguously referenced remains correct, I expect this policy +will be satisfactory. + +To verify that this threshold is acceptable for a given client, +poolamc.c calculates metrics; see `Feedback about retained pages`_ +below. If this one-size-fits-all approach is not satisfactory, +``amc->largeSize`` is a client-tunable parameter which defaults to +``AMC_LARGE_SIZE_DEFAULT``. It can be tuned by passing an +``MPS_KEY_LARGE_SIZE`` keyword argument to ``mps_pool_create_k()``. + + +Retained pages +-------------- + +The reasons why a segment and its pages might be retained are: + +#. ambiguous reference to first-obj: unavoidable page retention (only + the mutator can reduce this, if they so wish, by nulling out ambig + references); +#. ambiguous reference to rest-obj: tuning MPS LSP policy could + mitigate this, reducing the likelihood of rest-objs being + co-located with large first-objs; +#. ambiguous reference to final pad: implementing + `.large.lsp-no-retain`_ could mitigate this; +#. ambiguous reference to other (NMR) pad: hard to mitigate, as pads + are indistinguishable from client objects; +#. emergency trace; +#. non-object-aligned ambiguous ref: fixed by job001809_; +#. other reason (for example, buffered at flip): not expected to be a + problem. + +This list puts the reasons that are more "obvious" to the client +programmer first, and the more obscure reasons last. + + +Feedback about retained pages +----------------------------- + +(New with job001811_). AMC now accumulates counts of pages condemned +and retained during a trace, in categories according to size and +reason for retention, and emits this via the ``AMCTraceEnd`` telemetry +event. See comments on the ``PageRetStruct`` in ``poolamc.c``. 
These +page-based metrics are not as precise as actually counting the size of +objects, but they require much less intrusive code to implement, and +should be sufficient to assess whether AMC's page retention policies +and behaviour are acceptable. + + +Initial design +~~~~~~~~~~~~~~ + +_`.initial-design`: This section contains the original design for the +AMC Pool Class. + + +Introduction +------------ + +_`.intro`: This is the design of the AMC Pool Class. AMC stands for +Automatic Mostly-Copying. This design is highly fragmentory and some +may even be sufficiently old to be misleading. + +_`.readership`: The intended readership is any MPS developer. + + +Overview +-------- + +_`.overview`: This class is intended to be the main pool class used by +Harlequin Dylan. It provides garbage collection of objects (hence +"automatic"). It uses generational copying algorithms, but with some +facility for handling small numbers of ambiguous references. Ambiguous +references prevent the pool from copying objects (hence "mostly +copying"). It provides incremental collection. + +.. note:: + + A lot of this design is awesomely old. David Jones, 1998-02-04. + + +Definitions +----------- + +_`.def.grain`: Grain. An quantity of memory which is both aligned to +the pool's alignment and equal to the pool's alignment in size. That +is, the smallest amount of memory worth talking about. + + +Segments +-------- + +_`.seg.class`: AMC allocates segments of class ``AMCSegClass``, which +is a subclass of ``MutatorSegClass`` (see +design.mps.seg.over.hierarchy.mutatorseg_). + +.. _design.mps.seg.over.hierarchy.mutatorseg: seg#.over.hierarchy.mutatorseg + +_`.seg.gen`: AMC organizes the segments it manages into generations. + +_`.seg.gen.map`: Every segment is in exactly one generation. + +_`.seg.gen.ind`: The segment's ``gen`` field indicates which +generation (that the segment is in) (an ``AMCGenStruct`` see blah +below). 
+ +_`.seg.gen.get`: The map from segment to generation is implemented by +``amcSegGen()`` which deals with all this. + + +Fixing and nailing +------------------ + +.. note:: + + This section contains placeholders for design rather than design + really. David Jones, 1998-02-04. + +_`.nailboard`: AMC uses a nailboard structure for recording ambiguous +references to segments. See design.mps.nailboard_. + +.. _design.mps.nailboard: nailboard + +_`.nailboard.create`: A nailboard is allocated dynamically whenever a +segment becomes newly ambiguously referenced. This table is used by +subsequent scans and reclaims in order to work out which objects were +ambiguously referenced. + +_`.nailboard.destroy`: The nailboatrd is deallocated during reclaim. + +_`.nailboard.emergency`: During emergency tracing two things relating +to nailboards happen that don't normally: + +#. _`.nailboard.emergency.nonew`: Nailboards aren't allocated when we + have new ambiguous references to segments. + + _`.nailboard.emergency.nonew.justify`: We could try and allocate a + nailboard, but we're in emergency mode so short of memory so it's + unlikely to succeed, and there would be additional code for yet + another error path which complicates things. + +#. _`.nailboard.emergency.exact`: nailboards are used to record exact + references in order to avoid copying the objects. + + _`.nailboard.hyper-conservative`: Not creating new nailboards + (`.nailboard.emergency.nonew`_ above) means that when we have a new + reference to a segment during emergency tracing then we nail the + entire segment and preserve everything in place. + +_`.fix.nail.states`: Partition the segment states into four sets: + +#. white segment and not nailed (and has no nailboard); +#. white segment and nailed and has no nailboard; +#. white segment and nailed and has nailboard; +#. the rest. 
+ +_`.fix.nail.why`: A segment is recorded as being nailed when either +there is an ambiguous reference to it, or there is an exact reference +to it and the object couldn't be copied off the segment (because there +wasn't enough memory to allocate the copy). In either of these cases +reclaim cannot simply destroy the segment (usually the segment will +not be destroyed because it will have live objects on it, though see +`.nailboard.limitations.middle`_ below). If the segment is nailed then +we might be using a nailboard to mark objects on the segment. +However, we cannot guarantee that being nailed implies a nailboard, +because we might not be able to allocate the nailboard. Hence all +these states actually occur in practice. + +_`.fix.nail.distinguish`: The nailed bits in the segment descriptor +(``SegStruct``) are used to record the set of traces for which a +segment has nailed objects. + +_`.nailboard.limitations.single`: Just having a single nailboard per +segment prevents traces from improving on the findings of each other: +a later trace could find that a nailed object is no longer nailed or +even dead. Until the nailboard is discarded, that is. + +_`.nailboard.limitations.middle`: An ambiguous reference to a segment +that does not point into any object in that segment will cause that +segment to survive even though there are no surviving objects on it. + + +Emergency tracing +----------------- + +_`.emergency.fix`: ``amcSegFixEmergency()`` is at the core of AMC's +emergency tracing policy (unsurprisingly). ``amcSegFixEmergency()`` +chooses exactly one of three options: + +#. use the existing nailboard structure to record the fix; +#. preserve and nail the segment in its entirety; +#. snapout an exact (or high rank) pointer to a broken heart to the + broken heart's forwarding pointer. + +If the rank of the reference is ``RankAMBIG`` then it either does (1) +or (2) depending on whether there is an existing nailboard or not. 
+Otherwise (the rank is exact or higher) if there is a broken heart it +is used to snapout the pointer. Otherwise it is as for an +``RankAMBIG`` reference: we either do (1) or (2). + +_`.emergency.scan`: This is basically as before, the only complication +is that when scanning a nailed segment we may need to do multiple +passes, as ``amcSegFixEmergency()`` may introduce new marks into the +nail board. + + +Buffers +------- + +_`.buffer.class`: AMC uses buffer of class ``AMCBufClass`` (a subclass +of SegBufClass). + +_`.buffer.gen`: Each buffer allocates into exactly one generation. + +_`.buffer.field.gen`: ``AMCBuf`` buffer contain a gen field which +points to the generation that the buffer allocates into. + +_`.buffer.fill.gen`: ``AMCBufferFill()`` uses the generation (obtained +from the ``gen`` field) to initialise the segment's ``segTypeP`` field +which is how segments get allocated in that generation. + +_`.buffer.condemn`: We condemn buffered segments, but not the contents +of the buffers themselves, because we can't reclaim uncommitted +buffers (see design.mps.buffer_ for details). If the segment has a +forwarding buffer on it, we detach it. + +.. _design.mps.buffer: buffer + +.. note:: + + Why? Forwarding buffers are detached because they used to cause + objects on the same segment to not get condemned, hence caused + retention of garbage. Now that we condemn the non-buffered portion + of buffered segments this is probably unnecessary. David Jones, + 1998-06-01. + + But it's probably more efficient than keeping the buffer on the + segment, because then the other stuff gets nailed -- Pekka P. + Pirinen, 1998-07-10. + +If the segment has a mutator buffer on it, we nail the buffer. If the +buffer cannot be nailed, we give up condemning, since nailing the +whole segment would make it survive anyway. The scan methods skip over +buffers and fix methods don't do anything to things that have already +been nailed, so the buffer is effectively black. 
+ + +Types +----- + +_`.struct`: ``AMCStruct`` is the pool class AMC instance structure. + +_`.struct.pool`: Like other pool class instances, it contains a +``PoolStruct`` containing the generic pool fields. + +_`.struct.format`: The ``format`` field points to a ``Format`` +structure describing the object format of objects allocated in the +pool. The field is initialized by ``AMCInit()`` from a parameter, and +thereafter it is not changed until the pool is destroyed. + +.. note:: + + Actually the format field is in the generic ``PoolStruct`` these + days. David Jones, 1998-09-21. + +.. note:: + + There are lots more fields here. + + +Generations +----------- + +_`.gen`: Generations partition the segments that a pool manages (see +`.seg.gen.map`_ above). + +_`.gen.collect`: Generations are more or less the units of +condemnation in AMC. And also the granularity for forwarding (when +copying objects during a collection): all the objects which are copied +out of a generation use the same forwarding buffer for allocating the +new copies, and a forwarding buffer results in allocation in exactly +one generation. + +_`.gen.rep`: Generations are represented using an ``AMCGenStruct`` +structure. + +_`.gen.create`: All the generations are created when the pool is +created (during ``AMCInitComm()``). + +_`.gen.manage.ring`: An AMC's generations are kept on a ring attached +to the ``AMCStruct`` (the ``genRing`` field). + +_`.gen.manage.array`: They are also kept in an array which is +allocated when the pool is created and attached to the ``AMCStruct`` +(the gens field holds the number of generations, the ``gen`` field +points to an array of ``AMCGen``). + +.. note:: + + it seems to me that we could probably get rid of the ring. David + Jones, 1998-09-22. + +_`.gen.number`: There are ``AMCTopGen + 2`` generations in total. +"normal" generations numbered from 0 to ``AMCTopGen`` inclusive and an +extra "ramp" generation (see `.gen.ramp`_ below). 
+ +_`.gen.forward`: Each generation has an associated forwarding buffer +(stored in the ``forward`` field of ``AMCGen``). This is the buffer +that is used to forward objects out of this generation. When a +generation is created in ``AMCGenCreate()``, its forwarding buffer has +a null ``p`` field, indicating that the forwarding buffer has no +generation to allocate in. The collector will assert out (in +``AMCBufferFill()`` where it checks that ``buffer->p`` is an +``AMCGen``) if you try to forward an object out of such a generation. + +_`.gen.forward.setup`: All the generation's forwarding buffer's are +associated with generations when the pool is created (just after the +generations are created in ``AMCInitComm()``). + + +Ramps +----- + +_`.ramp`: Ramps usefully implement the begin/end +``mps_alloc_pattern_ramp()`` interface. + +_`.gen.ramp`: To implement ramping (request.dylan.170423_), AMC uses a +special "ramping mode", where promotions are redirected. One +generation is designated the "ramp generation" (``amc->rampGen`` in +the code). + +.. _request.dylan.170423: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170423 + +_`.gen.ramp.ordinary`: Ordinarily, that is whilst not ramping, objects +are promoted into the ramp generation from younger generations and are +promoted out to older generations. The generation that the ramp +generation ordinarily promotes into is designated the "after-ramp +generation" (``amc->afterRampGen``). + +_`.gen.ramp.particular`: the ramp generation is the second oldest +generation and the after-ramp generation is the oldest generation. + +_`.gen.ramp.possible`: In alternative designs it might be possible to +make the ramp generation a special generation that is only promoted +into during ramping, however, this is not done. 
+ +_`.gen.ramp.ramping`: The ramp generation is promoted into itself +during ramping mode; + +_`.gen.ramp.after`: after this mode ends, the ramp generation is +promoted into the after-ramp generation as usual. + +_`.gen.ramp.after.once`: Care is taken to +ensure that there is at least one collection where stuff is promoted +from the ramp generation to the after-ramp generation even if ramping +mode is immediately re-entered. + +_`.ramp.mode`: This behaviour is controlled in a slightly convoluted +manner by a state machine. The rampMode field of the pool forms an +important part of the state of the machine. + +There are five states: OUTSIDE, BEGIN, RAMPING, FINISH, and +COLLECTING. These appear in the code as ``RampOUTSIDE`` and so on. + +_`.ramp.state.cycle.usual`: The usual progression of states is a +cycle: OUTSIDE → BEGIN → RAMPING → FINISH → COLLECTING → OUTSIDE. + +_`.ramp.count`: The pool just counts the number of APs that have begun +ramp mode (and not ended). No state changes occur unless this count +goes from 0 to 1 (starting the first ramp) or from 1 to 0 (leaving the +last ramp). In other words, all nested ramps are ignored (see code in +``AMCRampBegin()`` and ``AMCRampEnd()``). + +_`.ramp.state.invariant.count`: In the OUTSIDE state the count must be +zero. In the BEGIN and RAMPING states the count must be greater than +zero. In the FINISH and COLLECTING states the count is not +constrained. + +_`.ramp.state.invariant.forward`: When in OUTSIDE, BEGIN, or +COLLECTING, the ramp generation forwards to the after-ramp generation. +When in RAMPING or FINISH, the ramp generation forwards to itself. + +_`.ramp.outside`: The pool is initially in the OUTSIDE state. The only +transition away from the OUTSIDE state is to the BEGIN state, when a +ramp is entered. + +_`.ramp.begin`: When the count goes up from zero, the state moves from +COLLECTING or OUTSIDE to BEGIN. + +_`.ramp.begin.leave`: We can leave the BEGIN state to either the +OUTSIDE or the RAMPING state. 
+ +_`.ramp.begin.leave.outside`: We go to OUTSIDE if the count drops to 0 +before a collection starts. This shortcuts the usual cycle of states +for small enough ramps. + +_`.ramp.begin.leave.ramping`: We enter the RAMPING state if a +collection starts that condemns the ramp generation (pedantically when +a new GC begins, and a segment in the ramp generation is condemned, we +leave the BEGIN state, see ``amcSegWhiten()``). At this point we +switch the ramp generation to forward to itself +(`.gen.ramp.ramping`_). + +_`.ramp.ramping.leave`: We leave the RAMPING state and go to the +FINISH state when the ramp count goes back to zero. Thus, the FINISH +state indicates that we have started collecting the ramp generation +while inside a ramp which we have subsequently finished. + +_`.ramp.finish.remain`: We remain in the FINISH state until we next +start to collect the ramp generation (condemn it), regardless of +entering or leaving any ramps. This ensures that the ramp generation +will be collected to the after-ramp generation at least once. + +_`.ramp.finish.leave`: When we next condemn the ramp generation, we +move to the COLLECTING state. At this point the forwarding generations +are switched back so that the ramp generation promotes into the +after-ramp generation on this collection. + +_`.ramp.collecting.leave`: We leave the COLLECTING state when the GC +enters reclaim (specifically, when a segment in the ramp generation is +reclaimed), or when we begin another ramp. Ordinarily we enter the +OUTSIDE state, but if the client has started a ramp then we go +directly to the BEGIN state. + +_`.ramp.collect-all` There used to be two flavours of ramps: the +normal one and the collect-all flavour that triggered a full GC after +the ramp end. This was a hack for producing certain Dylan statistics, +and no longer has any effect (the flag is passed to +``AMCRampBegin()``, but ignored there). 
+ + +Headers +------- + +_`.header`: AMC supports a fixed-size header on objects, with the +client pointers pointing after the header, rather than the base of the +memory block. See format documentation for details of the interface. + +_`.header.client`: The code mostly deals in client pointers, only +computing the base and limit of a block when these are needed (such as +when an object is copied). In several places, the code gets a block of +some sort (a segment or a buffer) and creates a client pointer by +adding the header size (``pool->format->headerSize``). + + +Old and aging notes below here +------------------------------ + +``void AMCFinish(Pool pool)`` + +_`.finish.forward`: If the pool is being destroyed it is OK to destroy +the forwarding buffers, as the condemned set is about to disappear. + + +``void amcSegBufferEmpty(Seg seg, Buffer buffer)`` + +_`.flush`: Free the unused part of the buffer to the segment. + +_`.flush.pad`: The segment is padded out with a dummy object so that +it appears full. + +_`.flush.expose`: The segment needs exposing before writing the +padding object onto it. If the segment is being used for forwarding it +might already be exposed, in this case the segment attached to it must +be covered when it leaves the buffer. See `.fill.expose`_. + +_`.flush.cover`: The segment needs covering whether it was being used +for forwarding or not. See `.flush.expose`_. + + +``Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn, Pool pool, Buffer buffer, Size size)`` + +_`.fill`: Reserve was called on an allocation buffer which was reset, +or there wasn't enough room left in the buffer. Allocate a group for +the new object and attach it to the buffer. + +_`.fill.expose`: If the buffer is being used for forwarding it may be +exposed, in which case the group attached to it should be exposed. See +`.flush.cover`_. + + +``Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)`` + +_`.fix`: Fix a reference to an AMC segment. 
+ +Ambiguous references lock down an entire segment by removing it +from old-space and also marking it grey for future scanning. + +Exact, final, and weak references are merged because the action for an +already forwarded object is the same in each case. After that +situation is checked for, the code diverges. + +Weak references are either snapped out or replaced with +``ss->weakSplat`` as appropriate. + +Exact and final references cause the referenced object to be copied to +new-space and the old copy to be forwarded (broken-heart installed) so +that future references are fixed up to point at the new copy. + +_`.fix.exact.expose`: In order to allocate the new copy the forwarding +buffer must be exposed. This might be done more efficiently outside +the entire scan, since it's likely to happen a lot. + +_`.fix.exact.grey`: The new copy must be at least as grey as the old +as it may have been grey for some other collection. + + +``Res amcSegScan(Bool *totalReturn, Seg seg, ScanState ss1)`` + +_`.scan`: Searches for a group which is grey for the trace and scans +it. If there aren't any, it sets the finished flag to true. + + +``void amcSegReclaim(Seg seg, Trace trace)`` + +_`.reclaim`: After a trace, destroy any groups which are still +condemned for the trace, because they must be dead. + +_`.reclaim.grey`: Note that this might delete things which are grey +for other collections. This is OK, because we have conclusively proved +that they are dead -- the other collection must have assumed they were +alive. There might be a problem with the accounting of grey groups, +however. + +_`.reclaim.buf`: If a condemned group still has a buffer attached, we +can't destroy it, even though we know that there are no live objects +there. Even the object the mutator is allocating is dead, because the +buffer is tripped. + + +Document History +---------------- +- 1995-08-25 RB_ Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. 
+ +- 2009-08-11 Richard Kistruck. Fix HTML duplicated anchor names + (caused by auto-conversion to HTML). + +- 2009-08-11 Richard Kistruck. Prepend Guide, using + design/template-with-guide.html. + +- 2009-09-14 Richard Kistruck. Guide covers: seg states; pads; + retained pages. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/poolams.txt b/mps/design/poolams.txt new file mode 100644 index 00000000000..90c391e1026 --- /dev/null +++ b/mps/design/poolams.txt @@ -0,0 +1,523 @@ +.. 
mode: -*- rst -*- + +AMS pool class +============== + +:Tag: design.mps.poolams +:Author: Pekka P. Pirinen +:Date: 1997-08-14 +:Status: draft design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: AMS pool class; design + single: pool class; AMS design + + +Introduction +------------ + +_`.intro`: This is the design of the AMS pool class. + +_`.readership`: MM developers. + +_`.source`: design.mps.buffer_, design.mps.trace_, design.mps.scan_, +design.mps.action and design.mps.pool_ [none of these were +actually used -- pekka 1998-04-21]. No requirements doc [we need a +req.mps that captures the commonalities between the products -- pekka +1998-01-27]. + +.. _design.mps.pool: pool +.. _design.mps.scan: scan +.. _design.mps.trace: trace +.. _design.mps.buffer: buffer + + +Overview +-------- + +_`.overview`: This is the design of the AMS (Automatic Mark-and-Sweep) +pool class. The AMS pool is a proof-of-concept design for a mark-sweep +pool in the MPS. It's not meant to be efficient, but it could serve as +a model for an implementation of a more advanced pool (such as EPVM). + + +Requirements +------------ + +_`.req.mark-sweep`: The pool must use a mark-and-sweep GC algorithm. + +_`.req.colour`: The colour representation should be as efficient as +possible. + +_`.req.incremental`: The pool must support incremental GC. + +_`.req.ambiguous`: The pool must support ambiguous references to +objects in it (but ambiguous references into the middle of an object +do not preserve the object). + +_`.req.format`: The pool must be formatted, for generality. + +_`.req.correct`: The design and the implementation should be simple +enough to be seen to be correct. + +_`.req.simple`: Features not related to mark-and-sweep GC should +initially be implemented as simply as possible, in order to save +development effort. 
+ +_`.not-req.grey`: We haven't figured out how buffers ought to work +with a grey mutator, so we use `.req.correct`_ to allow us to design a +pool that doesn't work in that phase. This is acceptable as long as we +haven't actually implemented grey mutator collection. + + +Architecture +------------ + +Subclassing +........... + +_`.subclass`: Since we expect to have many mark-and-sweep pools, we +build in some protocol for subclasses to modify various aspects of the +behaviour. Notably there's a subclassable segment class, and a +protocol for performing iteration. + + +Allocation +.......... + +_`.align`: We divide the segments in grains, each the size of the +format alignment. _`.alloc-bit-table`: We keep track of allocated +grains using a bit table. This allows a simple implementation of +allocation and freeing using the bit table operators, satisfying +`.req.simple`_, and can simplify the GC routines. Eventually, this +should use some sophisticated allocation technique suitable for +non-moving automatic pools. + +_`.buffer`: We use buffered allocation, satisfying +`.req.incremental`_. The AMC buffer technique is reused, although it +is not suitable for non-moving pools, but req.simple allows us to do +that for now. + +_`.extend`: If there's no space in any existing segment, a new segment +is allocated. The actual class is allowed to decide the size of the +new segment. + +_`.no-alloc`: Do not support ``PoolAlloc()``, because we can't support +one-phase allocation for a scannable pool (unless we disallow +incremental collection). For exact details, see design.mps.buffer_. + +_`.no-free`: Do not support ``PoolFree()``, because automatic pools +don't need explicit free and having it encourages clients to use it +(and therefore to have dangling pointers, double frees, and other +memory management errors.) + + +Colours +....... + +_`.colour`: Objects in a segment which is *not* condemned (for some +trace) take their colour (for this trace) from the segment. 
+ +_`.colour.object`: Since we need to implement a non-copying GC, we +keep track of the colour of each object in a condemned segment +separately. For this, we use bit tables with a bit for each grain. +This format is fast to access, has better locality than mark bits in +the objects themselves, and allows cheap interoperation with the +allocation bit table. + +_`.colour.encoding`: As to the details, we follow +analysis.non-moving-colour(3), implementing both the alloc-white +sharing option described in +analysis.non-moving-colour.constraint.reclaim.white-free-bit and the +vanilla three-table option, because the former cannot work with +interior pointers. However, the colour encoding in both is the same. + +_`.ambiguous.middle`: We will allow ambiguous references into the +middle of an object (as required by `.req.ambiguous`_), using the +trick in analysis.non-moving-colour.interior.ambiguous-only to speed +up scanning. + +_`.interior-pointer`: Note that non-ambiguous interior pointers are +outlawed. + +_`.colour.alloc`: Objects are allocated black. This is the most +efficient alternative for traces in the black mutator phase, and +`.not-req.grey`_ means that's sufficient. + +.. note:: + + Some day, we need to think about allocating grey or white during + the grey mutator phase. + + +Scanning +........ + +_`.scan.segment`: The tracer protocol requires (for segment barrier +hits) that there is a method for scanning a segment and turning all +grey objects on it black. This cannot be achieved with a single +sequential sweep over the segment, since objects that the sweep has +already passed may become grey as later objects are scanned. + +_`.scan.graph`: For a non-moving GC, it is more efficient to trace +along the reference graph than segment by segment. It also allows +passing type information from fix to scan. Currently, the tracer +doesn't offer this option when it's polling for work. 
+ +_`.scan.stack`: Tracing along the reference graph cannot be done by +recursive descent, because we can't guarantee that the stack won't +overflow. We can, however, maintain an explicit stack of things to +trace, and fall back on iterative methods (`.scan.iter`_) when it +overflows and can't be extended. + +_`.scan.iter`: As discussed in `.scan.segment`_, when scanning a +segment, we need to ensure that there are no grey objects in the +segment when the scan method returns. We can do this by iterating a +sequential scan over the segment until nothing is grey (see +`.marked.scan`_ for details). + +_`.scan.iter.only`: Some iterative method is needed as a fallback for +the more advanced methods, and as this is the simplest way of +implementing the current tracer protocol, we will start by +implementing it as the only scanning method. + +_`.scan.buffer`: We do not scan between ScanLimit and Limit of a +buffer (see `.iteration.buffer`_), as usual. + +.. note:: + + design.mps.buffer_ should explain why this works, but doesn't. + Pekka P. Pirinen, 1998-02-11. + +_`.fix.to-black`: When fixing a reference to a white object, if the +segment does not refer to the white set, the object cannot refer to +the white set, and can therefore be marked as black immediately +(rather than grey). + + +Implementation +-------------- + +Colour +...... + +_`.colour.determine`: Following the plan in `.colour`_, if +``SegWhite(seg)`` includes the trace, the colour of an object is given +by the bit tables. Otherwise if ``SegGrey(seg)`` includes the trace, +all the objects are grey. Otherwise all the objects are black. + +_`.colour.bits`: As we only have searches for runs of zero bits, we use +two bit tables, the non-grey and non-white tables, but this is hidden +beneath a layer of macros talking about grey and white in positive +terms. + +_`.colour.single`: We have only implemented a single set of mark and +scan tables, so we can only condemn a segment for one trace at a time. 
+This is checked for in condemnation. If we want to do overlapping +white sets, each trace needs its own set of tables. + +_`.colour.check`: The grey-and-non-white state is illegal, and free +objects must be white as explained in +analysis.non-moving-colour.constraint.reclaim. + + +Iteration +......... + +_`.iteration`: Scan, reclaim and other operations need to iterate over +all objects in a segment. We abstract this into a single iteration +function, even though we no longer use it for reclaiming and rarely +for scanning. + +_`.iteration.buffer`: Iteration skips directly from ScanLimit to Limit +of a buffer. This is because this area may contain +partially-initialized and uninitialized data, which cannot be +processed. Since the iteration skips the buffer, callers need to take +the appropriate action, if any, on it. + +.. note:: + + ScanLimit is used for reasons which are not documented in + design.mps.buffer_. + + +Scanning Algorithm +.................. + +_`.marked`: Each segment has a ``marksChanged`` flag, indicating +whether anything in it has been made grey since the last scan +iteration (`.scan.iter`_) started. This flag only concerns the colour +of objects with respect to the trace for which the segment is +condemned, as this is the only trace for which objects in the segment +are being made grey by fixing. Note that this flag doesn't imply that +there are grey objects in the segment, because the grey objects might +have been subsequently scanned and blackened. + +_`.marked.fix`: The ``marksChanged`` flag is set ``TRUE`` by +``amsSegFix()`` when an object is made grey. + +_`.marked.scan`: ``amsSegScan()`` must blacken all grey objects on the +segment, so it must iterate over the segment until all grey objects +have been seen. Scanning an object in the segment might grey another +one (`.marked.fix`_), so the scanner iterates until this flag is +``FALSE``, setting it to ``FALSE`` before each scan. 
It is safe to +scan the segment even if it contains nothing grey. + +_`.marked.scan.fail`: If the format scanner returns failure (see +protocol.mps.scanning), we abort the scan in the middle of a segment. +So in this case the marksChanged flag is set back to TRUE, because we +may not have blackened all grey objects. + +.. note:: + + Is that the best reference for the format scanner? + +_`.marked.unused`: The ``marksChanged`` flag is meaningless unless the +segment is condemned. We make it ``FALSE`` in these circumstances. + +_`.marked.condemn`: Condemnation makes all objects in a segment either +black or white, leaving nothing grey, so it doesn't need to set the +``marksChanged`` flag which must already be ``FALSE``. + +_`.marked.reclaim`: When a segment is reclaimed, it can contain +nothing marked as grey, so the ``marksChanged`` flag must already be +``FALSE``. + +_`.marked.blacken`: When the tracer decides not to scan, but to call +``SegBlacken()``, we know that any greyness can be removed. +``amsSegBlacken()`` does this and resets the ``marksChanged`` flag, if +it finds that the segment has been condemned. + +_`.marked.clever`: AMS could be clever about not setting the +``marksChanged`` flag, if the fixed object is ahead of the current +scan pointer. It could also keep low- and high-water marks of grey +objects, but we don't need to implement these improvements at first. + + +Allocation +.......... + +_`.buffer-init`: We take one init arg to set the Rank on the buffer, +just to see how it's done. + +_`.no-bit`: As an optimization, we won't use the alloc bit table until +the first reclaim on the segment. Before that, we just keep a +high-water mark. + +_`.fill`: ``AMSBufferFill()`` takes the simplest approach: it iterates +over the segments in the pool, looking for one which can be used to +refill the buffer. 
+ +_`.fill.colour`: The objects allocated from the new buffer must be +black for all traces (`.colour.alloc`_), so putting it on a black +segment (meaning one where neither ``SegWhite(seg)`` nor +``SegGrey(seg)`` include the trace, see `.colour.determine`_) is +obviously OK. White segments (where ``SegWhite(seg)`` includes the +trace) are also fine, as we can use the colour tables to make it +black. At first glance, it seems we can't put it on a segment that is +grey but not white for some trace (one where ``SegWhite(seg)`` doesn't +include the trace, but ``SegGrey(seg)`` does), because the new objects +would become grey as the buffer's ScanLimit advanced. However, in many +configurations, the mutator would hit a barrier as soon as it started +initializing the object, which would flip the buffer. In fact, the +current (2002-01) implementation of buffers assumes buffers are black, +so they'd better. + +_`.fill.colour.reclaim`: In fact, putting a buffer on a condemned +segment will screw up the accounting in ``amsSegReclaim()``, so it's +disallowed. + +_`.fill.slow`: ``AMSBufferFill()`` gets progressively slower as more +segments fill up, as it laboriously checks whether the buffer can be +refilled from each segment, by inspecting the allocation bit map. This +is helped a bit by keeping count of free grains in each segment, but +it still spends a lot of time iterating over all the full segments +checking the free size. Obviously, this can be much improved (we could +keep track of the largest free block in the segment and in the pool, +or we could keep the segments in some more efficient structure, or we +could have a real free list structure). + +_`.fill.extend`: If there's no space in any existing segment, the +``segSize`` method is called to decide the size of the new segment to +allocate. If that fails, the code tries to allocate a segment that's +just large enough to satisfy the request. 
+ +_`.empty`: ``amsSegBufferEmpty()`` makes the unused space free, since +there's no reason not to. We have to adjust the colour tables as well, +since these grains were black and now they need to be white (or at +least encoded -G and W). + +_`.reclaim.empty.buffer`: Segments which after reclaim only contain a +buffer could be destroyed by trapping the buffer, but there's no point +to this. + + +Initialization +.............. + +_`.init`: The initialization method ``AMSInit()`` takes three +additional arguments: the format of objects allocated in the pool, the +chain that controls GC timing, and a flag for supporting ambiguous +references. + +_`.init.share`: If support for ambiguity is required, the +``shareAllocTable`` flag is reset to indicate the pool uses three +separate bit tables, otherwise it is set and the pool shares a table +for non-white and alloc (see `.colour.encoding`_). + +_`.init.align`: The pool alignment is set equal to the format +alignment (see design.mps.align). + +_`.init.internal`: Subclasses call ``AMSInitInternal()`` to avoid the +problems of sharing ``va_list`` and emitting a superfluous +``PoolInitAMS`` event. + + +Condemnation +............ + +_`.condemn.buffer`: Buffers are not condemned, instead they are +coloured black, to make sure that the objects allocated will be black, +following `.colour.alloc`_ (or, if you wish, because buffers are +ignored like free space, so need the same encoding). + + +Reclaim +....... + +_`.reclaim`: Reclaim uses either of +analysis.non-moving-colour.constraint.reclaim.white-free-bit (just +reuse the non-white table as the alloc table) or +analysis.non-moving-colour.constraint.reclaim.free-bit (copy it), +depending on the ``shareAllocTable`` flag (as set by `.init.share`_). +However, bit table still has to be iterated over to count the free +grains. Also, in a debug pool, each white block has to be splatted. + + +Segment merging and splitting +............................. 
+ +_`.split-merge`: We provide methods for splitting and merging AMS +segments. The pool implementation doesn't cause segments to be split +or merged -- but a subclass might want to do this (see +`.stress.split-merge`_). The methods serve as an example of how to +implement this facility. + +_`.split-merge.constrain`: There are some additional constraints on +what segments may be split or merged: + +- _`.split-merge.constrain.align`: Segments may only be split or + merged at an address which is aligned to the pool alignment as well + as to the arena grain size. + + _`.split-merge.constrain.align.justify`: This constraint is implied + by the design of allocation and colour tables, which cannot + represent segments starting at unaligned addresses. The constraint + only arises if the pool alignment is larger than the arena + alignment. There's no requirement to split segments at unaligned + addresses. + +- _`.split-merge.constrain.empty`: The higher segment must be empty. + That is, the higher segment passed to ``SegMerge()`` must be empty, + and the higher segment returned by ``SegSplit()`` must be empty. + + _`.split-merge.constrain.empty.justify`: This constraint makes the + code significantly simpler. There's no requirement for a more + complex solution at the moment (as the purpose is primarily + pedagogic). + +_`.split-merge.fail`: The split and merge methods are not proper +anti-methods for each other (see +design.mps.seg.split-merge.fail.anti.no_). Methods will not reverse the +side-effects of their counterparts if the allocation of the colour and +allocation bit tables should fail. Client methods which over-ride +split and merge should not be written in such a way that they might +detect failure after calling the next method, unless they have reason +to know that the bit table allocations will not fail. + +.. 
_design.mps.seg.split-merge.fail.anti.no: seg#.split-merge.fail.anti.no + + +Testing +------- + +_`.stress`: There's a stress test, MMsrc!amsss.c, that does 800 kB of +allocation, enough for about three GCs. It uses a modified Dylan +format, and checks for corruption by the GC. Both ambiguous and exact +roots are tested. + +_`.stress.split-merge`: There's also a stress test for segment +splitting and merging, MMsrc!segsmss.c. This is similar to amsss.c -- +but it defines a subclass of AMS, and causes segments to be split and +merged. Both buffered and non-buffered segments are split / merged. + + +Notes +----- + +_`.addr-index.slow`: Translating from an address to and from a grain +index in a segment uses macros such as ``AMS_INDEX`` and +``AMS_INDEX_ADDR``. These are slow because they call ``SegBase()`` on +every translation -- we could cache that. + +_`.grey-mutator`: To enforce the restriction set in `.not-req.grey`_ +we check that all the traces are flipped in ``amsSegScan()``. It would +be good to check in ``amsSegFix()`` as well, but we can't do that, +because it's called during the flip, and we can't tell the difference +between the flip and the grey mutator phases with the current tracer +interface. + + +Document History +---------------- + +- 1997-08-14 Nick Barnes. Some notes on the implementation. + +- 1997-08-27 Pekka P. Pirinen. Draft design. Edited on the basis of + review.design.mps.poolams.0, and redesigned the colour + representation (results mostly in analysis.non-moving-colour(0)). + +- 1999-01-04 Pekka P. Pirinen. Described subclassing and allocation + policy. + +- 2002-01-11 Pekka P. Pirinen. New colour encoding scheme. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2002-06-20 NB_ Re-imported from Global Graphics. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _NB: https://www.ravenbrook.com/consultants/nb/ +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/poolawl.txt b/mps/design/poolawl.txt new file mode 100644 index 00000000000..ebad6106ee1 --- /dev/null +++ b/mps/design/poolawl.txt @@ -0,0 +1,524 @@ +.. mode: -*- rst -*- + +AWL pool class +============== + +:Tag: design.mps.poolawl +:Author: drj +:Date: 1997-03-11 +:Status: incomplete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: AWL pool class; design + single: pool class; AWL design + + +Introduction +------------ + +_`.readership`: Any MPS developer. 
+ +_`.intro`: The AWL (Automatic Weak Linked) pool is used to manage +Dylan Weak Tables (see req.dylan.fun.weak). Currently the design is +specialised for Dylan Weak Tables, but it could be generalised in the +future. + + +Requirements +------------ + +See req.dylan.fun.weak. + +See meeting.dylan.1997-02-27(0) where many of the requirements for +this pool were first sorted out. + +Must satisfy request.dylan.170123_. + +.. _request.dylan.170123: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170123 + +_`.req.obj-format`: Only objects of a certain format need be +supported. This format is a subset of the Dylan Object Format. The +pool uses the first slot in the fixed part of an object to store an +association. See `mail.drj.1997-03-11.12-05`_. + +.. _mail.drj.1997-03-11.12-05: https://info.ravenbrook.com/project/mps/mail/1997/03/11/12-05/0.txt + + +Definitions +----------- + +_`.def.grain`: alignment grain, grain. A grain is a range of addresses +where both the base and the limit of the range are aligned and the +size of range is equal to the (same) alignment. In this context the +alignment is the pool's alignment (``pool->alignment``). The grain is +the unit of allocation, marking, scanning, etc. + + +Overview +-------- + +_`.overview`: + +_`.overview.ms`: The pool is mark and sweep. _`.overview.ms.justify`: +Mark-sweep pools are slightly easier to write (than moving pools), and +there are no requirements (yet) that this pool be high performance or +moving or anything like that. + +_`.overview.alloc`: It is possible to allocate weak or exact objects +using the normal reserve/commit AP protocol. +_`.overview.alloc.justify`: Allocation of both weak and exact objects +is required to implement Dylan Weak Tables. Objects are formatted; the +pool uses format A. 
+ +_`.overview.scan`: The pool handles the scanning of weak objects +specially so that when a weak reference is deleted the corresponding +reference in an associated object is deleted. The associated object is +determined by using information stored in the object itself (see +`.req.obj-format`_). + + +Interface +--------- + +_`.if.init`: The init method takes one extra parameter in the vararg +list. This parameter should have type ``Format`` and be a format +object that describes the format of the objects to be allocated in +this pool. The format should support scan and skip methods. There is +an additional restriction on the layout of objects, see +`.req.obj-format`_. + +_`.if.buffer`: The ``BufferInit()`` method takes one extra parameter +in the vararg list. This parameter should be either ``RankEXACT`` or +``RankWEAK``. It determines the rank of the objects allocated using +that buffer. + + +Data structures +--------------- + +_`.sig`: This signature for this pool will be 0x519bla3l (SIGPooLAWL). + +_`.poolstruct`: The class specific pool structure is:: + + struct AWLStruct { + PoolStruct poolStruct; + PoolGenStruct pgenStruct; /* pool generation */ + PoolGen pgen; /* NULL or pointer to pgenStruct */ + Count succAccesses; /* number of successive single accesses */ + FindDependentFunction findDependent; /* to find a dependent object */ + awlStatTotalStruct stats; + Sig sig; /* */ + } + +_`.awlseg`: The pool defines a segment class ``AWLSegClass``, which is +a subclass of ``MutatorSegClass`` (see +design.mps.seg.over.hierarchy.mutatorseg_). 
All segments allocated by +the pool are instances of this class, and are of type ``AWLSeg``, for +which the structure is:: + + struct AWLSegStruct { + GCSegStruct gcSegStruct; /* superclass fields must come first */ + BT mark; + BT scanned; + BT alloc; + Count grains; + Count freeGrains; /* free grains */ + Count bufferedGrains; /* grains in buffers */ + Count newGrains; /* grains allocated since last collection */ + Count oldGrains; /* grains allocated prior to last collection */ + Count singleAccesses; /* number of accesses processed singly */ + awlStatSegStruct stats; + Sig sig; /* */ + } + +.. _design.mps.seg.over.hierarchy.mutatorseg: seg#.over.hierarchy.mutatorseg + +_`.awlseg.bt`: The ``mark``, ``alloc``, and ``scanned`` fields are +bit-tables (see design.mps.bt_). Each bit in the table corresponds to +a single alignment grain in the pool. + +.. _design.mps.bt: bt + +_`.awlseg.mark`: The ``mark`` bit table is used to record mark bits +during a trace. ``awlSegWhiten()`` (see `.fun.whiten`_ below) sets all +the bits of this table to zero. Fix will read and set bits in this +table. Currently there is only one mark bit table. This means that the +pool can only be condemned for one trace. + +_`.awlseg.mark.justify`: This is simple, and can be improved later +when we want to run more than one trace. + +_`.awlseg.scanned`: The ``scanned`` bit-table is used to note which +objects have been scanned. Scanning (see `.fun.scan`_ below) a segment +will find objects that are marked but not scanned, scan each object +found and set the corresponding bits in the scanned table. + +_`.awlseg.alloc`: The ``alloc`` bit table is used to record which +portions of a segment have been allocated. Ranges of bits in this +table are set in ``awlSegBufferFill()`` when a buffer is attached to +the segment. When a buffer is flushed (that is, +``awlSegBufferEmpty()`` is called) from the segment, the bits +corresponding to the unused portion at the end of the buffer are +reset. 
+ +_`.awlseg.alloc.invariant`: A bit is set in the alloc table if and +only if the corresponding address is currently being buffered, or the +corresponding address lies within the range of an allocated object. + +_`.awlseg.grains`: The ``grains`` field is the number of grains that +fit in the segment. Strictly speaking this is not necessary as it can +be computed from ``SegSize`` and AWL's alignment, however, +precalculating it and storing it in the segment makes the code simpler +by avoiding lots of repeated calculations. + +_`.awlseg.freeGrains`: A conservative estimate of the number of free +grains in the segment. It is always guaranteed to be greater than or +equal to the number of free grains in the segment, hence can be used +during allocation to quickly pass over a segment. + +.. note:: + + Maintained by blah and blah. Unfinished obviously. + + +Functions +--------- + +.. note:: + + How will pool collect? It needs an action structure. + +External +........ + +``Res AWLInit(Pool pool, va_list arg)`` + +_`.fun.init`: ``AWLStruct`` has four fields, each one needs initializing. + +_`.fun.init.poolstruct`: The ``poolStruct`` field has already been +initialized by generic code (impl.c.pool). + +_`.fun.init.sig`: The ``sig`` field will be initialized with the +signature for this pool. + +``Res AWLFinish(Pool pool)`` + +_`.fun.finish`: Iterates over all segments in the pool and destroys +each segment (by calling ``SegFree()``). Overwrites the sig field in +the ``AWLStruct``. Finishing the generic pool structure is done by the +generic pool code (impl.c.pool). + +_`.fun.alloc`: ``PoolNoAlloc()`` will be used, as this class does not +implement alloc. + +_`.fun.free`: ``PoolNoFree()`` will be used, as this class does not +implement free. + +``Res AWLBufferFill(Seg *segReturn, Addr *baseReturn, Pool pool, Buffer buffer, Size size)`` + +_`.fun.fill`: This zips round all the segments applying +``SegBufferFill()`` to each segment. 
``awlSegBufferFill()`` attempts +to find a large-enough free range; if it finds one then it may be +bigger than the actual request, in which case the remainder can be +used to "fill" the rest of the buffer. If no free range can be found +in an existing segment then a new segment will be created (which is at +least large enough). The range of buffered addresses is marked as +allocated in the segment's alloc table. + +``Res AWLDescribe(Pool pool, mps_lib_FILE *stream, Count depth)`` + +_`.fun.describe`: + + +Internal +........ + +``Res AWLSegCreate(AWLSeg *awlsegReturn, Size size)`` + +_`.fun.awlsegcreate`: Creates a segment of class ``AWLSegClass`` of size at least ``size``. + +_`.fun.awlsegcreate.size.round`: ``size`` is rounded up to the arena +grain size before requesting the segment. + +_`.fun.awlsegcreate.size.round.justify`: The arena requires that all +segment sizes are rounded up to the arena grain size. + +_`.fun.awlsegcreate.where`: The segment is allocated using a +generation preference, using the generation number stored in the +``AWLStruct`` (the ``gen`` field), see `.poolstruct`_ above. + +``Res awlSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)`` + +_`.fun.awlseginit`: Init method for ``AWLSegClass``, called for +``SegAlloc()`` whenever an ``AWLSeg`` is created (see +`.fun.awlsegcreate`_ above). + +_`.fun.awlseginit.tables`: The segment's mark scanned and alloc tables +(see `.awlseg.bt`_ above) are allocated and initialised. The segment's +grains field is computed and stored. + +``void awlSegFinish(Seg seg)`` + +_`.fun.awlsegfinish`: Finish method for ``AWLSegClass``, called from +``SegFree()``. Will free the segment's tables (see `.awlseg.bt`_). + +``Bool awlSegBufferFill(Addr *baseReturn, Addr *limitReturn, Seg seg, Size size, RankSet rankSet)`` + +_`.fun.seg.buffer-fill`: Searches for a free block in the segment that +is at least ``size`` bytes long. 
The base address of the block is
+returned in ``*baseReturn``, the limit of the entire free block (which
+must be at least as large as ``size`` and may be bigger) is returned
+in ``*limitReturn``. The requested size is converted to a number of
+grains, ``BTFindResRange()`` is called to find a run of this length in
+the alloc bit-table (`.awlseg.alloc`_). The results (if it is
+successful) from ``BTFindResRange()`` are in terms of grains, they are
+converted back to addresses before returning the relevant values from
+this function.
+
+``void awlSegBufferEmpty(Seg seg, Buffer buffer)``
+
+_`.fun.seg.buffer-empty`: Locates the free portion of the buffer, that
+is the memory between the init and the limit of the buffer and records
+these locations as being free in the alloc table.
+
+``Res awlSegWhiten(Seg seg, Trace trace)``
+
+_`.fun.whiten`: The current design only permits each segment to be
+condemned for one trace (see `.awlseg.mark`_). This function checks
+that the segment is not white for any trace (``seg->white ==
+TraceSetEMPTY``). The segment's mark bit-table is reset, and the
+whiteness of the seg (``seg->white``) has the current trace added to
+it.
+
+``void awlSegGreyen(Seg seg, Trace trace)``
+
+_`.fun.grey`: If the segment is not white for this trace, the
+segment's mark table is set to all 1s and the segment is recorded as
+being grey.
+
+``Res awlSegScan(Bool *totalReturn, Seg seg, ScanState ss)``
+
+_`.fun.scan`:
+
+_`.fun.scan.overview`: The scanner performs a number of passes over
+the segment, scanning each marked and unscanned (grey) object that it
+finds.
+
+_`.fun.scan.overview.finish`: It keeps performing passes over the
+segment until it is finished.
+
+_`.fun.scan.overview.finish.condition`: A condition for finishing is
+that no new marks got placed on objects in this segment during the
+pass.
+
+_`.fun.scan.overview.finish.approximation`: We use an even stronger
+condition for finishing that assumes that scanning any object may
+introduce marks onto this segment. It is finished when a pass results
+in scanning no objects (that is, all objects were either unmarked or
+both marked and scanned).
+
+_`.fun.scan.overview.finished-flag`: There is a flag called
+``finished`` which keeps track of whether we should finish or not. We
+only ever finish at the end of a pass. At the beginning of a pass the
+flag is set. During a pass if any objects are scanned then the
+``finished`` flag is reset. At the end of a pass if the ``finished``
+flag is still set then we are finished. No more passes take place and
+the function returns.
+
+_`.fun.scan.pass`: A pass consists of a setup phase and a repeated
+phase.
+
+_`.fun.scan.pass.buffer`: The following assumes that in the general
+case the segment is buffered; if the segment is not buffered then the
+actions that mention buffers are not taken (they are unimportant if
+the segment is not buffered).
+
+_`.fun.scan.pass.p`: The pass uses a cursor called ``p`` to progress
+over the segment. During a pass ``p`` will increase from the base
+address of the segment to the limit address of the segment. When ``p``
+reaches the limit address of the segment, the pass is complete.
+
+_`.fun.scan.pass.setup`: ``p`` initially points to the base address of
+the segment.
+
+_`.fun.scan.pass.repeat`: The following comprises the repeated phase.
+The repeated phase is repeated until the pass completion condition is
+true (that is, ``p`` has reached the limit of the segment, see
+`.fun.scan.pass.p`_ above and `.fun.scan.pass.repeat.complete`_
+below).
+
+_`.fun.scan.pass.repeat.complete`: If ``p`` is equal to the segment's
+limit then we are done. We proceed to check whether any further passes
+need to be performed (see `.fun.scan.pass.more`_ below).
+
+_`.fun.scan.pass.repeat.free`: If ``!alloc(p)`` (the grain is free)
+then increment ``p`` and return to the beginning of the loop.
+
+_`.fun.scan.pass.repeat.buffer`: If ``p`` is equal to the buffer's
+ScanLimit, as returned by ``BufferScanLimit()``, then set ``p`` equal
+to the buffer's Limit, as returned by ``BufferLimit()`` and return to
+the beginning of the loop.
+
+_`.fun.scan.pass.repeat.object-end`: The end of the object is located
+using the ``format->skip`` method.
+
+_`.fun.scan.pass.repeat.object`: if ``mark(p) && !scanned(p)`` then
+the object pointed at is marked but not scanned, which means we must
+scan it, otherwise we must skip it.
+
+_`.fun.scan.pass.repeat.object.dependent`: To scan the object we
+first have to determine if the object has a dependent object (see
+`.req.obj-format`_).
+
+_`.fun.scan.pass.repeat.object.dependent.expose`: If it has a
+dependent object then we must expose the segment that the dependent
+object is on (only if the dependent object actually points to MPS
+managed memory) prior to scanning and cover the segment subsequent to
+scanning.
+
+_`.fun.scan.pass.repeat.object.dependent.summary`: The summary of the
+dependent segment must be set to ``RefSetUNIV`` to reflect the fact
+that we are allowing it to be written to (and we don't know what gets
+written to the segment).
+
+_`.fun.scan.pass.repeat.object.scan`: The object is then scanned by
+calling the format's scan method with base and limit set to the
+beginning and end of the object (_`.fun.scan.scan.improve.single`: A
+scan1 format method would make it slightly simpler here). Then the
+finished flag is cleared and the bit in the segment's scanned table is
+set.
+
+_`.fun.scan.pass.repeat.advance`: ``p`` is advanced past the object
+and we return to the beginning of the loop.
+
+_`.fun.scan.pass.more`: At the end of a pass the finished flag is
+examined.
+
+_`.fun.scan.pass.more.not`: If the finished flag is set then we are
+done (see `.fun.scan.overview.finished-flag`_ above), ``awlSegScan()``
+returns.
+
+_`.fun.scan.pass.more.so`: Otherwise (the finished flag is reset) we
+perform another pass (see `.fun.scan.pass`_ above).
+
+``Res awlSegFix(Seg seg, ScanState ss, Ref *refIO)``
+
+_`.fun.fix`: If the rank (``ss->rank``) is ``RankAMBIG`` then fix
+returns immediately unless the reference is in the segment bounds,
+aligned to the pool alignment, and allocated.
+
+The bit in the marked table corresponding to the referenced grain will
+be read. If it is already marked then fix returns. Otherwise (the
+grain is unmarked), ``ss->wasMarked`` is set to ``FALSE`` (see
+design.mps.fix.was-marked.not_), the remaining actions depend on
+whether the rank (``ss->rank``) is ``RankWEAK`` or not. If the rank is
+weak then the reference is adjusted to 0 (see design.mps.weakness) and
+fix returns. If the rank is something else then the mark bit
+corresponding to the referenced grain is set, and the segment is
+greyed using ``SegSetGrey()``.
+
+.. _design.mps.fix.was-marked.not: fix#.was-marked.not
+
+``void awlSegReclaim(Seg seg, Trace trace)``
+
+_`.fun.reclaim`: This iterates over all allocated objects in the
+segment and frees objects that are not marked. When this iteration is
+complete the marked array is completely reset.
+
+``p`` points to base of segment. Then::
+
+    while(p < SegLimit(seg)) {
+      if(!alloc(p)) { ++p;continue; }
+      q = skip(p) /* q points to just past the object pointed at by p */
+      if !marked(p) free(p, q); /* reset the bits in the alloc table from p to q-1 inclusive. */
+      p = q
+    }
+
+Finally, reset the entire marked array using ``BTResRange()``.
+
+_`.fun.reclaim.improve.pad`: Consider filling free ranges with padding
+objects. Now reclaim doesn't need to check that the objects are
+allocated before skipping them. There may be a corresponding change
+for scan as well.
+ +``Bool AWLDependentObject(Addr *objReturn, Addr parent)`` + +_`.fun.dependent-object`: This function abstracts the association +between an object and its linked dependent (see `.req.obj-format`_). +It currently assumes that objects are Dylan Object formatted according +to design.dylan.container (see analysis.mps.poolawl.dependent.abstract +for suggested improvements). An object has a dependent object iff the +second word of the object, that is, ``((Word *)parent)[1]``, is +non-``NULL``. The dependent object is the object referenced by the +second word and must be a valid object. + +This function assumes objects are in Dylan Object Format (see +design.dylan.container). It will check that the first word looks like +a Dylan wrapper pointer. It will check that the wrapper indicates that +the wrapper has a reasonable format (namely at least one fixed field). +If the second word is ``NULL`` it will return ``FALSE``. If the second +word is non-``NULL`` then the contents of it will be assigned to +``*objReturn``, and it will return ``TRUE``. + + +Test +---- + +- must create Dylan objects. +- must create Dylan vectors with at least one fixed field. +- must allocate weak thingies. +- must allocate exact tables. +- must link tables together. +- must populate tables with junk. +- some junk must die. + +Use an LO pool and an AWL pool. Three buffers. One buffer for the LO +pool, one exact buffer for the AWL pool, one weak buffer for the AWL +pool. + +Initial test will allocate one object from each buffer and then +destroy all buffers and pools and exit + + +Document History +---------------- + +- 1997-03-11 David Jones. Incomplete document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/poollo.txt b/mps/design/poollo.txt new file mode 100644 index 00000000000..bd1fb52a6c5 --- /dev/null +++ b/mps/design/poollo.txt @@ -0,0 +1,283 @@ +.. mode: -*- rst -*- + +LO pool class +============= + +:Tag: design.mps.poollo +:Author: David Jones +:Date: 1997-03-07 +:Status: incomplete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: LO pool class; design + single: pool class; LO design + + +Introduction +------------ + +_`.readership`: Any MPS developer. + +_`.intro`: The LO (Leaf Object) pool class is a pool class developed +for DylanWorks. It is designed to manage objects that have no +references (leaf objects) such as strings, bit tables, etc. 
It is a
+garbage collected pool (in that objects allocated in the pool are
+automatically reclaimed when they are discovered to be unreachable).
+
+.. note::
+
+    Need to sort out issue of alignment. Currently lo grabs alignment
+    from format, almost certainly "ought" to use the greater of the
+    format alignment and the ``MPS_ALIGN`` value. David Jones,
+    1997-07-02.
+
+
+Definitions
+-----------
+
+_`.def.leaf`: A "leaf" object is an object that contains no
+references, or an object all of whose references refer to roots. That
+is, any references that the object has must refer to a priori alive
+objects that are guaranteed not to move, hence the references do not
+need fixing.
+
+_`.def.grain`: A grain (of some alignment) is a contiguous aligned
+area of memory of the smallest size possible (which is the same size
+as the alignment).
+
+
+Requirements
+------------
+
+_`.req.source`: See req.dylan.fun.obj.alloc and
+req.dylan.prot.ffi.access.
+
+_`.req.leaf`: The pool must manage formatted leaf objects (see
+`.def.leaf`_ above for a definition). This is intended to encompass
+Dylan and C leaf objects. Dylan leaf objects have a reference to their
+wrapper, but are still leaf objects (in the sense of `.def.leaf`_)
+because the wrapper will be a root.
+
+_`.req.nofault`: The memory containing objects managed by the pool
+must not be protected. The client must be allowed to access these
+objects without hitting an MPS barrier.
+
+
+Overview
+--------
+
+_`.overview`:
+
+_`.overview.ms`: The LO Pool is a non-moving mark-and-sweep collector.
+
+_`.overview.ms.justify`: Mark-and-sweep pools are simpler than moving
+pools.
+
+_`.overview.alloc`: Objects are allocated in the pool using the
+reserve/commit protocol on allocation points.
+
+_`.overview.format`: The pool is formatted.
The format of the objects +in the pool is specified at instantiation time, using a format object +derived from a variant A format (using variant A is overkill, see +`.if.init`_ below) (see design.mps.format for excuse about calling the +variant 'A'). + + +Interface +--------- + +_`.if.init`: + +_`.if.init.args`: The init method for this class takes one extra +parameter in the vararg parameter list. + +_`.if.init.format`: The extra parameter should be an object of type +Format and should describe the format of the objects that are to be +allocated in the pool. + +_`.if.init.format.use`: The pool uses the skip and alignment slots of +the format. The skip method is used to determine the length of objects +(during reclaim). The alignment field is used to determine the +granularity at which memory should be managed. + +_`.if.init.format.a`: Currently only format variant A is supported +though clearly that is overkill as only skip and alignment are used. + + +Data structures +--------------- + +_`.sig`: The signature for the LO Pool Class is 0x51970b07 +(SIGLOPOoL). + +_`.poolstruct`: The class specific pool structure is:: + + typedef struct LOStruct { + PoolStruct poolStruct; /* generic pool structure */ + PoolGenStruct pgenStruct; /* pool generation */ + PoolGen pgen; /* NULL or pointer to pgenStruct */ + Sig sig; /* */ + } LOStruct; + +_`.loseg`: Every segment is an instance of segment class +``LOSegClass``, a subclass of ``MutatorSegClass`` (see +design.mps.seg.over.hierarchy.mutatorseg_), and is an object of type +``LOSegStruct``. + +.. _design.mps.seg.over.hierarchy.mutatorseg: seg#.over.hierarchy.mutatorseg + +_`.loseg.purpose`: The purpose of the ``LOSeg`` structure is to +associate the bit tables used for recording allocation and mark +information with the segment. 
+ +_`.loseg.decl`: The declaration of the structure is as follows:: + + typedef struct LOSegStruct { + GCSegStruct gcSegStruct; /* superclass fields must come first */ + BT mark; /* mark bit table */ + BT alloc; /* alloc bit table */ + Count freeGrains; /* free grains */ + Count bufferedGrains; /* grains in buffers */ + Count newGrains; /* grains allocated since last collection */ + Count oldGrains; /* grains allocated prior to last collection */ + Sig sig; /* */ + } LOSegStruct; + +_`.loseg.sig`: The signature for a loseg is 0x519705E9 (SIGLOSEG). + +_`.loseg.lo`: The lo field points to the LO structure that owns this +segment. + +_`.loseg.bit`: Bit Tables (see design.mps.bt_) are used to record +allocation and mark information. This is relatively straightforward, +but might be inefficient in terms of space in some circumstances. + +.. _design.mps.bt: bt + +_`.loseg.mark`: This is a Bit Table that is used to mark objects +during a trace. Each grain in the segment is associated with 1 bit in +this table. When ``loSegFix()`` (see `.fun.fix`_ below) is called the +address is converted to a grain within the segment and the +corresponding bit in this table is set. + +_`.loseg.alloc`: This is a Bit Table that is used to record which +addresses are allocated. Addresses that are allocated and are not +buffered have their corresponding bit in this table set. If a bit in +this table is reset then either the address is free or is being +buffered. + +_`.loseg.diagram`: The following diagram is now obsolete. It's also +not very interesting - but I've left the sources in case anyone ever +gets around to updating it. tony 1999-12-16 + +[missing diagram] + + +Functions +--------- + +External +........ + +_`.fun.init`: + +_`.fun.destroy`: + +_`.fun.buffer-fill`: + +.. note:: + + Explain way in which buffers interact with the alloc table and how + it could be improved. + +_`.fun.buffer-empty`: + +_`.fun.condemn`: + + +Internal +........ 
+
+``Res loSegFix(Seg seg, ScanState ss, Ref *refIO)``
+
+_`.fun.fix`: Fix treats references of most ranks much the same. There
+is one mark table that records all marks. A reference of rank
+``RankAMBIG`` is first checked to see if it is aligned to the pool
+alignment and discarded if not. The reference is converted to a grain
+number within the segment (by subtracting the segment's base from the
+reference and then dividing by the grain size). The bit (the one
+corresponding to the grain number) is set in the mark table.
+Exception, for a weak reference (rank is ``RankWEAK``) the mark table
+is checked and the reference is fixed to 0 if this address has not
+been marked; otherwise nothing happens. Note that there is no check
+that the reference refers to a valid object boundary (which wouldn't
+be a valid check in the case of ambiguous references anyway).
+
+``void loSegReclaim(Seg seg, Trace trace)``
+
+_`.fun.segreclaim`: For all the contiguous allocated regions in the
+segment it locates the boundaries of all the objects in that region by
+repeatedly skipping (by calling ``format->skip``) from the beginning
+of the region (the beginning of the region is guaranteed to coincide
+with the beginning of an object). For each object it examines the bit
+in the mark bit table that corresponds to the beginning of the object.
+If that bit is set then the object has been marked as a result of a
+previous call to ``loSegFix()``, the object is preserved by doing
+nothing. If that bit is not set then the object has not been marked
+and should be reclaimed; the object is reclaimed by resetting the
+appropriate range of bits in the segment's free bit table.
+
+.. note::
+
+    Special things happen for buffered segments.
+
+    Explain how the marked variable is used to free segments.
+
+
+Attachment
+----------
+
+[missing attachment "LOGROUP.CWK"]
+
+
+Document History
+----------------
+
+- 1997-03-07 David Jones. Incomplete document. 
+ +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/poolmfs.txt b/mps/design/poolmfs.txt new file mode 100644 index 00000000000..0cd9440395e --- /dev/null +++ b/mps/design/poolmfs.txt @@ -0,0 +1,98 @@ +.. mode: -*- rst -*- + +MFS pool class +============== + +:Tag: design.mps.poolmfs +:Author: Richard Brooksby +:Date: 1996-11-07 +:Status: Incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. 
+:Index terms: + pair: MFS pool class; design + single: pool class; MFS design + + +Overview +-------- + +MFS stands for "Manual Fixed Small". The MFS pool class manages +objects that are of a fixed size. It is intended to only manage small +objects efficiently. Storage is recycled manually by the client +programmer. + +A particular instance of an MFS Pool can manage objects only of a +single size, but different instances can manage objects of different +sizes. The size of object that an instance can manage is declared when +the instance is created. + + +Implementation +-------------- + +_`.impl.extents`: MFS operates in a very simple manner: each extent +allocated from the arena is divided into units. + +_`.impl.free-units`: Free units are kept on a linked list using a +header stored in the unit itself. The linked list is not ordered; +allocation and deallocation simply pop and push from the head of the +list. This is fast, but successive allocations might have poor +locality if previous successive frees did. + +_`.impl.extent-ring`: The list of extents belonging to the pool is +maintained as a ring with a node at the start of each extent. + +_`.impl.extent-ring.justify`: Storing the linked list of free nodes +and the extent ring node in the managed memory is against the general +principle of the MPS design, which keeps its management structures +away from client memory. However, the MFS pool is used during the +bootstrapping process (see design.mps.bootstrap.land.sol.pool_) and so +has no other memory pools available for storage. + +.. _design.mps.bootstrap.land.sol.pool: bootstrap#.land.sol.pool + + +Document History +---------------- + +- 1996-11-07 RB_ Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +- 2016-03-18 RB_ Moved design text from leader comment of poolmfs.c. + Explained chaining of extents using an embedded ring node. + +.. 
_RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/poolmrg.txt b/mps/design/poolmrg.txt new file mode 100644 index 00000000000..a92e4d519d3 --- /dev/null +++ b/mps/design/poolmrg.txt @@ -0,0 +1,703 @@ +.. mode: -*- rst -*- + +MRG pool class +============== + +:Tag: design.mps.poolmrg +:Author: David Jones +:Date: 1997-02-03 +:Status: incomplete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. 
+:Index terms: + pair: MRG pool class; design + single: pool class; MRG design + + +Introduction +------------ + +_`.readership`: Any MPS developer. + +_`.intro`: This is the design of the MRG (Manual Rank Guardian) pool +class. The MRG pool class is part of the MPS. The MRG pool class is +internal to the MPS (has no client interface) and is used to implement +finalization. + +_`.source`: Some of the techniques in paper.dbe93 ("Guardians in a +Generation-Based Garbage Collector") were used in this design. Some +analysis of this design (including various improvements and some more +in-depth justification) is in analysis.mps.poolmrg. That document +should be understood before changing this document. It is also helpful +to look at design.mps.finalize_ and design.mps.message_. + +.. _design.mps.message: message +.. _design.mps.finalize: finalize + + +Goals +----- + +_`.goal.final`: The MRG pool class should support all +requirements pertaining to finalization. + + +Requirements +------------ + +_`.req`: We have only one requirement pertaining to finalization: + +_`.req.dylan.fun.finalization`: Support the Dylan language-level +implementation of finalized objects: objects are registered, and are +finalized in random order when they would otherwise have died. Cycles +are broken at random places. There is no guarantee of promptness. + +_`.req.general`: However, finalization is a very common piece of +functionality that is provided by (sophisticated) memory managers, so +we can expect other clients to request this sort of functionality. + +_`.anti-req`: Is it required that the MRG pool class return +unused segments to the arena? MFS, for example, does not do this. MRG +will not do this in its initial implementation. + + +Terminology +----------- + +_`.def.mrg`: **MRG**: The MRG pool class's identifier will be MRG. +This stands for "Manual Rank Guardian". The pool is manually managed +and implements guardians for references of a particular rank +(currently just final). 
+ +_`.def.final.ref`: **final reference**: A reference of rank final (see +design.mps.type.rank). + +_`.def.final.object`: **finalizable object**: An object is finalizable +with respect to a final reference if, since the creation of that +reference, there was a point in time when no references to the object +of lower (that is, stronger) rank were reachable from a root. + +_`.def.final.object.note`: Note that this means an object can be +finalizable even if it is now reachable from the root via exact +references. + +_`.def.finalize`: **finalize**: To finalize an object is to notify the +client that the object is finalizable. The client is presumed to be +interested in this information (typically it will apply some method to +the object). + +_`.def.guardian`: **guardian**: An object allocated in the MRG +Pool. A guardian contains exactly one final reference, and some fields +for the pool's internal use. Guardians are used to implement a +finalization mechanism. + + +Overview +-------- + +_`.over`: The MRG pool class is a pool class in the MPS. It is +intended to provide the functionality of "finalization". + +_`.over.internal`: The MRG pool class is internal to the MPM: it +is not intended to have a client interface. Clients are expected to +access the functionality provided by this pool (finalization) using a +separate MPS finalization interface (design.mps.finalize_). + +.. _design.mps.finalize: finalize + +_`.over.one-size`: The MRG pool class manages objects of a single +size, each object containing a single reference of rank final. + +_`.over.one-size.justify`: This is all that is necessary to meet our +requirements for finalization. Whenever an object is registered for +finalization, it is sufficient to create a single reference of rank +final to it. + +_`.over.queue`: A pool maintains a list of live guardian objects, +called (for historical reasons) the "entry" list. 
+ +_`.over.queue.free`: The pool also maintains a list of free guardian +objects called the "free" list. + +_`.over.queue.exit.not`: There used to be an "exit" list, but this is +now historical and there shouldn't be any current references to it. + +_`.over.alloc`: When guardians are allocated, they are placed on the +entry list. Guardians on the entry list refer to objects that have not +yet been shown to be finalizable (either the object has references of +lower rank than final to it, or the MPS has not yet got round to +determining that the object is finalizable). + +_`.over.message.create`: When a guardian is discovered to refer to a +finalizable object it is removed from the entry list and becomes a +message on the arena's messages queue. + +_`.over.message.deliver`: When the MPS client receives the message the +message system arranges for the message to be destroyed and the pool +reclaims the storage associated with the guardian/message. + +_`.over.scan`: When the pool is scanned at rank final each reference +will be fixed. If the reference is to an unmarked object (before the +fix), then the object must now be finalizable. In this case the +containing guardian will be removed from the entry list and posted as +a message. + +_`.over.scan.justify`: The scanning process is a crucial step +necessary for implementing finalization. It is the means by which the +MPS detects that objects are finalizable. + +_`.over.message`: ``PoolClassMRG`` implements a ``MessageClass`` (see +design.mps.message_). All the messages are of one ``MessageType``. This +type is ``MessageTypeFINALIZATION``. Messages are created when objects +are discovered to be finalizable and destroyed when the MPS client has +received the message. + +.. _design.mps.message: message + +_`.over.message.justify`: Messages provide a means for the MPS to +communicate with its client. Notification of finalization is just such +a communication. 
Messages allow the MPS to inform the client of +finalization events when it is convenient for the MPS to do so (i.e. +not in PageFault context). + +_`.over.manual`: Objects in the MRG pool are manually managed. + +_`.over.manual.alloc`: They are allocated by ``ArenaFinalize()`` when +objects are registered for finalization. + +_`.over.manual.free`: They are freed when the associated message is +destroyed. + +_`.over.manual.justify`: The lifetime of a guardian object is very +easy to determine so manual memory management is appropriate. + + +Protocols +--------- + +Object Registration +................... + +_`.protocol.register`: There is a protocol by which objects can be +registered for finalization. This protocol is handled by the arena +module on behalf of finalization. see +design.mps.finalize.int.finalize_. + +.. _design.mps.finalize.int.finalize: finalize#.int.finalize + + +Finalizer execution +................... + +_`.protocol.finalizer`: If an object is proven to be finalizable then +a message to this effect will eventually be posted. A client can +receive the message, determine what to do about it, and do it. +Typically this would involve calling the finalization method for the +object, and deleting the message. Once the message is deleted, the +object may become recyclable. + + +Setup / destroy +............... + +_`.protocol.life`: An instance of PoolClassMRG is needed in order to +support finalization, it is called the "final" pool and is attached to +the arena (see design.mps.finalize.int.arena.struct_). + +.. _design.mps.finalize.int.arena.struct: finalize#.int.arena.struct + +_`.protocol.life.birth`: The final pool is created lazily by +``ArenaFinalize()``. + +_`.protocol.life.death`: The final pool is destroyed during +``ArenaDestroy()``. 
+ + +Data structures +--------------- + +_`.guardian`: The guardian + +_`.guardian.over`: A guardian is an object used to manage the +references and other data structures that are used by the pool in +order to keep track of which objects are registered for finalization, +which ones have been finalized, and so on. + +_`.guardian.state`: A guardian can be in one of four states: + +_`.guardian.state.enum`: The states are Free, Prefinal, Final, +PostFinal (referred to as MRGGuardianFree, etc. in the +implementation). + +#. _`.guardian.state.free`: The guardian is free, meaning that it is + on the free list for the pool and available for allocation. + +#. _`.guardian.state.prefinal`: The guardian is allocated, and refers + to an object that has not yet been discovered to be finalizable. It + is on the entry list for the pool. + +#. _`.guardian.state.final`: The guardian is allocated, and refers to + an object that has been shown to be finalizable; this state + corresponds to the existence of a message. + +#. _`.guardian.state.postfinal`: This state is only used briefly and + is entirely internal to the pool; the guardian enters this state + just after the associated message has been destroyed (which happens + when the client receives the message) and will be freed immediately + (whereupon it will enter the Free state). This state is used for + checking only (so that MRGFree can check that only guardians in + this state are being freed). + +_`.guardian.life-cycle`: Guardians go through the following state life-cycle: Free ⟶ Prefinal ⟶ Final ⟶ Postfinal ⟶ Free. + +_`.guardian.two-part`: A guardian is a structure consisting abstractly +of a link part and a reference part. Concretely, the link part is a +``LinkPartStruct``, and the reference part is a ``RefPartStruct`` +(which is just a ``Word``). The link part is used by the pool, the +reference part forms the object visible to clients of the pool. 
The +reference part is the reference of ``RankFINAL`` that refers to +objects registered for finalization and is how the MPS detects +finalizable objects. + +_`.guardian.two-part.union`: The ``LinkPartStruct`` is a discriminated +union of a ``RingStruct`` and a ``MessageStruct``. The ``RingStruct`` +is used when the guardian is either Free or Prefinal. The +MessageStruct is used when the guardian is Final. Neither part of the +union is used when the guardian is in the Postfinal state. + +_`.guardian.two-part.justify`: This may seem a little profligate with +space, but this is okay as we are not required to make finalization +extremely space efficient. + +_`.guardian.parts.separate`: The two parts will be stored in separate +segments. + +_`.guardian.parts.separate.justify`: This is so that the data +structures the pool uses to manage the objects can be separated from +the objects themselves. This avoids the pool having to manipulate data +structures that are on shielded segments +(analysis.mps.poolmrg.hazard.shield). + +_`.guardian.assoc`: Ref part number *n* (from the beginning of the +segment) in one segment will correspond with link part number *n* in +another segment. The association between the two segments will be +managed by the additional fields in pool-specific segment subclasses +(see `.mrgseg`_). + +_`.guardian.ref`: Guardians that are either Prefinal or Final are live +and have valid references (possibly ``NULL``) in their ref parts. +Guardians that are free are dead and always have ``NULL`` in their ref +parts (see `.free.overwrite`_ and `.scan.free`_). + +_`.guardian.ref.free`: When freeing an object, it is a pointer to the +reference part that will be passed (internally in the pool). + +_`.guardian.init`: Guardians are initialized when the pool is grown +(`.alloc.grow`_). The initial state has the ref part ``NULL`` and the +link part is attached to the free ring. Freeing an object returns a +guardian to its initial state. 
+ +_`.poolstruct`: The Pool structure, ``MRGStruct`` will have: + +- _`.poolstruct.entry`: the head of the entry list. + +- _`.poolstruct.free`: the head of the free list. + +- _`.poolstruct.rings`: The entry list, the exit list, and the free + list will each be implemented as a ``Ring``. Each ring will be + maintained using the link part of the guardian. + + _`.poolstruct.rings.justify`: This is because rings are convenient to + use and are well tested. It is possible to implement all three lists + using a singly linked list, but the saving is certainly not worth + making at this stage. + +- _`.poolstruct.refring`: a ring of "ref" segments in use for links or + messages (see .mrgseg.ref.mrgring below). + +- _`.poolstruct.extend`: a precalculated ``extendBy`` field (see + `.init.extend`_). This value is used to determine how large a + segment should be requested from the arena for the reference part + segment when the pool needs to grow (see `.alloc.grow.size`_). + + _`.poolstruct.extend.justify`: Calculating a reasonable value for this + once and remembering it simplifies the allocation (`.alloc.grow`_). + +_`.poolstruct.init`: poolstructs are initialized once for each pool +instance by ``MRGInit()`` (`.init`_). The initial state has all the +rings initialized to singleton rings, and the ``extendBy`` field +initialized to some value (see `.init.extend`_). + +_`.mrgseg`: The pool defines two segment subclasses: +``MRGRefSegClass`` and ``MRGLinkSegClass``. Segments of the former +class will be used to store the ref parts of guardians, segments of +the latter will be used to store the link parts of guardians (see +`.guardian.two-part`_). Segments are always allocated in pairs, with +one of each class, by the function ``MRGSegPairCreate()``. Each +segment contains a link to its pair. + +_`.mrgseg.ref`: ``MRGRefSegClass`` is a subclass of ``MutatorSegClass``. 
+Instances are of type ``MRGRefSeg``, and contain: + +- _`.mrgseg.ref.mrgring`: a field for the ring of ref part segments in + the pool. + +- _`.mrgseg.ref.linkseg`: a pointer to the paired link segment. + +- _`.mrgseg.ref.grey`: a set describing the greyness of the segment for each trace. + +_`.mrgseg.ref.init`: A segment is created and initialized once every +time the pool is grown (`.alloc.grow`_). The initial state has the +segment ring node initialized and attached to the pool's segment ring, +the linkseg field points to the relevant link segment, the grey field +is initialized such that the segment is not grey for all traces. + +_`.mrgseg.link`: ``MRGLinkSegClass`` is a subclass of ``SegClass``. +Instances are of type ``MRGLinkSeg``, and contain: + +- _`.mrgseg.link.refseg`: a pointer to the paired ref segment. This + may be ``NULL`` during initialization, while the pairing is being + established. + +- _`.mrgseg.link.init`: The initial state has the ``linkseg`` field + pointing to the relevant ref segment. + + +Functions +--------- + +``Bool MRGCheck(MRG mrg)`` + +_`.check`: Check the signatures, the class, and each field of the +``MRGStruct``. Each field is checked as being appropriate for its +type. + +_`.check.justify`: There are no non-trivial invariants that can +be easily checked. + +``Res MRGRegister(Pool pool, Ref ref)`` + +_`.alloc`: Add a guardian for ``ref``. + +_`.alloc.grow`: If the free list is empty then two new segments are +allocated and the free list filled up from them (note that the +reference fields of the new guardians will need to be overwritten with +``NULL``, see `.free.overwrite`_) + +_`.alloc.grow.size`: The size of the reference part segment will be +the pool's ``extendBy`` (`.poolstruct.extend`_) value. The link part +segment will be whatever size is necessary to accommodate *N* link +parts, where *N* is the number of reference parts that fit in the +reference part segment. 
+ +_`.alloc.error`: If any of the requests for more resource (there are +two; one for each of two segments) fail then the successful requests +will be retracted and the result code from the failing request will be +returned. + +_`.alloc.pop`: ``MRGRegister()`` pops a ring node off the free list, +and add it to the entry list. + +``Res MRGDeregister(Pool pool, Ref obj)`` + +_`.free`: Remove the guardian from the message queue and add it to the +free list. + +_`.free.push`: The guardian will simply be added to the front of the +free list (that is, no keeping the free list in address order or +anything like that). + +_`.free.inadequate`: No attempt will be made to return unused free +segments to the arena (although see +analysis.mps.poolmrg.improve.free.* for suggestions). + +_`.free.overwrite`: ``MRGDeregister()`` also writes over the reference +with ``NULL``. _`.free.overwrite.justify`: This is so that when the +segment is subsequently scanned (`.scan.free`_), the reference that +used to be in the object is not accidentally fixed. + +``Res MRGInit(Pool pool, ArgList args)`` + +_`.init`: Initializes the entry list, the free ring, the ref ring, and +the ``extendBy`` field. + +_`.init.extend`: The ``extendBy`` field is initialized to the arena +grain size. + +_`.init.extend.justify`: This is adequate as the pool is not expected +to grow very quickly. + +``void MRGFinish(Pool pool)`` + +_`.finish`: Iterate over all the segments, returning all the segments +to the arena. + +``Res mrgRefSegScan(Bool *totalReturn, Pool pool, Seg seg, ScanState ss)`` + +_`.scan`: ``mrgRefSegScan()`` scans a segment of guardians. + +_`.scan.trivial`: Scan will do nothing (that is, return immediately) +if the tracing rank is anything other than final. + +.. note:: + + This optimization is missing. impl.c.trace.scan.conservative is + not a problem because there are no faults on these segs, because + there are no references into them. But that's why ``TraceScan()`` + can't do it. Pekka P. 
Pirinen, 1997-09-19. + +_`.scan.trivial.justify`: If the rank is lower than final then +scanning is detrimental, it will only delay finalization. If the rank +is higher than final there is nothing to do, the pool only contains +final references. + +_`.scan.guardians`: ``mrgRefSegScan()`` will iterate over all +guardians in the segment. Every guardian's reference will be fixed +(_`.scan.free`: note that guardians that are on the free list have +``NULL`` in their reference part). + +_`.scan.wasold`: If the object referred to had not been fixed +previously (that is, was unmarked) then the object is not referenced +by a reference of a lower rank (than ``RankFINAL``) and hence is +finalizable. + +_`.scan.finalize`: The guardian will be finalized. This entails moving +the guardian from state Prefinal to Final; it is removed from the +entry list and initialized as a message and posted on the arena's +message queue. + +_`.scan.finalize.idempotent`: In fact this will only happen if the +guardian has not already been finalized (which is determined by +examining the state of the guardian). + +_`.scan.unordered`: Because scanning occurs a segment at a time, the +order in which objects are finalized is "random" (it cannot be +predicted by considering only the references between objects +registered for finalization). See +analysis.mps.poolmrg.improve.semantics for how this can be improved. + +_`.scan.unordered.justify`: Unordered finalization is all that is +required. + +See analysis.mps.poolmrg.improve.scan.nomove for a suggested +improvement that avoids redundant unlinking and relinking. + +``Res MRGDescribe(Pool pool, mps_lib_FILE *stream, Count depth)`` + +_`.describe`: Describes an MRG pool. Iterates along each of the entry +and exit lists and prints the guardians in each. The location of the +guardian and the value of the reference in it will be printed out. +Provided for debugging only. 
+ + +Transgressions +-------------- + +_`.trans.no-finish`: The MRG pool does not trouble itself to tidy up +its internal rings properly when being destroyed. + +_`.trans.free-seg`: No attempt is made to release free segments to the +arena. A suggested strategy for this is as follows: + +- Add a count of free guardians to each segment, and maintain it in + appropriate places. + +- Add a free segment ring to the pool. + +- In ``mrgRefSegScan()``, if the segment is entirely free, don't scan + it, but instead detach its links from the free ring, and move the + segment to the free segment ring. + +- At some appropriate point, such as the end of ``MRGAlloc()``, + destroy free segments. + +- In ``MRGAlloc()``, if there are no free guardians, check the free + segment ring before creating a new pair of segments. Note that this + algorithm would give some slight measure of segment hysteresis. It + is not the place of the pool to support general segment hysteresis. + + +Future +------ + +_`.future.array`: In future, for speed or simplicity, this pool could +be rewritten to use an array. See `mail.gavinm.1997-09-04.13-08`_. + +.. _mail.gavinm.1997-09-04.13-08: https://info.ravenbrook.com/project/mps/mail/1997/09/04/13-08/0.txt + + +Tests +----- + +.. note:: + + This section is utterly out of date. Pekka P. Pirinen, 1997-09-19. + +_`.test`: The test impl.c.finalcv is similar to the weakness test (see +design.mps.weakness, impl.c.weakcv). + + +Functionality +............. + +This is the functionality to be tested: + +- _`.fun.alloc`: Can allocate objects. +- _`.fun.free`: Can free objects that were allocated. +- _`.prot.write`: Can write a reference into an allocated object. +- _`.prot.read`: Can read the reference from an allocated object. +- _`.promise.faithful`: A reference stored in an allocated object will + continue to refer to the same object. +- _`.promise.live`: A reference stored in an allocated object will + preserve the object referred to. 
+- _`.promise.unreachable`: Any objects referred to in finalization + messages are not (at the time of reading the message) reachable via + a chain of ambiguous or exact references. (we will not be able to + test this at first as there is no messaging interface) +- _`.promise.try`: The pool will make a "good faith" effort to + finalize objects that are not reachable via a chain of ambiguous or + exact references. + +Attributes +.......... + +The following attributes will be tested: + +- _`.attr.none`: There are no attribute requirements. + +Implementation +.............. + +The test will simply allocate a number of objects in the AMC pool and +finalize each one, throwing away the reference to the objects. Churn. + +_`.test.mpm`: The test will use the MPM interface (impl.h.mpm). + +_`.test.mpm.justify`: This is because it is not intended to provide an +MPS interface to this pool directly, and the MPS interface to +finalization has not been written yet (impl.h.mps). + +_`.test.mpm.change`: Later on it may use the MPS interface, in which +case, where the following text refers to allocating objects in the MRG +pool it will need adjusting. + +_`.test.two-pools`: The test will use two pools, an AMC pool, and an +MRG pool. + +_`.test.alloc`: A number of objects will be allocated in the MRG pool. + +_`.test.free`: They will then be freed. This will test `.fun.alloc`_ +and `.fun.free`_, although not very much. + +_`.test.rw.a`: An object, "A", will be allocated in the AMC pool, a +reference to it will be kept in a root. + +_`.test.rw.alloc`: A number of objects will be allocated in the MRG +pool. + +_`.test.rw.write`: A reference to "A" will be written into each +object. + +_`.test.rw.read`: The reference in each object will be read and +checked to see if it refers to "A". + +_`.test.rw.free`: All the objects will be freed. + +_`.test.rw.drop`: The reference to "A" will be dropped. This will test +`.prot.write`_ and `.prot.read`_. 
+
+_`.test.promise.fl.alloc`: A number of objects will be allocated in
+the AMC pool.
+
+_`.test.promise.fl.tag`: Each object will be tagged uniquely.
+
+_`.test.promise.fl.refer`: a reference to it will be stored in an
+object allocated in the MRG pool.
+
+_`.test.promise.fl.churn`: A large amount of garbage will be allocated
+in the AMC pool. Regularly, whilst this garbage is being allocated, a
+check will be performed that all the objects allocated in the MRG pool
+refer to valid objects and that they still refer to the same objects.
+All objects from the MRG pool will then be freed (thus dropping all
+references to the AMC objects). This will test `.promise.faithful`_
+and `.promise.live`_.
+
+_`.test.promise.ut.alloc`: A number of objects will be allocated in
+the AMC pool.
+
+_`.test.promise.ut.refer`: Each object will be referred to by a root
+and also referred to by an object allocated in the MRG pool.
+
+_`.test.promise.ut.drop`: References to a random selection of the
+objects from the AMC pool will be deleted from the root.
+
+_`.test.promise.ut.churn`: A large amount of garbage will be allocated
+in the AMC pool.
+
+_`.test.promise.ut.message`: The message interface will be used to
+receive finalization messages.
+
+_`.test.promise.ut.final.check`: For each finalization message
+received it will check that the object referenced in the message is
+not referred to in the root.
+
+_`.test.promise.ut.nofinal.check`: After some amount of garbage has
+been allocated it will check to see if any objects are not in the root
+and haven't been finalized. This will test `.promise.unreachable`_ and
+`.promise.try`_.
+
+
+Notes
+-----
+
+_`.access.inadequate`: ``SegAccess()`` will scan segments at
+``RankEXACT``. Really it should be scanned at whatever the minimum rank
+of all grey segments is (the trace rank phase), however there is no
+way to find this out.
As a consequence we will sometimes scan pages at +``RankEXACT`` when the pages could have been scanned at ``RankFINAL``. +This means that finalization of some objects may sometimes get +delayed. + + +Document History +---------------- + +- 1997-02-03 David Jones. Incomplete document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText; some modernization + (``MRGAlloc()`` and ``MRGFree()`` are now ``MRGRegister()`` and + ``MRGDeregister()`` respectively; write "list" for "queue"). + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/poolmvff.txt b/mps/design/poolmvff.txt new file mode 100644 index 00000000000..255b332d5eb --- /dev/null +++ b/mps/design/poolmvff.txt @@ -0,0 +1,142 @@ +.. mode: -*- rst -*- + +MVFF pool class +=============== + +:Tag: design.mps.poolmvff +:Author: Gavin Matthews +:Date: 1998-09-09 +:Organization: Harlequin +:Status: incomplete doc +:Revision: $Id$ +:Index terms: + pair: MVFF pool class; design + single: pool class; MVFF design + + +Introduction +------------ + +_`.intro`: This is the design of the MVFF (Manual Variable First-Fit) +pool class. This pool implements a first (or last) fit policy for +variable-sized manually-managed objects, with control over first/last, +segment preference high/low, and slot fit low/high. + +_`.background`: The pool was created in a response to a belief that +the ScriptWorks EPDL/EPDR's first fit policy is beneficial for some +classes of client behaviour, but the performance of a linear free list +was unacceptable. + + +Overview +-------- + +_`.over`: This pool implements certain variants of the address-ordered +first-fit policy. The implementation allows allocation across segment +boundaries. + +_`.over.buffer`: Buffered allocation is also supported, but in that +case, the buffer-filling policy is worst-fit. Buffered and unbuffered +allocation can be used at the same time, but in that case, the first +ap must be created before any allocations. 
+ +_`.over.buffer.class`: The pool uses the simplest buffer class, +``BufferClass``. This is appropriate since these buffers don't attach +to segments, and hence don't constrain buffered regions to lie within +segment boundaries. + + +Methods +------- + +_`.method.buffer`: The buffer methods implement a worst-fit fill +strategy. + + +Implementation +-------------- + +_`.impl.alloc_list`: The pool stores the address ranges that it has +acquired from the arena in a CBS (see design.mps.cbs_). + +_`.impl.free-list`: The pool stores its free list in a CBS (see +design.mps.cbs_), failing over in emergencies to a Freelist (see +design.mps.freelist_) when the CBS cannot allocate new control +structures. This is the reason for the alignment restriction above. + +.. _design.mps.cbs: cbs +.. _design.mps.freelist: freelist + + +Details +------- + +_`.design.acquire-size`: When acquiring memory from the arena, we use +``extendBy`` as the unit of allocation unless the object won't fit, in +which case we use the object size (in both cases we align up to the +arena alignment). + +_`.design.acquire-fail`: If allocating ``extendBy``, we try again with +an aligned size just large enough for the object we're allocating. +This is in response to request.mps.170186_. + +.. _request.mps.170186: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/mps/170186 + + +Document History +---------------- + +- 1998-09-09 Gavin Matthews. Wrote a list of methods and function plus + some notes. + +- 1999-01-06 Pekka P. Pirinen. Added overview, removed bogus + ``ArenaEnter`` design, and described buffered allocation. + +- Modified for the "Sunset On Segments" redesign of segments. Buffered + allocation is no longer limited to segment boundaries. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-10 RB_ Converted to reStructuredText. Updated to document + keyword arguments, replacing varargs. 
+ +- 2013-06-04 GDR_ The CBS module no longer maintains its own emergency + list, so MVFF handles the fail-over from its CBS to a Freelist. + +- 2014-04-15 GDR_ The address ranges acquired from the arena are now + stored in a CBS; segments are no longer used for this purpose. + +- 2014-06-12 GDR_ Remove public interface documentation (this is in + the reference manual). + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/poolmvt.txt b/mps/design/poolmvt.txt new file mode 100644 index 00000000000..24ec214ccb4 --- /dev/null +++ b/mps/design/poolmvt.txt @@ -0,0 +1,1040 @@ +.. mode: -*- rst -*- + + +Manual Variable Temporal (MVT) pool design +========================================== + +:Tag: design.mps.poolmvt +:Author: P T Withington +:Date: 1998-02-13 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See section `C. Copyright and License`_. +:Index terms: + pair: MVT pool class; design + single: pool class; MVT design + + +Introduction +------------ + +_`.intro`: This is a second-generation design for a pool that manually +manages variable-sized objects. It is intended as a replacement for +poolmv (except in its control pool role) and poolepdl, and it is +intended to satisfy the requirements of the Dylan "misc" pool and the +product malloc/new drop-in replacement. + +_`.readership`: MM developers + +_`.source`: req.dylan(6), req.epcore(16), req.product(2) + +_`.background`: design.mps.poolmv, design.mps.poolepdl(0), +design.product.soft.drop(0), paper.wil95(1), paper.vo96(0), +paper.grun92(1), paper.beck82(0), `mail.ptw.1998-02-25.22-18`_. + +.. _mail.ptw.1998-02-25.22-18: https://info.ravenbrook.com/project/mps/mail/1998/02/25/22-18/0.txt + + +Definitions +----------- + +_`.def.alignment`: Alignment is a constraint on an object's address, +typically to be a power of 2 (see also, glossary.alignment ) + +_`.def.bit-map`: A bitmap is a boolean-valued vector (see also, +glossary.bitmap ). + +_`.def.block`: A block is a contiguous extent of memory. In this +document, block is used to mean a contiguous extent of memory managed +by the pool for the pool client, typically a subset of a segment +(compare with `.def.segment`_). + +_`.def.cartesian-tree`: A cartesian tree is a binary tree ordered by +two keys (paper.stephenson83(0)). 
+ +_`.def.crossing-map`: A mechanism that supports finding the start of +an object from any address within the object, typically only required +on untagged architectures (see also, glossary.crossing.map ). + +_`.def.footer`: A block of descriptive information describing and +immediately following another block of memory (see also +`.def.header`_). + +_`.def.fragmentation`: Fragmented memory is memory reserved to the +program but not usable by the program because of the arrangement of +memory already in use (see also, glossary.fragmentation ). + +_`.def.header`: A block of descriptive information describing and +immediately preceding another block of memory (see also, +glossary.in-band.header ). + +_`.def.in-band`: From "in band signalling", when descriptive +information about a data structure is stored in the data structure +itself (see also, glossary.in-band.header ). + +_`.def.out-of-band`: When descriptive information about a data +structure is stored separately from the structure itself (see also, +glossary.out-of-band.header ). + +_`.def.refcount`: A refcount is a count of the number of users of an +object (see also, glossary.reference.count ). + +_`.def.segment`: A segment is a contiguous extent of memory. In this +document, segment is used to mean a contiguous extent of memory +managed by the MPS arena (design.mps.arena_) and subdivided by the +pool to provide blocks (see `.def.block`_) to its clients. + +.. _design.mps.arena: arena + +_`.def.splay-tree`: A splay tree is a self-adjusting binary tree +(paper.st85(0), paper.sleator96(0)). + +_`.def.splinter`: A splinter is a fragment of memory that is too small +to be useful (see also, glossary.splinter ) + +_`.def.subblock`: A subblock is a contiguous extent of memory. In this +document, subblock is used to mean a contiguous extent of memory +manage by the client for its own use, typically a subset of a block +(compare with `.def.block`_). 
+ + +Abbreviations +------------- + +_`.abbr.abq`: ABQ = Available Block Queue + +_`.abbr.ap`: AP = Allocation Point + +_`.abbr.cbs`: CBS = Coalescing Block Structure + +_`.abbr.mps`: MPS = Memory Pool System + +_`.abbr.ps`: PS = PostScript + + + +Overview +-------- + +_`.overview`: MVT is intended to satisfy the requirements of the +clients that need manual-variable pools, improving on the performance +of the existing manual-variable pool implementations, and reducing the +duplication of code that currently exists. The expected clients of MVT +are: Dylan (currently for its misc pool), EP (particularly the dl +pool, but all pools other than the PS object pool), and Product +(initially the malloc/new pool, but also other manual pool classes). + + +Requirements +------------ + +_`.req.cat`: Requirements are categorized per guide.req(2). + +_`.req.risk`: req.epcore(16) is known to be obsolete, but the revised +document has not yet been accepted. + + +Critical requirements +..................... + +_`.req.fun.man-var`: The pool class must support manual allocation and +freeing of variable-sized blocks (source: req.dylan.fun.misc.alloc, +req.epcore.fun.{dl,gen,tmp,stat,cache,trap}.{alloc,free}, +req.product.fun.{malloc,new,man.man}). + +_`.non-req.fun.gc`: There is not a requirement that the pool class +support formatted objects, scanning, or collection objects; but it +should not be arbitrarily precluded. + +_`.req.fun.align`: The pool class must support aligned allocations to +client-specified alignments. An individual instance need only support +a single alignment; multiple instances may be used to support more +than one alignment (source: req.epcore.attr.align). + +_`.req.fun.reallocate`: The pool class must support resizing of +allocated blocks (source req.epcore.fun.dl.promise.free, +req.product.dc.env.{ansi-c,cpp}). + +_`.non-req.fun.reallocate.in-place`: There is not a requirement blocks +must be resized in place (where possible); but it seems like a good +idea. 
+
+_`.req.fun.thread`: Each instance of the pool class must support
+multiple threads of allocation (source req.epcore.fun.dl.multi,
+req.product.dc.env.{ansi-c,cpp}).
+
+_`.req.attr.performance`: The pool class must meet or exceed
+performance of "competitive" allocators (source:
+req.epcore.attr.{run-time,tp}, req.product.attr.{mkt.eval, perform}).
+[Dylan does not seem to have any requirement that storage be allocated
+with a particular response time or throughput, just so long as we
+don't block for too long. Clearly there is a missing requirement.]
+
+_`.req.attr.performance.time`: By inference, the time overhead must be
+competitive.
+
+_`.req.attr.performance.space`: By inference, the space overhead must
+be competitive.
+
+_`.req.attr.reliability`: The pool class must have "rock-solid
+reliability" (source: req.dylan.attr.rel.mtbf, req.epcore.attr.rel,
+req.product.attr.rel).
+
+_`.req.fun.range`: The pool class must be able to manage blocks
+ranging in size from 1 byte to all of addressable memory
+(req.epcore.attr.{dl,gen,tmp,stat,cache,trap}.obj.{min,max}). The range
+requirement may be satisfied by multiple instances each managing a
+particular client-specified subrange of sizes. [Dylan has requirements
+req.dylan.attr.{capacity,obj.max}, but no requirement that such
+objects reside in a manual pool.]
+
+_`.req.fun.debug`: The pool class must support debugging erroneous
+usage by client programs (source: req.epcore.fun.{dc.variety,
+debug.support}, req.product.attr.{mkt.eval,perform}). Debugging is
+permitted to incur additional overhead.
+
+_`.req.fun.debug.boundaries`: The pool class must support checking for
+accesses outside the boundaries of live objects.
+
+_`.req.fun.debug.log`: The pool class must support logging of all
+allocations and deallocations.
+
+_`.req.fun.debug.enumerate`: The pool class must support examining all
+allocated objects.
+ +_`.req.fun.debug.free`: The pool class must support detecting +incorrect, overlapping, and double frees. + +_`.req.fun.tolerant`: The pool class must support tolerance of +erroneous usage (source req.product.attr.use.level.1). + + +Essential requirements +...................... + +_`.req.fun.profile`: The pool class should support memory usage +profiling (source: req.product.attr.{mkt.eval, perform}). + +_`.req.attr.flex`: The pool class should be flexible so that it can be +tuned to specific allocation and freeing patterns (source: +req.product.attr.flex,req.epcore.attr.{dl,cache,trap}.typ). The +flexibility requirement may be satisfied by multiple instances each +optimizing a specific pattern. + +_`.req.attr.adapt`: The pool class should be adaptive so that it can +accommodate changing allocation and freeing patterns (source: +req.epcore.fun.{tmp,stat}.policy, +req.product.attr.{mkt.eval,perform}). + + +Nice requirements +................. + +_`.req.fun.suballocate`: The pool class may support freeing of any +aligned, contiguous subset of an allocated block (source +req.epcore.fun.dl.free.any, req.product.attr.{mkt.eval,perform}). + + +Architecture +------------ + +_`.arch.overview`: The pool has several layers: client allocation is +by Allocation Points (APs). + +_`.arch.overview.ap`: APs acquire storage from the pool +available-block queue (ABQ). + +_`.arch.overview.abq`: The ABQ holds blocks of a minimum configurable +size: "reuse size". + +_`.arch.overview.storage`: The ABQ acquires storage from the arena, +and from its internal free block managers. + +_`.arch.overview.storage.contiguous`: The arena storage is requested +to be contiguous to maximize opportunities for coalescing (Loci will +be used when available). + +_`.arch.overview.cbs`: The free block managers hold blocks freed by +the client until, through coalescing, they have reached the reuse +size, at which point they are made available on the ABQ. 
+ +_`.arch.ap`: The pool will use allocation points as the allocation +interface to the client. + +_`.arch.ap.two-phase`: Allocation points will request blocks from the +pool and suballocate those blocks (using the existing AP, compare and +increment, 2-phase mechanism) to satisfy client requests. + +_`.arch.ap.fill`: The pool will have a configurable "fill size" that +will be the preferred size block used to fill the allocation point. + +_`.arch.ap.fill.size`: The fill size should be chosen to amortize the +cost of refill over a number of typical reserve/commit operations, but +not so large as to exceed the typical object population of the pool. + +_`.arch.ap.no-fit`: When an allocation does not fit in the remaining +space of the allocation point, there may be a remaining fragment. + +_`.arch.ap.no-fit.sawdust`: If the fragment is below a configurable +threshold (minimum size), it will be left unused (but returned to the +free block managers so it will be reclaimed when adjacent objects are +freed). + +_`.arch.ap.no-fit.splinter`: otherwise, the remaining fragment will be +(effectively) returned to the head of the available-block queue, so +that it will be used as soon as possible (that is, by objects of similar +birthdate). + +_`.arch.ap.no-fit.oversize`: If the requested allocation exceeds the +fill size it is treated exceptionally (this may indicate the client +has either misconfigured or misused the pool and should either change +the pool configuration or create a separate pool for these exceptional +objects for best performance). + +_`.arch.ap.no-fit.oversize.policy`: Oversize blocks are assumed to +have exceptional lifetimes, hence are allocated to one side and do not +participate in the normal storage recycling of the pool. + +_`.arch.ap.refill.overhead`: If reuse size is small, or becomes small +due to `.arch.adapt`_, all allocations will effectively be treated +exceptionally (the AP will trip and a oldest-fit block will be chosen +on each allocation). 
This mode will be within a constant factor in
+overhead of an unbuffered pool.
+
+_`.arch.abq`: The available block queue holds blocks that have
+coalesced sufficiently to reach reuse size.
+
+_`.arch.abq.reuse.size`: A multiple of the quantum of virtual memory
+is used as the reuse size (`.analysis.policy.size`_).
+
+_`.arch.abq.fifo`: It is a FIFO queue (recently coalesced blocks go to
+the tail of the queue, blocks are taken from the head of the queue for
+reuse).
+
+_`.arch.abq.delay-reuse`: By thus delaying reuse, coalescing
+opportunities are greater.
+
+_`.arch.abq.high-water`: It has a configurable high water mark, which
+when reached will cause blocks at the head of the queue to be returned
+to the arena, rather than reused.
+
+_`.arch.abq.return`: When the MPS supports it, the pool will be able
+to return free blocks from the ABQ to the arena on demand.
+
+_`.arch.abq.return.segment`: `.arch.abq.return`_ can be guaranteed to
+be able to return a segment by setting reuse size to twice the size of
+the segments the pool requests from the arena.
+
+_`.arch.cbs`: The coalescing block structure holds blocks that have
+been freed by the client.
+
+_`.arch.cbs.optimize`: The data structure is optimized for coalescing.
+
+_`.arch.cbs.abq`: When a block reaches reuse size, it is added to the
+ABQ.
+
+_`.arch.cbs.data-structure`: The data structures are organized so that
+a block can be both in the free block managers and on the ABQ
+simultaneously to permit additional coalescing, up until the time the
+block is removed from the ABQ and assigned to an AP.
+
+_`.arch.fragmentation.internal`: The pool will request large segments
+from the arena to minimize the internal fragmentation due to objects
+not crossing segment boundaries.
+
+_`.arch.modular`: The architecture will be modular, to allow building
+variations on the pool by assembling different parts.
+ +_`.arch.modular.example`: For example, it should be possible to build +pools with any of the freelist mechanisms, with in-band or out-of-band +storage (where applicable), that do or do not support derived object +descriptions, etc. + +_`.arch.modular.initial`: The initial architecture will use +`.sol.mech.free-list`_ for the free block managers, +`.sol.mech.storage.out-of-band`_, `.sol.mech.desc.derived`_, and +`.sol.mech.allocate.buffer`_. + +_`.arch.segregate`: The architecture will support segregated +allocation through the use of multiple allocation points. The client +will choose the appropriate allocation point either at run time, or +when possible, at compile time. + +_`.arch.segregate.initial`: The initial architecture will segregate +allocations into two classes: large and small. This will be +implemented by creating two pools with different parameters. + +_`.arch.segregate.initial.choice`: The initial architecture will +provide glue code to choose which pool to allocate from at run time. +If possible this glue code will be written in a way that a good +compiler can optimize the selection of pool at compile time. +Eventually this glue code should be subsumed by the client or +generated automatically by a tool. + +_`.arch.debug`: Debugging features such as tags, fenceposts, types, +creators will be implemented in a layer above the pool and APs. A +generic pool debugging interface will be developed to support +debugging in this outer layer. + +_`.arch.debug.initial`: The initial architecture will have counters +for objects/bytes allocated/freed and support for detecting +overlapping frees. + +_`.arch.dependency.loci`: The architecture depends on the arena being +able to efficiently provide segments of varying sizes without +excessive fragmentation. The locus mechanism should satisfy this +dependency. (See `.analysis.strategy.risk`_.) 
+
+_`.arch.dependency.mfs`: The architecture internal data structures
+depend on efficient manual management of small, fixed-sized objects (2
+different sizes). The MFS pool should satisfy this dependency.
+
+_`.arch.contingency`: Since the strategy we propose is new, it may not
+work.
+
+_`.arch.contingency.pathological`: In particular, pathological
+allocation patterns could result in fragmentation such that no blocks
+recycle from the free block managers to the ABQ.
+
+_`.arch.contingency.fallback`: As a fallback, there will be a pool
+creation parameter for a high water mark for the free space.
+
+_`.arch.contingency.fragmentation-limit`: When the free space as a
+percentage of all the memory managed by the pool (a measure of
+fragmentation) reaches that high water mark, the free block managers
+will be searched oldest-fit before requesting additional segments from
+the arena.
+
+_`.arch.contingency.alternative`: We also plan to implement
+`.sol.mech.free-list.cartesian-tree`_ as an alternative free block
+manager, which would permit more efficient searching of the free
+blocks.
+
+_`.arch.parameters`: The architecture supports several parameters so
+that multiple pools may be instantiated and tuned to support different
+object cohorts. The important parameters are: reuse size, minimum
+size, fill size, ABQ high water mark, free block fragmentation limit
+(see `.arch.contingency.fragmentation-limit`_).
+
+_`.arch.parameters.client-visible`: The client-visible parameters of
+the pool are the minimum object size, the mean object size, the
+maximum object size, the reserve depth and fragmentation limit. The
+minimum object size determines when a splinter is kept on the head of
+the ABQ (`.arch.ap.no-fit.splinter`_). The maximum object size
+determines the fill size (`.arch.ap.fill.size`_) and hence when a
+block is allocated exceptionally (`.arch.ap.no-fit.oversize`_). The
+mean object size is the most likely object size.
The reserve depth is +a measure of the hysteresis of the object population. The mean object +size, reserve depth and, maximum object size are used to determine the +size of the ABQ (`.arch.abq.high-water`_). The fragmentation limit is +used to determine when contingency mode is used to satisfy an +allocation request (`.arch.contingency`_). + +_`.arch.adapt`: We believe that an important adaptation to explore is +tying the reuse size inversely to the fragmentation (as measured in +`.arch.contingency.fragmentation-limit`_). + +_`.arch.adapt.reuse`: By setting reuse size low when fragmentation is +high, smaller blocks will be available for reuse, so fragmentation +should diminish. + +_`.arch.adapt.overhead`: This will result in higher overhead as the AP +will need to be refilled more often, so reuse size should be raised +again as fragmentation diminishes. + +_`.arch.adapt.oldest-fit`: In the limit, if reuse size goes to zero, +the pool will implement a "oldest-fit" policy: the oldest free block +of sufficient size will be used for each allocation. + +_`.arch.adapt.risk`: This adaptation is an experimental policy and +should not be delivered to clients until thoroughly tested. + + +Analysis +-------- + +_`.analysis.discard`: We have discarded many traditional solutions based +on experience and analysis in paper.wil95(1). In particular, managing +the free list as a linear list arranged by address or size and basing +policy on searching such a linear list in a particular direction, from +a particular starting point, using fit and/or immediacy as criteria. +We believe that none of these solutions is derived from considering +the root of the problem to be solved (as described in +`.analysis.strategy`_), although their behavior as analyzed by Wilson +gives several insights. + +_`.analysis.strategy`: For any program to run in the minimum required +memory (with minimal overhead -- we discard solutions such as +compression for now), fragmentation must be eliminated. 
To eliminate +fragmentation, simply place blocks in memory so that they die "in +order" and can be immediately coalesced. This ideal is not achievable, +but we believe we can find object attributes that correlate with +deathtime and exploit them to approximate the ideal. Initially we +believe birth time and type (as approximated by size) will be useful +attributes to explore. + +_`.analysis.strategy.perform`: To meet `.req.attr.performance`_, the +implementation of `.sol.strategy`_ must be competitive in both time +and space. + +_`.analysis.strategy.risk`: The current MPS segment substrate can cause +internal fragmentation which an individual pool can do nothing about. +We expect that request.epcore.170193.sugg.loci_ will be implemented to +remove this risk. + +.. _`request.epcore.170193.sugg.loci`: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/epcore/170193/ + +_`.analysis.policy`: Deferred coalescing, when taken to the extreme will +not minimize the memory consumption of a program, as no memory would +ever be reused. Eager reuse appears to lead to more fragmentation, +whereas delayed reuse appears to reduce fragmentation +(paper.wil95(1)). The systems studied by Wilson did not directly +address deferring reuse. Our proposed policy is to reuse blocks when +they reach a (configurable) size. We believe that this policy along +with the policy of segregating allocations by death time, will greatly +reduce fragmentation. + +_`.analysis.policy.risk`: This policy could lead to pathological behavior +if allocations cannot be successfully segregated. + +_`.analysis.policy.allocate.segregate`: This policy has some similarities +to CustomAlloc (paper.grun92(1)). CustomAlloc segregates objects by +size classes, and then within those classes chooses a different +allocator depending on whether that size class has a stable or +unstable population. 
Classes with stable population recycle storage +within the class, whereas classes with unstable populations return +their storage to the general allocation pool for possible reuse by +another class. CustomAlloc, however, requires profiling the +application and tuning the allocator according to those profiles. +Although we intend to support such tuning, we do not want to require +it. + +_`.analysis.policy.reallocate`: For reallocation, `.req.fun.suballocate`_ +can be used to free the remainder if a block is made smaller. Doing so +will cause the freed block to obey `.sol.policy.allocate`_ (that is, +the freed block will not be treated specially, it will be subject to +the normal policy on reuse). Copying can be used if a block is made +larger. paper.vo96(0) reports success in over-allocating a block the +first time it is resized larger, presumably because blocks that are +resized once tend to be resized again and over-allocating may avoid a +subsequent copy. If each object that will be reallocated can be given +its own allocation point until its final reallocation, the allocation +point can be used to hold released or spare storage. + +_`.analysis.policy.size`: We believe that this will take advantage of the +underlying virtual memory system's ability to compact the physical +memory footprint of the program by discarding free fragments that +align with the virtual memory quantum. (In a VM system one can +approximate compaction by sparse mapping. If every other page of a +segment is unused, the unused pages can be unmapped, freeing up +physical memory that can be mapped to a new contiguous vm range.) + +_`.analysis.mech.free-list`: The literature (paper.grun92(1), +paper.vo96(0)) indicate that `.sol.mech.free-list.cartesian-tree`_ +provides a space-efficient implementation at some cost in speed. +`.sol.mech.free-list.splay-tree`_ is faster but less space-efficient. +`.sol.mech.free-list.bitmap`_ is unstudied. 
Many of the faster +allocators maintain caches of free blocks by size to speed allocation +of "popular" sizes. We intend to initially explore not doing so, as we +believe that policy ultimately leads to fragmentation by mixing +objects of varying death times. Instead we intend to use a free list +mechanism to support fast coalescing, deferring reuse of blocks until +a minimum size has been reached. + +_`.analysis.mech.allocate.optimize-small`: Wilson (paper.wil95(1)) notes +that small blocks typically have short lifetimes and that overall +performance is improved if you optimize the management of small +blocks, e.g., `.sol.mech.allocate.lookup-table`_ for all small blocks. +We believe that `.sol.mech.allocate.buffer`_ does exactly that. + +_`.analysis.mech.allocate.optimize-new`: Wilson (paper.wil95(1)) reports +some benefit from "preserving wilderness", that is, when a block of +memory must be requested from the system to satisfy an allocation, +only the minimum amount of that block is used, the remainder is +preserved (effectively by putting it at the tail of the free list). +This mechanism may or may not implement `.sol.policy.allocate`_. We +believe a better mechanism is to choose to preserve or not, based on +`.sol.policy.allocate`_. + + +Ideas +----- + +_`.sol`: Many solution ideas for manual management of variable-sized +memory blocks are enumerated by paper.wil95(1). Here we list the most +promising, and some of our own. + + +Strategy +........ + +_`.sol.strategy`: To run a program in the minimal required memory, +with minimal overhead, utilize memory efficiently. Memory becomes +unusable when fragmented. Strategy is to minimize fragmentation. So +place blocks where they won't cause fragmentation later. + +_`.sol.strategy.death`: Objects that will die together (in time) +should be allocated together (in space); thus they will coalesce, +reducing fragmentation. 
+ +_`.sol.strategy.death.birth`: Assume objects allocated near each other +in time will have similar deathtimes (paper.beck82(0)). + +_`.sol.strategy.death.type`: Assume objects of different type may have +different deathtimes, even if born together. + +_`.sol.strategy.death.predict`: Find and use program features to predict deathtimes. + +_`.sol.strategy.reallocate`: Reallocation implies rebirth, or at least +a change in lifetime + +_`.sol.strategy.debug`: As much of the debugging functionality as +possible should be implemented as a generally available MPS utility; +the pool will provide support for debugging that would be expensive or +impossible to allocate outside the pool. + + +Policy +...... + +Policy is an implementable decision procedure, hopefully approximating +the strategy. + +_`.sol.policy.reuse`: Defer reusing blocks, to encourage coalescing. + +_`.sol.policy.split`: When a block is split to satisfy an allocation, +use the remainder as soon as possible. + +_`.sol.policy.size`: Prevent `.sol.policy.reuse`_ from consuming all +of memory by choosing a (coalesced) block for reuse when it reaches a +minimum size. + +_`.sol.policy.size.fixed`: Use the quantum of virtual memory (e.g., +one page) as minimum size. + +_`.sol.policy.size.tune`: Allow tuning minimum size. + +_`.sol.policy.size.adapt`: Adaptively change minimum size. + +_`.sol.policy.allocate`: Allocate objects with similar birthdate and +lifetime together. + +_`.sol.policy.allocate.segregate`: Segregate allocations by type. + +_`.sol.policy.allocate.segregate.size`: Use size as a substitute for type. + +_`.sol.policy.allocate.segregate.tune`: Permit tuning of segregation. + +_`.sol.policy.allocate.segregate.adapt`: Adaptively segregate allocations. + +_`.sol.policy.reallocate`: Implement reallocation in a central +mechanism outside of the pool, create a generic pool interface in +support of same. + +_`.sol.policy.debug`: Implement a pool debugging interface. 
+
+_`.sol.policy.debug.counters`: Implement debugging counters in the
+pool that are queried with a generic interface.
+
+_`.sol.policy.debug.verify`: Implement debugging error returns on
+overlapping frees.
+
+
+Mechanism
+.........
+
+Mechanisms are algorithms or data structures used to implement policy.
+
+_`.sol.mech.free-list`: Mechanisms that can be used to describe the
+free list.
+
+_`.sol.mech.free-list.cartesian-tree`: Using address and size as keys
+supports fast coalescing of adjacent blocks and fast searching for
+optimal-sized blocks. Unfortunately, because the shape of the tree is
+constrained by the second key, it can become unbalanced. This data
+structure is used in the SunOS 4.1 malloc (paper.grun92(1)).
+
+_`.sol.mech.free-list.splay-tree`: The amortized cost of a splay tree
+is competitive with balanced binary trees in the worst case, but can
+be significantly better for regular patterns of access because
+recently-accessed keys are moved to the root of the tree and hence can
+be re-accessed quickly. This data structure is used in the System Vr4
+malloc (paper.vo96(0)). (For a complete analysis of the splay tree
+algorithm time bounds see paper.st85(0).)
+
+_`.sol.mech.free-list.bitmap`: Using address as an index and
+fixed-size blocks, the booleans can represent whether a block is free
+or not. Adjacent blocks can be used to construct larger blocks.
+Efficient algorithms for searching for runs in a vector are known.
+This data structure is used in many file system disk block managers.
+
+_`.sol.mech.free-list.refcount`: A count of the number of allocated
+but not freed subblocks of a block can be used to determine when a
+block is available for reuse. This is an extremely compact data
+structure, but does not support subblock reuse.
+
+_`.sol.mech.free-list.hybrid`: Bitmaps appear suited particularly to
+managing small, contiguous blocks. The tree structures appear suited
+particularly to managing varying-sized, discontiguous blocks. 
A +refcount can be very efficient if objects can be placed accurately +according to death time. A hybrid mechanism may offer better +performance for a wider range of situations. + +_`.sol.mech.free-list.linked`: An address-ordered singly linked free +list using space in each free block to store the block's size and a +pointer to the next block. + +_`.sol.mech.storage`: Methods that can be used to store the free list description. + +_`.sol.mech.storage.in-band`: The tree data structures (and +`.sol.mech.free-list.linked`_) are amenable to being stored in the +free blocks themselves, minimizing the space overhead of management. +To do so imposes a minimum size on free blocks and reduces the +locality of the data structure. + +_`.sol.mech.storage.out-of-band`: The bit-map data structure must be +stored separately. + +_`.sol.mech.desc`: for an allocated block to be freed, its base and +bound must be known + +_`.sol.mech.desc.derived`: Most clients can supply the base of the +block. Some clients can supply the bound. + +_`.sol.mech.desc.in-band`: When the bound cannot be supplied, it can +be stored as an in-band "header". If neither the base nor bound can be +supplied (e.g., the client may only have an interior pointer to the +block), a header and footer may be required. + +_`.sol.mech.desc.out-of-band`: In un-tagged architectures, it may be +necessary to store the header and footer out-of-band to distinguish +them from client data. Out-of-band storage can improve locality and +reliability. Any of the free-list structures can also be used to +describe allocated blocks out-of-band. + +_`.sol.mech.desc.crossing-map`: An alternative for untagged +architectures is to store a "crossing map" which records an encoding +of the start of objects and then store the descriptive information +in-band. + +_`.sol.mech.allocate`: Mechanisms that can be used to allocate blocks +(these typically sit on top of a more general free-list manager). 
+ +_`.sol.mech.allocate.lookup-table`: Use a table of popular sizes to +cache free blocks of those sizes. + +_`.sol.mech.allocate.buffer`: Allocate from contiguous blocks using +compare and increment. + +_`.sol.mech.allocate.optimize-small`: Use a combination of techniques +to ensure the time spent managing a block is small relative to the +block's lifetime; assume small blocks typically have short lifetimes. + +_`.sol.mech.allocate.optimize-new`: When "virgin" memory is acquired +from the operating system to satisfy a request, try to preserve it +(that is, use only what is necessary). + +_`.sol.mech.allocate.segregate.size`: Use size as a substitute for +type. + +_`.sol.mech.reallocate`: use `.req.fun.suballocate`_ to return unused +memory when a block shrinks, but differentiate this from an erroneous +overlapping free by using separate interfaces. + + + +Implementation +-------------- + +The implementation consists of the following separable modules: + + +Splay Tree +.......... + +_`.impl.c.splay`: The implementation of +`.sol.mech.free-list.splay-tree`_. See design.mps.splay_. + +.. _design.mps.splay: splay.txt + + +Coalescing Block Structure +.......................... + +_`.impl.c.cbs`: The initial implementation will use +`.sol.mech.free-list.splay-tree`_ and +`.sol.mech.storage.out-of-band`_. For locality, this storage should be +managed as a linked free list of splay nodes suballocated from blocks +acquired from a pool shared by all CBS's. Must support creation and +destruction of an empty tree. Must support search, insert and delete +by key of type Addr. Must support finding left and right neighbors of +a failed search for a key. Must support iterating over the elements of +the tree with reasonable efficiency. Must support storing and +retrieving a value of type Size associated with the key. Standard +checking and description should be provided. See design.mps.cbs_. + +.. 
_design.mps.cbs: cbs.txt
+
+
+Fail-over to address-ordered free list
+......................................
+
+_`.impl.c.freelist`: Because the CBS uses out-of-band storage, it may
+be unable to handle insert (design.mps.cbs.function.cbs.insert.fail_)
+and delete (design.mps.cbs.function.cbs.delete.fail_) operations. When
+this happens MVT fails over to an address-ordered singly linked free
+list. This uses in-band storage and so cannot run out of memory, but
+it has much worse performance than the CBS. Therefore MVT eagerly
+attempts to flush blocks from the free list back to the CBS. See
+design.mps.freelist_ for the design and implementation of the free
+list.
+
+.. _design.mps.cbs.function.cbs.delete.fail: cbs#.function.cbs.delete.fail
+.. _design.mps.cbs.function.cbs.insert.fail: cbs#.function.cbs.insert.fail
+.. _design.mps.freelist: freelist
+
+
+Available Block Queue
+.....................
+
+_`.impl.c.abq`: The initial implementation will be a queue of fixed
+size (determined at pool creation time from the high water mark). Must
+support creation and destruction of an empty queue. Must support
+insertion at the head or tail of the queue (failing if full), peeking
+at the head of the queue, and removal of the head (failing if empty)
+or any element of the queue (found by a search). Standard checking and
+description should be provided. See design.mps.abq_.
+
+.. _design.mps.abq: abq.txt
+
+
+Pool implementation
+...................
+
+_`.impl.c`: The initial implementation will use the above modules to
+implement a buffered pool. Must support creation and destruction of
+the pool. Creation takes parameters: minimum size, mean size, maximum
+size, reserve depth and fragmentation limit. Minimum, mean, and
+maximum size are used to calculate the internal fill and reuse sizes.
+Reserve depth and mean size are used to calculate the ABQ high water
+mark. Fragmentation limit is used to set the contingency mode. 
Must
+support buffer initialization, filling and emptying. Must support
+freeing. Standard checking and description should be provided.
+[Eventually, it should support scanning, so it can be used with
+collected pools, but no manual pool currently does.]
+
+_`.impl.c.future`: The implementation should not preclude "buffered
+free" (`mail.ptw.1997-12-05.19-07`_) being added in the future.
+
+.. _mail.ptw.1997-12-05.19-07: https://info.ravenbrook.com/project/mps/mail/1997/12/05/19-07/0.txt
+
+_`.impl.c.parameters`: The pool parameters are calculated as follows
+from the input parameters: minimum, mean, and maximum size are taken
+directly from the parameters.
+
+_`.impl.c.parameter.fill-size`: The fill size is set to the maximum
+size times the reciprocal of the fragmentation limit, rounded up to
+the arena grain size.
+
+_`.impl.c.parameter.reuse-size`: The reuse size is set to twice the
+fill size (see `.arch.abq.return.segment`_,
+`.impl.c.free.merge.segment`_).
+
+_`.impl.c.parameter.abq-limit`: The ABQ high-water limit is set to the
+reserve depth times the mean size (that is, the queue should hold as
+many reuse blocks as would take to cover the population hysteresis if
+the population consisted solely of mean-sized blocks, see
+`.arch.abq.high-water`_).
+
+_`.impl.c.parameter.avail-limit`: The free block high-water limit is
+implemented by comparing the available free space to an "available
+limit". The available limit is updated each time a segment is
+allocated from or returned to the arena by setting it to the total
+size of the pool times the fragmentation limit divided by 100 (see
+`.arch.contingency.fallback`_).
+
+_`.impl.c.ap.fill`: An AP fill request will be handled as follows:
+
+- If the request is larger than fill size, attempt to request a
+  segment from the arena sufficient to satisfy the request.
+
+- Use any previously returned splinter (from `.impl.c.ap.empty`_), if
+  large enough. 
+ +- Attempt to retrieve a free block from the head of the ABQ (removing + it from ABQ and the free block managers if found). + +- If above fragmentation limit, attempt to find a block in the free + block managers, using oldest-fit search. + +- Attempt to request a segment of fill size from the arena. + +- Attempt to find a block in the free block managers, using oldest-fit + search. + +- Otherwise, fail. + +_`.impl.c.ap.empty`: An AP empty request will be handled as follows: + +- If remaining free is less than min size, return it to the free block + managers. + +- If the remaining free is larger than any previous splinter, return + that splinter to the free block managers and save this one for use + by a subsequent fill. + +- Otherwise return the remaining block to the free block managers. + +_`.impl.c.free`: When blocks are returned to the free block managers +they may be merged with adjacent blocks. If a merge occurs with a +block on the ABQ, the ABQ must be adjusted to reflect the merge. + +_`.impl.c.free.exception`: Exceptional blocks are returned directly to +the arena. + +_`.impl.c.free.merge`: If a merge occurs and the merged block is +larger than reuse size: + +- If the ABQ is full, remove the block at the head of the ABQ from the + ABQ and the free block managers and return it to the arena(*). + +- Insert the newly merged block at the tail of the ABQ, leaving it in + the free block managers for further merging. + +_`.impl.c.free.merge.segment`: (*) Merged blocks may not align with +arena segments. If necessary, return the interior segments of a block +to the arena and return the splinters to the free block managers. + +_`.impl.c.free.merge.segment.reuse`: If the reuse size (the size at +which blocks recycle from the free block managers to the ABQ) is at +least twice the fill size (the size of segments the pool allocates +from the arena), we can guarantee that there will always be a +returnable segment in every ABQ block. 
+ +_`.impl.c.free.merge.segment.overflow`: If the reuse size is set +smaller (see `.arch.adapt`_), there may not be a returnable segment in +an ABQ block, in which case the ABQ has "overflowed". Whenever this +occurs, the ABQ will be refilled by searching the free block managers +for dropped reusable blocks when needed. + +_`.impl.c.free.merge.segment.risk`: The current segment structure does +not really support what we would like to do. Loci should do better: +support reserving contiguous address space and mapping/unmapping any +portion of that address space. + +_`.impl.c.free.merge.alternative`: Alternatively, if the MPS segment +substrate permitted mapping/unmapping of pages, the pool could use +very large segments and map/unmap pages as needed. + + +AP Dispatch +........... + +_`.impl.c.multiap`: The initial implementation will be a glue layer +that selects among several AP's for allocation according to the +predicted deathtime (as approximated by size) of the requested +allocation. Each AP will be filled from a pool instance tuned to the +range of object sizes expected to be allocated from that AP. [For +bonus points provide an interface that creates a batch of pools and +AP's according to some set of expected object sizes. Eventually expand +to understand object lifetimes and general lifetime prediction keys.] + +_`.impl.c.multiap.sample-code`: This glue code is not properly part of +the pool or MPS interface. It is a layer on top of the MPS interface, +intended as sample code for unsophisticated clients. Sophisticated +clients will likely want to choose among multiple AP's more directly. + + +Testing +------- + +_`.test.component`: Components `.impl.c.splay`_, `.impl.c.cbs`_, and +`.impl.c.abq`_ will be subjected to individual component tests to +verify their functionality. + +_`.test.qa`: Once poolmvt is integrated into the MPS, the standard MPS +QA tests will be applied to poolmvt prior to each release. 
+ +_`.test.customer`: Customer acceptance tests will be performed on a +per-customer basis before release to that customer (cf. +proc.release.epcore(2).test) + + +Text +---- + +Possible tweaks (from `mail.pekka.1998-04-15.13-10`_): + +.. _mail.pekka.1998-04-15.13-10: https://info.ravenbrook.com/project/mps/mail/1998/04/15/13-10/0.txt + +- Try to coalesce splinters returned from AP's with the front (or any) + block on the ABQ. + +- Sort ABQ in some other way to minimize splitting/splinters. For + example, proximity to recently allocated blocks. + + + +B. Document History +------------------- + +- 1998-02-04 PTW Initial e-mail discussion. See thread starting with + `mail.ptw.1998-02-04.21-27`_. + + .. _mail.ptw.1998-02-04.21-27: https://info.ravenbrook.com/project/mps/mail/1998/02/04/21-27/0.txt + +- 1998-02-13 PTW Initial draft based on e-mail request for comments. + See thread starting with `mail.ptw.1998-02-12.03-36`_. + + .. _mail.ptw.1998-02-12.03-36: https://info.ravenbrook.com/project/mps/mail/1998/02/12/03-36/0.txt + +- 1998-04-01 PTW Revised in response to e-mail request for comments. + See thread starting with `mail.ptw.1998-03-23.20-43`_. + + .. _mail.ptw.1998-03-23.20-43: https://info.ravenbrook.com/project/mps/mail/1998/03/23/20-43/0.txt + +- 1998-04-15 PTW Revised in response to e-mail request for comments. + See thread starting with `mail.ptw.1998-04-13.21-40`_. + + .. _mail.ptw.1998-04-13.21-40: https://info.ravenbrook.com/project/mps/mail/1998/04/13/21-40/0.txt + +- 1998-05-06 PTW Revised in response to review + review.design.mps.poolmv2.2_(0). + +.. _design.mps.poolmv2.2: poolmv2#.2 + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-21 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +C. Copyright and License +------------------------ + +Copyright © 2002–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/prmc.txt b/mps/design/prmc.txt new file mode 100644 index 00000000000..969ce8f9346 --- /dev/null +++ b/mps/design/prmc.txt @@ -0,0 +1,340 @@ +.. mode: -*- rst -*- + +Mutator context +=============== + +:Tag: design.mps.prmc +:Author: Gareth Rees +:Date: 2014-10-23 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: mutator context; design + + +Introduction +------------ + +_`.intro`: This is the design of the mutator context module. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. 
+ +_`.overview`: The mutator context module decodes the *context* of a +mutator thread at the point when it caused a protection fault, so that +access to a protected region of memory can be handled, or when it was +suspended by the thread manager, so that its registers and control +stack can be scanned. + +_`.def.context`: The *context* of a thread (also called its +*continuation*) is an abstract representation of the control state of +the thread at a point in time, including enough information to +continue the thread from that point. + +_`.status`: The mutator context module does not currently present a +clean interface to the rest of the MPS: source files are +inconsistently named, and the implementation is (necessarily) mixed up +with the implementation of the memory protection module +(design.mps.prot_) and the thread manager +(design.mps.thread-manager_). + +.. _design.mps.prot: prot +.. _design.mps.thread-manager: thread-manager + + +Requirements +------------ + +_`.req.fault.addr`: Must determine the address that the mutator was +trying to access when it caused a protection fault. (Without this +address the MPS can't handle the fault. See ``ArenaAccess()``.) + +_`.req.fault.access`: Should determine whether the mutator was trying +to read or write the address when it caused a protection fault. (This +enables a performance improvement in the case of a write fault. A read +fault must be handled by ensuring the address pointed to has been +fixed, which may require scanning the segment, whereas a write fault +merely requires that the segment's summary be discarded. See +``TraceSegAccess()``.) + +_`.req.fault.step`: Should be able to emulate the access that caused +the fault. (This enables a significant performance improvement for +weak hash tables. See request.dylan.160044_.) + +.. 
_request.dylan.160044: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160044/ + +_`.req.suspend.scan`: Must capture enough information to ambiguously +scan all roots in the context of a thread that has been suspended by +the thread manager. (This is necessary for conservative garbage +collection to work. See design.mps.thread-manager.if.scan_.) + +.. _design.mps.thread-manager.if.scan: thread-manager#.if.scan + + +Interface +--------- + +``typedef unsigned MutatorContextVar`` + +_`.if.var`: The type ``MutatorContextVar`` is the type of the +discriminator for the union within ``MutatorContextStruct``: + +======================== ================================================ +Value Description +======================== ================================================ +``MutatorContextFAULT`` Context of thread stopped by a protection fault. +``MutatorContextTHREAD`` Context of thread stopped by the thread manager. +======================== ================================================ + +``typedef MutatorContextStruct *MutatorContext`` + +_`.if.context`: A structure representing the context of the mutator at +the point when a protection fault occurred, or when it was suspended +by the thread manager. This structure should be declared in a header +so that it can be inlined in the ``Thread`` structure if necessary. +See design.mps.thread-manager.if.thread_. + +.. _design.mps.thread-manager.if.thread: thread-manager#.if.thread + +``Bool MutatorContextCheck(MutatorContext context)`` + +_`.if.check`: The check function for mutator contexts. See +design.mps.check_. + +.. _design.mps.check: check + +``Res MutatorContextInitFault(MutatorContext context, ...)`` + +_`.if.init.fault`: Initialize with the context of the mutator at the +point where it was stopped by a protection fault. The arguments are +platform-specific and the return may be ``void`` instead of ``Res`` if +this always succeeds. 
+
+``Res MutatorContextInitThread(MutatorContext context, ...)``
+
+_`.if.init.thread`: Initialize with the context of the mutator at the
+point where it was suspended by the thread manager. The arguments are
+platform-specific and the return may be ``void`` instead of ``Res`` if
+this always succeeds.
+
+``Bool MutatorContextCanStepInstruction(MutatorContext context)``
+
+_`.if.canstep`: Examine the context to determine whether the
+protection module can single-step the instruction which is causing the
+fault. Return ``TRUE`` if ``MutatorContextStepInstruction()`` is
+capable of single-stepping the instruction, or ``FALSE`` if not.
+
+``Res MutatorContextStepInstruction(MutatorContext context)``
+
+_`.if.step`: Single-step the instruction which is causing the fault.
+Update the mutator context according to the emulation or execution of
+the instruction, so that resuming the mutator will not cause the
+instruction which caused the fault to be re-executed. Return
+``ResOK`` if the instruction was single-stepped successfully, or
+``ResUNIMPL`` if the instruction cannot be single-stepped.
+
+This function is only called if
+``MutatorContextCanStepInstruction(context)`` returned ``TRUE``.
+
+``Res MutatorContextScan(ScanState ss, MutatorContext context, mps_area_scan_t scan, void *closure)``
+
+_`.if.context.scan`: Scan all roots found in ``context`` using the
+given scan state by calling ``scan``, and return the result code from
+the scanner.
+
+``Addr MutatorContextSP(MutatorContext context)``
+
+_`.if.context.sp`: Return the pointer to the "top" of the thread's
+stack at the point given by ``context``. In the common case, where the
+stack grows downwards, this is actually the lowest stack address.
+
+
+Implementations
+---------------
+
+Generic implementation
+......................
+
+_`.impl.an`: In ``prmcan.c`` and ``prmcanan.c``.
+
+_`.impl.an.context`: There is no definition of
+``MutatorContextStruct`` and so the mutator context cannot be decoded. 
+ +_`.impl.an.fault`: Compatible only with the generic memory protection +module (design.mps.prot.impl.an_) where there are no protection +faults. + +.. _design.mps.prot.impl.an: prot#.impl.an + +_`.impl.an.suspend`: Compatible only with the generic thread manager +module (design.mps.thread-manager.impl.an_) where there is only one +thread, and so no threads are suspended. + +.. _design.mps.thread-manager.impl.an: thread-manager#.impl.an + + +Posix implementation +.................... + +_`.impl.ix`: In ``prmcix.c`` and ``protsgix.c``, with +processor-specific parts in ``prmci3.c`` and ``prmci6.c``, and other +platform-specific parts in ``prmcfri3.c``, ``prmcfri6.c``, +``prmclia6.c``, ``prmclii3.c``, and ``prmclii6.c``. + +_`.impl.ix.context`: The context consists of the |siginfo_t|_ and +|ucontext_t|_ structures. POSIX specifies some of the fields in +``siginfo_t``, but says nothing about the contents of ``ucontext_t``. +This is decoded on a platform-by-platform basis. + +.. |siginfo_t| replace:: ``siginfo_t`` +.. _siginfo_t: https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/signal.h.html +.. |ucontext_t| replace:: ``ucontext_t`` +.. _ucontext_t: https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaction.html + +_`.impl.ix.fault.signal`: POSIX specifies that "Invalid permissions +for mapped object" (a protection fault) causes a ``SEGV`` signal. + +_`.impl.ix.fault.code`: POSIX specifies that "Invalid permissions for +mapped object" (a protection fault) causes ``siginfo_t.si_code`` to be +set to ``SEGV_ACCERR``. + +_`.impl.ix.fault.addr`: POSIX specifies that ``siginfo_t.si_addr`` is +the address that the faulting instruction was attempting to access. + +_`.impl.ix.fault.mode`: This implementation does not attempt to +determine whether the fault was a read or write. + +_`.impl.ix.fault.step`: This is implemented only on IA-32, and only +for "simple MOV" instructions. 
+ +_`.impl.ix.suspend`: ``PThreadextSuspend()`` records the context of +each suspended thread, and ``ThreadRingSuspend()`` stores this in the +``Thread`` structure. + +_`.impl.ix.context.scan`: The context's root registers are found in +the ``ucontext_t.uc_mcontext`` structure. + +_`.impl.ix.context.sp`: The stack pointer is obtained from +``ucontext_t.uc_mcontext.mc_esp`` (FreeBSD on IA-32), +``uc_mcontext.gregs[REG_ESP]`` (Linux on IA-32), +``ucontext_t.uc_mcontext.mc_rsp`` (FreeBSD on x86-64), or +``uc_mcontext.gregs[REG_RSP]`` (Linux on x86-64). + + +Windows implementation +...................... + +_`.impl.w3`: In ``prmcw3.c``, with processor-specific parts in +``prmci3.c``, ``prmci6.c``, and other platform-specific parts in +``prmcw3i3.c`` and ``prmcw3i6.c``. + +_`.impl.w3.context`: The context of a thread that hit a protection +fault is given by the |EXCEPTION_POINTERS|_ structure passed to a +vectored exception handler, which points to |EXCEPTION_RECORD|_ and +|CONTEXT|_ structures. + +.. |EXCEPTION_POINTERS| replace:: ``EXCEPTION_POINTERS`` +.. _EXCEPTION_POINTERS: https://docs.microsoft.com/en-gb/windows/desktop/api/winnt/ns-winnt-_exception_pointers +.. |EXCEPTION_RECORD| replace:: ``EXCEPTION_RECORD`` +.. _EXCEPTION_RECORD: https://docs.microsoft.com/en-gb/windows/desktop/api/winnt/ns-winnt-_exception_record +.. |CONTEXT| replace:: ``CONTEXT`` +.. _CONTEXT: https://docs.microsoft.com/en-gb/windows/desktop/api/winnt/ns-winnt-_exception_record + +_`.impl.w3.fault.addr`: ``EXCEPTION_RECORD.ExceptionAddress`` is the +address that the faulting instruction was trying to access. + +_`.impl.w3.fault.mode`: ``EXCEPTION_RECORD.ExceptionInformation[0]`` +is 0 for a read fault, 1 for a write fault, and 8 for an execute +fault (which we handle as a read fault). + +_`.impl.w3.fault.step`: This is implemented only on IA-32, and only +for "simple MOV" instructions. + +_`.impl.w3.suspend`: The context of a suspended thread is returned by +|GetThreadContext|_. + +.. 
|GetThreadContext| replace:: ``GetThreadContext()`` +.. _GetThreadContext: https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getthreadcontext + +_`.impl.w3.context.scan`: The context's root registers are found in +the |CONTEXT|_ structure. + +_`.impl.w3.context.sp`: The stack pointer is obtained from +``CONTEXT.Esp`` (on IA-32) or ``CONTEXT.Rsp`` (on x86-64). + + +macOS implementation +.................... + +_`.impl.xc`: In ``prmcix.c`` and ``prmcxc.c``, with processor-specific +parts in ``prmci3.c`` and ``prmci6.c``, and other platform-specific +parts in ``prmcxca6.c``, ``prmcxci3.c`` and ``prmcxci6.c``. + +_`.impl.xc.context`: The context consists of the +``__Request__mach_exception_raise_state_identity_t`` and +``arm_thread_state_t``, ``x86_thread_state32_t`` or +``x86_thread_state64_t`` structures. There doesn't seem to be any +documentation for these structures, but they are defined in the Mach +headers. + +_`.impl.xc.fault.addr`: ``__Request__mach_exception_raise_state_identity_t.code[1]`` is the +address that the faulting instruction was trying to access. + +_`.impl.xc.fault.mode`: This implementation does not attempt to +determine whether the fault was a read or write. + +_`.impl.xc.fault.step`: This is implemented only on IA-32, and only +for "simple MOV" instructions. + +_`.impl.xc.suspend`: The context of a suspended thread is obtained by +calling |thread_get_state|_. + +.. |thread_get_state| replace:: ``thread_get_state()`` +.. _thread_get_state: https://www.gnu.org/software/hurd/gnumach-doc/Thread-Execution.html + +_`.impl.xc.context.scan`: The thread's registers are found in the +``arm_thread_state64_t``, ``x86_thread_state32_t`` or +``x86_thread_state64_t`` structure. + +_`.impl.xc.context.sp`: The stack pointer is obtained using the +``arm_thread_state64_get_sp()`` macro (on ARM64), or from +``x86_thread_state32_t.__esp`` (on IA-32) or +``x86_thread_state64_t.__rsp`` (on x86-64). 
+ + +Document History +---------------- + +- 2014-10-23 GDR_ Initial draft based on design.mps.thread-manager_ + and design.mps.prot_. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/prot.txt b/mps/design/prot.txt new file mode 100644 index 00000000000..4df68869f7e --- /dev/null +++ b/mps/design/prot.txt @@ -0,0 +1,235 @@ +.. mode: -*- rst -*- + +Memory protection +================= + +:Tag: design.mps.prot +:Author: David Jones +:Date: 1997-04-02 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. 
+:Index terms: pair: memory protection; design + + +Introduction +------------ + +_`.intro`: This is the design of the memory protection module. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. + +_`.overview`: The memory protection module ensures that the mutator +sees a consistent view of memory during incremental collection, by +applying protection to areas of memory, ensuring that attempts to read +or write from those areas cause protection faults, and implementing +the means for the MPS to handle these faults. + + +Requirements +------------ + +_`.req.consistent`: Must ensure that the mutator sees a consistent +view of memory during incremental collection: in particular, the +mutator must never see objects in oldspace. (Otherwise there's no way +for the MPS to interface with uncooperative code.) + +_`.req.prot.read`: Should allow collections to proceed incrementally, +by read-protecting pages that are not consistent from the mutator's +point of view. (This is the only way for the MPS to meet real-time +requirements on pause times.) + +_`.req.prot.write`: Should allow the MPS to maintain remembered sets +for segments that it has scanned, by write-protecting pages in these +segments. (This improves performance by allowing the MPS to avoid +scanning these segments again.) + +_`.req.fault.handle`: If the module implements protection, it must +also provide a mechanism for handling protection faults. (Otherwise +the MPS cannot take the correct action: that is, fixing references in +a read-protected segment, and discarding the remembered set from a +write-protected segment. See ``TraceSegAccess()``.) + +_`.req.prot.exec`: The protection module should allow mutators to +write machine code into memory managed by the MPS and then execute +that code, for example, to implement just-in-time translation, or +other forms of dynamic compilation. Compare +design.mps.vm.req.prot.exec_. + +.. 
_design.mps.vm.req.prot.exec: vm#.req.prot.exec + + +Design +------ + +_`.sol.sync`: If memory protection is not available, the only way to +meet `.req.consistent`_ is to ensure that no protection is required, +by running the collector until it has no more incremental work to do. +(This makes it impossible to meet real-time requirements on pause +times, but may be the best that can be done.) + +_`.sol.fault.handle`: The protection module handles protection faults +by decoding the context of the fault (see +design.mps.prmc.req.fault.addr_ and design.mps.prmc.req.fault.access_) +and calling ``ArenaAccess()``. + +.. _design.mps.prmc.req.fault.addr: prmc#.req.fault.addr +.. _design.mps.prmc.req.fault.access: prmc#.req.fault.access + +_`.sol.prot.exec`: The protection module makes memory executable +whenever it is readable by the mutator, if this is supported by the +platform. + + +Interface +--------- + +``void ProtSetup(void)`` + +_`.if.setup`: Called exactly once (per process) as part of the +initialization of the first arena that is created. It must arrange for +the setup and initialization of any data structures or services that +are necessary in order to implement the memory protection module. + +``Size ProtGranularity(void)`` + +_`.if.granularity`: Return the granularity of protection. The ``base`` +and ``limit`` arguments to ``ProtSet()`` must be multiples of the +protection granularity. + +``void ProtSet(Addr base, Addr limit, AccessSet mode)`` + +_`.if.set`: Set the protection of the range of memory between ``base`` +(inclusive) and ``limit`` (exclusive) to *forbid* the specified modes. +The addresses ``base`` and ``limit`` are multiples of the protection +granularity. The ``mode`` parameter contains the ``AccessWRITE`` bit +if write accesses to the range are to be forbidden, and contains the +``AccessREAD`` bit if read accesses to the range are to be forbidden. 
+ +_`.if.set.read`: If the request is to forbid read accesses (that is, +``AccessREAD`` is set) then the implementation may also forbid write +accesses, but read accesses must not be forbidden unless +``AccessREAD`` is set. + +_`.if.set.noop`: ``ProtSet()`` is permitted to be a no-op if +``ProtSync()`` is implemented. + +``void ProtSync(Arena arena)`` + +_`.if.sync`: Ensure that the actual protection (as determined by the +operating system) of every segment in the arena matches the segment's +protection mode (``seg->pm``). + +_`.if.sync.noop`: ``ProtSync()`` is permitted to be a no-op if +``ProtSet()`` is implemented. + + +Implementations +--------------- + +_`.impl.an`: Generic implementation in ``protan.c``. + +_`.impl.an.set`: ``ProtSet()`` does nothing. + +_`.impl.an.sync`: ``ProtSync()`` has no way of changing the protection +of a segment, so it simulates faults on all segments that are supposed +to be protected, by calling ``TraceSegAccess()``, until it determines +that no segments require protection any more. This forces the trace to +proceed until it is completed, preventing incremental collection. + +_`.impl.an.sync.issue`: This relies on the pool actually removing the +protection, otherwise there is an infinite loop here. This is +therefore not compatible with implementations of the protection +mutator context module that support single-stepping of accesses (see design.mps.prmc.req.fault.step_). + +.. _design.mps.prmc.req.fault.step: prmc#.req.fault.step + +_`.impl.ix`: POSIX implementation. See design.mps.protix_. + +.. _design.mps.protix: protix + +_`.impl.w3`: Windows implementation. + +_`.impl.xc`: macOS implementation. + +_`.impl.xc.prot.exec`: The approach in `.sol.prot.exec`_ of always +making memory executable causes a difficulty on macOS on Apple +Silicon. On this platform, programs may enable `Hardened Runtime`_. +This feature rejects attempts to map or protect memory so that it is +simultaneously writable and executable. 
Moreover, the feature is +enabled by default (as of macOS 13 Ventura), so that if you install +Xcode and then use it to compile the following program, the executable +fails when run with "mmap: Permission denied". :: + + #include <sys/mman.h> + #include <stdio.h> + + int main(void) + { + void *p = mmap(0, 1, PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (p == MAP_FAILED) perror("mmap"); + return 0; + } + +.. _Hardened Runtime: https://developer.apple.com/documentation/security/hardened_runtime + +_`.impl.xc.prot.exec.detect`: The protection module detects Hardened +Runtime if the operating system is macOS, the CPU architecture is +ARM64, a call to ``mprotect()`` fails, the call requested writable and +executable access, and the error code is ``EACCES``. + +_`.impl.xc.prot.exec.retry`: To avoid requiring developers who don't +need to allocate executable memory to figure out how to disable +Hardened Runtime, or enable the appropriate entitlement, the +protection module handles the ``EACCES`` error from ``mprotect()`` in +the Hardened Runtime case by retrying without the request for the +memory to be executable, and setting a global variable to prevent the +writable and executable combination being attempted again. + + +Document History +---------------- + +- 1997-04-02 David Jones. Incomplete document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +- 2014-10-23 GDR_ Move mutator context interface to design.mps.prmc_. + Bring design up to date. + + .. _design.mps.prmc: prmc + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited <https://www.ravenbrook.com/>`_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/protix.txt b/mps/design/protix.txt new file mode 100644 index 00000000000..529b43990a2 --- /dev/null +++ b/mps/design/protix.txt @@ -0,0 +1,226 @@ +.. mode: -*- rst -*- + +POSIX implementation of protection module +========================================= + +:Tag: design.mps.protix +:Author: Tony Mann +:Date: 2000-02-03 +:Status: incomplete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: POSIX; protection interface design + pair: POSIX protection interface; design + + +Introduction +------------ + +_`.readership`: Any MPS developer + +_`.intro`: This is the design of the POSIX implementation of the +protection module. It makes use of various services provided by POSIX. +It is intended to work with POSIX Threads. 
+ + +Requirements +------------ + +_`.req.general`: Required to implement the general protection +interface defined in design.mps.prot.if_. + +.. _design.mps.prot.if: prot#.if + + +Data structures +--------------- + +_`.data.signext`: If the SIGSEGV signal is not handled by any MPS +arena, ``sigHandle()`` needs to forward the signal to the next signal +handler in the chain (the signal handler that was installed when the +``ProtSetup()`` was called), by temporarily reinstalling the old +signal handler and calling ``kill()``. The only way to pass the next +signal handler to the current signal handler is via a global variable, +in this case the variable ``sigNext``. + + +Functions +--------- + +_`.fun.setup`: ``ProtSetup()`` installs a signal handler for the +signal ``SIGSEGV`` to catch and handle protection faults (this handler +is the function ``sigHandle()``). + +_`.fun.setup.previous`: The previous handler is recorded (in the +variable ``sigNext``, see `.data.signext`_) so that it can be reached +from ``sigHandle()`` if it fails to handle the fault. + +_`.fun.setup.restart`: We set the ``SA_RESTART`` flag when installing +the signal handler so that if the mutator gets a protection fault +while blocked in a system call, the system call is automatically +restarted after the signal is handled, instead of failing with +``EINTR``. Note that unlike the corresponding case in the thread +management subsystem (see design.mps.thread-manager.req.thread.intr_) +we are unsure if this case can actually arise: the ``SIGSEGV`` from a +protection fault is delivered to the thread that accessed the +protected memory, but in all the cases we have checked, if this access +occurred during a blocking system call such as a ``read()`` with the +buffer in protected memory, the system call fails with ``EFAULT`` and +is not restarted. However, it costs us nothing to set the +``SA_RESTART`` flag. + +.. 
_design.mps.thread-manager.req.thread.intr: thread-manager#.req.thread.intr + +_`.fun.set`: ``ProtSet()`` uses ``mprotect()`` to adjust the +protection for pages. + +_`.fun.set.convert`: The requested protection (which is expressed in +the ``mode`` parameter, see design.mps.prot.if.set_) is translated into +an operating system protection. If read accesses are to be forbidden +then all accesses are forbidden, this is done by setting the +protection of the page to ``PROT_NONE``. If write accesses are to be +forbidden (and not read accesses) then write accesses are forbidden +and read accesses are allowed, this is done by setting the protection +of the page to ``PROT_READ|PROT_EXEC``. Otherwise (all access are +okay), the protection is set to ``PROT_READ|PROT_WRITE|PROT_EXEC``. + +.. _design.mps.prot.if.set: prot#.if.set + +_`.fun.set.assume.mprotect`: We assume that the call to ``mprotect()`` +always succeeds. We should always call the function with valid +arguments (aligned, references to mapped pages, and with an access +that is compatible with the access of the underlying object). + +_`.fun.sync`: ``ProtSync()`` does nothing in this implementation as +``ProtSet()`` sets the protection without any delay. + + +Threads +------- + +_`.threads`: The design must operate in a multi-threaded environment +(with POSIX Threads) and cooperate with the POSIX support for locks +(see design.mps.lock_) and the thread suspension mechanism (see +design.mps.pthreadext_ ). + +.. _design.mps.pthreadext: pthreadext +.. _design.mps.lock: lock + +_`.threads.suspend`: The ``SIGSEGV`` signal handler does not mask out +any signals, so a thread may be suspended while the handler is active, +as required by the design (see +design.mps.pthreadext.req.suspend.protection_). The signal handlers +simply nest at top of stack. + +.. 
_design.mps.pthreadext.req.suspend.protection: pthreadext#.req.suspend.protection + +_`.threads.async`: POSIX imposes some restrictions on signal handler +functions (see design.mps.pthreadext.analysis.signal.safety_). Basically +the rules say the behaviour of almost all POSIX functions inside a +signal handler is undefined, except for a handful of functions which +are known to be "async-signal safe". However, if it's known that the +signal didn't happen inside a POSIX function, then it is safe to call +arbitrary POSIX functions inside a handler. + +.. _design.mps.pthreadext.analysis.signal.safety: pthreadext#.analysis.signal.safety + +_`.threads.async.protection`: If the signal handler is invoked because +of an MPS access, then we know the access must have been caused by +client code, because the client is not allowed to permit access to +protectable memory to arbitrary foreign code. In these circumstances, +it's OK to call arbitrary POSIX functions inside the handler. + +.. note:: + + Need a reference for "the client is not allowed to permit access + to protectable memory to arbitrary foreign code". + +_`.threads.async.other`: If the signal handler is invoked for some +other reason (that is, one we are not prepared to handle) then there +is less we can say about what might have caused the SIGSEGV. In +general it is not safe to call arbitrary POSIX functions inside the +handler in this case. + +_`.threads.async.choice`: The signal handler calls ``ArenaAccess()`` +to determine whether the segmentation fault was the result of an MPS +access. ``ArenaAccess()`` will claim various MPS locks (that is, the +arena ring lock and some arena locks). The code calls no other POSIX +functions in the case where the segmentation fault is not an MPS +access. The locks are implemented as mutexes and are claimed by +calling ``pthread_mutex_lock()``, which is not defined to be +async-signal safe. 
+ +_`.threads.async.choice.ok`: However, despite the fact that POSIX +Threads documentation doesn't define the behaviour of +``pthread_mutex_lock()`` in these circumstances, we expect the POSIX +Threads implementation will be well-behaved unless the segmentation +fault occurs while in the process of locking or unlocking one of +the MPS locks. But we can assume that a segmentation fault will not +happen then (because we use the locks correctly, and generally must +assume that they work). Hence we conclude that it is OK to call +``ArenaAccess()`` directly from the signal handler. + +_`.threads.async.improve`: In future it would be preferable to not +have to assume reentrant mutex locking and unlocking functions. An +alternative approach would be necessary anyway when supporting another +platform which doesn't offer reentrant locks (if such a platform does +exist). + +_`.threads.async.improve.how`: We could avoid the assumption if we had +a means of testing whether an address lies within an arena chunk +without the need to claim any locks. Such a test might actually be +possible. For example, arenas could update a global datastructure +describing the ranges of all chunks, using atomic updates rather than +locks; the handler code would be allowed to read this without locking. +However, this is somewhat tricky; a particular consideration is that +it's not clear when it's safe to deallocate stale portions of the +datastructure. + +_`.threads.sig-stack`: We do not handle signals on a separate signal +stack. Separate signal stacks apparently don't work properly with +POSIX Threads. + + +Document History +---------------- + +- 2000-02-03 Tony Mann. Incomplete document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +- 2016-10-13 GDR_ Generalise to POSIX, not just Linux. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/protocol.txt b/mps/design/protocol.txt new file mode 100644 index 00000000000..93ac13efd0d --- /dev/null +++ b/mps/design/protocol.txt @@ -0,0 +1,671 @@ +.. mode: -*- rst -*- + +Protocol inheritance +==================== + +:Tag: design.mps.protocol +:Author: Tony Mann +:Date: 1998-10-12 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `C. Copyright and License`_. 
+:Index terms: pair: protocol inheritance; design +:Readership: MPS developers + + +Introduction +------------ + +_`.intro`: This document explains the design of the support for class +inheritance in MPS. + +_`.readership`: This document is intended for any MPS developer. + + +Purpose +------- + +_`.purpose.code-maintain`: The purpose of the protocol inheritance +design is to ensure that the MPS code base can make use of the +benefits of object-oriented class inheritance to maximize code reuse, +minimize code maintenance and minimize the use of boilerplate code. + +_`.purpose.related`: For related discussion, see +`mail.tony.1998-08-28.16-26`_, `mail.tony.1998-09-01.11-38`_, +`mail.tony.1998-10-06.11-03`_ and other messages in the same threads. + +.. _mail.tony.1998-10-06.11-03: https://info.ravenbrook.com/project/mps/mail/1998/10/06/11-03/0.txt +.. _mail.tony.1998-09-01.11-38: https://info.ravenbrook.com/project/mps/mail/1998/09/01/11-38/0.txt +.. _mail.tony.1998-08-28.16-26: https://info.ravenbrook.com/project/mps/mail/1998/08/28/16-26/0.txt + + +Requirements +------------ + +_`.req.implicit`: The object system should provide a means for classes +to inherit the methods of their direct superclasses implicitly for all +functions in the protocol without having to write any explicit code +for each inherited function. + +_`.req.override`: There must additionally be a way for classes to +override the methods of their superclasses. + +_`.req.next-method`: As a result of `.req.implicit`_, classes cannot +make static assumptions about methods used by direct superclasses. The +object system must provide a means for classes to extend (not just +replace) the behaviour of protocol functions, such as a mechanism for +invoking the "next-method". + +_`.req.ideal.extend`: The object system must provide a standard way +for classes to implement the protocol supported by their superclass and +additionally add new methods of their own which can be specialized by +subclasses. 
+ +_`.req.ideal.multiple-inheritance`: The object system should support +multiple inheritance such that sub-protocols can be "mixed in" with +several classes which do not themselves support identical protocols. + + +Overview +-------- + +_`.overview.inst`: The key concept in the design is the relationship +between an "instance" and its "class". Every structure that +participates in the protocol system begins with an ``InstStruct`` +structure that contains a pointer to an ``InstClassStruct`` that +describes it, like this:: + + instance class + + .----------. .----------. + | class |----->| class | + ------------ ------------ + | ... | | sig | + ------------ ------------ + | ... | | name | + ------------ ------------ + | ... | |superclass| + ------------ ------------ + | | | ... | + +_`.overview.prefix`: We make use of the fact that we can cast between +structures with common prefixes, or between structures and their first +members, to provide dynamic typing and subtyping (see +[Kernighan_1988]_, A.8.3). + +_`.overview.method`: The ``InstClassStruct`` it itself at the start of +a class structure contains pointers to functions that can be called to +manipulate the instance as an abstract data type. We refer to these +functions as "methods" to distinguish them from functions not involved +in the object-oriented protocol. The macro ``Method`` is provided for +calling methods. + +_`.overview.subclass`: An instance structure can be extended by using +it as the first field of another structure, and by overriding its +class pointer with a pointer to a "subclass" that provides different +behavior. + +_`.overview.inherit`: Classes inherit the methods from their +superclasses when they are initialized, so by default they have the +same methods as the class from which they inherit. Methods on the +superclass can be re-used, providing polymorphism. + +_`.overview.inherit.specialize`: Classes may specialize the behaviour +of their superclass. 
They do this by by overriding methods or other +fields in the class object. + +_`.overview.mixin`: Groups of related overrides are provided by +"mixins", and this provides a limited form of multiple inheritance. + +_`.overview.extend`: Classes may extend the protocols supported by +their superclasses by adding new fields for methods or other data. +Extending a class creates a new kind of class. + +_`.overview.kind`: Classes are themselves instance objects, and have +classes of their own. A class of a class is referred to as a "kind", +but is not otherwise special. Classes which share the same set of +methods (or other class fields) are instances of the same kind. If a +class is extended, it becomes a member of a different kind. Kinds +allow subtype checking to be applied to classes as well as instances, +to determine whether methods are available. :: + + instance class kind + (e.g. CBS) (e.g. CBSClass) (e.g. LandClassClass) + .----------. .----------. .----------. + | class |----->| class |----->| class |-->InstClassClass + ------------ ------------ ------------ + | ... | | sig | | sig | + ------------ ------------ ------------ + | ... | | name | | name | + ------------ ------------ ------------ + | ... | |superclass|-. |superclass|-->InstClassClass + ------------ ------------ | ------------ + | | | ... | | | ... | + | + | + LandClass<-' + + +_`.overview.sig.inherit`: Instances (and therefore classes) will +contain signatures. Classes must not specialize (override) the +signatures they inherit from their superclasses, as they are used to +check the actual type (not sub- or supertype) of the object they're +in. + +_`.overview.sig.extend`: When extending an instance or class, it is +normal policy for the new structure to include a new signature as the +last field. + +_`.overview.superclass`: Each class contains a ``superclass`` field. +This enables classes to call "next-method". 
+ +_`.overview.next-method`: A specialized method in a class can make use +of an overridden method from a superclass using the ``NextMethod`` +macro, statically naming the superclass. + +_`.overview.next-method.dynamic`: It is possible to write a method +which does not statically know its superclass, and call the next +method by extracting a class from one of its arguments using +``ClassOfPoly`` and finding the superclass using ``SuperclassPoly``. +Debug pool mixins do this. However, this is not fully general, and +combining such methods is likely to cause infinite recursion. Take +care! + +_`.overview.access`: Classes must be initialized by calls to +functions, since there is no way to express overrides statically in +C89. ``DEFINE_CLASS`` defines an "ensure" function that initializes +and returns the canonical copy of the class. The canonical copy may +reside in static storage, but no MPS code may refer to that static +storage by name. + +_`.overview.init`: In addition to the "ensure" function, each class +must provide an "init" function, which initialises its argument as a +fresh copy of the class. This allows subclasses to derive their +methods and other fields from superclasses. + +_`.overview.naming`: There are some strict naming conventions which +must be followed when defining and using classes. The use is +obligatory because it is assumed by the macros which support the +definition and inheritance mechanism. For every kind ``Foo``, +we insist upon the following naming conventions: + +* ``Foo`` names a type that points to a ``FooStruct``. + +* ``FooStruct`` is the type of the instance structure, the first field + of which is the structure it inherits from (ultimately an + ``InstStruct``). + +* ``FooClass`` names the type that points to a ``FooClassStruct``. + +* ``FooClassStruct`` names the structure for the class pointed to by + ``FooStruct``, containing the methods that operate on ``Foo``. 
+ + +Interface +--------- + + +Class declaration +................. + +``DECLARE_CLASS(kind, className)`` + +_`.if.declare-class`: Class declaration is performed by the macro +``DECLARE_CLASS``, which declares the existence of the class +definition elsewhere. It is intended for use in headers. + + +Class definition +................ + +``DEFINE_CLASS(kind, className, var)`` + +_`.if.define-class`: Class definition is performed by the macro +``DEFINE_CLASS``. A call to the macro must be followed by a function +body of initialization code. The parameter ``className`` is used to +name the class being defined. The parameter ``var`` is used to name a +local variable of type of classes of kind ``kind``, which is defined +by the macro; it refers to the canonical storage for the class being +defined. This variable may be used in the initialization code. (The +macro doesn't just pick a name implicitly because of the danger of a +name clash with other names used by the programmer). A call to the +macro defines the ensure function for the class along with some static +storage for the canonical class object, and some other things to +ensure the class gets initialized at most once. + + +Class access +............ + +``CLASS(className)`` + +_`.if.class`: To get the canonical class object, use the ``CLASS`` +macro, e.g. ``CLASS(Land)``. + + +Single inheritance +.................. + +``INHERIT_CLASS(this, className, parentName)`` + +_`.if.inheritance`: Class inheritance details must be provided in the +class initialization code (see `.if.define-class`_). Inheritance is +performed by the macro ``INHERIT_CLASS``. A call to this macro will +make the class being defined a direct subclass of ``parentClassName`` +by ensuring that all the fields of the embedded parent class (pointed +to by the ``this`` argument) are initialized as the parent class, and +setting the superclass field of ``this`` to be the canonical parent +class object. 
The parameter ``this`` must be the same kind as +``parentClassName``. + + +Specialization +.............. + +_`.if.specialize`: Fields in the class structure must be assigned +explicitly in the class initialization code (see +`.if.define-class`_). This must happen *after* inheritance details +are given (see `.if.inheritance`_), so that overrides work. + + +Extension +......... + +_`.if.extend`: To extend the protocol when defining a new class, a +new type must be defined for the class structure. This must embed the +structure for the primarily inherited class as the first field of the +structure. Extension fields in the class structure must be assigned +explicitly in the class initialization code (see +`.if.define-class`_). This should be done *after* the inheritance +details are given for consistency with `.if.inheritance`_. This is, +in fact, how all the useful classes extend ``Inst``. + +_`.if.extend.kind`: In addition, a class must be defined for the new +kind of class. This is just an unspecialized subclass of the kind of +the class being specialized by the extension. For example:: + + typedef struct LandClassStruct { + InstClassStruct instClass; /* inherited class */ + LandInsertMethod insert; + ... + } LandClassStruct; + + DEFINE_CLASS(Inst, LandClass, class) + { + INHERIT_CLASS(class, LandClass, InstClass); + } + + DEFINE_CLASS(Land, Land, class) + { + INHERIT_CLASS(&class->instClass, Land, Inst); + class->insert = landInsert; + ... + } + + +Methods +....... + +``Method(kind, inst, meth)`` + +_`.if.method`: To call a method on an instance of a class, use the +``Method`` macro to retrieve the method. This macro may assert if the +class is not of the kind requested. For example, to call the +``insert`` method on ``land``:: + + res = Method(Land, land, insert)(rangeReturn, land, range); + + +``NextMethod(kind, className, meth)`` + +_`.if.next-method`: To call a method from a superclass of a class, +use the ``NextMethod`` macro to retrieve the method. 
This macro may
+assert if the superclass is not of the kind requested. For example,
+the function to split AMS segments wants to split the segments they
+are based on, so does::
+
+    res = NextMethod(Seg, AMSSeg, split)(seg, segHi, base, mid, limit);
+
+
+Conversion
+..........
+
+
+``IsA(className, inst)``
+
+_`.if.isa`: Returns non-zero iff the class of ``inst`` is a member of
+the class or any of its subclasses.
+
+
+``MustBeA(className, inst)``
+
+_`.if.must-be-a`: To convert the C type of an instance to that of a
+compatible class (the class of the actual object or any superclass),
+use the ``MustBeA`` macro. In hot varieties this macro performs a
+fast dynamic type check and will assert if the class is not
+compatible. It is like C++ "dynamic_cast" with an assert. In cool
+varieties, the class check method is called on the object. For
+example, in a specialized Land method in the CBS class::
+
+    static Res cbsInsert(Range rangeReturn, Land land, Range range)
+    {
+      CBS cbs = MustBeA(CBS, land);
+      ...
+
+
+``MustBeA_CRITICAL(className, inst)``
+
+_`.if.must-be-a.critical`: When the cost of a type check is too
+expensive in hot varieties, use ``MustBeA_CRITICAL`` in place of
+``MustBeA``. This only performs the check in cool varieties. Compare
+with ``AVER_CRITICAL``.
+
+
+``CouldBeA(className, inst)``
+
+_`.if.could-be-a`: To make an unsafe conversion equivalent to
+``MustBeA``, use the ``CouldBeA`` macro. This is in effect a simple
+pointer cast, but it expresses the intention of class compatibility in
+the source code. It is mainly intended for use when initializing an
+object, when a class compatibility check would fail, when checking an
+object, or in debugging code such as describe methods, where asserting
+is inappropriate. It is intended to be equivalent to the C++
+``static_cast``, although since this is C there is no actual static
+checking, so in fact it's more like ``reinterpret_cast``.
+
+
+Introspection
+.............
+ +_`.introspect.c-lang`: The design includes a number of introspection +functions for dynamically examining class relationships. These +functions are polymorphic and accept arbitrary subclasses of +``InstClass``. C doesn't support such polymorphism. So although these +have the semantics of functions (and could be implemented as functions +in another language with compatible calling conventions) they are +actually implemented as macros. The macros are named as function-style +macros despite the fact that this arguably contravenes +guide.impl.c.macro.method. The justification for this is that this +design is intended to promote the use of polymorphism, and it breaks +the abstraction for the users to need to be aware of what can and +can't be expressed directly in C function syntax. These functions all +have names ending in ``Poly`` to identify them as polymorphic +functions. + + +``SuperclassPoly(kind, class)`` + +_`.if.superclass-poly`: An introspection function which returns the +direct superclass of class object ``class`` as a class of kind +``kind``. This may assert if the superclass is not (a subtype of) the +kind requested. + + +``ClassOfPoly(kind, inst)`` + +_`.if.class-of-poly`: An introspection function which returns the +class of which ``inst`` is a direct instance, as a class of kind +``kind``. This may assert if the class is not (a subtype of) the kind +requested. + + +``SetClassOfPoly(inst, class)`` + +_`.if.set-class-of-poly`: An initialization function that sets the +class of ``inst`` to be ``class``. This is intended only for use in +initialization functions, to specialize the instance once its fields +have been initialized. Each Init function should call its superclass +init, finally reaching InstInit, and then, once it has set up its +fields, use SetClassOfPoly to set the class and check the instance +with its check method. Compare with `design.mps.sig`_. + +.. 
_`design.mps.sig`: sig
+
+``IsSubclass(sub, super)``
+
+_`.if.is-subclass`: An introspection function which returns a ``Bool``
+indicating whether ``sub`` is a subclass of ``super``. That is, it is
+a predicate for testing subclass relationships.
+
+
+Protocol guidelines
+...................
+
+_`.guide.fail`: When designing an extensible method which might fail,
+the design must permit the correct implementation of the failure-case
+code. Typically, a failure might occur in any method in the chain.
+Each method is responsible for correctly propagating failure
+information supplied by superclass methods and for managing its own
+failures. This is not really different from the general MPS
+convention for unwinding on error paths. It implies that the design
+of a class must include an anti-method for each method that changes
+the state of an instance (e.g. by allocating memory) to allow the
+state to be reverted in case of a failure. See `.example.fail`_
+below.
+
+
+Example
+.......
+
+_`.example.inheritance`: The following example class definition shows
+both inheritance and specialization. It shows the definition of the
+class ``RankBuf``, which inherits from ``SegBuf`` of kind ``Buffer``
+and has specialized ``varargs`` and ``init`` methods. ::
+
+    DEFINE_CLASS(Buffer, RankBuf, class)
+    {
+      INHERIT_CLASS(class, RankBuf, SegBuf);
+      class->varargs = rankBufVarargs;
+      class->init = rankBufInit;
+    }
+
+_`.example.extension`: The following (hypothetical) example class
+definition shows inheritance, specialization and also extension. It
+shows the definition of the class ``EPDLDebugPool``, which inherits
+from ``EPDLPool`` of kind ``Pool``, but also implements a method for
+checking properties of the pool. 
:: + + typedef struct EPDLDebugPoolClassStruct { + EPDLPoolClassStruct epdl; + DebugPoolCheckMethod check; + Sig sig; + } EPDLDebugPoolClassStruct; + + typedef EPDLDebugPoolClassStruct *EPDLDebugPoolClass; + + DEFINE_CLASS(Inst, EPDLDebugPoolClass, class) + { + INHERIT_CLASS(class, EPDLPoolClass, InstClass); + } + + DEFINE_CLASS(EPDLDebugPool, EPDLDebugPool, class) + { + INHERIT_CLASS(&class->epdl, EPDLDebugPool, EPDLPoolClass); + class->check = EPDLDebugCheck; + class->sig = EPDLDebugSig; + } + +_`.example.fail`: The following example shows the implementation of +failure-case code for an "init" method, making use of the "finish" +anti-method to clean-up a subsequent failure. :: + + static Res AMSSegInit(Seg seg, Pool pool, + Addr base, Size size, + ArgList args) + { + AMS ams = MustBeA(AMSPool, pool); + Arena arena = PoolArena(pool); + AMSSeg amsseg; + Res res; + + /* Initialize the superclass fields first via next-method call */ + res = NextMethod(Seg, AMSSeg, init)(seg, pool, base, size, args); + if (res != ResOK) + goto failNextMethod; + amsseg = CouldBeA(AMSSeg, seg); + + amsseg->grains = size >> ams->grainShift; + amsseg->freeGrains = amsseg->grains; + amsseg->oldGrains = (Count)0; + amsseg->newGrains = (Count)0; + amsseg->marksChanged = FALSE; /* */ + amsseg->ambiguousFixes = FALSE; + + res = amsCreateTables(ams, &amsseg->allocTable, + &amsseg->nongreyTable, &amsseg->nonwhiteTable, + arena, amsseg->grains); + if (res != ResOK) + goto failCreateTables; + + /* start off using firstFree, see */ + amsseg->allocTableInUse = FALSE; + amsseg->firstFree = 0; + amsseg->colourTablesInUse = FALSE; + + amsseg->ams = ams; + RingInit(&amsseg->segRing); + RingAppend((ams->allocRing)(ams, SegRankSet(seg), size), + &amsseg->segRing); + + SetClassOfPoly(seg, CLASS(AMSSeg)); + amsseg->sig = AMSSegSig; + AVERC(AMSSeg, amsseg); + + return ResOK; + + failCreateTables: + NextMethod(Seg, AMSSeg, finish)(seg); + failNextMethod: + AVER(res != ResOK); + return res; + } + + 
+
+Implementation
+--------------
+
+_`.impl.define-class.lock`: The ``DEFINE_CLASS`` macro ensures that
+each class is initialized at most once (even in multi-threaded
+programs) by claiming the global recursive lock (see design.mps.thread-safety.arch.global.recursive_).
+
+.. _design.mps.thread-safety.arch.global.recursive: thread-safety#.arch.global.recursive
+
+_`.impl.derived-names`: The ``DEFINE_CLASS()`` macro derives some
+additional names from the class name as part of its implementation.
+These should not appear in the source code, but it may be useful to
+know about this for debugging purposes. For each class definition for
+class ``SomeClass`` of kind ``SomeKind``, the macro defines the
+following:
+
+* ``extern SomeKind SomeClassGet(void);``
+
+  The class ensure function. See `.overview.naming`_. This function
+  handles local static storage for the canonical class object and a
+  guardian to ensure the storage is initialized at most once. This
+  function is invoked by the ``CLASS`` macro (`.if.class`_).
+
+* ``static void SomeClassInit(SomeKind);``
+
+  A function called by ``SomeClassGet()``. All the class
+  initialization code is actually in this function.
+
+_`.impl.subclass`: The subclass test `.if.is-subclass`_ is implemented
+using an array of superclasses [Cohen_1991]_ giving a fast
+constant-time test. (RB_ tried an approach using prime factors
+[Gibbs_2004]_ but found that they overflowed in 32-bits too easily to
+be useful.) Each class is assigned a "level" which is the distance
+from the root of the class hierarchy. The ``InstClass`` structure
+contains an array of class ids indexed by level, representing the
+inheritance of this class. A class is a subclass of another if and
+only if the superclass id is present in the array at the superclass
+level. The level is statically defined using enum constants, and the
+id is the address of the canonical class object, so the test is fast
+and simple.
+
+.. 
_RB: https://www.ravenbrook.com/consultants/rb/ + + +Common instance methods +----------------------- + +_`.method`: These methods are available on all instances. + +``typedef void (*FinishMethod)(Inst inst)`` + +_`.method.finish`: The ``finish`` method should finish the instance +data structure (releasing any resources that were acquired by the +instance during its lifetime) and then call its superclass method via +the ``NextMethod()`` macro. + +``typedef Res (*DescribeMethod)(Inst inst, mps_lib_FILE *stream, Count depth)`` + +_`.method.describe`: The ``describe`` field should print out a +description of the instance to ``stream`` (by calling ``WriteF()``). + + +A. References +------------- + +.. [Cohen_1991] "Type-Extension Type Tests Can Be Performed In + Constant Time"; Norman H Cohen; IBM Thomas J Watson Research + Center; ACM Transactions on Programming Languages and Systems, + Vol. 13 No. 4, pp. 626-629; 1991-10. + +.. [Gibbs_2004] "Fast Dynamic Casting"; Michael Gibbs, Bjarne + Stroustrup; 2004; + . + +.. [Kernighan_1988] "The C Programming language 2nd Edition"; Brian W. + Kernighan, Dennis M. Ritchie; 1988. + + +B. Document History +------------------- + +- 1998-10-12 Tony Mann. Initial draft. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-04-14 GDR_ Converted to reStructuredText. + +- 2016-04-07 RB_ Removing never-used multiple inheritance speculation. + +- 2016-04-08 RB_ Substantial reorgnisation. + +- 2016-04-13 RB_ Writing up overview of kinds, with explanation of + class extension. Writing up ``Method``, ``NextMethod``, + ``SetClassOfPoly``, ``MustBeA``, etc. and updating the descriptions + of some older interface. Updating the example. + +- 2016-04-19 RB_ Miscellaneous clean-up in response to review by GDR_. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +C. Copyright and License +------------------------ + +Copyright © 2013–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/pthreadext.txt b/mps/design/pthreadext.txt new file mode 100644 index 00000000000..6099920358d --- /dev/null +++ b/mps/design/pthreadext.txt @@ -0,0 +1,402 @@ +.. mode: -*- rst -*- + +POSIX thread extensions +======================= + +:Tag: design.mps.pthreadext +:Author: Tony Mann +:Date: 2000-02-01 +:Status: Draft document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: POSIX thread extensions; design + + +Introduction +------------ + +_`.readership`: Any MPS developer. + +_`.intro`: This is the design of the Pthreads extension module, which +provides some low-level threads support for use by MPS (notably +suspend and resume). 
+
+
+Definitions
+-----------
+
+_`.pthreads`: The term "Pthreads" means an implementation of the POSIX
+1003.1c-1995 thread standard. (Or the Single UNIX Specification,
+Version 2, aka SUSv2 or UNIX98.)
+
+_`.context`: The "context" of a thread is a (platform-specific)
+OS-defined structure which describes the current state of the
+registers for that thread.
+
+
+Requirements
+------------
+
+_`.req.suspend`: A means to suspend threads, so that they don't make
+any progress.
+
+_`.req.suspend.why`: Needed by the thread manager so that other
+threads registered with an arena can be suspended (see
+design.mps.thread-manager_). Not directly provided by Pthreads.
+
+.. _design.mps.thread-manager: thread-manager
+
+_`.req.resume`: A means to resume suspended threads, so that they are
+able to make progress again. _`.req.resume.why`: Needed by the thread
+manager. Not directly provided by Pthreads.
+
+_`.req.suspend.multiple`: Allow a thread to be suspended on behalf of
+one arena when it has already been suspended on behalf of one or more
+other arenas. _`.req.suspend.multiple.why`: The thread manager
+contains no design for cooperation between arenas to prevent this.
+
+_`.req.resume.multiple`: Allow requests to resume a thread on behalf
+of each arena which had previously suspended the thread. The thread
+must only be resumed when requests from all such arenas have been
+received. _`.req.resume.multiple.why`: A thread manager for an arena
+must not permit a thread to make progress before it explicitly resumes
+the thread.
+
+_`.req.suspend.context`: Must be able to access the context for a
+thread when it is suspended.
+
+_`.req.suspend.protection`: Must be able to suspend a thread which is
+currently handling a protection fault (i.e., an arena access). Such a
+thread might even own an arena lock.
+
+_`.req.legal`: Must use the Pthreads and other POSIX APIs in a legal
+manner. 
+ + + +Analysis +-------- + +_`.analysis.suspend`: Thread suspension is inherently asynchronous. MPS +needs to be able to suspend another thread without prior knowledge of +the code that thread is running. (That is, we can't rely on +cooperation between threads.) The only asynchronous communication +available on POSIX is via signals -- so the suspend and resume +mechanism must ultimately be built from signals. + +_`.analysis.signal.safety`: POSIX imposes some restrictions on what a +signal handler function might do when invoked asynchronously (see the +sigaction_ documentation, and search for the string "reentrant"). In +summary, a small number of POSIX functions are defined to be +"async-signal safe", which means they may be invoked without +restriction in signal handlers. All other POSIX functions are +considered to be unsafe. Behaviour is undefined if an unsafe function +is interrupted by a signal and the signal handler then proceeds to +call another unsafe function. See `mail.tony.1999-08-24.15-40`_ and +followups for some further analysis. + +.. _mail.tony.1999-08-24.15-40: https://info.ravenbrook.com/project/mps/mail/1999/08/24/15-40/0.txt +.. _sigaction: https://pubs.opengroup.org/onlinepubs/007908799/xsh/sigaction.html + +_`.analysis.signal.safety.implication`: Since we can't assume that we +won't attempt to suspend a thread while it is running an unsafe +function, we must limit the use of POSIX functions in the suspend +signal handler to those which are designed to be "async-signal safe". +One of the few such functions related to synchronization is +``sem_post()``. + +_`.analysis.signal.example`: An example of how to suspend threads in POSIX +was posted to newsgroup comp.programming.threads in August 1999 +[Lau_1999-08-16]_. 
The code in the post was written by David Butenhof, who +contributed some comments on his implementation [Butenhof_1999-08-16]_ + +_`.analysis.signal.linux-hack`: In the current implementation of Linux +Pthreads, it would be possible to implement suspend/resume using +``SIGSTOP`` and ``SIGCONT``. This is, however, nonportable and will +probably stop working on Linux at some point. + +_`.analysis.component`: There is no known way to meet the requirements +above in a way which cooperates with another component in the system +which also provides its own mechanism to suspend and resume threads. +The best bet for achieving this is to provide the functionality in +shared low-level component which may be used by MPS and other clients. +This will require some discussion with other potential clients and/or +standards bodies. + +_`.analysis.component.dylan`: Note that such cooperation is actually a +requirement for Dylan (req.dylan.dc.env.self), though this is not a +problem, since all the Dylan components share the MPS mechanism. + + +Interface +--------- + +``typedef PThreadextStruct *PThreadext`` + +_`.if.pthreadext.abstract`: A thread is represented by the abstract +type ``PThreadext``. A ``PThreadext`` object corresponds directly with +a thread (of type ``pthread_t``). There may be more than one +``PThreadext`` object for the same thread. + +_`.if.pthreadext.structure`: The structure definition of +``PThreadext`` (``PThreadextStruct``) is exposed by the interface so +that it may be embedded in a client datastructure (for example, +``ThreadStruct``). This means that all storage management can be left +to the client (which is important because there might be multiple +arenas involved). Clients may not access the fields of a +``PThreadextStruct`` directly. + +``void PThreadextInit(PThreadext pthreadext, pthread_t id)`` + +_`.if.init`: Initializes a ``PThreadext`` object for a thread with the +given ``id``. 
+ +``Bool PThreadextCheck(PThreadext pthreadext)`` + +_`.if.check`: Checks a ``PThreadext`` object for consistency. Note +that this function takes the mutex, so it must not be called with the +mutex held (doing so will probably deadlock the thread). + +``Res PThreadextSuspend(PThreadext pthreadext, struct sigcontext **contextReturn)`` + +_`.if.suspend`: Suspends a ``PThreadext`` object (puts it into a +suspended state). Meets `.req.suspend`_. The object must not already +be in a suspended state. If the function returns ``ResOK``, the +context of the thread is returned in contextReturn, and the +corresponding thread will not make any progress until it is resumed. + +``Res PThreadextResume(PThreadext pthreadext)`` + +_`.if.resume`: Resumes a ``PThreadext`` object. Meets `.req.resume`_. +The object must already be in a suspended state. Puts the object into +a non-suspended state. Permits the corresponding thread to make +progress again, although that might not happen immediately if there is +another suspended ``PThreadext`` object corresponding to the same +thread. + +``void PThreadextFinish(PThreadext pthreadext)`` + +_`.if.finish`: Finishes a PThreadext object. + + + +Implementation +-------------- + +``typedef struct PThreadextStruct PThreadextStruct`` + +_`.impl.pthreadext`: The structure definition for a ``PThreadext`` +object is:: + + struct PThreadextStruct { + Sig sig; /* */ + pthread_t id; /* Thread ID */ + MutatorContext context; /* context if suspended */ + RingStruct threadRing; /* ring of suspended threads */ + RingStruct idRing; /* duplicate suspensions for id */ + }; + +_`.impl.field.id`: The ``id`` field shows which PThread the object +corresponds to. + +_`.impl.field.context`: The ``context`` field contains the context +when in a suspended state. Otherwise it is ``NULL``. + +_`.impl.field.threadring`: The ``threadRing`` field is used to chain +the object onto the suspend ring when it is in the suspended state +(see `.impl.global.suspend-ring`_). 
When not in a suspended state, +this ring is single. + +_`.impl.field.idring`: The ``idRing`` field is used to group the +object with other objects corresponding to the same thread (same +``id`` field) when they are in the suspended state. When not in a +suspended state, or when this is the only ``PThreadext`` object with +this ``id`` in the suspended state, this ring is single. + +_`.impl.global.suspend-ring`: The module maintains a global variable +``suspendedRing``, a ring of ``PThreadext`` objects which are in a +suspended state. This is primarily so that it's possible to determine +whether a thread is currently suspended anyway because of another +``PThreadext`` object, when a suspend attempt is made. + +_`.impl.global.victim`: The module maintains a global variable +``suspendingVictim`` which is used to indicate which ``PThreadext`` is +the current victim during suspend operations. This is used to +communicate information between the controlling thread and the thread +being suspended (the victim). The variable has value ``NULL`` at other +times. + +_`.impl.static.mutex`: We use a lock (mutex) around the suspend and +resume operations. This protects the state data (the suspend-ring and +the victim: see `.impl.global.suspend-ring`_ and +`.impl.global.victim`_ respectively). Since only one thread can be +suspended at a time, there's no possibility of two arenas suspending +each other by concurrently suspending each other's threads. + +_`.impl.static.semaphore`: We use a semaphore to synchronize between +the controlling and victim threads during the suspend operation. See +`.impl.suspend`_ and `.impl.suspend-handler`_). + +_`.impl.static.init`: The static data and global variables of the +module are initialized on the first call to ``PThreadextSuspend()``, +using ``pthread_once()`` to avoid concurrency problems. We also enable +the signal handlers at the same time (see `.impl.suspend-handler`_ and +`.impl.resume-handler`_). 
+
+_`.impl.suspend`: ``PThreadextSuspend()`` first ensures the module is
+initialized (see `.impl.static.init`_). After this, it claims the
+mutex (see `.impl.static.mutex`_). It then checks to see whether the
+thread of the target ``PThreadext`` object has already been suspended
+on behalf of another ``PThreadext`` object. It does this by iterating
+over the suspend ring.
+
+_`.impl.suspend.already-suspended`: If another object with the same id
+is found on the suspend ring, then the thread is already suspended.
+The context of the target object is updated from the other object, and
+the other object is linked into the ``idRing`` of the target.
+
+_`.impl.suspend.not-suspended`: If the thread is not already
+suspended, then we forcibly suspend it using a technique similar to
+Butenhof's (see `.analysis.signal.example`_): First we set the victim
+variable (see `.impl.global.victim`_) to indicate the target object.
+Then we send the signal ``PTHREADEXT_SIGSUSPEND`` to the thread (see
+`.impl.signals`_), and wait on the semaphore for it to indicate that
+it has received the signal and updated the victim variable with the
+context. If either of these operations fail (for example, because of
+thread termination) we unlock the mutex and return ``ResFAIL``.
+
+_`.impl.suspend.update`: Once we have ensured that the thread is
+definitely suspended, we add the target ``PThreadext`` object to the
+suspend ring, unlock the mutex, and return the context to the caller.
+
+_`.impl.suspend-handler`: The suspend signal handler is invoked in the
+target thread during a suspend operation, when a
+``PTHREADEXT_SIGSUSPEND`` signal is sent by the controlling thread
+(see `.impl.suspend.not-suspended`_). The handler determines the
+context (received as a parameter, although this may be
+platform-specific) and stores this in the victim object (see
+`.impl.global.victim`_). 
The handler then masks out all signals except +the one that will be received on a resume operation +(``PTHREADEXT_SIGRESUME``) and synchronizes with the controlling +thread by posting the semaphore. Finally the handler suspends until +the resume signal is received, using ``sigsuspend()``. + +_`.impl.resume`: ``PThreadextResume()`` first claims the mutex (see +`.impl.static.mutex`_). It then checks to see whether thread of the +target ``PThreadext`` object has also been suspended on behalf of +another ``PThreadext`` object (in which case the id ring of the target +object will not be single). + +_`.impl.resume.also-suspended`: If the thread is also suspended on +behalf of another ``PThreadext``, then the target object is removed from +the id ring. + +_`.impl.resume.not-also`: If the thread is not also suspended on +behalf of another ``PThreadext``, then the thread is resumed using the +technique proposed by Butenhof (see `.analysis.signal.example`_). I.e. we +send it the signal ``PTHREADEXT_SIGRESUME`` (see `.impl.signals`_) and +expect it to wake up. If this operation fails (for example, because of +thread termination) we unlock the mutex and return ``ResFAIL``. + +_`.impl.resume.update`: Once the target thread is in the appropriate +state, we remove the target ``PThreadext`` object from the suspend +ring, set its context to ``NULL`` and unlock the mutex. + +_`.impl.resume-handler`: The resume signal handler is invoked in the +target thread during a resume operation, when a +``PTHREADEXT_SIGRESUME`` signal is sent by the controlling thread (see +`.impl.resume.not-also`_). The resume signal handler simply returns. +This is sufficient to unblock the suspend handler, which will have +been blocking the thread at the time of the signal. The Pthreads +implementation ensures that the signal mask is restored to the value +it had before the signal handler was invoked. 
+ +_`.impl.finish`: ``PThreadextFinish()`` supports the finishing of +objects in the suspended state, and removes them from the suspend ring +and id ring as necessary. It must claim the mutex for the removal +operation (to ensure atomicity of the operation). Finishing of +suspended objects is supported so that clients can dispose of +resources if a resume operation fails (which probably means that the +PThread has terminated). + +_`.impl.signals`: The choice of which signals to use for suspend and +restore operations may need to be platform-specific. Some signals are +likely to be generated and/or handled by other parts of the +application and so should not be used (for example, ``SIGSEGV``). Some +implementations of PThreads use some signals for themselves, so they +may not be used; for example, LinuxThreads uses ``SIGUSR1`` and +``SIGUSR2`` for its own purposes, and so do popular tools like +Valgrind that we would like to be compatible with the MPS. The design +therefore abstractly names the signals ``PTHREADEXT_SIGSUSPEND`` and +``PTHREAD_SIGRESUME``, so that they may be easily mapped to +appropriate real signal values. Candidate choices are ``SIGXFSZ`` and +``SIGXCPU``. + +_`.impl.signals.config`: The identity of the signals used to suspend +and resume threads can be configured at compilation time using the +preprocessor constants ``CONFIG_PTHREADEXT_SIGSUSPEND`` and +``CONFIG_PTHREADEXT_SIGRESUME`` respectively. + + +Attachments +----------- + +[missing attachment "posix.txt"] + +[missing attachment "susp.c"] + + +References +---------- + +.. [Butenhof_1999-08-16] + "Re: Problem with Suspend & Resume Thread Example"; + Dave Butenhof; comp.programming.threads; 1999-08-16; + . + +.. [Lau_1999-08-16] + "Problem with Suspend & Resume Thread Example"; + Raymond Lau; comp.programming.threads; 1999-08-16; + . + + +Document History +---------------- + +- 2000-02-01 Tony Mann. Draft document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. 
+ +- 2013-05-23 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/range.txt b/mps/design/range.txt new file mode 100644 index 00000000000..990352802a8 --- /dev/null +++ b/mps/design/range.txt @@ -0,0 +1,165 @@ +.. mode: -*- rst -*- + +Ranges of addresses +=================== + +:Tag: design.mps.range +:Author: Gareth Rees +:Date: 2013-05-21 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. 
+:Index terms: pair: address range; design + + +Introduction +------------ + +_`.intro`: This is the design of the Range module, which implements +objects representing address ranges. + +_`.readership`: This document is intended for any MPS developer. + + +Requirements +------------ + +_`.req.range`: A range object must be able to represent an arbitrary +range of addresses that neither starts at ``NULL`` nor includes the +top grain of the address space. + +_`.req.empty`: A range object must be able to represent the empty +range. + +_`.req.stack-alloc`: It must be possible to allocate range objects on +the stack: that is, they do not require any heap resource. + + +Interface +--------- + +``typedef RangeStruct *Range`` + +``Range`` is the type of a range. It is an alias for +``RangeStruct *``. ``RangeStruct`` is defined in the header so that it +can be inlined in client structures or allocated on the stack. Clients +must not depend on its implementation details. + +``void RangeInit(Range range, Addr base, Addr limit)`` + +Initialize a range object to represent the half-open address range +between ``base`` (inclusive) and ``limit`` (exclusive). It must be the +case that ``base <= limit``. If ``base == limit`` then the range is +empty. + +``void RangeCopy(Range dest, Range src)`` + +Initialize ``dest`` to be a copy of ``src``. + +``void RangeInitSize(Range range, Addr base, Size size)`` + +Initialize a range object to represent the half-open address range +between ``base`` (inclusive) and ``base + size`` (exclusive). If +``size == 0`` then the range is empty. + +``void RangeFinish(Range range)`` + +Finish a range object. Because a range object uses no heap resources +(`.req.stack-alloc`_) it is not necessary to call this. However, +clients may wish to do so in order to ensure that the range object is +invalid. + +``Addr RangeBase(Range range)`` + +Return the base of the range. (This is implemented as a macro, but +there is a function too.) 
+
+``Addr RangeLimit(Range range)``
+
+Return the limit of the range. (This is implemented as a macro, but
+there is a function too.)
+
+``void RangeSetBase(Range range, Addr addr)``
+
+Set the base of the range. ``addr`` must not be greater than the range
+limit. To set them both at once, use ``RangeInit()``. (This is
+implemented as a macro, but there is a function too.)
+
+``void RangeSetLimit(Range range, Addr addr)``
+
+Set the limit of the range. ``addr`` must not be less than the range
+base. To set them both at once, use ``RangeInit()``. (This is
+implemented as a macro, but there is a function too.)
+
+``Size RangeSize(Range range)``
+
+Return the size of the range. (This is implemented as a macro, but
+there is a function too. The macro evaluates its argument twice.)
+
+``Bool RangeContains(Range range, Addr addr)``
+
+Return ``TRUE`` if ``addr`` belongs to the range, or ``FALSE`` if it
+does not. (This is implemented as a macro, but there is a function
+too. The macro evaluates its arguments twice.)
+
+``Bool RangeIsEmpty(Range range)``
+
+Return ``TRUE`` if the range is empty (contains no addresses),
+``FALSE`` otherwise. (This is implemented as a macro, but there is a
+function too. The macro evaluates its argument twice.)
+
+``Bool RangeIsAligned(Range range, Align alignment)``
+
+Return ``TRUE`` if the base and limit of the range are both aligned to
+the given alignment, or ``FALSE`` if either is not.
+
+``Bool RangesOverlap(Range range1, Range range2)``
+
+Return ``TRUE`` if the two ranges overlap (have at least one address
+in common), or ``FALSE`` if they do not. Note that ranges [*A*, *B*) and
+[*B*, *C*) do not overlap.
+
+``Bool RangesNest(Range outer, Range inner)``
+
+Return ``TRUE`` if all addresses in ``inner`` are also in ``outer``,
+or ``FALSE`` otherwise.
+
+
+Document history
+----------------
+
+- 2013-05-21 GDR_ Created.
+- 2014-01-15 GDR_ Added ``RangeContains()``.
+- 2016-03-27 RB_ Added ``RangeSetBase()`` and ``RangeSetLimit()``.
+ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ +.. _RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/ring.txt b/mps/design/ring.txt new file mode 100644 index 00000000000..f0dc72efde0 --- /dev/null +++ b/mps/design/ring.txt @@ -0,0 +1,282 @@ +.. mode: -*- rst -*- + +Ring data structure +=================== + +:Tag: design.mps.ring +:Author: Richard Brooksby +:Date: 1996-09-26 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. 
+:Index terms: pair: ring structure; design + + +Introduction +------------ + +_`.source`: rings are derived from the earlier use of double-ended +queues (deques). RB found that most of the deque features were unused +(see item 6 of `mail.richard.1996-03-25.16-02`_) and so the simple +doubly-linked list structure of rings suffices. + +.. _mail.richard.1996-03-25.16-02: https://info.ravenbrook.com/project/mps/mail/1996/03/25/16-02/0.txt + + +Description +----------- + +``typedef RingStruct *Ring`` + +_`.def.ring`: Rings are circular doubly-linked lists of ring "nodes". +The nodes are fields of structures which are the "elements" of the +ring. + +Ring node structures (``RingStruct``) are inlined in the structures on +the ring, like this:: + + typedef struct FooStruct *Foo; /* the element type */ + typedef struct FooStruct { /* the element structure */ + int baz, bim; + RingStruct ring; /* the ring node */ + float bip, bop; + } FooStruct; + +This arrangement means that they do not need to be managed separately. +This is especially useful in avoiding re-entrancy and bootstrapping +problems in the memory manager. Rings also provide flexible insertion +and deletion because the entire ring can be found from any node. + +In the MPS, rings are used to connect a "parent" structure (such as a +``Arena``) to a number of "child" structures (such as ``Pool``), as +shown in `.fig.ring`_. + +_`.fig.ring`: A ring of ``Child`` objects owned by a ``Parent`` +object. + +[missing figure] + +_`.fig.empty`: An empty ring of ``Child`` objects owned by a ``Parent`` +object. + +[missing figure] + +_`.def.singleton`: A "singleton" ring is a ring containing one node, +whose previous and next nodes are itself (see `.fig.single`_). + +_`.fig.single`: A singleton ``Child`` object not on any ring. + +[missing figure] + +_`.fig.elt`: How ``RING_ELT()`` gets a parent pointer from a node pointer. + +[missing figure] + + +Interface +--------- + +Init / Finish +............. 
+
+``void RingInit(Ring ring)``
+
+_`.init`: Rings are initialized with the ``RingInit()`` function. They
+are initialized to be a singleton ring (`.def.singleton`_).
+
+``void RingFinish(Ring ring)``
+
+_`.finish`: Rings are finished with the ``RingFinish()`` function. A
+ring must be a singleton ring before it can be finished (it is an
+error to attempt to finish a non-singleton ring).
+
+
+Checking
+........
+
+``Bool RingCheck(Ring ring)``
+
+_`.check`: ``RingCheck()`` is the check function for rings. See
+design.mps.check_.
+
+.. _design.mps.check: check
+
+``Bool RingCheckSingle(Ring ring)``
+
+_`.check.single`: ``RingCheckSingle()`` is a check function that
+additionally checks that ``ring`` is a singleton (see
+`.def.singleton`_).
+
+``Bool RingIsSingle(Ring ring)``
+
+_`.is.single`: Return ``TRUE`` if ``ring`` is a singleton (see
+`.def.singleton`_).
+
+``Count RingLength(Ring ring)``
+
+_`.length`: Return the number of elements in the ring, not counting
+``ring`` itself. This therefore returns 0 for singleton rings, and for
+parent-children rings it returns the number of children.
+
+
+Iteration
+.........
+
+``RING_FOR(node, ring, next)``
+
+_`.for`: A macro is used for iterating over the elements in a ring.
+This macro is called ``RING_FOR()``. ``RING_FOR()`` takes three arguments.
+The first is an iteration variable: ``node``. The second is the
+"parent" element in the ring: ``ring``. The third is a variable used
+by the iterator for working state (it holds a pointer to the next
+node): ``next``. All arguments must be of type ``Ring``. The ``node``
+and ``next`` variables must be declared and in scope already. All
+elements except for the "parent" element are iterated over. The macro
+expands to a ``for`` statement. During execution of the loop, the
+``node`` variable (the first argument to the macro) will be the value
+of successive elements in the Ring (at the beginning of the statement
+in the body of the loop).
+
+_`.for.error`: It is an error (possibly unchecked) for the ``node``
+and ``next`` variables to be modified except implicitly by using this
+iterator.
+
+_`.for.safe`: It is safe to delete the current node during the
+iteration.
+
+_`.for.ex`: An example::
+
+    Ring node, nextNode;
+    RING_FOR(node, &parent->childRing, nextNode) {
+      Child child = RING_ELT(Child, ParentRing, node);
+      foo(child);
+    }
+
+_`.for.ex.elt`: Notice the idiomatic use of ``RING_ELT()`` which is
+almost universal when using ``RING_FOR()``.
+
+
+Element access
+..............
+
+``Ring RingNext(Ring ring)``
+
+_`.next`: ``RingNext()`` returns the next node in the ring.
+
+``Ring RingPrev(Ring ring)``
+
+_`.prev`: ``RingPrev()`` returns the previous node in the ring.
+
+``RING_ELT(type, field, node)``
+
+_`.elt`: ``RING_ELT()`` is a macro that converts a pointer to a ring
+structure into a pointer to the enclosing parent structure.
+``RING_ELT()`` has three arguments which are, in order: ``type``, the
+type of a pointer to the enclosing structure, ``field``, the name of
+the ring structure field within it, ``node``, the ring node. The
+result is a pointer to the enclosing structure.
+
+.. note:: ``RING_ELT()`` does not work for arrays of rings.
+
+
+Append / Remove
+...............
+
+``void RingAppend(Ring ring, Ring new)``
+
+_`.append`: ``RingAppend()`` appends a singleton ring to a ring (such
+that the newly added element will be last in the iteration sequence).
+
+``void RingInsert(Ring ring, Ring new)``
+
+_`.insert`: ``RingInsert()`` adds a singleton ring to a ring (such that
+the newly added element will be first in the iteration sequence).
+
+``void RingRemove(Ring old)``
+
+_`.remove`: ``RingRemove()`` removes an element from a ring. The newly
+removed element becomes a singleton ring. It is an error for the
+element to already be a singleton.
+
+_`.improve.join`: It would be possible to add a ``RingJoin()`` operation
+that joined two rings. This is not done as it is not required.
+ + +Naming +------ + +_`.naming`: By convention, when one structure ``Parent`` contains one +ring of ``Child`` structures, the field in ``Parent`` is usually known +as ``childRing``, and the field in ``Child`` is known as +``parentRing``. If the ``Parent`` structure contains more than one +ring of ``Child`` structures, then they should have names like +``allocatedChildRing`` and ``freeChildRing``. + +_`.naming.rule.break`: Note the slight abuse of naming convention, in +that the ring members have names ending in ``Ring`` rather than +``RingStruct``. + + +Deques +------ + +This section documents where rings differ significantly from deques. + +_`.head`: Deques used a distinguished head structure for the head of +the ring. Rings still have a separate head structure, but it is not +distinguished by type. + + +Defects +------- + +This section documents known defects with the current design. + +_`.app_for.misuse`: It is easy to pass ``RingAppend()`` and +``RING_FOR()`` the arguments in the wrong order as all the arguments +have the same type. + +_`.check.improve`: There is no method for performing a full integrity +check. This could be added. + + +Document History +---------------- + +- 1996-09-26 RB_ Created. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-22 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 1995–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/root.txt b/mps/design/root.txt new file mode 100644 index 00000000000..1e84634b920 --- /dev/null +++ b/mps/design/root.txt @@ -0,0 +1,123 @@ +.. mode: -*- rst -*- + +Root manager +============ + +:Tag: design.mps.root +:Author: Richard Brooksby +:Date: 1995-08-25 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: root manager; design + + +Basics +------ + +_`.root.def`: The root node of the object graph is the node which +defines whether objects are accessible, and the place from which the +mutator acts to change the graph. In the MPS, a root is an object +which describes part of the root node. The root node is the total of +all the roots attached to the space. + +.. note:: + + Note that this combines two definitions of root: the accessibility + is what defines a root for tracing (see analysis.tracer.root.* and + the mutator action for barriers (see analysis.async-gc.root). + Pekka P. Pirinen, 1998-03-20. 
+ +_`.root.repr`: Functionally, roots are defined by their scanning +functions. Roots *could* be represented as function closures: that is, +a pointer to a C function and some auxiliary fields. The most general +variant of roots is just that. However, for reasons of efficiency, +some special variants are separated out. + + +Details +------- + +Creation +........ + +_`.create`: A root becomes "active" as soon as it is created. + +_`.create.col`: The root inherits its colour from the mutator, since +it can only contain references copied there by the mutator from +somewhere else. If the mutator is grey for a trace when a root is +created then that root will be used to determine accessibility for +that trace. More specifically, the root will be scanned when that +trace flips. + +Destruction +........... + +_`.destroy`: It's OK to destroy a root at any time, except perhaps +concurrently with scanning it, but that's prevented by the arena lock. +If a root is destroyed the references in it become invalid and +unusable. + +Invariants +.......... + +_`.inv.white`: Roots are never white for any trace, because they +cannot be condemned. + +_`.inv.rank`: Roots always have a single rank. A root without ranks +would be a root without references, which would be pointless. The +tracer doesn't support multiple ranks in a single colour. + +Scanning +........ + +_`.method`: Root scanning methods are provided by the client so that +the MPS can locate and scan the root set. See protocol.mps.root for +details. + +.. note:: + + There are some more notes about root methods in + meeting.qa.1996-10-16. + + +Document History +---------------- + +- 1995-08-25 RB_ Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-22 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/scan.txt b/mps/design/scan.txt new file mode 100644 index 00000000000..c04741d2f72 --- /dev/null +++ b/mps/design/scan.txt @@ -0,0 +1,147 @@ +.. mode: -*- rst -*- + +The generic scanner +=================== + +:Tag: design.mps.scan +:Author: Richard Brooksby +:Date: 1995-08-25 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: generic scanner; design + + +Summaries +--------- + +Scanned summary +............... 
+
+_`.summary.subset`: The summary of references seen by scan
+(``ss.unfixedSummary``) is a subset of the summary previously computed
+(``SegSummary()``).
+
+There are two reasons that it is not an equality relation:
+
+1. If the segment has had objects forwarded onto it then its summary
+   will get unioned with the summary of the segment that the object
+   was forwarded from. This may increase the summary. The forwarded
+   object of course may have a smaller summary (if such a thing were
+   to be computed) and so subsequent scanning of the segment may
+   reduce the summary. (The forwarding process may erroneously
+   introduce zones into the destination's summary).
+
+2. A write barrier hit will set the summary to ``RefSetUNIV``.
+
+The reason that ``ss.unfixedSummary`` is always a subset of the
+previous summary is due to an "optimization" which has not been made
+in ``TraceFix()``. See design.mps.trace.fix.fixed.all_.
+
+.. _design.mps.trace.fix.fixed.all: trace#.fix.fixed.all
+
+
+Partial scans
+.............
+
+_`.clever-summary`: With enough cleverness, it's possible to have
+partial scans of condemned segments contribute to the segment summary.
+
+.. note::
+
+    We had a system which nearly worked -- see MMsrc(MMdevel_poolams
+    at 1997/08/14 13:02:55 BST), but it did not handle the situation
+    in which a segment was not under the write barrier when it was
+    condemned.
+
+_`.clever-summary.acc`: Each time we partially scan a segment, we
+accumulate the post-scan summary of the scanned objects into a field
+in the group, called ``summarySoFar``. The post-scan summary is
+(summary \ white) ∪ fixed.
+
+_`.clever-summary.acc.condemn`: The cumulative summary is only
+meaningful while the segment is condemned. Otherwise it is set to
+``RefSetEMPTY`` (a value which we can check).
+
+_`.clever-summary.acc.reclaim`: Then when we reclaim the segment, we
+set the segment summary to the cumulative summary, as it is a
+post-scan summary of all the scanned objects.
+ +_`.clever-summary.acc.other-trace`: If the segment is scanned by +another trace while it is condemned, the cumulative summary must be +set to the post-scan summary of this scan (otherwise it becomes +out-of-date). + +_`.clever-summary.scan`: The scan summary is expected to be a summary +of all scanned references in the segment. We don't know this +accurately until we've scanned everything in the segment. So we add in +the segment summary each time. + +_`.clever-summary.scan.fix`: ``traceScanSeg()`` also expects the scan +state fixed summary to include the post-scan summary of all references +which were white. Since we don't scan all white references, we need to +add in an approximation to the summary of all white references which +we didn't scan. This is the intersection of the segment summary and +the white summary. + +_`.clever-summary.wb`: If the cumulative summary is smaller than the +mutator's summary, a write-barrier is needed to prevent the mutator +from invalidating it. This means that sometimes we'd have to put the +segment under the write-barrier at condemn, which might not be very +efficient + +.. note:: + + This is not an operation currently available to pool class + implementations Pekka P. Pirinen, 1998-02-26. + +_`.clever-summary.method.wb`: We need a new pool class method, called +when the write barrier is hit (or possibly any barrier hit). The +generic method will do the usual TraceAccess work, the trivial method +will do nothing. + +_`.clever-summary.acc.wb`: When the write barrier is hit, we need to +correct the cumulative summary to the mutator summary. This is +approximated by setting the summary to ``RefSetUNIV``. + + +Document History +---------------- + +- 1995-08-25 RB_ Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-22 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2001–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/seg.txt b/mps/design/seg.txt new file mode 100644 index 00000000000..809c76682a9 --- /dev/null +++ b/mps/design/seg.txt @@ -0,0 +1,534 @@ +.. mode: -*- rst -*- + +Segment data structure +====================== + +:Tag: design.mps.seg +:Author: David Jones +:Date: 1997-04-03 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: segments; design + + +Introduction +------------ + +_`.intro`: This is the design of the segment data structure. 
+ + +Overview +-------- + +_`.over.segments`: Segments are the basic units of tracing and +shielding. The MPM also uses them as units of scanning and colour, +although pool classes may subdivide segments and be able to maintain +colour on a finer grain (down to the object level, for example). + +_`.over.objects`: The mutator's objects are stored in segments. +Segments are contiguous blocks of memory managed by some pool. + +_`.segments.pool`: The arrangement of objects within a segment is +determined by the class of the pool which owns the segment. The pool +is associated with the segment indirectly via the first tract of the +segment. + +_`.over.memory`: The relationship between segments and areas of memory +is maintained by the segment module. Pools acquire tracts from the +arena, and release them back to the arena when they don't need them +any longer. The segment module can associate contiguous tracts owned +by the same pool with a segment. The segment module provides the +methods SegBase, SegLimit, and SegSize which map a segment onto the +addresses of the memory block it represents. + +_`.over.hierarchy`: The Segment datastructure is designed to be +subclassable (see design.mps.protocol_). The basic segment class +(``Seg``) supports colour and protection for use by the tracer, as +well as support for a pool ring, and all generic segment functions. +Clients may use ``Seg`` directly, but will most probably want to use a +subclass with additional properties. + +.. _design.mps.protocol: protocol + +_`.over.hierarchy.gcseg`: ``GCSeg`` is a subclass of ``Seg`` which +implements garbage collection, including buffering and the ability to +be linked onto the grey ring. It does not implement hardware barriers, +and so can only be used with software barriers, for example internally +in the MPS. + +_`.over.hierarchy.mutatorseg`: ``MutatorSeg`` is a subclass of +``GCSeg`` implementing hardware barriers. It is suitable for handing +out to the mutator. 
+ + +Data Structure +-------------- + +``typedef struct SegStruct *Seg`` +``typedef struct GCSegStruct *GCSeg`` + +The implementations are as follows:: + + typedef struct SegStruct { /* segment structure */ + Sig sig; /* */ + SegClass class; /* segment class structure */ + Tract firstTract; /* first tract of segment */ + RingStruct poolRing; /* link in list of segs in pool */ + Addr limit; /* limit of segment */ + unsigned depth : ShieldDepthWIDTH; /* see */ + AccessSet pm : AccessLIMIT; /* protection mode, */ + AccessSet sm : AccessLIMIT; /* shield mode, */ + TraceSet grey : TraceLIMIT; /* traces for which seg is grey */ + TraceSet white : TraceLIMIT; /* traces for which seg is white */ + TraceSet nailed : TraceLIMIT; /* traces for which seg has nailed objects */ + RankSet rankSet : RankLIMIT; /* ranks of references in this seg */ + } SegStruct; + + typedef struct GCSegStruct { /* GC segment structure */ + SegStruct segStruct; /* superclass fields must come first */ + RingStruct greyRing; /* link in list of grey segs */ + RefSet summary; /* summary of references out of seg */ + Buffer buffer; /* non-NULL if seg is buffered */ + Sig sig; /* design.mps.sig */ + } GCSegStruct; + + +_`.field.rankSet`: The ``rankSet`` field represents the set of ranks +of the references in the segment. It is initialized to empty by +``SegInit()``. + +_`.field.rankSet.single`: The Tracer only permits one rank per segment +[ref?] so this field is either empty or a singleton. + +_`.field.rankSet.empty`: An empty ``rankSet`` indicates that there are +no references. If there are no references in the segment then it +cannot contain black or grey references. + +_`.field.rankSet.start`: If references are stored in the segment then +it must be updated, along with the summary (`.field.summary.start`_). + +_`.field.depth`: The ``depth`` field is used by the Shield +(impl.c.shield) to manage protection of the segment. It is initialized +to zero by ``SegInit()``. 
+ +_`.field.sm`: The ``sm`` field is used by the Shield (impl.c.shield) +to manage protection of the segment. It is initialized to +``AccessSetEMPTY`` by ``SegInit()``. + +_`.field.pm`: The ``pm`` field is used by the Shield (impl.c.shield) +to manage protection of the segment. It is initialized to +``AccessSetEMPTY`` by ``SegInit()``. The field is used by both the +shield and the ANSI fake protection (impl.c.protan). + +_`.field.black`: The ``black`` field is the set of traces for which +there may be black objects (that is, objects containing references, +but no references to white objects) in the segment. More precisely, if +there is a black object for a trace in the segment then that trace +will appear in the ``black`` field. It is initialized to +``TraceSetEMPTY`` by ``SegInit()``. + +_`.field.grey`: The ``grey`` field is the set of traces for which +there may be grey objects (i.e containing references to white objects) +in the segment. More precisely, if there is a reference to a white +object for a trace in the segment then that trace will appear in the +``grey`` field. It is initialized to ``TraceSetEMPTY`` by ``SegInit()``. + +_`.field.white`: The ``white`` field is the set of traces for which +there may be white objects in the segment. More precisely, if there is +a white object for a trace in the segment then that trace will appear +in the ``white`` field. It is initialized to ``TraceSetEMPTY`` by +``SegInit()``. + +_`.field.summary`: The ``summary`` field is an approximation to the +set of all references in the segment. If there is a reference ``R`` in +the segment, then ``RefSetIsMember(summary, R)`` is ``TRUE``. The +summary is initialized to ``RefSetEMPTY`` by ``SegInit()``. + +_`.field.summary.start`: If references are stored in the segment then +it must be updated, along with ``rankSet`` (`.field.rankSet.start`_). 
+ +_`.field.buffer`: The ``buffer`` field is either ``NULL``, or points +to the descriptor structure of the buffer which is currently +allocating in the segment. The field is initialized to ``NULL`` by +``SegInit()``. + +_`.field.buffer.owner`: This buffer must belong to the same pool as +the segment, because only that pool has the right to attach it. + + +Interface +--------- + +Splitting and merging +..................... + +_`.split-and-merge`: There is support for splitting and merging +segments, to give pools the flexibility to rearrange their tracts +among segments as they see fit. + +``Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at)`` + +_`.split`: If successful, segment ``seg`` is split at address ``at``, +yielding two segments which are returned in segLoReturn and +segHiReturn for the low and high segments respectively. The base of +the low segment is the old base of ``seg``. The limit of the low +segment is ``at``. The base of the high segment is ``at``. This limit +of the high segment is the old limit of ``seg``. ``seg`` is +effectively destroyed during this operation (actually, it might be +reused as one of the returned segments). Segment subclasses may make +use of the optional arguments; the built-in classes do not. + +_`.split.invariants`: The client must ensure some invariants are met +before calling ``SegSplit()``: + +- _`.split.inv.align`: ``at`` must be a multiple of the arena grain + size, and lie between the base and limit of ``seg``. Justification: + the split segments cannot be represented if this is not so. + +- _`.split.inv.buffer`: If ``seg`` is attached to a buffer, the + buffered region must not include address ``at``. Justification: the + segment module is not in a position to know how (or whether) a pool + might wish to split a buffer. This permits the buffer to remain + attached to just one of the returned segments. 
+ +_`.split.state`: Except as noted above, the segments returned have the +same properties as ``seg``. That is, their colour, summary, rankset, +nailedness etc. are set to the values of ``seg``. + +``Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi)`` + +_`.merge`: If successful, segments ``segLo`` and ``segHi`` are merged +together, yielding a segment which is returned in mergedSegReturn. +``segLo`` and ``segHi`` are effectively destroyed during this +operation (actually, one of them might be reused as the merged +segment). Segment subclasses may make use of the optional arguments; +the built-in classes do not. + +_`.merge.invariants`: The client must ensure some invariants are met +before calling ``SegMerge()``: + +- _`.merge.inv.abut`: The limit of ``segLo`` must be the same as the + base of ``segHi``. Justification: the merged segment cannot be + represented if this is not so. + +- _`.merge.inv.buffer`: One or other of ``segLo`` and ``segHi`` may + be attached to a buffer, but not both. Justification: the segment + module does not support attachment of a single seg to 2 buffers. + +- _`.merge.inv.similar`: ``segLo`` and ``segHi`` must be sufficiently + similar. Two segments are sufficiently similar if they have + identical values for each of the following fields: ``class``, + ``grey``, ``white``, ``nailed``, ``rankSet``. Justification: There + has yet to be a need to implement default behaviour for these + cases. Pool classes should arrange for these values to be the same + before calling ``SegMerge()``. + +_`.merge.state`: The merged segment will share the same state as +``segLo`` and ``segHi`` for those fields which are identical (see +`.merge.inv.similar`_). The summary will be the union of the summaries +of ``segLo`` and ``segHi``. + + +Extensibility +------------- + +Allocation +.......... 
+ +``typedef Bool (*SegBufferFillMethod)(Addr *baseReturn, Addr *limitReturn, Seg seg, Size size, RankSet rankSet)`` + +_`.method.buffer-fill`: Allocate a block in the segment, of at least +``size`` bytes, with the given set of ranks. If successful, update +``*baseReturn`` and ``*limitReturn`` to the block and return ``TRUE``. +Otherwise, return ``FALSE``. The allocated block must be accounted as +buffered (see design.mps.strategy.account.buffered_). + +.. _design.mps.strategy.account.buffered: strategy#.account.buffered + +``typedef void (*SegBufferEmptyMethod)(Seg seg, Buffer buffer)`` + +_`.method.buffer-empty`: Free the unused part of the buffer to the +segment. Account the used part as new (see design.mps.strategy.account.new_) and the unused part as free (see design.mps.strategy.account.free_). + +.. _design.mps.strategy.account.new: strategy#.account.new +.. _design.mps.strategy.account.free: strategy#.account.free + + +Garbage collection +.................. + +``typedef Res (*SegAccessMethod)(Seg seg, Arena arena, Addr addr, AccessSet mode, MutatorContext context)`` + +_`.method.access`: The ``access`` method indicates that the client +program attempted to access the address ``addr``, but has been denied +due to a protection fault. The ``mode`` indicates whether the client +program was trying to read (``AccessREAD``) or write (``AccessWRITE``) +the address. If this can't be determined, ``mode`` is ``AccessREAD | +AccessWRITE``. The segment should perform any work necessary to remove +the protection whilst still preserving appropriate invariants (this +might involve scanning the region containing ``addr``). Segment classes are +not required to provide this method, and not doing so indicates they +never protect any memory managed by the pool. This method is called +via the generic function ``SegAccess()``. 
+ +``typedef Res (*SegWhitenMethod)(Seg seg, Trace trace)`` + +_`.method.whiten`: The ``whiten`` method requests that the segment +``seg`` condemn (a subset of, but typically all) its objects for the +trace ``trace``. That is, prepare them for participation in the trace +to determine their liveness. The segment should expect fix requests +(`.method.fix`_) during the trace and a reclaim request +(`.method.reclaim`_) at the end of the trace. Segment +classes that automatically reclaim dead objects must provide this +method, and pools that use these segment classes must additionally set +the ``AttrGC`` attribute. This method is called via the generic +function ``SegWhiten()``. + +``typedef void (*SegGreyenMethod)(Seg seg, Trace trace)`` + +_`.method.grey`: The ``greyen`` method requires the segment ``seg`` to +colour its objects grey for the trace ``trace`` (excepting objects +that were already condemned for this trace). That is, make them ready +for scanning by the trace ``trace``. The segment must arrange that any +appropriate invariants are preserved, possibly by using the protection +interface (see design.mps.prot_). Segment classes are not required to +provide this method, and not doing so indicates that all instances of +this class will have no fixable or traceable references in them. This +method is called via the generic function ``SegGreyen()``. + +.. _design.mps.prot: prot + +``typedef void (*SegBlackenMethod)(Seg seg, TraceSet traceSet)`` + +_`.method.blacken`: The ``blacken`` method is called if it is known +that the segment ``seg`` cannot refer to the white set for any of the +traces in ``traceSet``. The segment must blacken all its grey objects +for those traces. Segment classes are not required to provide this +method, and not doing so indicates that all instances of this class +will have no fixable or traceable references in them. This method is +called via the generic function ``SegBlacken()``. 
+ +``typedef Res (*SegScanMethod)(Bool *totalReturn, Seg seg, ScanState ss)`` + +_`.method.scan`: The ``scan`` method scans all the grey objects on the +segment ``seg``, passing the scan state ``ss`` to +``TraceScanFormat()``. The segment may additionally accumulate a +summary of *all* its objects. If it succeeds in accumulating such a +summary it must indicate that it has done so by setting the +``*totalReturn`` parameter to ``TRUE``. Otherwise it must set +``*totalReturn`` to ``FALSE``. This method is called via the generic +function ``SegScan()``. + +_`.method.scan.required`: Automatically managed segment classes are +required to provide this method, even if all instances of this class +will have no fixable or traceable references in them, in order to +support ``mps_pool_walk()``. + +``typedef Res (*SegFixMethod)(Seg seg, ScanState ss, Ref *refIO)`` + +_`.method.fix`: The ``fix`` method indicates that the reference +``*refIO`` has been discovered at rank ``ss->rank`` by the traces in +``ss->traces``, and the segment must handle this discovery according +to the fix protocol (design.mps.fix_). If the method moves the object, +it must update ``*refIO`` to refer to the new location of the object. +If the method determines that the referenced object died (for example, +because the highest-ranking references to the object were weak), it +must update ``*refIO`` to ``NULL``. Segment classes that automatically +reclaim dead objects must provide this method, and pools that use +these classes must additionally set the ``AttrGC`` attribute. Pool +classes that use segment classes that may move objects must also set +the ``AttrMOVINGGC`` attribute. The ``fix`` method is on the critical +path (see design.mps.critical-path_) and so must be fast. This method +is called via the function ``TraceFix()``. + +.. _design.mps.fix: fix +.. 
_design.mps.critical-path: critical-path + +_`.method.fixEmergency`: The ``fixEmergency`` method is used to +perform fixing in "emergency" situations. Its specification is +identical to the ``fix`` method, but it must complete its work without +allocating memory (perhaps by using some approximation, or by running +more slowly). Segment classes must provide this method if and only if +they provide the ``fix`` method. If the ``fix`` method does not need +to allocate memory, then it is acceptable for ``fix`` and +``fixEmergency`` to be the same. + +``typedef void (*SegReclaimMethod)(Seg seg, Trace trace)`` + +_`.method.reclaim`: The ``reclaim`` method indicates that any +remaining white objects in the segment ``seg`` have now been proved +unreachable by the trace ``trace``, and so are dead. The segment +should reclaim the resources associated with the dead objects. Segment +classes are not required to provide this method. If they do, pools +that use them must set the ``AttrGC`` attribute. This method is called +via the generic function ``SegReclaim()``. + +``typedef void (*SegWalkMethod)(Seg seg, Format format, FormattedObjectsVisitor f, void *v, size_t s)`` + +_`.method.walk`: The ``walk`` method must call the visitor function +``f`` (along with its closure parameters ``v`` and ``s`` and the +format ``format``) once for each of the *black* objects in the segment +``seg``. Padding objects may or may not be included in the walk, at +the segment's discretion: it is the responsibility of the client +program to handle them. Forwarding objects must not be included in the +walk. Segment classes need not provide this method. This method is +called by the generic function ``SegWalk()``, which is called by the +deprecated public functions ``mps_arena_formatted_objects_walk()`` and +``mps_amc_apply()``. 
+ +_`.method.walk.deprecated`: The ``walk`` method is deprecated along +with the public functions ``mps_arena_formatted_objects_walk()`` and +``mps_amc_apply()`` and will be removed along with them in a future +release. + +``typedef void (*SegFlipMethod)(Seg seg, Trace trace)`` + +_`.method.flip`: Raise the read barrier, if necessary, for a trace +that's about to flip and for which the segment is grey and potentially +contains references. + + +Splitting and merging +..................... + +``typedef Res (*SegSplitMethod)(Seg seg, Seg segHi, Addr base, Addr mid, Addr limit)`` + +_`.method.split`: Segment subclasses may extend the support for +segment splitting by defining their own "split" method. On entry, +``seg`` is a segment with region ``[base,limit)``, ``segHi`` is +uninitialized, ``mid`` is the address at which the segment is to be +split. The method is responsible for destructively modifying ``seg`` +and initializing ``segHi`` so that on exit ``seg`` is a segment with +region ``[base,mid)`` and ``segHi`` is a segment with region +``[mid,limit)``. Usually a method would only directly modify the +fields defined for the segment subclass. + +_`.method.split.next`: A split method should always call the next +method, either before or after any class-specific code (see +design.mps.protocol.overview.next-method_). + +.. _design.mps.protocol.overview.next-method: protocol#.overview.next-method + +_`.method.split.accounting`: If ``seg`` belongs to a generation in a +chain, then the pool generation accounting must be updated. In the +simple case where the split segments remain in the same generation, +this can be done by calling ``PoolGenAccountForSegSplit()``. + +``typedef Res (*SegMergeMethod)(Seg seg, Seg segHi, Addr base, Addr mid, Addr limit)`` + +_`.method.merge`: Segment subclasses may extend the support for +segment merging by defining their own ``merge`` method. 
On entry, +``seg`` is a segment with region ``[base,mid)``, ``segHi`` is a +segment with region ``[mid,limit)``. The method is responsible for +destructively modifying ``seg`` and finishing ``segHi`` so that on +exit ``seg`` is a segment with region ``[base,limit)`` and ``segHi`` +is garbage. Usually a method would only modify the fields defined for +the segment subclass. + +_`.method.merge.next`: A merge method should always call the next +method, either before or after any class-specific code (see +design.mps.protocol.overview.next-method_). + +.. _design.mps.protocol.overview.next-method: protocol#.overview.next-method + +_`.method.merge.accounting`: If ``seg`` belongs to a generation in a +chain, then the pool generation accounting must be updated. In the +simple case where the two segments started in the same generation and +the merged segment remains in that generation, this can be done by +calling ``PoolGenAccountForSegMerge()``. + +_`.split-merge.shield`: Split and merge methods may assume that the +segments they are manipulating are not in the shield queue. + +_`.split-merge.shield.flush`: The shield queue is flushed before any +split or merge methods are invoked. + +_`.split-merge.shield.re-flush`: If a split or merge method performs +an operation on a segment which might cause the segment to be queued, +the method must flush the shield queue before returning or calling +another split or merge method. + +_`.split-merge.fail`: Split and merge methods might fail, in which +case segments ``seg`` and ``segHi`` must be equivalently valid and +configured at exit as they were according to the entry conditions. +It's simplest if the failure can be detected before calling the next +method (for example, by allocating any objects early in the method). + +_`.split-merge.fail.anti`: If it's not possible to detect failure +before calling the next method, the appropriate anti-method must be +used (see design.mps.protocol.guide.fail.after-next_). 
Split methods +are anti-methods for merge methods, and vice-versa. + +.. _design.mps.protocol.guide.fail.after-next: protocol#.guide.fail.after-next + +_`.split-merge.fail.anti.constrain`: In general, care should be taken +when writing split and merge methods to ensure that they really are +anti-methods for each other. The anti-method must not fail if the +initial method succeeded. The anti-method should reverse any side +effects of the initial method, except where it's known to be safe to +avoid this (see `.split-merge.fail.summary`_ for an example of a safe +case). + +_`.split-merge.fail.anti.no`: If this isn't possible (it might not be) +then the methods won't support after-next failure. This fact should be +documented, if the methods are intended to support further +specialization. Note that using va_arg with the ``args`` parameter is +sufficient to make it impossible to reverse all side effects. + +_`.split-merge.fail.summary`: The segment summary might not be +restored exactly after a failed merge operation. Each segment would be +left with a summary which is the union of the original summaries (see +`.merge.state`_). This increases the conservatism in the summaries, +but is otherwise safe. + +_`.split-merge.unsupported`: Segment classes need not support segment +merging at all. The function ``SegClassMixInNoSplitMerge()`` is supplied +to set the split and merge methods to unsupporting methods that will +report an error in checking varieties. + + +Document History +---------------- + +- 1997-04-03 RB_ Initial draft (replacing various notes in revisions 0 + and 1) was as part of editing MMsrc!seg.c(MMdevel_action2.1). + +- 1999-04-16 Tony Mann. Rewritten to separate segments and tracts, + following `mail.tony.1998-11-02.10-26`_. + + .. _mail.tony.1998-11-02.10-26: https://info.ravenbrook.com/project/mps/mail/1998/11/02/10-26/0.txt + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2001–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/shield.txt b/mps/design/shield.txt new file mode 100644 index 00000000000..248b72c4346 --- /dev/null +++ b/mps/design/shield.txt @@ -0,0 +1,432 @@ +.. mode: -*- rst -*- + +Shield +====== + +:Tag: design.mps.shield +:Author: Richard Kistruck +:Date: 2006-12-19 +:Status: incomplete guide +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: shield; design + + +Introduction +------------ + +_`.intro`: This document contains a guide to the MPS Shield. 
There is +no historical initial design, but in its place there are some early +ideas and discussions: see `.ideas`_. + +_`.readership`: Any MPS developer. Not confidential. + + +Overview +-------- + +_`.overview`: The MPS implements incremental garbage collection using +memory barriers implemented by a combination of hardware memory +protection and thread control. The MPS needs *separate control* of +collector access and mutator (client) access to memory: the collector +must be able to incrementally scan objects, without the mutator being +able to see them yet. + +Unfortunately common operating systems do not support different access +levels (protection maps) for different parts of the same process. + +The MPS Shield is an abstraction that does extra work to overcome this +limitation, and give the rest of the MPS the illusion that we can +control collector and mutator access separately. + + +Interface +--------- + + +Mutator access +.............. + +The shield provides ``ShieldRaise()`` and ``ShieldLower()`` to forbid +or permit the mutator access to object memory segments. Between these +two, a segment is said to have the shield *raised* (`.def.raised`_). + +``void ShieldRaise(Arena arena, Seg seg, AccessSet mode)`` + + Prevent the mutator accessing the memory segment in the specified + mode (``AccessREAD``, ``AccessWRITE``, or both). + +``void ShieldLower(Arena arena, Seg seg, AccessSet mode)`` + + Allow the mutator to access the memory segment in the specified + mode (``AccessREAD``, ``AccessWRITE``, or both). + +If the mutator attempts an access that hits the shield, the MPS gets +an OS-specific hardware protection fault which reaches +``ArenaAccess()``, does whatever work is necessary, then lowers the +shield and returns to the mutator. + +``ShieldRaise()`` and ``ShieldLower()`` do *not* nest. + + +Entering the shield +................... + +The MPS can only gain exclusive access from *inside* the shield +(`.def.inside`_). 
To enter the shield, the MPS must call +``ShieldEnter()``, and to leave it, the MPS must call +``ShieldLeave()``. + +``ShieldEnter()`` and ``ShieldLeave()`` are called by ``ArenaEnter()`` +and ``ArenaLeave()`` so almost all of the MPS is inside the +shield. + + +Collector access to segments +............................ + +When the MPS wants to access object memory segments from inside the +shield, it must wrap any accesses with a ``ShieldExpose()`` and +``ShieldCover()`` pair. These calls nest. After a call to +``ShieldExpose()`` a segment is said to be *exposed* until the last +nested call to ``ShieldCover()``. The shield arranges that the MPS can +access the memory while it is exposed. + +A segment might for example be exposed during: + + - format-scan (when scanning); + - format-skip (when marking grains in a non-moving fix); + - format-isMoved and ``AddrCopy()`` (during a copying fix); + - format-pad (during reclaim). + +Note that there is no need to call ``ShieldExpose()`` when accessing +pool management memory such as bit tables. This is not object memory, +is never (legally) accessed by the mutator, and so is never shielded. + +Similarly, a pool class that never raises the shield on its segments +need never expose them to gain access. + + +Collector access to the unprotectable +..................................... + +When the MPS wants to access an unprotectable object from inside the +shield, it must wrap any accesses with a ``ShieldHold()`` and +``ShieldRelease()`` pair. This allows access to objects which cannot +be shielded by ``ShieldRaise()``, such as: + + - the stack and registers of mutator threads, + - lockless allocation point structures, + - areas of memory that can't be protected by operating system calls, + - unprotectable roots. + +``void ShieldHold(Arena arena)`` + + Get exclusive access to the unprotectable. + +``void ShieldRelease(Arena arena)`` + + Declare that exclusive access is no longer needed. 
+ + +Mechanism +--------- + +On common operating systems, the only way to allow the MPS access is +to allow access from the whole process, including the mutator. So +``ShieldExpose()`` will suspend all mutator threads to prevent any +mutator access, and so will ``ShieldRaise()`` on an unexposed segment. +The shield handles suspending and resuming threads, and so the rest of +the MPS does not need to worry about it. + +The MPS can make multiple sequential, overlapping, or nested calls to +``ShieldExpose()`` on the same segment, as long as each is balanced by +a corresponding ``ShieldCover()`` before ``ShieldLeave()`` is called. +A usage count is maintained on each segment in ``seg->depth``. When +the usage count reaches zero, there is no longer any reason the +segment should be unprotected, and the shield may reinstate hardware +protection at any time. + +However, as a performance-improving hysteresis, the shield defers +re-protection, maintaining a queue of segments that require attention +before mutator threads are resumed (`.impl.delay`_). While a segment +is in the queue, it has ``seg->queued`` set true. + +This hysteresis allows the MPS to proceed with garbage collection +during a pause without actually setting hardware protection until it +returns to the mutator. This is particularly important on operating +systems where the protection is expensive and poorly implemented, such +as macOS. + +The queue also ensures that no memory protection system calls will be +needed for incremental garbage collection if a complete collection +cycle occurs during one pause. + + +Implementation +-------------- + +_`.impl.delay`: The implementation of the shield avoids suspending +threads for as long as possible. When threads are suspended, it +maintains a queue of segments where the desired and actual protection +do not match. This queue is flushed on leaving the shield. + + +Definitions +........... 
+ +_`.def.raised`: A segment has the shield *raised* for an access mode +after a call to ``ShieldRaise()`` and before a call to +``ShieldLower()`` with that mode. + +_`.def.exposed`: A segment is *exposed* after a call to +``ShieldExpose()`` and before the matching call to ``ShieldCover()``. + +_`.def.synced`: A segment is *synced* if the prot and shield modes are +the same, and unsynced otherwise. + +_`.def.depth`: The *depth* of a segment is defined as: + + | depth ≔ #exposes − #covers, where + | #exposes = the number of calls to ``ShieldExpose()`` on the segment + | #covers = the number of calls to ``ShieldCover()`` on the segment + +``ShieldCover()`` must not be called without a matching +``ShieldExpose()``, so this figure must always be non-negative. + +_`.def.total.depth`: The total depth is the sum of the depth over all +segments. + +_`.def.outside`: Being outside the shield is being between calls to +``ShieldLeave()`` and ``ShieldEnter()``, and similarly _`.def.inside`: +being inside the shield is being between calls to ``ShieldEnter()`` +and ``ShieldLeave()``. [In a multi-threaded MPS this would be +per-thread. RB 2016-03-18] + +_`.def.shielded`: A segment is shielded if the shield mode is +non-zero. [As set by ShieldRaise.] + + +Properties +.......... + +_`.prop.outside.running`: The mutator may not be suspended while outside +the shield. + +_`.prop.mutator.access`: An attempt by the mutator to access shielded +memory must be pre-empted by a call to ``ArenaAccess()``. + +_`.prop.inside.access`: Inside the shield the MPS must be able to access +all unshielded segments and all exposed segments. + + +Invariants +.......... + +_`.inv.outside.running`: The mutator is not suspended while outside the +shield. + +_`.inv.unsynced.suspended`: If any segment is not synced, the mutator is +suspended. + +_`.inv.unsynced.depth`: All unsynced segments have positive depth or are +in the queue. + +_`.inv.outside.depth`: The total depth is zero while outside the shield. 
+ +_`.inv.prot.shield`: The prot mode is never more than the shield mode. + +_`.inv.expose.depth`: An exposed segment's depth is greater than zero. + +_`.inv.expose.prot`: An exposed segment is not protected in the mode +it was exposed with. + + +Proof Hints +........... + +Hints at proofs of properties from invariants. + +_`.proof.outside`: `.inv.outside.running`_ directly ensures +`.prop.outside.running`_. + +_`.proof.sync`: As the depth of a segment cannot be negative + + | total depth = 0 + | ⇒ for all segments, depth = 0 + | ⇒ all segments are synced (by `.inv.unsynced.depth`_) + +_`.proof.access`: If the mutator is running then all segments must be +synced (`.inv.unsynced.suspended`_). Which means that the hardware +protection (protection mode) must reflect the software protection +(shield mode). Hence all shielded memory will be hardware protected +while the mutator is running. This ensures `.prop.mutator.access`_. + +_`.proof.inside`: `.inv.prot.shield`_ and `.inv.expose.prot`_ ensure +`.prop.inside.access`_. + + +Initial ideas +------------- + +_`.ideas`: There never was an initial design document, but +[RB_1995-11-29]_ and [RB_1995-11-30]_ contain some initial ideas. + + +Improvement Ideas +----------------- + + +Mass exposure +............. + +_`.improv.mass-expose`: If protection calls have a high overhead it might +be good to pre-emptively unprotect large ranges of memory when we +expose one segment. With the current design this would mean +discovering adjacent shielded segments and adding them to the queue. +The collector should take advantage of this by preferentially scanning +exposed segments during a pause. + + +Segment independence +.................... + +_`.improv.noseg`: The shield is implemented in terms of segments, using +fields in the segment structure to represent its state. This forces us +to (for example) flush the shield queue when deleting a segment. 
The +shield could keep track of protection and shielding independently, +possibly allowing greater coalescing and more efficient and flexible +use of system calls (see `.improv.mass-expose`_). + + +Concurrent collection +..................... + +_`.improv.concurrent`: The MPS currently does not collect +concurrently, however the only thing that makes it not-concurrent is a +critical point in the Shield abstraction where the MPS seeks to gain +privileged access to memory (usually in order to scan it). The +critical point is where ``ShieldExpose()`` in shield.c has to call +``ShieldHold()`` to preserve the shield invariants. This is the only +point in the MPS that prevents concurrency, and the rest of the MPS is +designed to support it. + +The restriction could be removed if either: + + * the MPS could use a different set of protections to the mutator + program + + * the mutator program uses a software barrier + +The first one is tricky, and the second one just hasn't come up in any +implementation we've been asked to make yet. Given a VM, it could +happen, and the MPS would be concurrent. + +So, I believe there's nothing fundamentally non-concurrent about the +MPS design. It's kind of waiting to happen. + +(Originally written at .) + + +Early Resume +............ + +_`.improv.resume`: There is a tradeoff between delaying flushing the +shield queue (preventing unnecessary protection and allowing us to +coalesce) and resuming mutator threads. We could resume threads +earlier under some circumstances, such as before reclaim (which does +not need to interact with the mutator). Basically, it might be worth +resuming the mutator early in a pause if we know that we're unlikely +to suspend it again (no more calls to ``ShieldRaise()`` or +``ShieldExpose()`` on shielded segments). + + +Expose modes +............ + +_`.improv.expose-modes`: Would it be a good idea for +``ShieldExpose()`` to take an ``AccessSet``? 
It might be good if we +didn't have to raise a write barrier unless we want to write. When +scanning (for instance), we may not need to write, so when scanning a +segment behind a write barrier we shouldn't have to call +``mprotect()``. That's a bit speculative: how often do we scan a +segment and not write to it. Alternatively, and more speculatively, we +could keep the write barrier up, handle the (possibly nested) trap and +*then* expose the shield. I'm just scraping around for ways to reduce +calls to ``mprotect()``. + +Theoretically we can do this, but: + + 1. We're mostly a moving collector so we'll almost always want to + write to segments we scan. That could change if we do more + non-moving collection. + + 2. The main cost of protection is changing it at all, not whether we + change just read or write. On macOS, the main cost seems to be the + TLB flush, which affects wall-clock time of everything on the + processor! + + +References +---------- + +.. [RB_1995-11-29] "Shield protocol for barriers"; Richard Brooksby; + Harlequin; 1995-11-29; + . + +.. [RB_1995-11-30] "Exegesis of Incremental Tracing"; Richard Brooksby; + Harlequin; 1995-11-30; + . + + +Document History +---------------- + +- 2006-12-19 Richard Kistruck. Created: Guide, plus links to initial + ideas. + +- 2007-01-04 Richard Kistruck. Minor text changes for clarity. + +- 2007-01-12 Richard Kistruck. ``ShieldEnter()`` and ``ShieldLeave()`` + are called by ``ArenaEnter()`` and ``ArenaLeave()`` respectively. + +- 2013-05-24 GDR_ Converted to reStructuredText. + +- 2016-03-17 RB_ Updated for dynamic queueing and general code tidying + that has removed complaints. + +- 2016-03-19 RB_ Updated for separate queued flag on segments, changes + of invariants, cross-references, and ideas for future improvement. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + +.. 
_RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/sig.txt b/mps/design/sig.txt new file mode 100644 index 00000000000..3d19e4af1fb --- /dev/null +++ b/mps/design/sig.txt @@ -0,0 +1,243 @@ +.. mode: -*- rst -*- + +Signatures in the MPS +===================== + +:Tag: design.mps.sig +:Author: Richard Brooksby +:Organization: Ravenbrook Limited +:Date: 2013-05-09 +:Revision: $Id$ +:Readership: MPS developers, developers +:Copyright: See section `Copyright and License`_. +:Index terms: + pair: structure signatures; design + single: signatures + +.. TODO: Use RFC-2119 keywords. 
+ + +Introduction +------------ + +Integrity of data structures is absolutely critical to the cost of +deploying the Memory Pool System. Memory corruption and memory +management bugs are incredibly hard to detect and debug, often +manifesting themselves hours or days after they occur. One of the key +ways the MPS detects corruption or the passing of illegal data is using +*signatures*. This simple technique has proved invaluable at catching +defects early. + +This document is based on [RB_1995-08-25]_. + + +Overview +-------- + +_`.overview`: Signatures are `magic numbers`_ which are written into +structures when they are created and invalidated (by overwriting with +``SigInvalid``) when they are destroyed. They provide a limited form +of run-time type checking and dynamic scope checking. They are a +simplified form of "Structure Marking", a technique used in the +Multics filesystem [THVV_1995]_. + +.. _`magic numbers`: https://en.wikipedia.org/wiki/Magic_number_(programming) + + +Definitions +----------- + +_`.field`: Nearly every structure should start with a field of type +``Sig`` with the name ``sig``. For example:: + + typedef struct mps_message_s { + Sig sig; /* design.mps.sig.field */ + Arena arena; /* owning arena */ + MessageClass class; /* Message Class Structure */ + Clock postedClock; /* mps_clock() at post time, or 0 */ + RingStruct queueRing; /* Message queue ring */ + } MessageStruct; + +_`.value`: There must also be a definition for the valid value for +that signature:: + + #define MessageSig ((Sig)0x5193e559) /* SIG MESSaGe */ + +_`.value.unique`: The hex value should be unique to the structure +type. (See `.test.uniq`_ for a method of ensuring this.) + +_`.value.hex`: This is a 32-bit hex constant, spelled using *hex +transliteration* according to `guide.hex.trans`_:: + + ABCDEFGHIJKLMNOPQRSTUVWXYZ + ABCDEF9811C7340BC6520F3812 + +.. 
_guide.hex.trans: guide.hex.trans.rst
+
+_`.value.hex.just`: Hex transliteration allows the structure to be
+recognised when looking at memory in a hex dump or memory window, or
+found using memory searches.
+
+_`.field.end`: In some circumstances the signature should be placed at
+the end of the structure.
+
+_`.field.end.outer`: When a structure extends an *inner structure*
+that already has a signature, it is good practice to put the signature
+for the outer structure at the end. This gives some extra fencepost
+checking. For example::
+
+    typedef struct MVFFStruct {  /* MVFF pool outer structure */
+      PoolStruct poolStruct;     /* generic structure */
+      LocusPrefStruct locusPrefStruct; /* the preferences for allocation */
+      ...
+      Sig sig;                   /* design.mps.sig.field.end.outer */
+    } MVFFStruct;
+
+
+Init and Finish
+---------------
+
+_`.init`: When the structure is initialised, the signature is
+initialised as the *last* action, just before validating it. (Think
+of it as putting your signature at the bottom of a document to say
+it's done.) This ensures that the structure will appear invalid until
+it is completely initialized and ready to use. For example::
+
+    void MessageInit(...) {
+      ...
+      message->arena = arena;
+      message->class = class;
+      RingInit(&message->queueRing);
+      message->postedClock = 0;
+      message->sig = MessageSig;
+      AVERT(Message, message);
+    }
+
+_`.finish`: When the structure is finished, the signature is
+invalidated just after checking the structure, before finishing any of
+the other fields. This ensures that the structure appears invalid
+while it is being torn down and can't be used after. For example::
+
+    void MessageFinish(Message message)
+    {
+      AVERT(Message, message);
+      AVER(RingIsSingle(&message->queueRing));
+
+      message->sig = SigInvalid;
+      RingFinish(&message->queueRing);
+    }
+
+_`.ambit`: Do not do anything else with signatures. See
+`.rule.purpose`_. 
+ + +Checking +-------- + +_`.check.arg`: Every function that takes a pointer to a signed +structure should check its argument. + +_`.check.arg.unlocked`: A function that does not hold the arena lock +should check the argument using ``AVER(TESTT(type, val))``, which +checks that ``val->sig`` is the correct signature for ``type``. + +_`.check.arg.locked`: A function that holds the arena lock should +check the argument using the ``AVERT`` macro. This macro has different +definitions depending on how the MPS is compiled (see +`design.mps.config.def.var`_). It may simply check the signature, or +call the full checking function for the structure. + +.. _design.mps.config.def.var: config.txt#def-var + +_`.check.sig`: The checking function for the structure should also +validate the signature as its first step using the ``CHECKS()`` macro +(see `design.mps.check.macro.sig `_). For example:: + + Bool MessageCheck(Message message) + { + CHECKS(Message, message); + CHECKU(Arena, message->arena); + CHECKD(MessageClass, message->class); + ... + +This combination makes it extremely difficult to get an object of the +wrong type, an uninitialized object, or a dead object, or a random +pointer into a function. + + +Rules +----- + +_`.rule.purpose`: **Do not** use signatures for any other purpose. +The code must function in exactly the same way (modulo defects) if +they are removed. For example, don't use them to make any actual +decisions within the code. They must not be used to discriminate +between structure variants (or union members). They must not be used +to try to detect *whether* a structure has been initialised or +finished. They are there to double-check whether these facts are +true. They lose their value as a consistency check if the code uses +them as well. 
+ + +Tools +----- + +_`.test.uniq`: The Unix command:: + + sed -n '/^#define [a-zA-Z]*Sig/s/[^(]*(/(/p' code/*.[ch] | sort | uniq -c + +will display all signatures defined in the MPS along with a count of how +many times they are defined. If any counts are greater than 1, then the +same signature value is being used for different signatures. This is +undesirable and the problem should be investigated. + + +References +---------- + +.. [RB_1995-08-25] "design.mps.sig: The design of the Memory Pool System + Signature System"; Richard Brooksby; Harlequin; 1995-08-25; + . + +.. [THVV_1995] "Structure Marking"; Tom Van Vleck; 1995; + . + + +Document History +---------------- + +- 2013-05-09 RB_ Created based on scanty MM document [RB_1995-08-25]_. + +- 2023-03-09 RB_ Justified the use of signatures at the end of + structures (`.field.end`_). Updated markup and improved tagging. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2013–2023 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/sp.txt b/mps/design/sp.txt new file mode 100644 index 00000000000..1038ec7876b --- /dev/null +++ b/mps/design/sp.txt @@ -0,0 +1,213 @@ +.. mode: -*- rst -*- + +Stack probe +=========== + +:Tag: design.mps.sp +:Author: Gareth Rees +:Date: 2014-10-23 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: stack probe; design + + +Introduction +------------ + +_`.intro`: This is the design of the stack probe module. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. + +_`.overview`: This module ensures that the stack cannot overflow while +the MPS is holding a lock, so that a mutator can handle stack overflow +faults and call into the MPS from the handler. + + +Requirements +------------ + +_`.req.overflow`: The mutator should be able to call into the MPS from +a stack overflow fault handler. (This is a convenient way to handle +stack overflows in dynamic language implementations: if the stack +overflow exception and associated backtrace are to be represented as +objects, this may require allocation, and hence a call into the MPS.) + +_`.req.complete`: In an application where the mutator might call into +the MPS from a stack overflow fault handler, then whenever the MPS +takes a lock, it must complete the operation and release the lock +without running out of stack. 
(This is because running out of stack +would cause a stack overflow fault, causing the mutator to enter the +MPS recursively, which would fail because the lock is held.) + + +Design +------ + +_`.sol.probe`: Before taking the arena lock in ``ArenaEnterLock()``, +the MPS *probes* the stack: that is, it checks whether there are at +least ``StackProbeDEPTH`` words available, and provokes a stack +overflow fault if there are not. (This ensures that the fault occurs +outside of the arena lock where it can be handled safely.) + +_`.sol.depth`: The configuration parameter ``StackProbeDEPTH`` +specifies the maximum number of words of stack that the MPS might use. +(It is simpler, faster, and more reliable, to determine this globally +than to try to figure it out dynamically.) + +_`.sol.depth.constraint`: Operating systems typically use a single +"guard page" to detect stack overflow and grow the stack. (See for +example the documentation for Windows_.) This means that the probe +will be ineffective if it skips over the guard page into the memory +beyond. If ``StackProbeDEPTH`` is greater than or equal to the number +of words per page, the implementation might need to carry out multiple +probes. (This constraint is checked in ``MPMCheck()``.) + +.. _Windows: https://docs.microsoft.com/en-us/windows/desktop/procthread/thread-stack-size + +_`.sol.depth.no-recursion`: In order to implement this design, the MPS +must have constant bounded stack depth, and therefore, no recursion. + +_`.sol.depth.analysis`: Here's a table showing a deep call into the +MPS (in the master sources at changelevel 187378), starting in +``ArenaAccess()`` at the point where the arena ring lock is taken. 
The +access forces a scan of a segment in an AMC pool, which fixes a +reference to an object in an AMC pool's oldspace, which has to be +forwarded, and this overflows the forwarding buffer, which requires +the arena to allocate a new buffer in an appropriate zone, by +searching the splay tree representing free memory. + +The "Args" column gives the number of arguments to the function (all +arguments to functions in the MPS are word-sized or smaller, since we +prohibit passing structures by value), and the "Locals" column gives +the number of words in local variables. The value "≤64" for the stack +usage of the object format's scan method is the limit that's +documented in the manual. + +==== ====== ======================== +Args Locals Function +==== ====== ======================== + 5 0 ``SegAccess()`` + 5 0 ``SegWholeAccess()`` + 3 8 ``TraceSegAccess()`` + 4 1 ``traceScanSeg()`` + 4 9 ``traceScanSegRes()`` + 4 0 ``SegScan()`` + 4 5 ``amcSegScan()`` + 3 0 ``TraceScanFormat()`` + 3 ≤64 ``format->scan()`` + 3 0 ``SegFix()`` + 4 15 ``amcSegFix()`` + 3 5 ``BufferFill()`` + 5 11 ``AMCBufferFill()`` + 5 73 ``PoolGenAlloc()`` + 6 5 ``SegAlloc()`` + 4 4 ``ArenaAlloc()`` + 5 6 ``PolicyAlloc()`` + 6 10 ``ArenaFreeLandAlloc()`` + 7 1 ``LandFindInZones()`` + 7 16 ``cbsFindInZones()`` + 5 3 ``cbsFindFirst()`` + 6 7 ``SplayFindFirst()`` + 3 7 ``SplaySplay()`` + 4 8 ``SplaySplitDown()`` + 3 0 ``SplayZig()`` + 111 ≤258 **Total** +==== ====== ======================== + +We expect that a compiler will not need to push all local variables +onto the stack, but even in the case where it pushes all of them, this +call requires no more than 369 words of stack space. + +This isn't necessarily the deepest call into the MPS (the MPS's +modular design and class system makes it hard to do a complete +analysis using call graph tools), but it's probably close. The value +for ``StackProbeDEPTH`` is thus chosen to be a round number that's +comfortably larger than this. 
+ + +Interface +--------- + +``void StackProbe(Size depth)`` + +_`.if.probe`: If there are at least ``depth`` words of stack +available, return. If not, provoke a stack overflow fault. + + +Issues +------ + +_`.issue.an`: The generic implementation is non-functional. This means +that it is only suitable for use with programs that do not handle +stack overflow faults, or do not call into the MPS from the handler. +This is because our customers have only required `.req.overflow`_ on +Windows so far. If this becomes a requirement on other platforms, the +following Standard C implementation might work:: + + void StackProbe(Size depth) { + volatile Word w; + Word *p = &w - depth; + w = *p; + } + +The use of ``volatile`` here is to prevent compilers from warning +about the variable ``w`` being written but never read, or worse, +optimizing away the whole statement under the "as if" rule. + + +Implementations +--------------- + +_`.impl.an`: Generic implementation in ``span.c``. This implementation +does nothing. See `.issue.an`_. + +_`.impl.w3i3`: Implementation for Windows on IA-32 in ``spw3i3.c``. +This uses assembly to get the stack pointer (from the ESP register) +and to read the location ``depth`` words below the stack pointer. + +_`.impl.w3i6`: Implementation for Windows on x86-64 in ``spw3i6.c``. +This passes the argument ``depth*sizeof(Word)`` to the Windows +function |alloca|_, for which the documentation says, "A stack +overflow exception is generated if the space cannot be allocated." + +.. |alloca| replace:: ``_alloca()`` +.. _alloca: https://docs.microsoft.com/en-gb/cpp/c-runtime-library/reference/alloca + + +Document History +---------------- + +- 2014-10-23 GDR_ Initial draft. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/splay-assemble.svg b/mps/design/splay-assemble.svg new file mode 100644 index 00000000000..45956b496ea --- /dev/null +++ b/mps/design/splay-assemble.svg @@ -0,0 +1,427 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + A + + B + + + R + + assemble + + x + y + + L + + + A + + B + + + R + x + + L + + + + diff --git a/mps/design/splay-link-left.svg b/mps/design/splay-link-left.svg new file mode 100644 index 00000000000..f94d145743a --- /dev/null +++ b/mps/design/splay-link-left.svg @@ -0,0 +1,437 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + B + + A + + + L + + linkleft + + x + y + + R + + + B + + A + + + L + + x + y + + R + + diff --git a/mps/design/splay-link-right.svg b/mps/design/splay-link-right.svg new file mode 100644 index 00000000000..95c8c31e0a0 --- /dev/null +++ b/mps/design/splay-link-right.svg @@ -0,0 +1,437 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + A + + B + + + R + + linkright + + x + y + + L + + + A + + B + + + R + + x + y + + L + + diff --git a/mps/design/splay-rotate-left.svg b/mps/design/splay-rotate-left.svg new file mode 100644 index 00000000000..00ae646a735 --- /dev/null +++ b/mps/design/splay-rotate-left.svg @@ -0,0 +1,405 @@ + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + A + + B + + + + C + + + + C + + B + + + + A + + rotateleft + + x + y + y + x + + diff --git a/mps/design/splay-rotate-right.svg b/mps/design/splay-rotate-right.svg new file mode 100644 index 00000000000..d2a02bc48bb --- /dev/null +++ b/mps/design/splay-rotate-right.svg @@ -0,0 +1,391 @@ + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + A + + B + + + + C + + + + C + + B + + + + A + + rotateright + + x + y + y + x + + diff --git a/mps/design/splay.txt b/mps/design/splay.txt new file mode 100644 index 00000000000..3460161a79e --- /dev/null +++ b/mps/design/splay.txt @@ -0,0 +1,981 @@ +.. 
mode: -*- rst -*- + +Splay trees +=========== + +:Tag: design.mps.splay +:Author: Gavin Matthews +:Date: 1998-05-01 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: splay trees; design + + +Introduction +------------ + +_`.intro`: This document explains the design of impl.c.splay, an +implementation of Splay Trees, including its interface and +implementation. + +_`.readership`: This document is intended for any MM developer. + +_`.source`: The primary sources for this design are [ST85]_ and +[Sleator96]_. As CBS is a client, design.mps.cbs_. As PoolMVFF is an +indirect client, design.mps.poolmvff_. Also, as PoolMVT is an indirect +client, design.mps.poolmvt_. + +.. _design.mps.cbs: cbs +.. _design.mps.poolmvt: poolmvt +.. _design.mps.poolmvff: poolmvff + +_`.background`: The following background documents influence the design: +guide.impl.c.adt(0). + + +Overview +-------- + +_`.overview`: Splay trees are a form of binary tree where each access +brings the accessed element (or the nearest element) to the root of +the tree. The restructuring of the tree caused by the access gives +excellent amortised performance, as the splay tree adapts its shape to +usage patterns. Unused nodes have essentially no time overhead. + + +Definitions +----------- + +_`.def.splay-tree`: A *splay tree* is a self-adjusting binary tree as +described in [ST85]_ and [Sleator96]_. + +_`.def.node`: A *node* is used in the typical data structure sense to +mean an element of a tree (see also `.type.tree`_). + +_`.def.key`: A *key* is a value associated with each node; the keys +are totally ordered by a client provided comparator. + +_`.def.comparator`: A *comparator* is a function that compares keys to +determine their ordering (see also `.type.tree.compare.function`_). 
+
+_`.def.successor`: Node *N*\ :subscript:`2` is the *successor* of node
+*N*\ :subscript:`1` if *N*\ :subscript:`1` and *N*\ :subscript:`2` are
+both in the same tree, and the key of *N*\ :subscript:`2` immediately
+follows the key of *N*\ :subscript:`1` in the ordering of all keys for
+the tree.
+
+_`.def.left-child`: Each node *N* contains a *left child*, which is a
+(possibly empty) sub-tree of nodes. The key of *N* is ordered after
+the keys of all nodes in this sub-tree.
+
+_`.def.right-child`: Each node *N* contains a *right child*, which is
+a (possibly empty) sub-tree of nodes. The key of *N* is ordered before
+the keys of all nodes in this sub-tree.
+
+_`.def.neighbour`: The *left neighbour* of a key *K* is the node *N*
+with the largest key that compares less than *K* in the total order.
+The *right neighbour* of a key *K* is the node *N* with the smallest
+key that compares greater than *K* in the total order. A node is a
+*neighbour* of a key if it is either the left or right neighbour of
+the key.
+
+_`.def.first`: A node is the *first* node in a set of nodes if its key
+compares less than the keys of all other nodes in the set.
+
+_`.def.last`: A node is the *last* node in a set of nodes if its key
+compares greater than the keys of all other nodes in the set.
+
+_`.def.client-property`: A *client property* is a value that the
+client may associate with each node in addition to the key (a block
+size, for example). This splay tree implementation provides support
+for efficiently finding the first or last nodes with suitably large
+client property values. See also `.prop`_ below.
+
+
+Requirements
+------------
+
+_`.req`: These requirements are drawn from those implied by
+design.mps.poolmvt_, design.mps.poolmvff_, design.mps.cbs_, and
+general inferred MPS requirements.
+
+_`.req.order`: Must maintain a set of abstract keys which is totally
+ordered for a comparator.
+
+_`.req.fast`: Common operations must have low amortized cost. 
+ +_`.req.add`: Must be able to add new nodes. This is a common +operation. + +_`.req.remove`: Must be able to remove nodes. This is a common +operation. + +_`.req.locate`: Must be able to locate a node, given a key. This is +a common operation. + +_`.req.neighbours`: Must be able to locate the neighbouring nodes of a +key (see `.def.neighbour`_). This is a common operation. + +_`.req.iterate`: Must be able to iterate over all nodes in key order +with reasonable efficiency. + +_`.req.protocol`: Must support detection of protocol violations. + +_`.req.debug`: Must support debugging of clients. + +_`.req.stack`: Must do all non-debugging operations with stack usage +bounded by a constant size. + +_`.req.adapt`: Must adapt to regularities in usage pattern, for better +performance. + +_`.req.property`: Must permit a client to associate a client property +(such as a size) with each node in the tree. + +_`.req.property.change`: Must permit a client to dynamically reassign +client properties to nodes in the tree. This is a common operation. + +_`.req.property.find`: Must support rapid finding of the first and +last nodes which have a suitably large value for their client +property. This is a common operation. + +_`.req.root`: Must be able to find the root of a splay tree (if one +exists). + + +Generic binary tree interface +----------------------------- + +Types +..... + +``typedef struct TreeStruct *Tree`` + +_`.type.tree`: ``Tree`` is the type of a node in a binary tree. +``Tree`` contains no fields to store the key associated with the node, +or the client property. Again, it is intended that the ``TreeStruct`` +can be embedded in another structure, and that this is how the +association will be made (see `.usage.client-node`_ for an example). +No convenience functions are provided for allocation or deallocation. + +``typedef void *TreeKey`` + +_`.type.treekey`: ``TreeKey`` is the type of a key associated with a +node in a binary tree. 
It is an alias for ``void *`` but expresses the
+intention.
+
+``typedef TreeKey (*TreeKeyFunction)(Tree tree)``
+
+_`.type.tree.key.function`: A function of type ``TreeKeyFunction``
+returns the key associated with a node in a binary tree. (Since there
+is no space in a ``TreeStruct`` to store a key, it is expected that
+the ``TreeStruct`` is embedded in another structure from which the key
+can be extracted.)
+
+``typedef Compare (*TreeCompareFunction)(Tree tree, TreeKey key)``
+
+_`.type.tree.compare.function`: A function of type
+``TreeCompareFunction`` is required to compare ``key`` with the key
+the client associates with that splay tree node ``tree``, and return
+the appropriate Compare value (see `.usage.compare`_ for an example).
+The function compares a key with a node, rather than a pair of keys
+or nodes as might seem more obvious. This is because the details of
+the mapping between nodes and keys are left to the client (see
+`.type.tree`_), and the splaying operations compare keys with nodes
+(see `.impl.splay`_).
+
+``typedef Res (*TreeDescribeFunction)(Tree tree, mps_lib_FILE *stream)``
+
+_`.type.tree.describe.function`: A function of type
+``TreeDescribeFunction`` is required to write (via ``WriteF()``) a
+client-oriented representation of the splay node. The output should be
+non-empty, short, and without newline characters. This is provided for
+debugging only.
+
+
+Functions
+.........
+
+``Bool TreeCheck(Tree tree)``
+
+_`.function.tree.check`: This is a check function for the
+``Tree`` type (see guide.impl.c.adt.method.check and
+design.mps.check_).
+
+.. _design.mps.check: check
+
+
+Splay tree interface
+--------------------
+
+Types
+.....
+
+``typedef struct SplayTreeStruct *SplayTree``
+
+_`.type.splay.tree`: ``SplayTree`` is the type of the main object at
+the root of the splay tree. It is intended that the
+``SplayTreeStruct`` can be embedded in another structure (see
+`.usage.client-tree`_ for an example). 
No convenience functions are
+provided for allocation or deallocation.
+
+``typedef Bool (*SplayTestNodeFunction)(SplayTree splay, Tree tree, void *closure)``
+
+_`.type.splay.test.node.function`: A function of type
+``SplayTestNodeFunction`` is required to determine whether the node
+itself meets some client determined property (see `.prop`_ and
+`.usage.test.node`_ for an example). The ``closure``
+parameter describes the environment for the function (see
+`.function.splay.find.first`_ and `.function.splay.find.last`_).
+
+``typedef Bool (*SplayTestTreeFunction)(SplayTree splay, Tree tree, void *closure)``
+
+_`.type.splay.test.tree.function`: A function of type
+``SplayTestTreeFunction`` is required to determine whether any of the
+nodes in the sub-tree rooted at the given node meet some client
+determined property (see `.prop`_ and `.usage.test.tree`_ for an
+example). In particular, it must be a precise (not conservative)
+indication of whether there are any nodes in the sub-tree for which
+the ``testNode`` function (see `.type.splay.test.node.function`_) would
+return ``TRUE``. The ``closure`` parameter describes the
+environment for the function (see `.function.splay.find.first`_ and
+`.function.splay.find.last`_).
+
+``typedef void (*SplayUpdateNodeFunction)(SplayTree splay, Tree tree)``
+
+_`.type.splay.update.node.function`: A function of type
+``SplayUpdateNodeFunction`` is required to update any client data
+structures associated with a node to maintain some client determined
+property (see `.prop`_) given that the children of the node have
+changed. (See `.usage.callback`_ for an example)
+
+
+Functions
+.........
+
+_`.function.no-thread`: The interface functions are not designed to be
+either thread-safe or re-entrant. 
Clients of the interface are +responsible for synchronization, and for ensuring that client-provided +functions invoked by the splay module (`.type.tree.compare.function`_, +`.type.tree.key.function`_, `.type.splay.test.node.function`_, +`.type.splay.test.tree.function`_, `.type.splay.update.node.function`_) do +not call functions of the splay module. + +``Bool SplayTreeCheck(SplayTree splay)`` + +_`.function.splay.tree.check`: This is a check function for the +``SplayTree`` type (see guide.impl.c.adt.method.check and +design.mps.check_). + +``void SplayTreeInit(SplayTree splay, TreeCompareFunction compare, TreeKeyFunction nodeKey, SplayUpdateNodeFunction updateNode)`` + +_`.function.splay.tree.init`: This function initialises a +``SplayTree`` (see guide.impl.c.adt.method.init). The ``nodeKey`` +function extracts a key from a tree node, and the ``compare`` function +defines a total ordering on keys of nodes (see `.req.order`_). The +effect of supplying a compare function that does not implement a total +ordering is undefined. The ``updateNode`` function is used to keep +client properties up to date when the tree structure changes; the +value ``SplayTrivUpdate`` may be used for this function if there is no +need to maintain client properties. (See `.usage.initialization`_ for +an example use). + +``void SplayTreeFinish(SplayTree splay)`` + +_`.function.splay.tree.finish`: This function clears the fields of a +``SplayTree`` (see guide.impl.c.adt.method.finish). Note that it does +not attempt to finish or deallocate any associated ``Tree`` +objects; clients wishing to destroy a non-empty ``SplayTree`` must +first explicitly descend the tree and call ``TreeFinish()`` on +each node from the bottom up. + +``Bool SplayTreeInsert(SplayTree splay, Tree tree, void *key)`` + +_`.function.splay.tree.insert`: This function is used to insert into a +splay tree a new node which is associated with the supplied key (see +`.req.add`_). It first splays the tree at the key. 
If an attempt is +made to insert a node that compares ``CompareEQUAL`` to an existing +node in the tree, then ``FALSE`` will be returned and the node will +not be inserted. (See `.usage.insert`_ for an example use). + +``Bool SplayTreeDelete(SplayTree splay, Tree tree, void *key)`` + +_`.function.splay.tree.delete`: This function is used to delete from a +splay tree a node which is associated with the supplied key (see +`.req.remove`_). If the tree does not contain the given node, or the +given node does not compare ``CompareEQUAL`` with the given key, then +``FALSE`` will be returned, and the node will not be deleted. The +function first splays the tree at the given key. (See `.usage.delete`_ +for an example use). + +``Bool SplayTreeFind(Tree *nodeReturn, SplayTree splay, TreeKey key)`` + +_`.function.splay.tree.find`: Search the splay tree for a node that +compares ``CompareEQUAL`` to the given key (see `.req.locate`_), and +splay the tree at the key. Return ``FALSE`` if there is no such node +in the tree, otherwise set ``*nodeReturn`` to the node and return +``TRUE``. + +``Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, SplayTree splay, TreeKey key)`` + +_`.function.splay.tree.neighbours`: Search a splay tree for the two +nodes that are the neighbours of the given key (see +`.req.neighbours`_). Splay the tree at the key. If any node in the +tree compares ``CompareEQUAL`` with the given key, return ``FALSE``. +Otherwise return ``TRUE``, set ``*leftReturn`` to the left neighbour +of the key (or ``TreeEMPTY`` if the key has no left neighbour), and +set ``*rightReturn`` to the right neighbour of the key (or +``TreeEMPTY`` if the key has no right neighbour). See `.usage.insert`_ +for an example of use. + +``Tree SplayTreeFirst(SplayTree splay)`` + +_`.function.splay.tree.first`: If the tree has no nodes, return +``TreeEMPTY``. Otherwise, splay the tree at the first node, and return +that node (see `.req.iterate`_). 
+ +``Tree SplayTreeNext(SplayTree splay, TreeKey key)`` + +_`.function.splay.tree.next`: If the tree contains a right neighbour +for ``key``, splay the tree at that node and return it. Otherwise +return ``TreeEMPTY``. See `.req.iterate`_. + +``Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, Count depth, TreeDescribeFunction nodeDescribe)`` + +_`.function.splay.tree.describe`: This function prints (using +``WriteF()``) to the stream a textual representation of the given +splay tree, using ``nodeDescribe`` to print client-oriented +representations of the nodes (see `.req.debug`_). Provided for +debugging only. + +``Bool SplayFindFirst(Tree *nodeReturn, SplayTree splay, SplayTestNodeFunction testNode, SplayTestTreeFunction testTree, void *closure)`` + +_`.function.splay.find.first`: Find the first node in the tree that +satisfies some client property, as determined by the ``testNode`` and +``testTree`` functions (see `.req.property.find`_). ``closure`` +is an arbitrary value, and is passed to the ``testNode`` +and ``testTree`` functions. If there is no satisfactory node, return ``FALSE``; +otherwise set ``*nodeReturn`` to the node and return ``TRUE``. See +`.usage.delete`_ for an example. + +``Bool SplayFindLast(Tree *nodeReturn, SplayTree splay, SplayTestNodeFunction testNode, SplayTestTreeFunction testTree, void *closure)`` + +_`.function.splay.find.last`: As ``SplayFindFirst()``, but find the +last node in the tree that satisfies the client property. + +``void SplayNodeRefresh(SplayTree splay, Tree tree, TreeKey key)`` + +_`.function.splay.node.refresh`: Call the ``updateNode`` function on the +given node, and on any other nodes that may require updating. The +client key for the node must also be supplied; the function splays the +tree at this key. (See `.usage.insert`_ for an example use). This +function must be called whenever the client property (see `.prop`_) at +a node changes (see `.req.property.change`_). 
+ +``void SplayNodeUpdate(SplayTree splay, Tree node)`` + +_`.function.splay.node.update`: Call the ``updateNode`` function on the +given node, but leave other nodes unchanged. This may be called when a +new node is created, to get the client property off the ground. + + +Client-determined properties +---------------------------- + +_`.prop`: To support `.req.property.find`_, this splay tree +implementation provides additional features to permit clients to cache +maximum (or minimum) values of client properties for all the nodes in +a subtree. The splay tree implementation uses the cached values as +part of ``SplayFindFirst()`` and ``SplayFindLast()`` via the +``testNode`` and ``testTree`` functions. The client is free to choose +how to represent the client property, and how to compute and store the +cached value. + +_`.prop.update`: The cached values depend upon the topology of the +tree, which may vary as a result of operations on the tree. The client +is given the opportunity to compute new cache values whenever +necessary, via the ``updateNode`` function (see +`.function.splay.tree.init`_). This happens whenever the tree is +restructured. The client may use the ``SplayNodeRefresh()`` function to +indicate that the client attributes at a node have changed (see +`.req.property.change`_). A call to ``SplayNodeRefresh()`` splays the +tree at the specified node, which may provoke calls to the +``updateNode`` function as a result of the tree restructuring. The +``updateNode`` function will also be called whenever a new splay node is +inserted into the tree. + +_`.prop.example`: For example, if implementing an address-ordered tree +of free blocks using a splay tree, a client might choose to use the +base address of each block as the key for each node, and the size of +each block as the client property. The client can then maintain as a +cached value in each node the size of the largest block in the subtree +rooted at that node. 
This will permit a fast search for the first or +last block of at least a given size. See `.usage.callback`_ for an +example ``updateNode`` function for such a client. + +_`.prop.ops`: The splay operations must cause client properties for +nodes to be updated in the following circumstances (see `.impl`_ for +details): + +_`.prop.ops.rotate`: rotate left, rotate right -- We need to update +the value at the original root, and the new root, in that order. + +_`.prop.ops.link`: link left, link right -- We know that the line of +right descent from the root of the left tree and the line of left +descent from the root of the right tree will both need to be updated. +This is performed at the assembly stage. (We could update these chains +every time we do a link left or link right instead, but this would be +less efficient) + +_`.prop.ops.assemble`: assemble -- This operation also invalidates the +lines of right and left descent of the left and right trees +respectively which need to be updated (see below). It also invalidates +the root which must be updated last. + +_`.prop.ops.assemble.reverse`: To correct the chains of the left and +right trees without requiring stack or high complexity, we use a +judicious amount of pointer reversal. + +_`.prop.ops.assemble.traverse`: During the assembly, after the root's +children have been transplanted, we correct the chains of the left and +right trees. For the left tree, we traverse the right child line, +reversing pointers, until we reach the node that was the last node +prior to the transplantation of the root's children. Then we update +from that node back to the left tree's root, restoring pointers. +Updating the right tree is the same, mutatis mutandis. + + +Usage +----- + +_`.usage`: Here's a simple example of a client which uses a splay tree +to implement an address ordered tree of free blocks. 
The significant
+client usages of the splay tree interface might look as follows:-
+
+_`.usage.client-tree`: Tree structure to embed a ``SplayTree`` (see
+`.type.splay.tree`_)::
+
+    typedef struct FreeTreeStruct {
+      SplayTreeStruct splayTree; /* Embedded splay tree */
+      /* no obvious client fields for this simple example */
+    } FreeTreeStruct;
+
+_`.usage.client-node`: Node structure to embed a ``Tree`` (see `.type.tree`_)::
+
+    typedef struct FreeBlockStruct {
+      TreeStruct treeStruct; /* embedded splay node */
+      Addr base; /* base address of block is also the key */
+      Size size; /* size of block is also the client property */
+      Size maxSize; /* cached value for maximum size in subtree */
+    } FreeBlockStruct;
+
+_`.usage.callback`: ``updateNode`` callback function (see
+`.type.splay.update.node.function`_)::
+
+    void FreeBlockUpdateNode(SplayTree splay, Tree tree)
+    {
+      /* Compute the maximum size of any block in this subtree. */
+      /* The value to cache is the maximum of the size of this block, */
+      /* the cached value for the left subtree (if any) and the cached */
+      /* value of the right subtree (if any) */
+
+      FreeBlock freeNode = FreeBlockOfTree(tree);
+
+      Size maxSize = freeNode->size;
+
+      if (TreeHasLeft(tree)) {
+        FreeBlock leftNode = FreeBlockOfTree(TreeLeft(tree));
+        if (leftNode->maxSize > maxSize)
+          maxSize = leftNode->maxSize;
+      }
+
+      if (TreeHasRight(tree)) {
+        FreeBlock rightNode = FreeBlockOfTree(TreeRight(tree));
+        if (rightNode->maxSize > maxSize)
+          maxSize = rightNode->maxSize;
+      }
+
+      freeNode->maxSize = maxSize;
+    }
+
+_`.usage.compare`: Comparison function (see `.type.tree.compare.function`_)::
+
+    Compare FreeBlockCompare(Tree tree, TreeKey key) {
+      Addr base1, base2, limit2;
+      FreeBlock freeNode = FreeBlockOfTree(tree);
+
+      base1 = (Addr)key;
+      base2 = freeNode->base;
+      limit2 = AddrAdd(base2, freeNode->size);
+
+      if (base1 < base2)
+        return CompareLESS;
+      else if (base1 >= limit2)
+        return CompareGREATER;
+      else
+        return CompareEQUAL;
+    }
+
+_`.usage.test.tree`: Test tree function (see +`.type.splay.test.tree.function`_):: + + Bool FreeBlockTestTree(SplayTree splay, Tree tree, + void *closure) { + /* Closure environment has wanted size as value of *closure. */ + /* Look at the cached value for the node to see if any */ + /* blocks in the subtree are big enough. */ + + Size size = *(Size *)closure; + FreeBlock freeNode = FreeBlockOfTree(tree); + return freeNode->maxSize >= size; + } + +_`.usage.test.node`: Test node function (see +`.type.splay.test.node.function`_):: + + Bool FreeBlockTestNode(SplayTree splay, Tree tree, + void *closure) { + /* Closure environment has wanted size as value of *closure. */ + /* Look at the size of the node to see if is big enough. */ + + Size size = *(Size *)closure; + FreeBlock freeNode = FreeBlockOfTree(tree); + return freeNode->size >= size; + } + +_`.usage.initialization`: Client's initialization function (see +`.function.splay.tree.init`_):: + + void FreeTreeInit(FreeTree freeTree) { + /* Initialize the embedded splay tree. */ + SplayTreeInit(&freeTree->splayTree, FreeBlockCompare, FreeBlockUpdateNode); + } + +_`.usage.insert`: Client function to add a new free block into the +tree, merging it with an existing block if possible:: + + void FreeTreeInsert(FreeTree freeTree, Addr base, Addr limit) { + SplayTree splayTree = &freeTree->splayTree; + Tree leftNeighbour, rightNeighbour; + TreeKey key = base; /* use the base of the block as the key */ + Res res; + + /* Look for any neighbouring blocks. (.function.splay.tree.neighbours) */ + res = SplayTreeNeighbours(&leftNeighbour, &rightNeighbour, + splayTree, key); + AVER(res == ResOK); /* this client doesn't duplicate free blocks */ + + /* Look to see if the neighbours are contiguous. */ + + if (leftNeighbour != TreeEMPTY && + FreeBlockLimitOfSplayNode(leftNeighbour) == base) { + /* Inserted block is contiguous with left neighbour, so merge it. */ + /* The client housekeeping is left as an exercise to the reader. 
*/
+        /* This changes the size of a block, which is the client */
+        /* property of the splay node. See `.function.splay.node.refresh`_ */
+        SplayNodeRefresh(splayTree, leftNeighbour, key);
+
+      } else if (rightNeighbour != TreeEMPTY &&
+                 FreeBlockBaseOfSplayNode(rightNeighbour) == limit) {
+        /* Inserted block is contiguous with right neighbour, so merge it. */
+        /* The client housekeeping is left as an exercise to the reader. */
+        /* This changes the size of a block, which is the client */
+        /* property of the splay node. See `.function.splay.node.refresh`_ */
+        SplayNodeRefresh(splayTree, rightNeighbour, key);
+
+      } else {
+        /* Not contiguous - so insert a new node */
+        FreeBlock newBlock = (FreeBlock)allocate(sizeof(FreeBlockStruct));
+        Tree newTree = &newBlock->treeStruct;
+
+        newBlock->base = base;
+        newBlock->size = AddrOffset(base, limit);
+        TreeInit(newTree); /* `.function.tree.init`_ */
+        SplayNodeUpdate(splayTree, newTree); /* `.function.splay.node.update`_ */
+        /* `.function.splay.tree.insert`_ */
+        res = SplayTreeInsert(splayTree, newTree, key);
+        AVER(res == ResOK); /* this client doesn't duplicate free blocks */
+      }
+    }
+
+_`.usage.delete`: Client function to allocate the first block of a
+given size in address order. For simplicity, this allocates the entire
+block::
+
+    Bool FreeTreeAllocate(Addr *baseReturn, Size *sizeReturn,
+                          FreeTree freeTree, Size size) {
+      SplayTree splayTree = &freeTree->splayTree;
+      Tree splayNode;
+      Bool found;
+
+      /* look for the first node of at least the given size. */
+      /* the wanted size is passed via the closure parameter. 
See `.function.splay.find.first`_ */
+      found = SplayFindFirst(&splayNode, splayTree,
+                             FreeBlockTestNode, FreeBlockTestTree,
+                             &size);
+
+      if (found) {
+        FreeBlock freeNode = FreeBlockOfTree(splayNode);
+        void *key = (void *)freeNode->base; /* use base of block as the key */
+        Res res;
+
+        /* allocate the block */
+        *baseReturn = freeNode->base;
+        *sizeReturn = freeNode->size;
+
+        /* `.function.splay.tree.delete`_ */
+        res = SplayTreeDelete(splayTree, splayNode, key);
+        AVER(res == ResOK); /* Must be possible to delete node */
+
+        /* Delete the block */
+        deallocate(freeNode, sizeof(FreeBlockStruct));
+
+        return TRUE;
+
+      } else {
+        /* No suitable block */
+        return FALSE;
+      }
+    }
+
+
+Implementation
+--------------
+
+_`.impl`: For more details of how splay trees work, see [ST85]_.
+For more details of how to implement operations on splay trees, see
+[Sleator96]_. Here we describe the operations involved.
+
+
+Top-down splaying
+.................
+
+_`.impl.top-down`: The method chosen to implement the splaying
+operation is called "top-down splay". This is described as "procedure
+top-down splay" in [ST85]_, but the implementation here additionally
+permits attempts to access items which are not known to be in the
+tree. Top-down splaying is particularly efficient for the common case
+where the location of the node in a tree is not known at the start of
+an operation. Tree restructuring happens as the tree is descended,
+whilst looking for the node.
+
+_`.impl.splay`: The key to the operation of the splay tree is the
+internal function ``SplaySplay()``. It searches the tree for a node
+with a given key. In the process, it brings the found node, or an
+arbitrary neighbour if not found, to the root of the tree. This
+"bring-to-root" operation is performed top-down during the search, and
+it is not the simplest possible bring-to-root operation, but the
+resulting tree is well-balanced, and will give good amortised cost for
+future calls to ``SplaySplay()``. 
See [ST85]_. + +_`.impl.splay.how`: To perform this top-down splay, the tree is broken +into three parts, a left tree, a middle tree and a right tree. We +store the left tree and right tree in the right and left children +respectively of a "sides" node to eliminate some boundary conditions. +The initial condition is that the middle tree is the entire splay +tree, and the left and right trees are empty. We also keep pointers to +the last node in the left tree, and the first node in the right tree. +Note that, at all times, the three trees are each validly ordered, and +they form a partition with the ordering left, middle, right. The splay +is then performed by comparing the middle tree with the following six +cases, and performing the indicated operations, until none apply. + +_`.impl.splay.cases`: Note that figure 3 of [ST85]_ describes only 3 +cases: *zig*, *zig-zig* and *zig-zag*. The additional cases described +here are the symmetric variants which are respectively called *zag*, +*zag-zag* and *zag-zig*. In the descriptions of these cases, ``root`` +is the root of the middle tree; ``node->left`` is the left child of +``node``; ``node->right`` is the right child of ``node``. The +comparison operators (``<``, ``>``, ``==``) are defined to compare a +key and a node in the obvious way by comparing the supplied key with +the node's associated key. + +_`.impl.splay.zig`: The "zig" case is where ``key < root``, and +either: + +- ``key == root->left``; +- ``key < root->left && root->left->left == NULL``; or +- ``key > root->left && root->left->right == NULL``. + +The operation for the zig case is: link right (see +`.impl.link.right`_). + +_`.impl.splay.zag`: The "zag" case is where ``key > root``, and +either: + +- ``key == root->right``; +- ``key < root->right && root->right->left == NULL``; or +- ``key > root->right && root->right->right == NULL``. + +The operation for the zag case is: link left (see `.impl.link.left`_). 
+ +_`.impl.splay.zig.zig`: The "zig-zig" case is where + +- ``key < root && key < root->left && root->left->left != NULL``. + +The operation for the zig-zig case is: rotate right (see +`.impl.rotate.right`_) followed by link right (see +`.impl.link.right`_). + +_`.impl.splay.zig.zag`: The "zig-zag" case is where + +- ``key < root && key > root->left && root->left->right != NULL``. + +The operation for the zig-zag case is: link right (see +`.impl.link.right`_) followed by link left (see `.impl.link.left`_). + +_`.impl.splay.zag.zig`: The "zag-zig" case is where + +- ``key > root && key < root->right && root->right->left != NULL``. + +The operation for the zag-zig case is: link left (see +`.impl.link.left`_) followed by link right (see `.impl.link.right`_). + +_`.impl.splay.zag.zag`: The "zag-zag" case is where + +- ``key > root && key > root->right && root->right->right != NULL``. + +The operation for the zag-zag case is: rotate left (see +`.impl.rotate.left`_) followed by link left (see `.impl.link.left`_). + +_`.impl.splay.terminal.null`: A special terminal case is when + +- ``root == NULL``. + +This can only happen at the beginning, and cannot arise from the +operations above. In this case, the splay operation must return +``NULL``, and "not found". + +_`.impl.splay.terminal.found`: One typical terminal case is when + +- ``key == root``. + +This case is tested for at the beginning, in which case "found" is +returned immediately. If this case happens as a result of other +operations, the splay operation is complete, the three trees are +assembled (see `.impl.assemble`_), and "found" is returned. + +_`.impl.splay.terminal.not-found`: The other typical terminal cases are: + +- ``key < root && root->left == NULL``; and +- ``key > root && root->right == NULL``. + +In these cases, the splay operation is complete, the three trees are +assembled (see `.impl.assemble`_), and "not found" is returned. 
+
+_`.impl.rotate.left`: The "rotate left" operation (see [ST85]_
+figure 1) rearranges the middle tree as follows (where any of sub-trees
+A, B and C may be empty):
+
+.. figure:: splay-rotate-left.svg
+   :align: center
+   :alt: Diagram: the rotate left operation.
+
+_`.impl.rotate.right`: The "rotate right" operation (see [ST85]_
+figure 1) rearranges the middle tree as follows (where any of sub-trees
+A, B and C may be empty):
+
+.. figure:: splay-rotate-right.svg
+   :align: center
+   :alt: Diagram: the rotate right operation.
+
+_`.impl.link.left`: The "link left" operation (see [ST85]_ figure
+11a for symmetric variant) rearranges the left and middle trees as
+follows (where any of sub-trees A, B, L and R may be empty):
+
+.. figure:: splay-link-left.svg
+   :align: center
+   :alt: Diagram: the link left operation.
+
+The last node of the left tree is now x.
+
+_`.impl.link.right`: The "link right" operation (see [ST85]_ figure
+11a) rearranges the middle and right trees as follows (where any of
+sub-trees A, B, L and R may be empty):
+
+.. figure:: splay-link-right.svg
+   :align: center
+   :alt: Diagram: the link right operation.
+
+The first node of the right tree is now x.
+
+_`.impl.assemble`: The "assemble" operation (see [ST85]_ figure 12)
+merges the left and right trees with the middle tree as follows (where
+any of sub-trees A, B, L and R may be empty):
+
+.. figure:: splay-assemble.svg
+   :align: center
+   :alt: Diagram: the assemble operation.
+
+
+Top-level operations
+....................
+
+_`.impl.insert`: ``SplayTreeInsert()``: (See [Sleator96]_, chapter
+4, function insert). If the tree has no nodes, add the inserted node
+and we're done; otherwise splay the tree around the supplied key. If
+the splay successfully found a matching node, return failure. 
Otherwise, add the inserted node as a new root, with +the old (newly splayed, but non-matching) root as its left or right +child as appropriate, and the opposite child of the old root as the +other child of the new root. + +_`.impl.delete`: ``SplayTreeDelete()``: (See [Sleator96]_, chapter +4, function delete). Splay the tree around the supplied key. Check +that the newly splayed root is the same node as given by the caller, +and that it matches the key; return failure if not. If the given node +(now at the root) has fewer than two children, replace it (as root), +with the non-null child or null. Otherwise, set the root of the tree +to be the left child (arbitrarily) of the node to be deleted, and +splay around the same key. The new root will be the last node in the +sub-tree and will have a null right child; this is set to be the right +child of the node to be deleted. + +_`.impl.find`: ``SplayTreeFind()``: Splay the node around the +supplied key. If the splay found a matching node, return it; otherwise +return failure. + +_`.impl.neighbours`: ``SplayTreeNeighbours()``: Splay the tree around +the supplied key. If the splay found a matching node, return failure. +Otherwise, determine whether the (non-matching) found node is the left +or right neighbour of the key (by comparison with the key). Set the +tree root to be the right or left child of that first neighbour +respectively, and again splay the tree around the supplied key. The +new root will be the second neighbour, and will have a null left or +right child respectively. Set this null child to be the first +neighbour. Return the two neighbours. + +_`.impl.neighbours.note`: Note that it would be possible to implement +``SplayTreeNeighbours()`` with only one splay, and then a normal +binary tree search for the left or right neighbour of the root. 
This
+would be a cheaper operation, but would give poorer amortised cost if
+the call to ``SplayTreeNeighbours()`` typically precedes a call to
+``SplayTreeInsert()`` (which is expected to be a common usage
+pattern - see `.usage.insert`_). It's also possible to implement
+``SplayTreeNeighbours()`` by simply keeping track of both neighbours
+during a single splay. This has about the same cost as a single splay,
+and hence about the same amortised cost if the call to
+``SplayTreeNeighbours()`` typically precedes a call to
+``SplayTreeInsert()``.
+
+_`.impl.next`: ``SplayTreeNext()``: Splay the tree around the supplied
+``oldKey``. During iteration the "old node" found is probably already
+at the root, in which case this will be a null operation with little
+cost. If this old node has no right child, return ``TreeEMPTY``.
+Otherwise, split the tree into a right tree (which contains just the
+right child of the old node) and a left tree (which contains the old
+node, its left child and no right child). The next node is the first
+node in the right tree. Find this by splaying the right tree around
+``oldKey`` (which is known to compare ``CompareLESS`` than any keys in
+the right tree). Rejoin the full tree, using the right tree as the
+root and setting the left child of root to be the left tree. Return
+the root of this tree.
+
+
+Testing
+-------
+
+_`.test`: There is no plan to test splay trees directly. It is
+believed that the testing described in design.mps.cbs.test_ will be
+sufficient to test this implementation.
+
+.. _design.mps.cbs.test: cbs#.test
+
+
+Error Handling
+--------------
+
+_`.error`: This module detects and reports most common classes of
+protocol error. The cases it doesn't handle will result in undefined
+behaviour and probably cause an ``AVER`` to fire. These are:
+
+_`.error.bad-pointer`: Passing an invalid pointer in place of a
+``SplayTree`` or ``Tree``.
+ +_`.error.bad-compare`: Initialising a ``SplayTree`` with a compare +function that is not a valid compare function, or which doesn't +implement a total ordering on splay nodes. + +_`.error.bad-describe`: Passing an invalid describe function to +``SplayTreeDescribe()``. + +_`.error.out-of-stack`: Stack exhaustion under ``SplayTreeDescribe()``. + + +Future +------ + +_`.future.parent`: The iterator could be made more efficient (in an +amortized sense) if it didn't splay at each node. To implement this +(whilst meeting `.req.stack`_) we really need parent pointers from the +nodes. We could use the (first-child, right-sibling/parent) trick +described in [ST85]_ to implement this, at a slight cost to all +other tree operations, and an increase in code complexity. [ST85]_ +doesn't describe how to distinguish the first-child between left-child +and right-child, and the right-sibling/parent between right-sibling +and parent. One could either use the comparator to make these +distinctions, or steal some bits from the pointers. + + +References +---------- + +.. [ST85] "Self-Adjusting Binary Search Trees"; Daniel Dominic Sleator, + Robert Endre Tarjan; AT&T Bell Laboratories, Murray Hill, NJ; 1985-07; + Journal of the ACM, Vol. 32, Num. 3, pp. 652-686, July 1985; + . + +.. [Sleator96] "Splay Trees"; Daniel Dominic Sleator; CMU, 22/02/96; + CMU 15-211; . + + +Document History +---------------- + +- 1998-05-01 Gavin Matthews. Initial draft. + +- 1998-09-09 Gavin Matthews. Added client properties. + +- 1999-03-10 David Jones. Polished for review (chiefly adding a + definitions section). + +- 1999-03-31 Tony Mann. Edited after review. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2014-02-22 RB_ Fixing abuses of Res and ResFAIL. + +- 2014-03-11 RB_ Updating in response to code review. Removing + .future.tree and .future.reverse, both now implemented. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. 
_GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/stack-scan-areas.svg b/mps/design/stack-scan-areas.svg new file mode 100644 index 00000000000..285cdad802b --- /dev/null +++ b/mps/design/stack-scan-areas.svg @@ -0,0 +1,553 @@ + + + + + + + 2016-03-08 09:44Z + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mutator stack + + + + mpsi frame + + + + reference + + + + + heap + object + + + + + callee-save register + + + + jmp_buf + + + + + internal MPS stack + + + + cold + + + warm + + + hot + + + + + StackScan frame + + + + + + + heap + object + + + + + + heap + object + + + + + + + scanned + area + + + + diff --git a/mps/design/stack-scan.txt b/mps/design/stack-scan.txt new file mode 100644 index 00000000000..e98621b079a --- /dev/null +++ b/mps/design/stack-scan.txt @@ -0,0 +1,371 @@ +.. mode: -*- rst -*- + +Stack and register scanning +=========================== + +:Tag: design.mps.stack-scan +:Author: Gareth Rees +:Date: 2014-10-22 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: stack and register scanning; design + + +Introduction +------------ + +_`.intro`: This is the design of the stack and register scanning +module. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. + +_`.overview`: This module locates and scans references in the control +stack and registers of the *current* thread (the one that has called +in to the MPS). + +_`.other`: The thread manager module is responsible for scanning the +control stack and registers of *other* threads. See +design.mps.thread-manager.if.scan_. + +.. _design.mps.thread-manager.if.scan: thread-manager#.if.scan + +_`.origin`: This design was originally proposed in +mail.richard.2012-08-03.14-36_. Calling conventions for supported +platforms are documented in [Fog]_ and [x86_64_registers]_. + +.. 
_mail.richard.2012-08-03.14-36: https://info.ravenbrook.com/mail/2012/08/03/14-36-35/0/ + + +Requirements +------------ + +_`.req.stack.hot`: Must locate the hot end of the mutator's stack. (This +is needed for conservative garbage collection of uncooperative code, +where references might be stored by the mutator on its stack.) + +_`.req.stack.cold.not`: There is no requirement to locate the cold end +of the stack. (The mutator supplies this as an argument to +``mps_root_create_thread()``.) + +_`.req.stack.platform`: Must support the platform's stack +conventions. + +_`.req.stack.platform.full-empty`: The implementation must take into +account whether the stack is *full* (the stack pointer points to the +last full location) or *empty* (the stack pointer points to the +first empty location). + +_`.req.stack.platform.desc-asc`: The implementation must take into +account whether the stack is *descending* (the hot end of the stack is +at a lower address than the cold end) or *ascending* (the hot end of +the stack is at a higher address than the cold end). + +_`.req.registers`: Must locate and scan all references in the +mutator's *root registers*, the subset of registers which might +contain references that do not also appear on the stack. (This is +needed for conservative garbage collection of uncooperative code, +where references might appear in registers.) + +_`.req.entry`: Should save the mutator's context (stack and registers) +at the point where it enters the MPS. (This avoids scanning registers +and stack that belong to the MPS rather than the mutator, leading to +unnecessary pinning and zone pollution; see job003525_.) + +.. _job003525: https://www.ravenbrook.com/project/mps/issue/job003525/ + +_`.req.setjmp`: The implementation must follow the C Standard in its +use of the ``setjmp()`` macro. (So that it is reliable and portable.) + +_`.req.assembly.not`: The implementation should not use assembly +language. 
(So that it can be developed in tools like Microsoft Visual +Studio that don't support this.) + + +Design +------ + +_`.sol.entry-points`: To meet `.req.entry`_, the mutator's registers +and stack must be recorded when the mutator enters the MPS, if there +is a possibility that the MPS might need to know the mutator context. + +_`.sol.entry-points.fragile`: The analysis of which entry points might +need to save the context (see `.analysis.entry-points`_ below) is fragile. +It might be incorrect now, or become incomplete if we refactor the +internals of tracing and polling. As a defence against errors of this +form, ``StackScan()`` asserts that the context was saved, but if the +client program continues from the assertion, it saves the context +anyway and continues. + +_`.sol.registers`: Implementations spill the root registers onto the +stack so that they can be scanned there. + +_`.sol.registers.root`: The *root registers* are the subset of the +callee-save registers that may contain pointers. + +_`.sol.registers.root.justify`: The caller-save registers will have +been spilled onto the stack by the time the MPS is entered, so will be +scanned by the stack scan. + +_`.sol.setjmp`: The values in callee-save registers can be found by +invoking ``setjmp()``. This forces any of the caller's callee-save +registers into either the ``jmp_buf`` or the current stack frame. + +_`.sol.setjmp.scan`: Although we might be able to decode the jump +buffer in a platform-dependent way, it's hard to guarantee that an +uncooperative compiler won't temporarily store a reference in any +register or stack location. We must conservatively scan the whole of +both. + +_`.sol.setjmp.justify`: The [C1990]_ standard specifies that +``jmp_buf``: + + is an array type suitable for holding the information needed to + restore a calling environment. 
The environment of a call to the + ``setjmp()`` macro consists of information sufficient for a call + to the ``longjmp()`` function to return execution to the correct + block and invocation of that block, were it called recursively. + +We believe that any reasonable implementation of ``setjmp()`` must +copy the callee-save registers either into the jump buffer or into the +stack frame that invokes it in order to work as described. Otherwise, +once the callee-save registers have been overwritten by other function +calls, a ``longjmp()`` would result in the callee-save registers +having the wrong values. A ``longjmp()`` can come from anywhere, and +so the function using ``setjmp()`` can't rely on callee-save registers +being saved by callees. + +_`.sol.stack.hot`: We could decode the frame of the function that +invokes ``setjmp()`` from the jump buffer in a platform-specific way, +but we can do something simpler (if more hacky) by calling the stub +function ``StackHot()`` which takes the address of its argument. So +long as this stub function is not inlined into the caller, then on all +supported platforms this yields a pointer that is pretty much at the +hot end of the frame. + +_`.sol.stack.hot.noinline`: The reason that ``StackHot()`` must not be +inlined is that after inlining, the compiler might place ``stackOut`` +at a colder stack address than the ``StackContextStruct``, causing the +latter not to be scanned. See `mail.gdr.2018-07-11.09-48`_. + +.. _mail.gdr.2018-07-11.09-48: https://info.ravenbrook.com/mail/2018/07/11/09-48-49/0/ + +_`.sol.stack.nest`: We can take care of scanning the jump buffer +itself by storing it in the same stack frame. That way a scan from the +hot end determined by `.sol.stack.hot`_ to the cold end will contain +all of the roots. + +_`.sol.stack.platform`: As of version 1.115, all supported platforms +are *full* and *descending* so the implementation in ``StackScan()`` +assumes this. New platforms must check this assumption. 
+ +_`.sol.xc.alternative`: On macOS, we could use ``getcontext()`` from +libunwind (see here_), but that produces deprecation warnings and +introduces a dependency on that library. + +.. _here: https://stackoverflow.com/questions/3592914/ + + +Analysis +-------- + +_`.analysis.setjmp`: The [C1990]_ standard says: + + An invocation of the ``setjmp`` macro shall appear only in one of + the following contexts: + + - the entire controlling expression of a selection or iteration + statement; + + - one operand of a relational or equality operator with the other + operand an integral constant expression, with the resulting + expression being the entire controlling expression of a + selection or iteration statement; + + - the operand of a unary ``!`` operator with the resulting + expression being the entire controlling expression of a + selection or iteration statement; or + + - the entire expression of an expression statement (possibly cast + to ``void``). + +And the [C1999]_ standard adds: + + If the invocation appears in any other context, the behavior is + undefined. 
+ +_`.analysis.entry-points`: Here's a reverse call graph (in the master +sources at changelevel 189652) showing which entry points might call +``StackScan()`` and so need to record the stack context:: + + StackScan + └ThreadScan + └RootScan + ├traceScanRootRes + │ └traceScanRoot + │ └rootFlip + │ └traceFlip + │ └TraceStart + │ ├PolicyStartTrace + │ │ └TracePoll + │ │ ├ArenaStep + │ │ │ └mps_arena_step + │ │ └ArenaPoll + │ │ ├mps_alloc + │ │ ├mps_ap_fill + │ │ ├mps_ap_alloc_pattern_end + │ │ ├mps_ap_alloc_pattern_reset + │ │ └ArenaRelease + │ │ ├mps_arena_release + │ │ └ArenaStartCollect + │ │ ├mps_arena_start_collect + │ │ └ArenaCollect + │ │ └mps_arena_collect + │ └TraceStartCollectAll + │ ├ArenaStep [see above] + │ ├ArenaStartCollect [see above] + │ └PolicyStartTrace [see above] + └rootsWalk + └ArenaRootsWalk + └mps_arena_roots_walk + +So the entry points that need to save the stack context are +``mps_arena_step()``, ``mps_alloc()``, ``mps_ap_fill()``, +``mps_ap_alloc_pattern_end()``, ``mps_ap_alloc_pattern_reset()``, +``mps_arena_release()``, ``mps_arena_start_collect()``, +``mps_arena_collect()``, and ``mps_arena_roots_walk()``. + + +Interface +--------- + +``typedef StackContextStruct *StackContext`` + +_`.if.sc`: A structure encapsulating the mutator context. + +``Res StackScan(ScanState ss, void *stackCold, mps_area_scan_t scan_area, void *closure)`` + +_`.if.scan`: Scan the stack of the current thread, between +``stackCold`` and the hot end of the mutator's stack that was recorded +by ``STACK_CONTEXT_SAVE()`` when the arena was entered. This will +include any roots which were in the mutator's callee-save registers on +entry to the MPS (see `.sol.setjmp`_ and `.sol.stack.nest`_). Return +``ResOK`` if successful, or another result code if not. + +_`.if.scan.begin-end`: This function must be called between +``STACK_CONTEXT_BEGIN()`` and ``STACK_CONTEXT_END()``. + +``STACK_CONTEXT_SAVE(sc)`` + +_`.if.save`: Store the mutator context in the structure ``sc``. 
+ +_`.if.save.macro`: This must be implemented as a macro because it +needs to run in the stack frame of the entry point (if it runs in some +other function it does not necessarily get the mutator's registers). +This necessity to have the definition in scope in ``mpsi.c``, while +also having different definitions on different platforms, requires a +violation of design.mps.config.no-spaghetti_ in ss.h. + +.. _design.mps.config.no-spaghetti: config#.no-spaghetti + +``STACK_CONTEXT_BEGIN(arena)`` + +_`.if.begin`: Start an MPS operation that may need to know the mutator +context (see `.sol.entry-points`_). This macro must be used like this:: + + Res res; + ArenaEnter(arena); + STACK_CONTEXT_BEGIN(arena) { + res = ArenaStartCollect(...); + } STACK_CONTEXT_END(arena); + ArenaLeave(arena); + return res; + +That is, it must be paired with ``STACK_CONTEXT_END()``, and there +must be no ``return`` between the two macro invocations. + +This macro stores the mutator context in a ``StackContext`` structure +allocated on the stack, and sets ``arena->stackWarm`` to the hot end +of the current frame (using `.sol.stack.hot`_). + +``STACK_CONTEXT_END(arena)`` + +_`.if.end`: Finish the MPS operation that was started by +``STACK_CONTEXT_BEGIN()``. + +This macro sets ``arena->stackWarm`` to ``NULL``. + + +Implementations +--------------- + +_`.impl`: Generic implementation of ``StackScan()`` in ``ss.c`` scans +the whole area between ``arena->stackWarm`` and the cold end of the +mutator's stack, implementing `.sol.stack.nest`_ and also the backup +strategy in `.sol.entry-points.fragile`_. + +.. figure:: stack-scan-areas.svg + :align: center + :alt: Diagram: scanned areas of the stack. + + +References +---------- + +.. [C1990] + International Standard ISO/IEC 9899:1990. "Programming languages — C". + +.. [C1999] + International Standard ISO/IEC 9899:1999. "`Programming languages — C `_". + +.. 
[Fog] + Agner Fog; + "`Calling conventions for different C++ compilers and operating systems `_"; + Copenhagen University College of Engineering; + 2014-08-07. + +.. [x86_64_registers] + Microsoft Corporation; + "`Caller/Callee Saved Registers `_". + + +Document History +---------------- + +- 2014-10-22 GDR_ Initial draft. + +- 2016-03-03 RB_ Reorganised based mostly on `.sol.stack.hot`_ and + `.sol.stack.nest`_. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ +.. _RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/strategy.txt b/mps/design/strategy.txt new file mode 100644 index 00000000000..f541d0275a9 --- /dev/null +++ b/mps/design/strategy.txt @@ -0,0 +1,602 @@ +.. mode: -*- rst -*- + +MPS Strategy +============ + +:Tag: design.mps.strategy +:Author: Nick Barnes +:Organization: Ravenbrook Limited +:Date: 2013-06-04 +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: strategy; design + + +Introduction +------------ + +_`.intro` This is the design of collection strategy for the MPS. + +_`.readership` MPS developers. + + +Overview +-------- + +_`.overview` The MPS uses "strategy" code to make three decisions: + +- when to start a collection trace; + +- what to condemn; + +- how to schedule tracing work. + +This document describes the current strategy, identifies some +weaknesses in it, and outlines some possible future development +directions. + + +Requirements +------------ + +[TODO: source some from req.dylan, or do an up-to-date requirements +analysis -- NB 2013-03-25] + +Garbage collection is a trade-off between time and space: it consumes +some [CPU] time in order to save some [memory] space. Strategy shifts +the balance point. A better strategy will take less time to produce +more space. Examples of good strategy might include: + +- choosing segments to condemn which contain high proportions of dead + objects; + +- starting a trace when a large number of objects have just died; + +- doing enough collection soon enough that the client program never + suffers low-memory problems; + +- using otherwise-idle CPU resources for tracing. + +Conversely, it would be bad strategy to do the reverse of each of +these (condemning live objects; tracing when there's very little +garbage; not collecting enough; tracing when the client program is +busy). 
+ +Abstracting from these notions, requirements on strategy would +relate to: + +- Maximum pause time and other utilization metrics (for example, + bounded mutator utilization, minimum mutator utilization, total MPS + CPU usage); + +- Collecting enough garbage (for example: overall heap size; + low-memory requirements). + +- Allowing client control (for example, client recommendations for + collection timing or condemnation). + +There are other possible strategy considerations which are so far +outside the scope of current strategy and MPS design that this +document disregards them. For example, either inferring or allowing +the client to specify preferred relative object locations ("this +object should be kept in the same cache line as that one"), to improve +cache locality. + + +Generations +----------- + +The largest part of the current MPS strategy implementation is the +support for generational garbage collections. + + +General data structures +....................... + +The fundamental structure of generational garbage collection is the +``Chain``, which describes a sequence of generations. + +A chain specifies the "capacity" and "mortality" for each generation. +When creating an automatically collected pool, the client code may +specify the chain which will control collections for that pool. The +same chain may be used for multiple pools. If no chain is specified, +the pool uses the arena's default generation chain. + +Each generation in a chain has a ``GenDesc`` structure, allocated in +an array pointed to from the chain. In addition to the generations in +the chains, the arena has a unique ``GenDesc`` structure, named +``topGen`` and described in comments as "the dynamic generation" +(misleadingly: in fact it is the *least* dynamic generation). + +Each automatically collected pool has a set of ``PoolGen`` structures, +one for each generation that it can allocate or promote into. 
The
+``PoolGen`` structures for each generation point to the ``GenDesc``
+for that generation, and are linked together in a ring on the
+``GenDesc``. These structures are used to gather accounting
+information for strategy decisions.
+
+The non-moving automatic pool classes (AMS, AWL and LO) do not support
+generational collection, so they allocate into a single generation.
+The moving automatic pool classes (AMC and AMCZ) have one pool
+generation for each generation in the chain, plus one pool generation
+for the arena's "top generation".
+
+
+AMC data structures
+...................
+
+An AMC pool creates an array of pool generation structures of type
+``amcGen`` (a subclass of ``PoolGen``). Each pool generation points to
+the *forwarding buffer* for that generation: this is the buffer that
+surviving objects are copied into.
+
+AMC segments point to the AMC pool generation that the segment belongs
+to, and AMC buffers point to the AMC pool generation that the buffer
+will be allocating into.
+
+The forwarding buffers are set up during AMC pool creation. Each
+generation forwards into the next higher generation in the chain,
+except for the top generation, which forwards to itself. Thus, objects
+are "promoted" up the chain of generations until they end up in the
+top generation, which is shared between all generational pools.
+
+
+Collections
+...........
+
+Collections in the MPS start in one of two ways:
+
+1. A collection of the world starts via ``TraceStartCollectAll()``.
+   This simply condemns all segments in all automatic pools.
+
+2. A collection of some set of generations starts via
+   ``PolicyStartTrace()``. See `.policy.start`_.
+
+
+Zones
+.....
+
+Each generation in each chain has a zoneset associated with it
+(``gen->zones``); the condemned zoneset is the union of some number of
+generations' zonesets.
+
+An attempt is made to use distinct zonesets for different generations.
+Segments in automatic pools are allocated using ``PoolGenAlloc()`` +which creates a ``LocusPref`` using the zoneset from the generation's +``GenDesc``. The zoneset for each generation starts out empty. If the +zoneset is empty, an attempt is made to allocate from a free zone. The +``GenDesc`` zoneset is augmented with whichever zones the new segment +occupies. + +Note that this zoneset can never shrink. + + +Parameters +.......... + +_`.param.intro`: A generation has two parameters, *capacity* and +*mortality*, specified by the client program. + +_`.param.capacity`: The *capacity* of a generation is the amount of +*new* allocation in that generation (that is, allocation since the +last time the generation was condemned) that will cause the generation +to be collected by ``TracePoll()``. + +_`.param.capacity.misnamed`: The name *capacity* is unfortunate since +it suggests that the total amount of memory in the generation will not +exceed this value. But that will only be the case for pool classes +that always promote survivors to another generation. When there is +*old* allocation in the generation (that is, prior to the last time +the generation was condemned), as there is in the case of non-moving +pool classes, the size of a generation is unrelated to its capacity. + +_`.param.mortality`: The *mortality* of a generation is the proportion +(between 0 and 1) of memory in the generation that is expected to be +dead when the generation is collected. It is used in ``TraceStart()`` +to estimate the amount of data that will have to be scanned in order +to complete the trace. + + +Accounting +.......... + +_`.accounting.intro`: Pool generations maintain the sizes of various +categories of data allocated in that generation for that pool. 
This +accounting information is reported via the event system, but also used +in two places: + +_`.accounting.poll`: ``ChainDeferral()`` uses the *new size* of +each generation to determine which generations in the chain are over +capacity and so might need to be collected by ``PolicyStartTrace()``. + +_`.accounting.condemn`: ``PolicyStartTrace()`` uses the *new size* of +each generation to determine which generations in the chain will be +collected; it also uses the *total size* of the generation to compute +the mortality. + +_`.accounting.check`: Computing the new size for a pool generation is +far from straightforward: see job003772_ and job004007_ for some +(former) errors in this code. In order to assist with checking that +this has been computed correctly, the locus module uses a double-entry +book-keeping system to account for every byte in each pool generation. +This uses seven accounts: + +.. _job003772: https://www.ravenbrook.com/project/mps/issue/job003772/ +.. _job004007: https://www.ravenbrook.com/project/mps/issue/job004007/ + +_`.account.total`: Memory acquired from the arena. + +_`.account.total.negated`: From the point of view of the double-entry +system, the *total* should be negative as it is owing to the arena, +but it is inconvenient to represent negative sizes, and so the +positive value is stored instead. + +_`.account.total.negated.justification`: We don't have a type for +signed sizes; but if we represented it in two's complement using the +unsigned ``Size`` type then Clang's unsigned integer overflow detector +would complain. + +_`.account.free`: Memory that is not in use (free or lost to +fragmentation). + +_`.account.buffered`: Memory in a buffer that was handed out to the +client program via ``BufferFill()``, and which has not yet been +condemned. + +_`.account.new`: Memory in use by the client program, allocated +since the last time the generation was condemned. 
+ +_`.account.old`: Memory in use by the client program, allocated +prior to the last time the generation was condemned. + +_`.account.newDeferred`: Memory in use by the client program, +allocated since the last time the generation was condemned, but which +should not cause collections via ``TracePoll()``. (Due to ramping; see +below.) + +_`.account.oldDeferred`: Memory in use by the client program, +allocated prior to the last time the generation was condemned, but +which should not cause collections via ``TracePoll()``. (Due to +ramping; see below.) + +_`.accounting.op`: The following operations are provided: + +_`.accounting.op.alloc`: Allocate a segment in a pool generation. +Debit *total*, credit *free*. (But see `.account.total.negated`_.) + +_`.accounting.op.free`: Free a segment. First, ensure that the +contents of the segment are accounted as free, by artificially ageing +any memory accounted as *new* or *newDeferred* (see +`.accounting.op.age`_) and then artificially reclaiming any memory +accounted as *old* or *oldDeferred* (see `.accounting.op.reclaim`_). +Finally, debit *free*, credit *total*. (But see +`.account.total.negated`_.) + +_`.accounting.op.fill`: Fill a buffer. Debit *free*, credit *buffered*. + +_`.accounting.op.empty`: Empty a buffer. Debit *buffered*, credit +*new* or *newDeferred* with the allocated part of the buffer, credit +*free* with the unused part of the buffer. + +_`.accounting.op.age`: Condemn memory. Debit *buffered* (if part or +all of a buffer was condemned) and either *new* or *newDeferred*, +credit *old* or *oldDeferred*. Note that the condemned part of the +buffer remains part of the buffer until the buffer is emptied, but is +now accounted as *old* or *oldDeferred*. The uncondemned part of the +buffer, if any, remains accounted as *buffered* until it is either +emptied or condemned in its turn. + +_`.accounting.op.reclaim`: Reclaim dead memory. Debit *old* or +*oldDeferred*, credit *free*. 
+ +_`.accounting.op.undefer`: Stop deferring the accounting of memory. Debit *oldDeferred*, credit *old*. Debit *newDeferred*, credit *new*. + + +Ramps +..... +The intended semantics of ramping are pretty simple. It allows the +client to advise us of periods of large short-lived allocation on a +particular AP. Stuff allocated using that AP during its "ramp" will +probably be dead when the ramp finishes. How the MPS makes use of this +advice is up to us, but for instance we might segregate those objects, +collect them less enthusiastically during the ramp and then more +enthusiastically soon after the ramp finishes. Ramps can nest. + +A ramp is entered by calling:: + + mps_ap_alloc_pattern_begin(ap, mps_alloc_pattern_ramp()) + +or similar, and left in a similar way. + +This is implemented on a per-pool basis, for AMC only (it's ignored by +the other automatic pools). PoolAMC throws away the identity of the AP +specified by the client. The implementation is intended to work by +changing the generational forwarding behaviour, so that there is a "ramp +generation" - one of the regular AMC generations - which forwards to +itself if collected during a ramp (instead of promoting to an older +generation). It also tweaks the strategy calculation code, in a way +with consequences I am documenting elsewhere. + +Right now, the code sets this ramp generation to the last generation +specified in the pool's "chain": it ordinarily forwards to the +"after-ramp" generation, which is the "dynamic generation" (i.e. the +least dynamic generation, i.e. the arena-wide "top generation"). My +recollection, and some mentions in design/poolamc, suggests that the +ramp generation used to be chosen differently from this. + +So far, it doesn't sound too ghastly, I guess, although the subversion +of the generational system seems a little daft. Read on.... 
+ +An AMC pool has a ``rampMode`` (which is really a state of a state +machine), taking one of five values: OUTSIDE, BEGIN, RAMPING, FINISH, +and COLLECTING (actually the enum values are called RampX for these +X). We initialize in OUTSIDE. The pool also has a ``rampCount``, +which is the ramp nesting depth and is used to allow us to ignore ramp +transitions other than the outermost. According to design/poolamc, +there's an invariant (in BEGIN or RAMPING, ``rampCount > 0``; in +COLLECTING or OUTSIDE, ``rampCount == 0``), but this isn't checked in +``AMCCheck()`` and in fact is false for COLLECTING (see below). + +There is a small set of events causing state machine transitions: + +- entering an outermost ramp; +- leaving an outermost ramp; +- condemning any segment of a ramp generation (detected in AMCWhiten); +- reclaiming any AMC segment. + +Here's pseudo-code for all the transition events: + +Entering an outermost ramp: + if not FINISH, go to BEGIN. + +Leaving an outermost ramp: + if RAMPING, go to FINISH. Otherwise, go to OUTSIDE. + +Condemning a ramp generation segment: + If BEGIN, go to RAMPING and make the ramp generation forward + to itself (detach the forwarding buffer and reset its generation). + If FINISH, go to COLLECTING and make the ramp generation + forward to the after-ramp generation. + +Reclaiming any AMC segment: + If COLLECTING: + if ``rampCount > 0``, go to BEGIN. Otherwise go to OUTSIDE. + +Now, some deductions: + +#. When OUTSIDE, the count is always zero, because (a) it starts that + way, and the only ways to go OUTSIDE are (b) by leaving an + outermost ramp (count goes to zero) or (c) by reclaiming when the + count is zero. + +#. When BEGIN, the count is never zero (consider the transitions to + BEGIN and the transition to zero). + +#. When RAMPING, the count is never zero (again consider transitions + to RAMPING and the transition to zero). + +#. 
When FINISH, the count can be anything (the transition to FINISH + has zero count, but the Enter transition when FINISH can change + that and then it can increment to any value). + +#. When COLLECTING, the count can be anything (from the previous fact, + and the transition to COLLECTING). + +#. *This is a bug!!* The ramp generation is not always reset (to + forward to the after-ramp generation). If we get into FINISH and + then see another ramp before the next condemnation of the ramp + generation, we will Enter followed by Leave. The Enter will keep us + in FINISH, and the Leave will take us back to OUTSIDE, skipping the + transition to the COLLECTING state which is what resets the ramp + generation forwarding buffer. [TODO: check whether I made an issue + and/or fixed it; NB 2013-06-04] + +The simplest change to fix this is to change the behaviour of the Leave +transition, which should only take us OUTSIDE if we are in BEGIN or +COLLECTING. We should also update design/poolamc to tell the truth, and +check the invariants, which will be these: + + OUTSIDE => zero + BEGIN => non-zero + RAMPING => non-zero + +A cleverer change might radically rearrange the state machine +(e.g. reduce the number of states to three) but that would require +closer design thought and should probably be postponed until we have a +clearer overall strategy plan. + +While I'm writing pseudo-code versions of ramp-related code, I should +mention this other snippet, which is the only other code relating to +ramping (these notes are useful when thinking about the broader strategy +code): + + In ``AMCBufferFill()``, if we're RAMPING, and filling the forwarding + buffer of the ramp generation, and the ramp generation is the + forwarding buffer's generation, set ``amcSeg->new`` to FALSE. Otherwise, + add the segment size to ``poolGen.newSize``. + +And since I've now mentioned the ``amcSeg->new`` flag, here are the only +other uses of that: + +- it initializes as TRUE. 
+ +- When leaving an outermost ramp, go through all the segments in the + pool. Any non-white segment in the rampGen with new set to FALSE has + its size added to ``poolGen->newSize`` and gets new set to TRUE. + +- in ``amcSegWhiten()``, if new is TRUE, the segment size is deducted + from ``poolGen.newSize`` and new is set to FALSE. + + +Policy +------ + +_`.policy`: Functions that make decisions about what action to take +are collected into the policy module (policy.c). The purpose of doing +so is to make it easier to understand this set of decisions and how +they interact, and to make it easier to maintain and update the policy. + + +Assignment of zones +................... + +``Res PolicyAlloc(Tract *tractReturn, Arena arena, LocusPref pref, Size size, Pool pool)`` + +_`.policy.alloc`: Allocate ``size`` bytes of memory on behalf of +``pool``, based on the preferences described by ``pref``. If +successful, update ``*tractReturn`` to point to the first tract in the +allocated memory and return ``ResOK``. Otherwise, return a result code +describing the problem, for example ``ResCOMMIT_LIMIT``. + +_`.policy.alloc.impl`: This tries various methods in succession until +one succeeds. First, it tries to allocate from the arena's free land +in the requested zones. Second, it tries allocating from free zones. +Third, it tries extending the arena and then trying the first two +methods again. Fourth, it tries allocating from any zone that is not +blacklisted. Fifth, it tries allocating from any zone at all. + +_`.policy.alloc.issue`: This plan performs poorly under stress. See +for example job003898_. + +.. _job003898: https://www.ravenbrook.com/project/mps/issue/job003898/ + + + +Deciding whether to collect the world +..................................... 
+ +``Bool PolicyShouldCollectWorld(Arena arena, double availableTime, Clock now, Clock clocks_per_sec)`` + +_`.policy.world`: Determine whether now is a good time for +``mps_arena_step()`` to start a collection of the world. Return +``TRUE`` if so, ``FALSE`` if not. The ``availableTime`` argument is an +estimate of the time that's available for the collection, ``now`` is +the current time as returned by ``ClockNow()``, and ``clocks_per_sec`` +is the result of calling ``ClocksPerSec()``. + +_`.policy.world.impl`: There are two conditions: the estimate of the +available time must be enough to complete the collection, and the last +collection of the world must be long enough in the past that the +``mps_arena_step()`` won't be spending more than a certain fraction of +runtime in collections. (This fraction is given by the +``ARENA_MAX_COLLECT_FRACTION`` configuration parameter.) + + + +Starting a trace +................ + +``Bool PolicyStartTrace(Trace *traceReturn, Bool *collectWorldReturn, Arena arena, Bool collectWorldAllowed)`` + +_`.policy.start`: Consider starting a trace. If a trace was started, +update ``*traceReturn`` to point to the trace and return TRUE. +Otherwise, leave ``*traceReturn`` unchanged and return FALSE. + +_`.policy.start.world`: If ``collectWorldAllowed`` is TRUE, consider +starting a collection of the whole world, and if such a collection is +started, set ``*collectWorldReturn`` to TRUE. + +This decision uses the "Lisp Machine" strategy, which tries to +schedule collections of the world so that the collector just keeps +pace with the mutator: that is, it starts a collection when the +predicted completion time of the collection is around the time when +the mutator is predicted to reach the current memory limit. See +[Pirinen]_. + +_`.policy.start.world.hack`: The ``collectWorldAllowed`` flag was +added to fix job004011_ by ensuring that the MPS starts at most one +collection of the world in each call to ``ArenaPoll()``. 
But this
+is fragile and inelegant. Ideally the MPS would be able to deduce that
+a collection of a set of generations can't possibly make progress
+(because nothing that refers to this set of generations has changed),
+and so not start such a collection.
+
+.. _job004011: https://www.ravenbrook.com/project/mps/issue/job004011/
+
+_`.policy.start.chain`: If ``collectWorldAllowed`` is FALSE, or if it
+is not yet time to schedule a collection of the world,
+``PolicyStartTrace()`` considers collecting a set of zones
+corresponding to a set of generations on a chain.
+
+It picks these generations by calling ``ChainDeferral()`` for each
+chain; this function indicates if the chain needs collecting, and if
+so, how urgent it is to collect that chain. The most urgent chain in
+need of collection (if any) is then condemned by calling
+``policyCondemnChain()``, which chooses the set of generations to
+condemn, and condemns all the segments in those generations.
+
+
+Trace progress
+..............
+
+``Bool PolicyPoll(Arena arena)``
+
+_`.policy.poll`: Return TRUE if the MPS should do some tracing work;
+FALSE if it should return to the mutator.
+
+``Bool PolicyPollAgain(Arena arena, Clock start, Bool moreWork, Work tracedWork)``
+
+_`.policy.poll.again`: Return TRUE if the MPS should do another unit
+of work; FALSE if it should return to the mutator. ``start`` is the
+clock time when the MPS was entered; ``moreWork`` and ``tracedWork``
+are the results of the last call to ``TracePoll()``.
+
+_`.policy.poll.impl`: The implementation keeps doing work until either
+the maximum pause time is exceeded (see `design.mps.arena.pause-time`_),
+or there is no more work to do. Then it schedules the next collection
+so that there is approximately one call to ``TracePoll()`` for every
+``ArenaPollALLOCTIME`` bytes of allocation.
+
+.. _design.mps.arena.pause-time: arena#.pause-time
+
+
+References
+----------
+
+.. 
[Pirinen] + "The Lisp Machine Strategy"; + Pekka Pirinin; + 1998-04-27; + + + +Document History +---------------- +- 2013-06-04 NB_ Checked this in although it's far from complete. + Pasted in my 'ramping notes' from email, which mention some bugs + which I may have fixed (TODO: check this). +- 2014-01-29 RB_ The arena no longer manages generation zonesets. +- 2014-05-17 GDR_ Bring data structures and condemn logic up to date. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ +.. _NB: https://www.ravenbrook.com/consultants/nb/ +.. _RB: https://www.ravenbrook.com/consultants/rb + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/design/telemetry.txt b/mps/design/telemetry.txt new file mode 100644 index 00000000000..b090404c76b --- /dev/null +++ b/mps/design/telemetry.txt @@ -0,0 +1,484 @@ +.. mode: -*- rst -*- + +Telemetry +========= + +:Tag: design.mps.telemetry +:Author: Gavin Matthews +:Date: 1997-04-11 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: telemetry; design + + +Introduction +------------ + +_`.intro`: This documents the design of the telemetry mechanism within +the MPS. + +_`.readership`: This document is intended for any MPS developer. + +_`.source`: Various meetings and brainstorms, including +meeting.general.1997-03-04(0), `mail.richard.1997-07-03.17-01`_, +`mail.gavinm.1997-05-01.12-40`_. + +.. _mail.gavinm.1997-05-01.12-40: https://info.ravenbrook.com/project/mps/mail/1997/05/01/12-40/0.txt +.. _mail.richard.1997-07-03.17-01: https://info.ravenbrook.com/project/mps/mail/1997/07/03/17-01/0.txt + + +Overview +-------- + +_`.over`: Telemetry permits the emission of events from the MPS. These +can be used to drive a graphical tool, or to debug, or whatever. The +system is flexible and robust, but doesn't require heavy support from +the client. + + +Requirements +------------ + +_`.req.simple`: It must be possible to generate code both for the MPS +and any tool without using complicated build tools. + +_`.req.open`: We must not constrain the nature of events before we are +certain of what we want them to be. + +_`.req.multi`: We must be able to send events to multiple streams. + +_`.req.share`: It must be possible to share event descriptions between +the MPS and any tool. + +_`.req.version`: It must be possible to version the set of events so +that any tool can detect whether it can understand the MPS. + +_`.req.back`: Tools should be able to understand older and newer +version of the MPS, so far as is appropriate. 
+ +_`.req.type`: It must be possible to transmit a rich variety of types +to the tool, including doubles, and strings. + +_`.req.port`: It must be possible to transmit and receive events +between different platforms. + +_`.req.control`: It must be possible to control whether and what +events are transmitted at least at a coarse level. + +_`.req.examine`: There should be a cheap means to examine the contents +of logs. + +_`.req.pm`: The event mechanism should provide for post mortem to +detect what significant events led up to death. + +_`.req.perf`: Events should not have a significant effect on +performance when unwanted. + +_`.req.small`: Telemetry streams should be small. + +_`.req.avail`: Events should be available in all varieties, subject to +performance requirements. + +_`.req.impl`: The plinth support for telemetry should be easy to write +and flexible. + +_`.req.robust`: The telemetry protocol should be robust against some +forms of corruption, e.g. packet loss. + +_`.req.intern`: It should be possible to support string-interning. + + +Architecture +------------ + +_`.arch`: Event annotations are scattered throughout the code, but +there is a central registration of event types and properties. Events +are written to a buffer via a specialist structure, and are optionally +written to the plinth. Events can take any number of parameters of a +range of types, indicated as a format both in the annotation and the +registry. 
+ + +Analysis +-------- + +_`.analysis`: The proposed order of development, with summary of +requirements impact is as follows (★ for positive impact, ⇓ for +negative impact): + +========================= == == == == == == == == == == == == == == == == == ======= +solution si op mu sh ve ty po co ex pm pe sm av im ro in ba status +========================= == == == == == == == == == == == == == == == == == ======= +`.sol.format`_ · · · · · ★ · · · · · · · · · · · merged +`.sol.struct`_ · · · · · ★ · · · · ★ ⇓ · · · · · merged +`.sol.string`_ · · · · · ★ · · · · · · · · · ★ · merged +`.sol.relation`_ ★ · · ★ · · · · ★ · · ★ · · · · · merged +`.sol.dumper`_ · · · · · · · · ★ · · · · · · · · merged +`.sol.kind`_ · ⇓ · · · · · ★ · ★ · · · · · · · merged +`.sol.control`_ · · · · · · · ★ · · ★ · · · · · · merged +`.sol.variety`_ · · · · · · · · · ★ ★ · ★ · · · · +========================= == == == == == == == == == == == == == == == == == ======= + +The following are not yet ordered: + +========================= == == == == == == == == == == == == == == == == == ======= +solution si op mu sh ve ty po co ex pm pe sm av im ro in ba status +========================= == == == == == == == == == == == == == == == == == ======= +`.sol.buffer`_ · · · · · · · ★ · ★ ★ · · · · · . +`.sol.traceback`_ · · · · · · · · · ★ · · · · · · . +`.sol.client`_ · · · · · · · · · · · · · · · ★ . +`.sol.head`_ · · · · · · ★ · · · · · · · · · . +`.sol.version`_ · · · · ★ · · · · · · · · · · · ★ +`.sol.exit`_ · · · · · · · · · ★ · · · · · · . +`.sol.block`_ · · · · · · · · · · ★ ⇓ · · ★ · · +`.sol.code`_ · · · · · · · · · · · ★ · · · · ★ +`.sol.msg`_ · · ★ · · · ★ · · · · · · ★ ★ · . +========================= == == == == == == == == == == == == == == == == == ======= + +_`.file-format`: One of the objectives of this plan is to minimise the +impact of the changes to the log file format. 
This is to be achieved +firstly by completing all necessary support before changes are +initiated, and secondly by performing all changes at the same time. + + +Ideas +----- + +_`.sol.format`: Event annotations indicate the types of their +arguments, for example, ``EVENT_WD`` for a ``Word`` and a ``double``. +(`.req.type`_) + +_`.sol.struct`: Copy event data into a structure of the appropriate +type, for example, ``EventWDStruct``. (`.req.type`_, `.req.perf`_, but +not `.req.small`_ because of padding) + +_`.sol.string`: Permit at most one string per event, at the end, and +use the ``char[1]`` hack, and specialised code; deduce the string +length from the event length and also ``NUL``-terminate (`.req.type`_, +`.req.intern`_) + +_`.sol.buffer`: Enter all events initially into internal buffers, and +conditionally send them to the message stream. (`.req.pm`_, +`.req.control`_, `.req.perf`_) + +_`.sol.variety`: In optimized varieties, have internal events (see +`.sol.buffer`_) for a subset of events and no external events; in +normal varieties have all internal events, and the potential for +external events. (`.req.avail`_, `.req.pm`_, `.req.perf`_) + +_`.sol.kind`: Divide events by some coarse type into around 6 groups, +probably related to frequency. (`.req.control`_, `.req.pm`_, but not +`.req.open`_) + +_`.sol.control`: Hold flags to determine which events are emitted +externally. (`.req.control`_, `.req.perf`_) + +_`.sol.dumper`: Write a simple tool to dump event logs as text. +(`.req.examine`_) + +_`.sol.msg`: Redesign the plinth interface to send and receive +messages, based on any underlying IPC mechanism, for example, append +to file, TCP/IP, messages, shared memory. (`.req.robust`_, +`.req.impl`_, `.req.port`_, `.req.multi`_) + +_`.sol.block`: Buffer the events and send them as fixed size blocks, +commencing with a timestamp, and ending with padding. 
(`.req.robust`_, +`.req.perf`_, but not `.req.small`_) + +_`.sol.code`: Commence each event with two bytes of event code, and +two bytes of length. (`.req.small`_, `.req.back`_) + +_`.sol.head`: Commence each event stream with a platform-independent +header block giving information about the session, version (see +`.sol.version`_), and file format; file format will be sufficient to +decode the (platform-dependent) rest of the file. (`.req.port`_) + +_`.sol.exit`: Provide a mechanism to flush events in the event of +graceful sudden death. (`.req.pm`_) + +_`.sol.version`: Maintain a three part version number for the file +comprising major (incremented when the format of the entire file +changes (other than platform differences)), median (incremented when +an existing event changes its form or semantics), and minor +(incremented when a new event type is added); tools should normally +fail when the median or major is unsupported. (`.req.version`_, +`.req.back`_) + +_`.sol.relation`: Event types will be defined in terms of a relation +specifying their name, code, optimised behaviour (see +`.sol.variety`_), kind (see `.sol.kind`_), and format (see +`.sol.format`_); both the MPS and tool can use this by suitable +``#define`` hacks. (`.req.simple`_, `.req.share`_, `.req.examine`_, +`.req.small`_ (no format information in messages)) + +_`.sol.traceback`: Provide a mechanism to output recent events (see +`.sol.buffer`_) as a form of backtrace when ``AVER`` statements fire +or from a debugger, or whatever. (`.req.pm`_) + +_`.sol.client`: Provide a mechanism for user events. (`.req.intern`_) + + + +Implementation +-------------- + +Annotation +.......... + +_`.annot`: An event annotation is of the form:: + + EVENT3(FooCreate, pointer, address, word) + +_`.annot.string`: If there is a string in the format, it must be the +last parameter (and hence there can be only one). There is currently +a maximum string length, defined by ``EventMaxStringLength`` in +impl.h.eventcom. 
+ 

_`.annot.type`: The event type should be given as the first parameter
to the event macro, as registered in impl.h.eventdef.

_`.annot.param`: The parameters of the event should be given as the
remaining parameters of the event macro, in order as indicated in the
event parameters definition in impl.h.eventdef.


Registration
............

_`.reg`: All event types and parameters should be registered in
impl.h.eventdef, in the form of higher-order list macros.

_`.reg.just`: This use of higher-order macros enables great
flexibility in the use of this file.

_`.reg.rel`: The event type registration is of the form::

    EVENT(X, FooCreate, 0x1234, TRUE, Arena)

_`.reg.type`: The first parameter of the relation is the event type.
This needs no prefix, and should correspond to that used in the
annotation.

_`.reg.code`: The second parameter is the event code, a 16-bit value
used to represent this event type. Codes should not be re-used for new
event types, to allow interpretation of event log files of all ages.

_`.reg.always`: The third parameter is a boolean value indicating
whether this event type should be implemented in all varieties. See
`.control.buffer`_. Unless your event is on the critical path
(typically per reference or per object), you will want this to be
``TRUE``.

_`.reg.kind`: The fourth parameter is a kind keyword indicating what
category this event falls into. See `.control`_. The possible values
are:

- ``Arena`` -- per space or arena or global
- ``Pool`` -- pool-related
- ``Trace`` -- per trace or scan
- ``Seg`` -- per segment
- ``Ref`` -- per reference or fix
- ``Object`` -- per object or allocation
- ``User`` -- invoked by the user through the MPS interface

This list can be seen in impl.h.eventcom.

_`.reg.doc`: Add a docstring column. 
[RB 2012-09-03]

_`.reg.params`: The event parameters registration is of the form::

    #define EVENT_FooCreate_PARAMS(PARAM, X) \
      PARAM(X, 0, P, firstParamPointer) \
      PARAM(X, 1, U, secondParamUnsigned)

_`.reg.param.index`: The first column is the index, and must start at
zero and increase by one for each row.

_`.reg.param.sort`: The second column is the parameter "sort", which,
when appended to ``EventF``, yields a type for the parameter. It is a
letter from the following list:

- ``P`` -- ``void *``
- ``A`` -- ``Addr``
- ``W`` -- ``Word``
- ``U`` -- ``unsigned int``
- ``S`` -- ``char *``
- ``D`` -- ``double``
- ``B`` -- ``Bool``

The corresponding event parameter must be assignment compatible with
the type.

_`.param.types`: When an event has parameters whose type is not in the
above list, use the following guidelines: All ``C`` pointer types not
representing strings use ``P``; ``Size``, ``Count``, ``Index`` use
``W``; others should be obvious.

_`.reg.param.name`: The third column is the parameter name. It should
be a valid C identifier and is used for debugging display and human
readable output.

_`.reg.param.doc`: Add a docstring column. [RB 2012-09-03]

_`.reg.dup`: It is permissible for the one event type to be used for
more than one annotation. There are generally two reasons for this:

- Variable control flow for successful function completion;
- Platform/Otherwise-dependent implementations of a function.

Note that all annotations for one event type must have the same format
(as implied by `.sol.format`_).


Control
.......

_`.control`: There are two types of event control, buffer and output.

_`.control.buffer`: Buffer control affects whether particular events
are implemented at all, and is controlled statically by variety using
the always value (see `.reg.always`_) for the event type. The hot
variety compiles out annotations with ``always=FALSE``. 
The cool variety +does not, so always buffers a complete set of events. + +_`.control.output`: Output control affects whether events written to +the internal buffer are output via the plinth. This is set on a +per-kind basis (see `.reg.kind`_), using a control bit table stored in +EventKindControl. By default, all event kinds are off. You may switch +some kinds on using a debugger. + +For example, to enable ``Pool`` events using gdb (see impl.h.eventcom for +numeric codes):: + + $ gdb ./xci3gc/cool/amcss + (gdb) break GlobalsInit + (gdb) run + ... + (gdb) print EventKindControl |= 2 + $2 = 2 + (gdb) continue + ... + (gdb) quit + $ mpseventcnv -v | sort | head + 0000178EA03ACF6D PoolInit 9C1E0 9C000 0005E040 + 0000178EA03C2825 PoolInitMFS 9C0D8 9C000 1000 C + 0000178EA03C2C27 PoolInitMFS 9C14C 9C000 1000 44 + 0000178EA03C332C PoolInitMV 9C080 9C000 1000 20 10000 + 0000178EA03F4DB4 BufferInit 2FE2C4 2FE1B0 0 + 0000178EA03F4EC8 BufferInitSeg 2FE2C4 2FE1B0 0 + 0000178EA03F57DA AMCGenCreate 2FE1B0 2FE288 + 0000178EA03F67B5 BufferInit 2FE374 2FE1B0 0 + 0000178EA03F6827 BufferInitSeg 2FE374 2FE1B0 0 + 0000178EA03F6B72 AMCGenCreate 2FE1B0 2FE338 + +_`.control.env`: The initial value of ``EventKindControl`` is read +from the C environment when the ANSI Plinth is used, and so event +output can be controlled like this:: + + MPS_TELEMETRY_CONTROL=127 amcss + +or like this:: + + MPS_TELEMETRY_CONTROL="Pool Arena" amcss + +where the variable is set to a space-separated list of names defined by ``EventKindENUM``. + +_`.control.just`: These controls are coarse, but very cheap. + +_`.control.external`: The MPS interface functions +``mps_telemetry_set()`` and ``mps_telemetry_reset()`` can be used to +change ``EventKindControl``. + +_`.control.tool`: The tools will be able to control +``EventKindControl``. + + +Debugging +......... + +_`.debug.buffer`: Each event kind is logged in a separate buffer, +``EventBuffer[kind]``. 
+ +_`.debug.buffer.reverse`: The events are logged in reverse order from +the top of the buffer, with the last logged event at +``EventLast[kind]``. This allows recovery of the list of recent events +using the ``event->any.size`` field. + +_`.debug.dump`: The contents of all buffers can be dumped with the +``EventDump`` function from a debugger, for example:: + + gdb> print EventDump(mps_lib_get_stdout()) + +_`.debug.describe`: Individual events can be described with the +EventDescribe function, for example:: + + gdb> print EventDescribe(EventLast[3], mps_lib_get_stdout(), 0) + +_`.debug.core`: The event buffers are preserved in core dumps and can +be used to work out what the MPS was doing before a crash. Since the +kinds correspond to frequencies, ancient events may still be available +in some buffers, even if they have been flushed to the output stream. +Some digging may be required. + + +Dumper tool +........... + +_`.dumper`: A primitive dumper tool is available in impl.c.eventcnv. +For details, see guide.mps.telemetry. + + +Allocation replayer tool +........................ + +_`.replayer`: A tool for replaying an allocation sequence from a log +is available in impl.c.replay. + + +Document History +---------------- + +- 1997-04-11 Gavin Matthews. Incomplete design. + +- 1997-07-07 Gavin Matthews. Rewritten after discussion in Pool Hall. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2012-09-03 RB_ Removed basic untruths and added some discussion of + debugging, though this starts to resemble a manual rather than a + design document, and needs to be reworked. + +- 2013-05-22 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/tests.txt b/mps/design/tests.txt new file mode 100644 index 00000000000..4588b3b0406 --- /dev/null +++ b/mps/design/tests.txt @@ -0,0 +1,478 @@ +.. mode: -*- rst -*- + +Tests +===== + +:Tag: design.mps.tests +:Author: Richard Kistruck +:Date: 2008-12-04 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: tests; design + + +Introduction +------------ + +_`.intro`: This document contains a guide to the Memory Pool System +tests. + +_`.readership`: This document is intended for any MPS developer. 
+ 

Running tests
-------------

_`.run`: Run these commands::

    cd code
    make -f <makefile> VARIETY=<variety> <target>    # Unix
    nmake /f <makefile> VARIETY=<variety> <target>   # Windows

where ``<makefile>`` is the appropriate makefile for the platform (see
`manual/build.txt`_), ``<variety>`` is the variety (see
design.mps.config.var.codes_) and ``<target>`` is the collection of tests
(see `.target`_ below). For example::

    make -f lii6ll.gmk VARIETY=cool testrun

If ``<variety>`` is omitted, tests are run in both the cool and hot
varieties.

.. _design.mps.config.var.codes: config#.var.codes
.. _manual/build.txt: https://www.ravenbrook.com/project/mps/master/manual/build.txt


Test targets
------------

_`.target`: The makefiles provide the following targets for common
sets of tests:

_`.target.testall`: The ``testall`` target runs all test cases (even
if known to fail).

_`.target.testrun`: The ``testrun`` target runs the "smoke tests".
This subset of tests are quick checks that the MPS is working. They
run quickly enough for it to be practical to run them every time the
MPS is built.

_`.target.testci`: The ``testci`` target runs the continuous
integration tests, the subset of tests that are expected to pass in
full-featured build configurations.

_`.target.testansi`: The ``testansi`` target runs the subset of the
tests that are expected to pass in the generic ("ANSI") build
configuration (see design.mps.config.opt.ansi_).

_`.target.testpollnone`: The ``testpollnone`` target runs the subset
of the tests that are expected to pass in the generic ("ANSI") build
configuration (see design.mps.config.opt.ansi_) with the option
``CONFIG_POLL_NONE`` (see design.mps.config.opt.poll_).

.. _design.mps.config.opt.ansi: config#.opt.ansi
.. _design.mps.config.opt.poll: config#.opt.poll

_`.target.testratio`: The ``testratio`` target compares the
performance of the HOT and RASH varieties. See `.ratio`_. 
+ 

_`.target.testscheme`: The ``testscheme`` target builds the example
Scheme interpreter (example/scheme) and runs its test suite.

_`.target.testmmqa`: The ``testmmqa`` target runs the tests in the
MMQA test suite. See `.mmqa`_.


Test features
-------------

_`.randomize`: Each time a test case is run, it randomly chooses some
of its parameters (for example, the sizes of objects, or how many
links to create in a graph of references). This allows a fast test
to cover many cases over time.

_`.randomize.seed`: The random numbers are chosen pseudo-randomly
based on a seed initialized from environmental data (the time and the
processor cycle count). The seed is reported at test startup, for
example::

    code$ xci6ll/cool/apss
    xci6ll/cool/apss: randomize(): choosing initial state (v3): 2116709187.
    ...
    xci6ll/cool/apss: Conclusion: Failed to find any defects.

Here, the number 2116709187 is the random seed.

_`.randomize.specific-seed`: Each test can be run with a specified seed
by passing the seed on the command line, for example::

    code$ xci6ll/cool/apss 2116709187
    xci6ll/cool/apss: randomize(): resetting initial state (v3) to: 2116709187.
    ...
    xci6ll/cool/apss: Conclusion: Failed to find any defects.

_`.randomize.repeatable`: This ensures that the single-threaded tests
are repeatable. (Multi-threaded tests are not repeatable even if the
same seed is used; see job003719_.)

.. _job003719: https://www.ravenbrook.com/project/mps/issue/job003719/


Test list
---------

See `manual/code-index`_ for the full list of automated test cases.

.. _manual/code-index: https://www.ravenbrook.com/project/mps/master/manual/html/code-index.html

_`.test.finalcv`: Registers objects for finalization, makes them
unreachable, deregisters them, etc. Churns to provoke minor (nursery)
collection.

_`.test.finaltest`: Creates a large binary tree, and registers every
node. 
Drops the top reference, requests collection, and counts the
finalization messages.

_`.test.zcoll`: Collection scheduling, and collection feedback.

_`.test.zmess`: Message lifecycle and finalization messages.


Test database
-------------

_`.db`: The automated tests are described in the test database
(tool/testcases.txt).

_`.db.format`: This is a self-documenting plain-text database which
gives for each test case its name and an optional set of features. For
example the feature ``=P`` means that the test case requires polling
to succeed, and therefore is expected to fail in build configurations
without polling (see design.mps.config.opt.poll_).

_`.db.format.simple`: The format must be very simple because the test
runner on Windows is written as a batch file (.bat), in order to avoid
having to depend on any tools that did not come as standard with
Windows XP, and batch files are inflexible. (But note that we no
longer support Windows XP, so it would now be possible to rewrite the
test runner in PowerShell if we thought that made sense.)

_`.db.testrun`: The test runner (tool/testrun.sh on Unix or
tool/testrun.bat on Windows) parses the test database to work out
which tests to run according to the target. For example the
``testpollnone`` target must skip all test cases with the ``P``
feature.


Test runner
-----------

_`.runner.req.automated`: The test runner must execute without user
interaction, so that it can be used for continuous integration.

_`.runner.req.output.pass`: Test cases are expected to pass nearly all the
time, and in these cases we almost never want to see the output, so
the test runner must suppress the output for passing tests.

_`.runner.req.output.fail`: However, if a test case fails then the
test runner must preserve the output from the failing test, including
the random seed (see `.randomize.seed`_), so that this can be analyzed
and the test repeated. 
Moreover, it must print the output from the +failing test, so that if the test is being run on a `continuous +integration`_ system (see `.ci`_), then the output of the failing +tests is included in the failure report. (See job003489_.) + +.. _job003489: https://www.ravenbrook.com/project/mps/issue/job003489/ + + +Performance test +---------------- + +_`.ratio`: The ``testratio`` target checks that the hot variety +is not too much slower than the rash variety. A failure of this test +usually is expected to indicate that there are assertions on the +critical path using ``AVER`` instead of ``AVER_CRITICAL`` (and so on). +This works by running gcbench for the AMC pool class and djbench for +the MVFF pool class, in the hot variety and the rash variety, +computing the ratio of CPU time taken in the two varieties, and +testing that this falls under an acceptable limit. + +_`.ratio.cpu-time`: Note that we use the CPU time (reported by +``/usr/bin/time``) and not the elapsed time (as reported by the +benchmark) because we want to be able to run this test on continuous +integration machines that might be heavily loaded. + +_`.ratio.platform`: This target is currently supported only on Unix +platforms using GNU Makefiles. + + +Adding a new test +----------------- + +To add a new test to the MPS, carry out the following steps. (The +procedure uses the name "newtest" throughout but you should of +course replace this with the name of your test case.) + +_`.new.source`: Create a C source file in the code directory, +typically named "newtest.c". 
In addition to the usual copyright
boilerplate, it should contain a call to ``testlib_init()`` (this
ensures reproducibility of pseudo-random numbers), and a ``printf()``
reporting the absence of defects (this output is recognized by the
test runner)::

    #include <stdio.h>
    #include "testlib.h"

    int main(int argc, char *argv[])
    {
      testlib_init(argc, argv);
      /* test happens here */
      printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
      return 0;
    }

_`.new.unix`: If the test case builds on the Unix platforms (FreeBSD,
Linux and macOS), edit code/comm.gmk adding the test case to the
``TEST_TARGETS`` macro, and adding a rule describing how to build it,
typically::

    $(PFM)/$(VARIETY)/newtest: $(PFM)/$(VARIETY)/newtest.o \
            $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a

_`.new.windows`: If the test case builds on Windows, edit
code/commpre.nmk adding the test case to the ``TEST_TARGETS`` macro,
and edit code/commpost.nmk adding a rule describing how to build it,
typically::

    $(PFM)\$(VARIETY)\newtest.exe: $(PFM)\$(VARIETY)\newtest.obj \
            $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)

_`.new.macos`: If the test case builds on macOS, open
code/mps.xcodeproj/project.pbxproj for edit and open this project in
Xcode. If the project navigator is not visible at the left, select
View → Navigators → Show Project Navigator (⌘1). Right click on the
Tests folder and choose Add Files to "mps"…. Select code/newtest.c
and then click Add. Move the new file into alphabetical order in the
Tests folder. Click on "mps" at the top of the project navigator to
reveal the targets. Select a test target that is similar to the one
you have just created. Right click on that target and select Duplicate
(⌘D). Select the new target and change its name to "newtest". Select
the "Build Phases" tab and check that "Dependencies" contains the mps
library, and that "Compile Sources" contains newtest.c and
testlib.c. Close the project. 
+ 

_`.new.database`: Edit tool/testcases.txt and add the new test case to
the database. Use the appropriate flags to indicate the properties of
the test case. These flags are used by the test runner to select the
appropriate sets of test cases. For example tests marked ``=P`` are
expected to fail in build configurations without polling (see
design.mps.config.opt.poll_).

_`.new.manual`: Edit manual/source/code-index.rst and add the new test
case to the "Automated test cases" section.


Continuous integration
----------------------

[This section might need to become a document in its own right. CI
has grown in importance and complexity. RB 2023-01-15]

_`.ci`: Ravenbrook uses both `GitHub CI`_ and `Travis CI`_ for
continuous integration of the MPS via GitHub.

.. _Travis CI: https://travis-ci.com/

.. _GitHub CI: https://docs.github.com/en/actions/automating-builds-and-tests/about-continuous-integration

[This section needs: definition of CI goals and requirements, what we
need CI to do and why, how the testci target meets those
requirements. 'taint really a design without this. Mention how CI
supports the pull request merge procedure (except that exists on a
separate branch at the moment). RB 2023-01-15]

[Need to discuss compilers and toolchains. RB 2023-01-15]

_`.ci.run.posix`: On Posix systems where we have autoconf, the CI
services run commands equivalent to::

    ./configure
    make install
    make test

which exercises the testci target, as defined by `Makefile.in
<../Makefile.in>`_ in the root of the MPS tree.

_`.ci.run.windows`: On Windows the CI services run commands that do at
least::

    nmake /f w3i6mv.nmk all testci

as defined by the `.ci.github.config`_.

_`.ci.run.other.targets`: On some platforms we arrange to run the testansi,
testpollnone, testratio, and testscheme targets. [Need to explain
why, where, etc. 
RB 2023-01-15]

_`.ci.run.other.checks`: We could also run various non-build checks
using CI to check:

- document formatting
- shell script syntax

[In the branch of writing, these do not yet exist. They are the
subject of `GitHub pull request #113
<https://github.com/Ravenbrook/mps/pull/113>`_ of
branch/2023-01-13/rst-check. When merged, they can be linked. RB
2023-01-15]

_`.ci.when`: CI is triggered on the `mps GitHub repo`_ by:

- commits (pushes)
- new pull requests
- manually, using tools (see `.ci.tools`_)

.. _mps GitHub repo: https://github.com/ravenbrook/mps

_`.ci.results`: CI results are visible via the GitHub web interface:

- in pull requests, under "Checks",

- on the `branches page <https://github.com/Ravenbrook/mps/branches>`_
  as green ticks or red crosses that link to details.

as well as in logs specific to the type of CI.

_`.ci.results.travis`: Results from Travis CI can be found at the
`Travis CI build history for the MPS GitHub repo
<https://app.travis-ci.com/github/Ravenbrook/mps/builds>`_.

_`.ci.results.github`: Results from GitHub CI can be found at `build
and test actions on the Actions tab at the Ravenbrook GitHub repo
<https://github.com/Ravenbrook/mps/actions/workflows/build-and-test.yml>`_.

_`.ci.github`: [Insert overview of GitHub CI here. RB 2023-01-15]

_`.ci.github.platforms`: GitHub provides runners_ for Linux, Windows,
and macOS, but only on x86_64. See `.ci.travis.platforms`_ for ARM64
and FreeBSD.

.. _runners: https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources

_`.ci.github.config`: GitHub CI is configured using the
`build-and-test.yml <../.github/workflows/build-and-test.yml>`_ file
in the .github/workflows directory of the MPS tree.

_`.ci.travis`: [Insert overview of Travis CI here. RB 2023-01-15]

_`.ci.travis.platforms`: Where possible, we use `GitHub CI`_ for
platforms, because `Travis CI is slow and expensive`_. However
`GitHub CI`_ does not provide ARM64 or FreeBSD, so we use `Travis CI`_
for those.

.. 
_Travis CI is slow and expensive: https://github.com/Ravenbrook/mps/issues/109 + +_`.ci.travis.config`: Travis is configured using the `.travis.yml +<../.travis.yml>`_ file at top level of the MPS tree. + +_`.ci.tools`: The MPS tree contains some simple tools for managing CI +without the need to install whole packages such as the GitHub CLI or +Travis CI's Ruby gem. + +_`.ci.tools.kick`: `tool/github-ci-kick <../tool/github-ci-kick>`_ and +`tool/travis-ci-kick <../tool/travis-ci-kick>`_ both trigger CI builds +without the need to push a change or make a pull request in the `mps +GitHub repo`_. In particular, they are useful for applying CI to work +that was pushed while CI was disabled, for whatever reason. + + +MMQA tests +---------- + +_`.mmqa`: The Memory Management Quality Assurance test suite is +another suite of test cases. + +_`.mmqa.why`: The existence of two test suites originates in the +departmental structure at Harlequin Ltd where the MPS was originally +developed. Tests written by members of the Memory Management Group +went into the code directory along with the MPS itself, while tests +written by members of the Quality Assurance Group went into the test +directory. (Conway's Law states that "organizations which design +systems … are constrained to produce designs which are copies of the +communication structures of these organizations" [Conway_1968]_.) + +_`.mmqa.run`: See test/README for how to run the MMQA tests. + + +Other tests +----------- + +_`.coverage`: The program tool/testcoverage compiles the MPS with +coverage enabled, runs the smoke tests (`.target.testrun`_) and +outputs a coverage report. + +_`.opendylan`: The program tool/testopendylan pulls Open Dylan from +GitHub and builds it against the MPS. + + +References +---------- + +.. [Conway_1968] + "How do Committees Invent?"; + Melvin E. Conway; *Datamation* 14:5, pp. 28–31; April 1968; + + + +Document History +---------------- + +- 2008-12-04 Richard Kistruck. Create. 
Describe finalization tests. + +- 2010-03-03 Richard Kistruck. Correction: it's fin1658a.c and + job001658, not 1638. + +- 2010-03-03 Richard Kistruck. Add zmess.c, zcoll.c. zmess.c subsumes + and replaces fin1658a.c. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +- 2018-06-15 GDR_ Procedure for adding a new smoke test. + +- 2023-01-15 RB_ Bringing CI section up to date with Travis + configuration. Removing obsolete Jenkins info. Adding GitHub CI. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +.. 
end diff --git a/mps/design/testthr.txt b/mps/design/testthr.txt new file mode 100644 index 00000000000..0bea7c1b3a1 --- /dev/null +++ b/mps/design/testthr.txt @@ -0,0 +1,143 @@ +.. mode: -*- rst -*- + +Multi-threaded testing +====================== + +:Tag: design.mps.testthr +:Author: Gareth Rees +:Date: 2014-10-21 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. +:Index terms: pair: threads; testing + + +Introduction +------------ + +_`.intro`: This is the design of the multi-threaded testing module +in the Memory Pool System. + +_`.readership`: Any MPS developer. + +_`.overview`: The MPS is designed to work in a multi-threaded +environment (see design.mps.thread-safety_) and this needs to be +tested on all supported platforms. The multi-threaded testing module +provides an interface for creating and joining threads, so that +multi-threaded test cases are portable to all platforms on which the +MPS runs. + +.. _design.mps.thread-safety: thread-safety + + +Requirements +------------ + +_`.req.create`: The module must provide an interface for creating +threads and running code in them. (Because there is no such interface +in the Standard C Library.) + +_`.req.join`: The module must provide an interface for joining a +running thread: that is, waiting for the thread to finish and +collecting a result. (Because we want to be able to test that the MPS +behaves correctly when interacting with a finished thread.) + +_`.req.portable`: The module must be easily portable to all the +platforms on which the MPS runs. + +_`.req.usable`: The module must be simple to use, not requiring +elaborate setup or tear-down or error handling. (Because we want test +cases to be easy to write.) + + +Implementation +-------------- + +_`.impl.posix`: To meet `.req.portable`_ and `.req.usable`_, the +module presents an interface that is essentially identical to the +POSIX Threads interface [pthreads]_, except for the names. 
On POSIX +platforms the implementation is trivial; on Windows it is +necessary to translate the concepts back and forth. + +_`.impl.storage`: To meet `.req.usable`_, the module defines the +``testthr_t`` type in the header ``testthr.h`` (even though this +requires an ``#if``), so that test cases can easily declare variables +and allocate storage for thread identifiers. + +_`.impl.error`: To meet `.req.usable`_, the module does not propagate +error codes, but calls ``error()`` from the test library if anything +goes wrong. There is thus no need for the test cases to check result +codes. + + +Interface +--------- + +``typedef testthr_t`` + +The type of thread identifiers. + +``typedef void *(*testthr_routine_t)(void *)`` + +The type of a function that can be called when a thread is created. + +``void testthr_create(testthr_t *thread_o, testthr_routine_t start, void *arg)`` + +Create a thread. Store the identifier of the newly created thread in +``*thread_o``, and call ``start()``, passing ``arg`` as the single +parameter. + +``void testthr_join(testthr_t *thread, void **result_o)`` + +Wait for a thread to complete. Suspend execution of the calling thread +until the target thread terminates (if necessary), and if ``result_o`` +is non-NULL, update ``*result_o`` with the return value of the +thread's ``start()`` function. + + +References +---------- + +.. [pthreads] + The Open Group; + "The Single UNIX Specification, Version 2---Threads"; + + + +Document History +---------------- + +- 2014-10-21 GDR_ Initial draft. + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/mps/design/thread-manager.txt b/mps/design/thread-manager.txt new file mode 100644 index 00000000000..b9291e3c76d --- /dev/null +++ b/mps/design/thread-manager.txt @@ -0,0 +1,415 @@ +.. mode: -*- rst -*- + +Thread manager +============== + +:Tag: design.mps.thread-manager +:Author: Richard Brooksby +:Date: 1995-11-20 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: thread manager; design + + +Introduction +------------ + +_`.intro`: This is the design of the thread manager module. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. + +_`.overview`: The thread manager implements two features that allow +the MPS to work in a multi-threaded environment: exclusive access to +memory, and scanning of roots in a thread's registers and control +stack. 
+ + +Requirements +------------ + +_`.req.exclusive`: The thread manager must provide the MPS with +exclusive access to the memory it manages in critical sections of the +code. (This is necessary for the MPS to be able to flip +atomically from the point of view of the mutator.) + +_`.req.scan`: The thread manager must be able to locate references in +the registers and control stack of the current thread, or of a +suspended thread. (This is necessary in order to implement +conservative collection, in environments where the registers and +control stack contain ambiguous roots. Scanning of roots is carried +out during the flip, hence while other threads are suspended.) + +_`.req.register.multi`: It must be possible to register the same +thread multiple times. (This is needed to support the situation where +a program that does not use the MPS is calling into MPS-using code +from multiple threads. On entry to the MPS-using code, the thread can +be registered, but it may not be possible to ensure that the thread is +deregistered on exit, because control may be transferred by some +non-local mechanism such as an exception or ``longjmp()``. We don't +want to insist that the client program keep a table of threads it has +registered, because maintaining the table might require allocation, +which might provoke a collection. See request.dylan.160252_.) + +.. _request.dylan.160252: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160252/ + +_`.req.thread.die`: It would be nice if the MPS coped with threads +that die while registered. (This makes it easier for a client program +to interface with foreign code that terminates threads without the +client program being given an opportunity to deregister them. See +request.dylan.160022_ and request.mps.160093_.) + +.. _request.dylan.160022: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160022 +..
_request.mps.160093: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/mps/160093/ + +_`.req.thread.intr`: It would be nice if on POSIX systems the MPS does +not cause system calls in the mutator to fail with EINTR due to the +MPS thread-management signals being delivered while the mutator is +blocked in a system call. (See `GitHub issue #9`_.) + +.. _GitHub issue #9: https://github.com/ravenbrook/mps/issues/9 + +_`.req.thread.errno`: It would be nice if on POSIX systems the MPS +does not cause system calls in the mutator to update ``errno`` due to +the MPS thread-management signals being delivered while the mutator is +blocked in a system call, and the MPS signal handlers updating +``errno``. (See `GitHub issue #10`_.) + +.. _GitHub issue #10: https://github.com/ravenbrook/mps/issues/10 + +_`.req.thread.lasterror`: It would be nice if on Windows systems the +MPS does not cause system calls in the mutator to update the value +returned from ``GetLastError()`` when the exception handler is called +due to a fault. This may cause the MPS to destroy the previous value +there. (See `GitHub issue #61`_.) + +.. _GitHub issue #61: https://github.com/Ravenbrook/mps/issues/61 + +Design +------ + +_`.sol.exclusive`: In order to meet `.req.exclusive`_, the arena +maintains a ring of threads (in ``arena->threadRing``) that have been +registered by the client program. When the MPS needs exclusive access +to memory, it suspends all the threads in the ring except for the +currently running thread. When the MPS no longer needs exclusive +access to memory, it resumes all threads in the ring. + +_`.sol.exclusive.assumption`: This relies on the assumption that any +thread that might refer to, read from, or write to memory in +automatically managed pool classes is registered with the MPS. This is +documented in the manual under ``mps_thread_reg()``. + +_`.sol.thread.term`: The thread manager cannot reliably detect that a +thread has terminated. 
The reason is that threading systems do not +guarantee behaviour in this case. For example, POSIX_ says, "A +conforming implementation is free to reuse a thread ID after its +lifetime has ended. If an application attempts to use a thread ID +whose lifetime has ended, the behavior is undefined." For this reason, +the documentation for ``mps_thread_reg()`` specifies that it is an +error if a thread dies while registered. + +.. _POSIX: https://pubs.opengroup.org/onlinepubs/9699919799/functions/V2_chap02.html#tag_15_09_02 + +_`.sol.thread.term.attempt`: Nonetheless, the thread manager makes a +"best effort" to continue running after detecting a terminated thread, +by moving the thread to a ring of dead threads, and avoiding scanning +it. This might allow a malfunctioning client program to limp along. + +_`.sol.thread.intr`: The POSIX specification for sigaction_ says that +if the ``SA_RESTART`` flag is set, and if "a function specified as +interruptible is interrupted by this signal, the function shall +restart and shall not fail with ``EINTR`` unless otherwise specified." + +.. |sigaction| replace:: ``sigaction()`` +.. _sigaction: https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigaction.html + +_`.sol.thread.intr.linux`: Linux does not fully implement the POSIX +specification, so that some system calls are "never restarted after +being interrupted by a signal handler, regardless of the use of +SA_RESTART; they always fail with the error EINTR when interrupted by +a signal handler". The exceptional calls are listed in the |signal|_ +manual. There is nothing that the MPS can do about this except to warn +users in the reference manual. + +.. |signal| replace:: signal(7) +.. 
_signal: https://man7.org/linux/man-pages/man7/signal.7.html + +_`.sol.thread.errno`: The POSIX specification for sigaction_ says, +"Note in particular that even the "safe" functions may modify +``errno``; the signal-catching function, if not executing as an +independent thread, should save and restore its value." All MPS +signal handlers therefore save and restore ``errno`` using the macros +``ERRNO_SAVE`` and ``ERRNO_RESTORE``. + +_`.sol.thread.lasterror`: The documentation for ``AddVectoredExceptionHandler`` +does not mention ``GetLastError()`` at all, but testing_ the behaviour +reveals that any value in ``GetLastError()`` is not preserved. Therefore, +this value is saved using ``LAST_ERROR_SAVE`` and ``LAST_ERROR_RESTORE``. + +.. _testing: https://github.com/Ravenbrook/mps/issues/61 + +Interface +--------- + +``typedef struct mps_thr_s *Thread`` + +_`.if.thread`: The type of threads. It is a pointer to an opaque +structure, which must be defined by the implementation. + +``Bool ThreadCheck(Thread thread)`` + +_`.if.check`: The check function for threads. See design.mps.check_. + +.. _design.mps.check: check + +``Bool ThreadCheckSimple(Thread thread)`` + +_`.if.check.simple`: A thread-safe check function for threads, for use +by ``mps_thread_dereg()``. It can't use ``AVER(TESTT(Thread, +thread))``, as recommended by design.mps.sig.check.arg.unlocked_, +since ``Thread`` is an opaque type. + +.. _design.mps.sig.check.arg.unlocked: sig#.check.arg.unlocked + +``Arena ThreadArena(Thread thread)`` + +_`.if.arena`: Return the arena that the thread is registered with. +Must be thread-safe as it needs to be called by ``mps_thread_dereg()`` +before taking the arena lock. + +``Res ThreadRegister(Thread *threadReturn, Arena arena)`` + +_`.if.register`: Register the current thread with the arena, +allocating a new ``Thread`` object. If successful, update +``*threadReturn`` to point to the new thread and return ``ResOK``.
+Otherwise, return a result code indicating the cause of the error. + +``void ThreadDeregister(Thread thread, Arena arena)`` + +_`.if.deregister`: Remove ``thread`` from the list of threads managed +by the arena and free it. + +``void ThreadRingSuspend(Ring threadRing, Ring deadRing)`` + +_`.if.ring.suspend`: Suspend all the threads on ``threadRing``, except +for the current thread. If any threads are discovered to have +terminated, move them to ``deadRing``. + +``void ThreadRingResume(Ring threadRing, Ring deadRing)`` + +_`.if.ring.resume`: Resume all the threads on ``threadRing``. If any +threads are discovered to have terminated, move them to ``deadRing``. + +``Thread ThreadRingThread(Ring threadRing)`` + +_`.if.ring.thread`: Return the thread that owns the given element of +the thread ring. + +``Res ThreadScan(ScanState ss, Thread thread, Word *stackCold, mps_area_scan_t scan_area, void *closure)`` + +_`.if.scan`: Scan the stacks and root registers of ``thread``, using +``ss`` and ``scan_area``. ``stackCold`` points to the cold end of the +thread's stack---this is the value that was supplied by the client +program when it called ``mps_root_create_thread()``. In the common +case, where the stack grows downwards, ``stackCold`` is the highest +stack address. Return ``ResOK`` if successful, another result code +otherwise. + + +Implementations +--------------- + +Generic implementation +...................... + +_`.impl.an`: In ``than.c``. + +_`.impl.an.single`: Supports a single thread. (This cannot be enforced +because of `.req.register.multi`_.) + +_`.impl.an.register.multi`: There is no need for any special treatment +of multiple threads, because ``ThreadRingSuspend()`` and +``ThreadRingResume()`` do nothing. + +_`.impl.an.suspend`: ``ThreadRingSuspend()`` does nothing because +there are no other threads. + +_`.impl.an.resume`: ``ThreadRingResume()`` does nothing because no +threads are ever suspended. 
+ +_`.impl.an.scan`: Just calls ``StackScan()`` since there are no +suspended threads. + + +POSIX threads implementation +............................ + +_`.impl.ix`: In ``thix.c`` and ``pthrdext.c``. See +design.mps.pthreadext_. + +.. _design.mps.pthreadext: pthreadext + +_`.impl.ix.multi`: Supports multiple threads. + +_`.impl.ix.register`: ``ThreadRegister()`` records the thread id of +the current thread by calling |pthread_self|_. + +.. |pthread_self| replace:: ``pthread_self()`` +.. _pthread_self: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_self.html + +_`.impl.ix.register.multi`: Multiply-registered threads are handled +specially by the POSIX thread extensions. See +design.mps.pthreadext.req.suspend.multiple_ and +design.mps.pthreadext.req.resume.multiple_. + +.. _design.mps.pthreadext.req.suspend.multiple: pthreadext#.req.suspend.multiple +.. _design.mps.pthreadext.req.resume.multiple: pthreadext#.req.resume.multiple + +_`.impl.ix.suspend`: ``ThreadRingSuspend()`` calls +``PThreadextSuspend()``. See design.mps.pthreadext.if.suspend_. + +.. _design.mps.pthreadext.if.suspend: pthreadext#.if.suspend + +_`.impl.ix.resume`: ``ThreadRingResume()`` calls +``PThreadextResume()``. See design.mps.pthreadext.if.resume_. + +.. _design.mps.pthreadext.if.resume: pthreadext#.if.resume + +_`.impl.ix.scan.current`: ``ThreadScan()`` calls ``StackScan()`` if +the thread is current. + +_`.impl.ix.scan.suspended`: ``PThreadextSuspend()`` records the +context of each suspended thread, and ``ThreadRingSuspend()`` stores +this in the ``Thread`` structure, so that it is available by the time +``ThreadScan()`` is called. + + +Windows implementation +...................... + +_`.impl.w3`: In ``thw3.c``. + +_`.impl.w3.multi`: Supports multiple threads. + +_`.impl.w3.register`: ``ThreadRegister()`` records the following +information for the current thread: + + - A ``HANDLE`` to the thread, with access flags + ``THREAD_SUSPEND_RESUME`` and ``THREAD_GET_CONTEXT``.
This handle + is needed as parameter to |SuspendThread|_ and + |ResumeThread|_. + + - The result of |GetCurrentThreadId|_, so that the current thread + may be identified in the ring of threads. + +.. |SuspendThread| replace:: ``SuspendThread()`` +.. _SuspendThread: https://docs.microsoft.com/en-gb/windows/desktop/api/processthreadsapi/nf-processthreadsapi-suspendthread +.. |ResumeThread| replace:: ``ResumeThread()`` +.. _ResumeThread: https://docs.microsoft.com/en-gb/windows/desktop/api/processthreadsapi/nf-processthreadsapi-resumethread +.. |GetCurrentThreadId| replace:: ``GetCurrentThreadId()`` +.. _GetCurrentThreadId: https://docs.microsoft.com/en-gb/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getcurrentthreadid + +_`.impl.w3.register.multi`: There is no need for any special treatment +of multiple threads, because Windows maintains a suspend count that is +incremented on |SuspendThread|_ and decremented on +|ResumeThread|_. + +_`.impl.w3.suspend`: ``ThreadRingSuspend()`` calls |SuspendThread|_. + +_`.impl.w3.resume`: ``ThreadRingResume()`` calls |ResumeThread|_. + +_`.impl.w3.scan.current`: ``ThreadScan()`` calls ``StackScan()`` if +the thread is current. This is because |GetThreadContext|_ doesn't +work on the current thread: the context would not necessarily have the +values which were in the saved registers on entry to the MPS. + +.. |GetThreadContext| replace:: ``GetThreadContext()`` +.. _GetThreadContext: https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getthreadcontext + +_`.impl.w3.scan.suspended`: Otherwise, ``ThreadScan()`` calls +|GetThreadContext|_ to get the root registers and the stack +pointer. + + +macOS implementation +.................... + +_`.impl.xc`: In ``thxc.c``. + +_`.impl.xc.multi`: Supports multiple threads. + +_`.impl.xc.register`: ``ThreadRegister()`` records the Mach port of +the current thread by calling |mach_thread_self|_. + +.. 
|mach_thread_self| replace:: ``mach_thread_self()`` +.. _mach_thread_self: https://www.gnu.org/software/hurd/gnumach-doc/Thread-Information.html + +_`.impl.xc.register.multi`: There is no need for any special treatment +of multiple threads, because Mach maintains a suspend count that is +incremented on |thread_suspend|_ and decremented on +|thread_resume|_. + +.. |thread_suspend| replace:: ``thread_suspend()`` +.. _thread_suspend: https://www.gnu.org/software/hurd/gnumach-doc/Thread-Execution.html +.. |thread_resume| replace:: ``thread_resume()`` +.. _thread_resume: https://www.gnu.org/software/hurd/gnumach-doc/Thread-Execution.html + +_`.impl.xc.suspend`: ``ThreadRingSuspend()`` calls +|thread_suspend|_. + +_`.impl.xc.resume`: ``ThreadRingResume()`` calls |thread_resume|_. + +_`.impl.xc.scan.current`: ``ThreadScan()`` calls ``StackScan()`` if +the thread is current. + +_`.impl.xc.scan.suspended`: Otherwise, ``ThreadScan()`` calls +|thread_get_state|_ to get the root registers and the stack pointer. + +.. |thread_get_state| replace:: ``thread_get_state()`` +.. _thread_get_state: https://www.gnu.org/software/hurd/gnumach-doc/Thread-Execution.html + + +Document History +---------------- + +- 1995-11-20 RB_ Incomplete design. + +- 2002-06-21 RB_ Converted from MMInfo database design document. + +- 2013-05-26 GDR_ Converted to reStructuredText. + +- 2014-10-22 GDR_ Complete design. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/thread-safety.txt b/mps/design/thread-safety.txt new file mode 100644 index 00000000000..3ecf5be7978 --- /dev/null +++ b/mps/design/thread-safety.txt @@ -0,0 +1,328 @@ +.. mode: -*- rst -*- + +Thread safety in the MPS +======================== + +:Tag: design.mps.thread-safety +:Author: David Moore +:Date: 1995-10-03 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: thread safety; design + + +Introduction +------------ + +_`.intro`: This describes how thread safety is achieved in the MPS. + +_`.overview`: The MPS is expected to run in an environment with +multiple threads calling into the MPS. The initial approach is very +simple. Some of the code is known to operate with exclusive access to +the data it manipulates, so this code is safe. For the rest of the +code, shared data structures are locked by the use of a single binary +lock (design.mps.lock_) per arena. 
This lock is claimed on entry to +the MPS and released on exit from it. So there is at most a single +thread (per arena) running "inside" the MPS at a time. + +.. _design.mps.lock: lock + + +Requirements +------------ + +_`.req.threads`: Code must work correctly in presence of multiple +threads all calling into the MPS. + +_`.req.arena`: The MPS must safely manage per-arena non-shared data. + +_`.req.global.mutable`: The MPS must safely manage global data that +may be updated many times (that is, the arena ring). + +_`.req.global.once`: The MPS must safely manage global data that is +updated at most once (that is, the protocol classes). + +_`.req.deadlock`: The MPS must not deadlock. + +_`.req.fork`: On Unix platforms, the MPS should be able to continue in +the child process after a ``fork()``. (Source: job004062_.) + +.. _job004062: https://www.ravenbrook.com/project/mps/issue/job004062/ + +_`.req.perf`: Performance should not be unreasonably hindered. + + +Analysis +-------- + +_`.analysis.simple`: To have the code functioning correctly it should be +easy to change correctly. So a simple approach is desirable. We have +to also ensure that performance is not unreasonably downgraded. + + +Performance cost of locking +........................... + +_`.lock-cost`: The cost of locking in performance terms are: + +- _`.lock-cost.overhead`: the overhead of claiming and releasing locks; + +- _`.lock-cost.pause`: the pauses caused by one thread being blocked + on another thread. + +- _`.lock-cost.wait`: the time wasted by one thread being blocked on + another thread. + +_`.analysis.perf.signif`: `.lock-cost.pause`_ is significant if there are +MPS functions that take a long time. Using more locks, e.g. having a +lock per pool as well as a lock per arena, is a way of decreasing the +locking conflict between threads (`.lock-cost.pause`_ and +`.lock-cost.wait`_). However this could increase +`.lock-cost.overhead`_ significantly. 
+ +_`.analysis.perf.work`: But all MPS functions imply a small work-load +unless a collection is taking place. In the case of a collection, in +practice and certainly in the near future, all threads will most +likely be suspended while the collection work is going on. (The pages +being scanned will need to be unprotected which implies the mutator +will have to be stopped.) We also have to remember that unless we are +running on genuine multiprocessor `.lock-cost.wait`_ is irrelevant. + +_`.analysis.perf.alloc`: During typical use we expect that it is +allocation that is the most frequent activity. Allocation buffers +(design.mps.buffer_) are designed to allow allocation in concurrent +threads without needing a lock. So the most significant time a thread +spends in the MPS will be on a buffer-fill or during a collection. The +next most significant use is likely to be buffer create and deletion, +as a separate buffer will be required for each thread. + +.. _design.mps.buffer: buffer + +_`.analysis.perf.lock`: So overall the performance cost of locking is, I +estimate, most significantly the overhead of calling the locking +functions. Hence it would be undesirable from a performance point of +view to have more than one lock. + + +Recursive vs binary locks +......................... + +_`.analysis.reentrance`: The simplest way to lock the code safely is to +define which code runs inside or outside the lock. Calling from the +outside to the inside implies a lock has to be claimed. Returning +means the lock has to be released. Control flow from outside to +outside and from inside to inside needs no locking action. To +implement this a function defined on the external interface needs to +claim the lock on entry and release it on exit. Our code currently +uses some external functions with the lock already held. There are two +ways to implement this: + +#. _`.recursive`: Each external function claims a recursive lock. 
+ + - simple; + - have to worry about locking depth; + - extra locking overhead on internal calls of external functions; + +#. _`.binary`: Each external function claims a binary lock. Replace + each internal call of an external function with a call to a newly + defined internal one. + + - more code + - slightly easier to reason about + +_`.analysis.strategy`: It seems that the `.recursive`_ strategy is the +easiest to implement first, but could be evolved into a `.binary`_ +strategy. (That evolution has now happened. tony 1999-08-31). + + +Fork safety +........... + +In order to support ``fork()``, we need to solve the following problems: + +_`.analysis.fork.lock`: Any MPS lock might be held by another thread at +the point where ``fork()`` is called. The lock would be protecting the +integrity of some data structure. But in the child the thread holding +the lock no longer exists, and so there is no way to restore the +integrity. + +_`.analysis.fork.threads`: In the child process after a ``fork()``, there +is only one thread, which is a copy of the thread that called +``fork()`` in the parent process. All other threads no longer exist. +But the MPS maintains references to these threads, via the +``ThreadStruct`` objects created by calls to ``mps_thread_reg()``. If +we try to communicate with these threads it will fail or crash. + +_`.analysis.fork.exc-thread`: On macOS, the MPS handles protection faults +using a dedicated thread. But in the child process after a ``fork()``, +this dedicated thread no longer exists. Also, the Mach port on which +the dedicated thread receives its messages does not exist in the child +either. + +_`.analysis.fork.mach-port`: On macOS, the MPS identifies threads via +their Mach port numbers, which are stashed in the ``ThreadStruct`` and +used to identify the current thread, for example in +``ThreadSuspend()``. But in the child process after ``fork()`` the +running thread has a different Mach port number than it did in the +parent.
+ + +Design +------ + +_`.sol.locks`: Use MPS locks (design.mps.lock_) to implement the +locking. + +.. _design.mps.lock: lock + +_`.sol.arena`: Each arena has a binary lock that protects the +non-shared data for that arena. Functions in the public interface fall +into the following categories: + +- _`.sol.arena.entry`: Must be called with the arena lock not held + (thus, these functions are not callable from format methods and + other callbacks). Claims arena binary lock on entry, releases it on + exit. The usual case. For example, ``mps_arena_park()``. + +- _`.sol.arena.recursive`: May be called with the arena lock held (for + example, from format methods and other callbacks). Claim arena lock + recursively on entry, release it on exit. For example, + ``mps_addr_fmt()``. + +- _`.sol.arena.lock-free`: May be called at any time and does not + claim or release any locks, because it is documented as being up to + the client program to ensure thread safety (for example, + ``mps_ld_add()``). + +- _`.sol.arena.maybe-entry`: Must be called with the arena lock not + held. In the common case, does not claim or release any locks + (because it is documented as being up to the client program to + ensure thread safety, as for `.sol.arena.lock-free`_), but may need + to claim and release the arena binary lock (as for + `.sol.arena.entry`_). For example, ``mps_reserve()``, + ``mps_commit()``, ``mps_ap_frame_push()``, and + ``mps_ap_frame_pop()``. + +_`.sol.global.mutable`: There is a global binary lock (see +design.mps.lock.req.global.binary_) that protects mutable data shared +between all arenas (that is, the arena ring lock: see +design.mps.arena.static.ring.lock_). + +.. _design.mps.lock.req.global.binary: lock#.req.global.binary +.. 
_design.mps.arena.static.ring.lock: arena#.static.ring.lock + +_`.sol.global.once`: There is a global recursive lock (see +design.mps.lock.req.global.recursive_) that protects static data which +must be initialized at most once (that is, the protocol classes). Each +static data structure is accessed only via an "ensure" function that +claims the global recursive lock, checks to see if the data structure +has been initialized yet, and does so if necessary (see +design.mps.protocol.impl.define-class.lock_). + +.. _design.mps.lock.req.global.recursive: lock#.req.global.recursive +.. _design.mps.protocol.impl.define-class.lock: protocol#.impl.define-class.lock + +_`.sol.deadlock`: A strict ordering is required between the global and +arena locks to prevent deadlock. The binary global lock may not be +claimed while either the arena or recursive global lock is held; the +arena lock may not be claimed while the recursive global lock is held. +Each arena lock is independent of all other arena locks; that is, a +thread may not attempt to claim more than one arena lock at a time. +See design.mps.arena.lock.avoid_. + +.. _design.mps.arena.lock.avoid: arena#.lock.avoid + +_`.sol.check`: The MPS interface design requires that a function must +check the signatures on the data structures pointed to by its +parameters (see design.mps.sig.check.arg_). In particular, for +functions in the class `.sol.arena.entry`_ it is necessary to check +some data structure signatures before taking the arena lock. The +checking interface provides a ``TESTT()`` macro that checks the +signature in a thread-safe way (see +design.mps.sig.check.arg.unlocked_). + +.. _design.mps.sig.check.arg: sig#.check.arg +.. 
_design.mps.sig.check.arg.unlocked: sig#.check.arg.unlocked + + +Fork safety +----------- + +_`.sol.fork.atfork`: The MPS solves the fork-safety problems by +calling |pthread_atfork|_ to install handler functions that are +called in the parent process just before fork (the "prepare" handler), +and in the parent and child processes just after fork (the "parent" +and "child" handlers respectively). + +.. |pthread_atfork| replace:: ``pthread_atfork()`` +.. _pthread_atfork: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_atfork.html + +_`.sol.fork.lock`: In the prepare handler, the MPS takes all the +locks: that is, the global locks, and then the arena lock for every +arena. Note that a side-effect of this is that the shield is entered +for each arena. In the parent handler, the MPS releases all the locks. +In the child handler, the MPS would like to release the locks but this +does not work on any supported platform, so instead it reinitializes +them, by calling ``LockInitGlobal()``. + +_`.sol.fork.thread`: On macOS, in the prepare handler, the MPS +identifies for each arena the current thread, that is, the one calling +``fork()`` which will survive into the child process, and marks this +thread by setting a flag in the appropriate ``ThreadStruct``. In the +parent handler, this flag is cleared. On all Unix platforms, in the +child handler, all threads (except for the current thread) are marked +as dead and transferred to the ring of dead threads. (The MPS can't +destroy the thread structures at this point because they are owned by +the client program.) + +_`.sol.fork.exc-thread`: On macOS, in the child handler, the exception +port and dedicated thread are re-created, and the current thread +re-registered with the exception port. + +_`.sol.fork.mach-port`: On macOS, in the child handler, the thread +flagged as forking gets its port number updated. + + +Document History +---------------- + +- 1995-10-03 David Moore. Incomplete design. 
+ +- 2002-06-21 RB_ Converted from MMInfo database design document. + +- 2013-05-22 GDR_ Converted to reStructuredText. + +- 2018-06-14 GDR_ Added fork safety design. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/trace.txt b/mps/design/trace.txt new file mode 100644 index 00000000000..891352bfd15 --- /dev/null +++ b/mps/design/trace.txt @@ -0,0 +1,321 @@ +.. mode: -*- rst -*- + +Tracer +====== + +:Tag: design.mps.trace +:Author: David Jones +:Date: 1996-09-25 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. 
+:Index terms: pair: tracer; design + + +Introduction +------------ + +.. warning:: + + This document is currently a mixture of very old design notes (the + preformatted section immediately following) and some newer stuff. + It doesn't yet form anything like a complete picture. + + +Architecture +------------ + +_`.instance.limit`: There is a limit on the number of traces that can +be created at any one time. This limits the number of concurrent +traces. This limitation is expressed in the symbol ``TraceLIMIT``. + +.. note:: + + ``TraceLIMIT`` is currently set to 1 as the MPS assumes in various + places that only a single trace is active at a time. See + request.mps.160020_ "Multiple traces would not work". David Jones, + 1998-06-15. + +.. _request.mps.160020: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/mps/160020 + +_`.rate`: See `mail.nickb.1997-07-31.14-37`_. + +.. _mail.nickb.1997-07-31.14-37: https://info.ravenbrook.com/project/mps/mail/1997/07/31/14-37/0.txt + +.. note:: + + Now revised? See request.epcore.160062_ and + change.epcore.minnow.160062. David Jones, 1998-06-15. + +.. _request.epcore.160062: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/epcore/160062 + +_`.exact.legal`: Exact references must either point outside the arena +(to non-managed address space) or to a tract allocated to a pool. +Exact references that are to addresses which the arena has reserved +but hasn't allocated memory to are illegal (such a reference cannot +possibly refer to a real object, and so cannot be exact). We check +that this is the case in ``TraceFix()``. + +.. note:: + + Depending on the future semantics of ``PoolDestroy()`` we might + need to adjust our strategy here. See `mail.dsm.1996-02-14.18-18`_ + for a strategy of coping gracefully with ``PoolDestroy()``. + + .. 
_mail.dsm.1996-02-14.18-18: https://info.ravenbrook.com/project/mps/mail/1996/02/14/18-18/0.txt
+
+_`.fix.fixed.all`: ``ss->fixedSummary`` is accumulated (in
+``TraceFix()``) for all pointers, whether or not they are genuine
+references. We could accumulate fewer pointers here; if a pointer
+fails the ``TractOfAddr()`` test then we know it isn't a reference, so
+we needn't accumulate it into the fixed summary. The design allows
+this, but it breaks a useful post-condition on scanning (if the
+accumulation of ``ss->fixedSummary`` was moved the accuracy of
+``ss->fixedSummary`` would vary according to the "width" of the white
+summary). See `mail.pekka.1998-02-04.16-48`_ for improvement suggestions.
+
+.. _mail.pekka.1998-02-04.16-48: https://info.ravenbrook.com/project/mps/mail/1998/02/04/16-48/0.txt
+
+
+Analysis
+--------
+
+_`.fix.copy-fail`: Fixing can always succeed, even if copying the
+referenced object has failed (due to lack of memory, for example), by
+backing off to treating a reference as ambiguous. Assuming that fixing
+an ambiguous reference doesn't allocate memory (which is no longer
+true for AMC for example). See request.dylan.170560_ for a slightly
+more sophisticated way to proceed when you can no longer allocate
+memory for copying.
+
+.. _request.dylan.170560: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170560
+
+
+Ideas
+-----
+
+_`.flip.after`: To avoid excessive barrier impact on the mutator
+immediately after flip, we could scan during flip other objects which
+are "near" the roots, or otherwise known to be likely to be accessed
+in the near future.
+
+
+Implementation
+--------------
+
+Speed
+.....
+
+_`.fix`: The function implementing the fix operation should be called
+``TraceFix()`` and this name is pervasive in the MPS and its documents
+to describe this function. Nonetheless, optimisation and strict
+aliasing rules have meant that we need to use the external name for
+it, ``_mps_fix2()``.
+
+_`.fix.speed`: The fix path is critical to garbage collection speed.
+Abstractly, the fix operation is applied to all references in the
+non-white heap and all references in the copied heap. Remembered sets
+cut down the number of segments we have to scan. The zone test cuts
+down the number of references we call fix on. The speed of the
+remainder of the fix path is still critical to system performance.
+Various modifications to and aspects of the system are concerned with
+maintaining the speed along this path. See
+`design.mps.critical_path`_.
+
+.. _design.mps.critical_path: critical_path
+
+_`.fix.tractofaddr`: A reference that passes the zone test is then
+looked up to find the tract it points to, an operation equivalent to
+calling ``TractOfAddr()``.
+
+_`.fix.tractofaddr.inline`: ``TraceFix()`` doesn't actually call
+``TractOfAddr()``. Instead, it expands this operation inline (calling
+``ChunkOfAddr()``, then ``INDEX_OF_ADDR()``, checking the appropriate
+bit in the chunk's ``allocTable``, and finally looking up the tract in
+the chunk's page table). The reason for inlining this code is that we
+need to know whether the reference points to a chunk (and not just
+whether it points to a tract) in order to check the `.exact.legal`_
+condition.
+
+_`.fix.whiteseg`: The reason for looking up the tract is to determine
+whether the reference is to a white segment.
+
+.. note::
+
+    It is likely to be more efficient to maintain a separate lookup
+    table from address to white segment, rather than indirecting
+    through the chunk and the tract. See job003796_.
+
+.. _job003796: https://www.ravenbrook.com/project/mps/issue/job003796/
+
+_`.fix.noaver`: ``AVER()`` statements in the code add bulk to the code
+(reducing I-cache efficacy) and add branches to the path (polluting
+the branch predictors) resulting in a slow down.
Replacing the +``AVER()`` statements with ``AVER_CRITICAL()`` on the critical path +improves the overall speed of the Dylan compiler by as much as 9%. See +`design.mps.critical_path`_. + +_`.fix.nocopy`: ``amcSegFix()`` used to copy objects by using the +format's copy method. This involved a function call (through an +indirection) and in ``dylan_copy`` a call to ``dylan_skip`` (to +recompute the length) and call to ``memcpy`` with general parameters. +Replacing this with a direct call to ``memcpy`` removes these +overheads and the call to ``memcpy`` now has aligned parameters. The +call to ``memcpy`` is inlined by the C compiler. This change results +in a 4–5% speed-up in the Dylan compiler. + +_`.reclaim`: Because the reclaim phase of the trace (implemented by +``TraceReclaim()``) examines every segment it is fairly time +intensive. Richard Tucker's profiles presented in +request.dylan.170551_ show a gap between the two varieties variety.hi +and variety.wi. + +.. _request.dylan.170551: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170551 + +_`.reclaim.noaver`: Accordingly, reclaim methods use +``AVER_CRITICAL()`` instead of ``AVER()``. + + +Life cycle of a trace object +---------------------------- + +``TraceCreate()`` creates a trace in state ``TraceINIT`` + +Some segments get condemned (made white). + +``TraceStart()`` gets called which: + +- Derives an initial reference partition based on the existing + white set. The white zone set and the segments' summaries are used to + create an initial grey set. + +- Emits a ``GCStart()`` message. + +- Initialises ``trace->rate`` by estimating the required scanning + rate. + +- Moves the trace into the state ``TraceUNFLIPPED``. + +- Immediately calls ``traceFlip`` which flips the trace and moves + it into state ``TraceFLIPPED``. + +Whilst a trace is alive every so often its ``TraceAdvance()`` method +gets invoked (via ``TracePoll()``) in order to do a step of tracing +work. 
``TraceAdvance()`` is responsible for ticking through the trace's
+top-level state machine. Most of the interesting work, the tracing,
+happens in the ``TraceFLIPPED`` state.
+
+The trace transitions through its states in the following sequence:
+``TraceINIT`` → (``TraceUNFLIPPED``) → ``TraceFLIPPED`` →
+``TraceRECLAIM`` → ``TraceFINISHED``.
+
+Whilst ``TraceUNFLIPPED`` appears in the code, no trace does any work
+in this state; all traces are immediately flipped to be in the
+``TraceFLIPPED`` state (see above).
+
+Once the trace is in the ``TraceFINISHED`` state it performs no more
+work and it can be safely destroyed. Generally the callers of
+``TraceAdvance()`` will destroy the trace.
+
+
+Making progress: scanning grey segments
+.......................................
+
+Most of the interesting work of a trace, the actual tracing, happens
+in the ``TraceFLIPPED`` state (work *would* happen in the
+``TraceUNFLIPPED`` state, but that is not implemented).
+
+The tracer makes progress by choosing a grey segment to scan, and
+scanning it. The actual scanning is performed by pools.
+
+Note that at all times a reference partition is maintained.
+
+The order in which the trace scans things determines the semantics of
+certain types of references (in particular, weak and final
+references). Or, to put it another way, the desired semantics of weak
+and final references impose certain restrictions on the order in which
+the trace can scan things.
+
+_`.rank`: The tracer uses a system of *reference ranks* (or just ranks)
+so that it can impose an order on its scanning work. The ranks are
+ordered. [TODO: Explain how ordering is also required for transforms.
+See impl.c.trans.rank-order. RB 2023-06-16]
+
+The tracer proceeds band by band. The first band is all objects it can
+reach by following references of the first rank. The second band is
+all subsequent objects it can reach by following references of the
+second and first ranks.
The third band is all subsequent objects it +can reach by following references of the third, second, and first +ranks. And so on. The description of the tracer working like this +originated in [RHSK_2007-06-25]_. + +A trace keeps track of which band it is tracing. This is returned by +the ``TraceBand()`` method. Keeping this band information helps it +implement the semantics of finalization and weakness. The band used to +not be explicitly stored, but this hindered the implementation of good +finalization semantics (in some circumstances finalization messages +were delayed by at least one collection cycle: see job001658_). + +.. _job001658: https://info.ravenbrook.com/project/mps/issue/job001658/ + +The band is used when selecting a grey segment to scan (the selection +occurs in ``traceFindGrey()``). The tracer attempts to first find +segments whose rank is the current band, then segments whose rank is +previous to the current band, and so on. If there are no segments +found then the current band is exhausted and the current band is +incremented to the next rank. When the current band is moved through +all the ranks in this fashion there is no more tracing to be done. + + + +References +---------- + +.. [RHSK_2007-06-25] + "The semantics of rank-based tracing"; + Richard Kistruck; Ravenbrook Limited; 2007-06-25; + . + + +Document History +---------------- + +- 1996-09-25 David Jones. Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2007-07-02 David Jones. Added notes on tracer progress. + +- 2013-05-22 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/transform.txt b/mps/design/transform.txt new file mode 100644 index 00000000000..0975215a013 --- /dev/null +++ b/mps/design/transform.txt @@ -0,0 +1,158 @@ +.. mode: -*- rst -*- + + +Transforms +========== + +:Tag: design.mps.transform +:Author: Richard Brooksby +:Date: 2012-09-04 +:Status: complete +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: + pair: transforms; design + + +Introduction +------------ + +This document describes the Transform mechanism of the Memory Pool System. +Transforms allow the client code to replace a set of object references on the +heap. + +The readership of this document is any developer intending to modify the +Transform implementation. 
+ + +Background +---------- + +Göran Rydqvist of Configura originally expressed the requirement for the +MPS to support the change of layout of objects in CET [GR_2010-02-25]_. +Ravenbrook proposed several methods [RHSK_2010-09-21]_ including: + + If you need to add fields, then use a special new MPS function (that + doesn't exist yet):: + + mps_arena_transform_objects(&my_transform_function); + + This traverses the object graph, lets your transform_function + basically ``realloc()`` the field-block, and MPS fixes up all + references from other objects to point to the new field-block. + + Unfortunately, this idea is probably killed off by ambiguous + references :-(. You could only run the patch if you could + *guarantee* there are no ambiguous refs you want. In other words, + any object refs on the stack would become instant death (or worse: + subtle slow death :-). Therefore we don't really like this idea + (unfortunately). There are safer and simpler ways to do it, we + think... + +which Configura selected [GR_2010-09-22]_. + +An initial implementation was made by RHSK and released to Configura as +"experimental", however Configura put it into production. + +During work on adapting the MPS to 64-bit Windows, RB reformed and +reimplemented transforms based on RHSK's original work. + + +Overview +-------- + +The client program builds a table mapping "old" references to "new" ones +in a ``Transform`` object. This is then "applied", causing a garbage +collection trace in which the fix function is substituted by +``transformFix()``, which spots "old" references and replaces them with +"new" ones, in addition to applying the usual garbage collection fix +function. + +This design was arrived at after some pain. The MPS isn't really +designed for generalized transformation of the object graph, and the +pools generally assume that they're doing a garbage collection when +they're asked to condemn, scan, fix, and reclaim stuff. 
This makes it +very hard to apply the transform without also doing a garbage +collection. Changing this would require a significant reworking of +the MPS to generalise its ideas, and would bloat the pool classes. + + +Not yet written +--------------- + +* Ambiguous references and aborting the transform. + +* How ambiguous references are avoided using ``arena->stackWarm``. + +* Why it does a garbage collection and not just a transforming scan. + [This is partly explained in Overview_ above. RB 2023-06-16] + +* Nice side-effect is that "old" objects are killed. + +* Why the arena must be parked [When writing this up see + impl.c.trans.park and impl.c.trans.assume.parked. RB 2023-06-16]. + +* Why we can't transform arbitrary references (see + impl.c.trans.old-white). + + +References +---------- + +.. [GR_2010-02-25] + "Incremental object" (e-mail); + Göran Rydqvist; Configura; 2010-02-25; + . + +.. [RHSK_2010-09-21] + "Incremental object ideas" (e-mail); + Richard Kistruck; Ravenbrook Limited; 2010-09-21; + . + +.. [GR_2010-09-22] + "Incremental object ideas" (e-mail); + Göran Rydqvist; Configura; 2010-09-22; + . + + +Document History +---------------- + +- 2012-09-04 RB_ First draft. + +- 2022-01-23 GDR_ Converted to reStructuredText. + +- 2023-06-16 RB_ Updated and improved in order to make Transforms part + of the public MPS. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2012–2023 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/type.txt b/mps/design/type.txt new file mode 100644 index 00000000000..a3894d199bb --- /dev/null +++ b/mps/design/type.txt @@ -0,0 +1,722 @@ +.. mode: -*- rst -*- + +General MPS types +================= + +:Tag: design.mps.type +:Author: Richard Brooksby +:Date: 1996-10-23 +:Status: complete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: general types; design + + +Introduction +------------ + +_`.intro`: See impl.h.mpmtypes. + + +Rationale +--------- + +Some types are declared to resolve a point of design, such as the best +type to use for array indexing. + +Some types are declared so that the intention of code is clearer. For +example, ``Byte`` is necessarily ``unsigned char``, but it's better to +say ``Byte`` in your code if it's what you mean. + + +Concrete types +-------------- + +``typedef unsigned AccessSet`` + +_`.access-set`: An ``AccessSet`` is a bitset of ``Access`` modes, +which are ``AccessREAD`` and ``AccessWRITE``. 
``AccessSetEMPTY`` is
+the empty ``AccessSet``.
+
+
+``typedef struct AddrStruct *Addr``
+
+_`.addr`: ``Addr`` is the type used for "managed addresses", that is,
+addresses of objects managed by the MPS.
+
+_`.addr.def`: ``Addr`` is defined as ``struct AddrStruct *``, but
+``AddrStruct`` is never defined. This means that ``Addr`` is always an
+incomplete type, which prevents accidental dereferencing, arithmetic,
+or assignment to other pointer types.
+
+_`.addr.use`: ``Addr`` should be used whenever the code needs to deal
+with addresses. It should not be used for the addresses of memory
+manager data structures themselves, so that the memory manager remains
+amenable to working in a separate address space. Be careful not to
+confuse ``Addr`` with ``void *``.
+
+_`.addr.ops`: Limited arithmetic is allowed on addresses using
+``AddrAdd()`` and ``AddrOffset()`` (impl.c.mpm). Addresses may also be
+compared using the relational operators ``==``, ``!=``, ``<``, ``<=``,
+``>``, and ``>=``.
+
+_`.addr.ops.mem`: We need efficient operators similar to ``memset()``,
+``memcpy()``, and ``memcmp()`` on ``Addr``; these are called ``AddrSet()``,
+``AddrCopy()``, and ``AddrComp()``. When ``Addr`` is compatible with
+``void *``, these are implemented through the
+``mps_lib_memset()``, ``mps_lib_memcpy()``, and ``mps_lib_memcmp()``
+functions in the plinth (impl.h.mpm).
+
+_`.addr.conv.c`: ``Addr`` is converted to ``mps_addr_t`` in the MPS C
+Interface. ``mps_addr_t`` is defined to be the same as ``void *``, so
+using the MPS C Interface confines the memory manager to the same
+address space as the client data.
+
+_`.addr.readonly`: For read-only addresses, see `.readonlyaddr`_.
+
+
+``typedef Word Align``
+
+_`.align`: ``Align`` is an unsigned integral type which is used to
+represent the alignment of managed addresses. All alignments are
+positive powers of two. ``Align`` is large enough to hold the maximum
+possible alignment.
+ +_`.align.use`: ``Align`` should be used whenever the code needs to +deal with the alignment of a managed address. + +_`.align.conv.c`: ``Align`` is converted to ``mps_align_t`` in the MPS +C Interface. + + +``typedef unsigned Attr`` + +_`.attr`: Pool attributes. A bitset of pool class attributes, which +are: + +=================== =================================================== +Attribute Description +=================== =================================================== +``AttrGC`` Is garbage collecting, that is, parts may be + reclaimed. Used to decide which segments are + condemned. +``AttrMOVINGGC`` Is moving, that is, objects may move in memory. + Used to update the set of zones that might have + moved and so implement location dependency. +=================== =================================================== + +There is an attribute field in the pool class (``PoolClassStruct``) +which declares the attributes of that class. See +design.mps.pool.field.attr_. + +.. _design.mps.pool.field.attr: pool#.field.attr + + +``typedef int Bool`` + +_`.bool`: The ``Bool`` type is mostly defined so that the intention of +code is clearer. In C, Boolean expressions evaluate to ``int``, so +``Bool`` is in fact an alias for ``int``. + +_`.bool.value`: ``Bool`` has two values, ``TRUE`` and ``FALSE``. These +are defined to be ``1`` and ``0`` respectively, for compatibility with +C Boolean expressions (so one may set a ``Bool`` to the result of a C +Boolean expression). + +_`.bool.use`: ``Bool`` is a type which should be used when a Boolean +value is intended, for example, as the result of a function. Using a +Boolean type in C is a tricky thing. Non-zero values are "true" (when +used as control conditions) but are not all equal to ``TRUE``. Use +with care. + +_`.bool.check`: ``BoolCheck()`` simply checks whether the argument is +``TRUE`` (``1``) or ``FALSE`` (``0``). 
+ +_`.bool.check.inline`: The inline macro version of ``BoolCheck`` casts +the ``int`` to ``unsigned`` and checks that it is ``<= 1``. This is +safe, well-defined, uses the argument exactly once, and generates +reasonable code. + +_`.bool.check.inline.smaller`: In fact we can expect that the "inline" +version of ``BoolCheck()`` to be smaller than the equivalent function +call. On IA-32 for example, a function call will be 3 instructions +(total 9 bytes), the inline code for ``BoolCheck()`` will be 1 +instruction (total 3 bytes) (both sequences not including the test +which is the same length in either case). + +_`.bool.check.inline.why`: As well as being smaller (see +`.bool.check.inline.smaller`_) it is faster. On 1998-11-16 drj +compared ``w3i3mv\hi\amcss.exe`` running with and without the macro +for ``BoolCheck`` on the PC Aaron. "With" ran in 97.7% of the time +(averaged over 3 runs). + +_`.bool.bitfield`: When a Boolean needs to be stored in a bitfield, +the type of the bitfield must be ``unsigned:1``, not ``Bool:1``. +(That's because the two values of the type ``Bool:1`` are ``0`` and +``-1``, which means that assigning ``TRUE`` would require a sign +conversion.) To make it clear why this is done, ``misc.h`` provides +the ``BOOLFIELD`` macro. + +_`.bool.bitfield.assign`: To avoid warnings about loss of data from +GCC with the ``-Wconversion`` option, ``misc.h`` provides the +``BOOLOF`` macro for coercing a value to an unsigned single-bit field. + +_`.bool.bitfield.check`: A Boolean bitfield cannot have an incorrect +value, and if you call ``BoolCheck()`` on such a bitfield then GCC 4.2 +issues the warning "comparison is always true due to limited range of +data type". When avoiding such a warning, reference this tag. + + +``typedef unsigned BufferMode`` + +_`.buffermode`: ``BufferMode`` is a bitset of buffer attributes. See +design.mps.buffer_. It is a sum of the following: + +.. 
_design.mps.buffer: buffer + +======================== ============================================== +Mode Description +======================== ============================================== +``BufferModeATTACHED`` Buffer is attached to a region of memory. +``BufferModeFLIPPED`` Buffer has been flipped. +``BufferModeLOGGED`` Buffer remains permanently trapped, so that + all reserve and commit events can be logged. +``BufferModeTRANSITION`` Buffer is in the process of being detached. +======================== ============================================== + + +``typedef unsigned char Byte`` + +_`.byte`: ``Byte`` is an unsigned integral type corresponding to the +unit in which most sizes are measured, and also the units of +``sizeof``. + +_`.byte.use`: ``Byte`` should be used in preference to ``char`` or +``unsigned char`` wherever it is necessary to deal with bytes +directly. + +_`.byte.source`: ``Byte`` is a just pedagogic version of ``unsigned +char``, since ``char`` is the unit of ``sizeof``. + + +``typedef Word Clock`` + +_`.clock`: ``Clock`` is an unsigned integral type representing clock +time since some epoch. + +_`.clock.use`: A ``Clock`` value is returned by the plinth function +``mps_clock``. It is used to make collection scheduling decisions and +to calibrate the time stamps on events in the telemetry stream. + +_`.clock.units`: The plinth function ``mps_clocks_per_sec`` defines +the units of a ``Clock`` value. + +_`.clock.conv.c`: ``Clock`` is converted to ``mps_clock_t`` in the MPS +C Interface. + + +``typedef unsigned Compare`` + +_`.compare`: ``Compare`` is the type of tri-state comparison +values. + +================== ==================================================== +Value Description +================== ==================================================== +``CompareLESS`` A value compares less than another value. +``CompareEQUAL`` Two values compare the same. +``CompareGREATER`` A value compares greater than another value. 
+================== ==================================================== + + +``typedef Word Count`` + +_`.count`: ``Count`` is an unsigned integral type which is large +enough to hold the size of any collection of objects in the MPS. + +_`.count.use`: ``Count`` should be used for a number of objects +(control or managed) where the maximum number of objects cannot be +statically determined. If the maximum number can be statically +determined then the smallest unsigned integer with a large enough +range may be used instead (although ``Count`` may be preferable for +clarity). + +_`.count.use.other`: ``Count`` may also be used to count things that +aren't represented by objects (for example, levels), but only where it +can be determined that the maximum count is less than the number of +objects. + + +``typedef Size Epoch`` + +_`.epoch`: An ``Epoch`` is a count of the number of flips that have +occurred, in which objects may have moved. It is used in the +implementation of location dependencies. + +``Epoch`` is converted to ``mps_word_t`` in the MPS C Interface, as a +field of ``mps_ld_s``. + + +``typedef unsigned FindDelete`` + +_`.finddelete`: ``FindDelete`` represents an instruction to one of the +*find* methods of a ``Land`` as to what it should do if it finds a +suitable block. See design.mps.land_. It takes one of the following +values: + +.. _design.mps.land: land + +==================== ================================================== +Value Description +==================== ================================================== +``FindDeleteNONE`` Don't delete after finding. +``FindDeleteLOW`` Delete from low end of block. +``FindDeleteHIGH`` Delete from high end of block. +``FindDeleteENTIRE`` Delete entire block. +==================== ================================================== + + +``typedef void (*Fun)(void)`` + +_`.fun`: ``Fun`` is the type of a pointer to a function about which +nothing more is known. 
+ +_`.fun.use`: ``Fun`` should be used where it's necessary to handle a +function in a polymorphic way without calling it. For example, if you +need to write a function ``g`` which passes another function ``f`` +through to a third function ``h``, where ``h`` knows the real type of +``f`` but ``g`` doesn't. + + +``typedef Word Index`` + +_`.index`: ``Index`` is an unsigned integral type which is large +enough to hold any array index. + +_`.index.use`: ``Index`` should be used where the maximum size of the +array cannot be statically determined. If the maximum size can be +determined then the smallest unsigned integer with a large enough +range may be used instead. + + +``typedef unsigned LocusPrefKind`` + +_`.locusprefkind`: The type ``LocusPrefKind`` expresses a preference for +addresses within an address space. It takes one of the following +values: + +==================== ==================================== +Kind Description +==================== ==================================== +``LocusPrefHIGH`` Prefer high addresses. +``LocusPrefLOW`` Prefer low addresses. +``LocusPrefZONESET`` Prefer addresses in specified zones. +==================== ==================================== + + +``typedef unsigned MessageType`` + +_`.messagetype`: ``MessageType`` is the type of a message. See +design.mps.message_. It takes one of the following values: + +.. _design.mps.message: message + +=========================== =========================================== +Message type Description +=========================== =========================================== +``MessageTypeFINALIZATION`` A block is finalizable. +``MessageTypeGC`` A garbage collection finished. +``MessageTypeGCSTART`` A garbage collection started. +=========================== =========================================== + + +``typedef unsigned Rank`` + +_`.rank`: ``Rank`` is an enumeration which represents the rank of a +reference. 
The ranks are: + +============= ===== ================================================== +Rank Index Description +============= ===== ================================================== +``RankAMBIG`` 0 The reference is ambiguous. That is, it must be + assumed to be a reference, but not updated in + case it isn't. +``RankEXACT`` 1 The reference is exact, and refers to an object. +``RankFINAL`` 2 The reference is exact and final, so special + action is required if only final or weak + references remain to the object. +``RankWEAK`` 3 The reference is exact and weak, so should + be deleted if only weak references remain to the + object. +============= ===== ================================================== + +``Rank`` is stored with segments and roots, and passed around. + +``Rank`` is converted to ``mps_rank_t`` in the MPS C Interface. + +The ordering of the ranks is important. It is the order in which the +references must be scanned in order to respect the properties of +references of the ranks. Therefore they are declared explicitly with +their integer values. + +.. note:: Could ``Rank`` be an ``unsigned short`` or ``unsigned char``? + +.. note:: + + This documentation should be expanded and moved to its own + document, then referenced from the implementation more thoroughly. + + +``typedef unsigned RankSet`` + +_`.rankset`: ``RankSet`` is a set of ranks, represented as a bitset. + + +``typedef const struct AddrStruct *ReadonlyAddr`` + +_`.readonlyaddr`: ``ReadonlyAddr`` is the type used for managed +addresses that an interface promises it will only read through, never +write. Otherwise it is identical to ``Addr``. + + +``typedef Addr Ref`` + +_`.ref`: ``Ref`` is a reference to a managed object (as opposed to any +old managed address). ``Ref`` should be used where a reference is +intended. + +.. note:: This isn't too clear -- richard + + +``typedef Word RefSet`` + +_`.refset`: ``RefSet`` is a conservative approximation to a set of +references. 
See design.mps.collection.refsets_. + +.. _design.mps.collection.refsets: collection#.refsets + + +``typedef int Res`` + +_`.res`: ``Res`` is the type of result codes. A result code indicates +the success or failure of an operation, along with the reason for +failure. Like Unix error codes, the meaning of the code depends on the +call that returned it. These codes are just broad categories with +mnemonic names for various sorts of problems. + +=================== =================================================== +Result code Description +=================== =================================================== +``ResOK`` The operation succeeded. Return parameters may only + be updated if OK is returned, otherwise they must + be left untouched. +``ResCOMMIT_LIMIT`` The arena's commit limit would have been exceeded + as a result of allocation. +``ResFAIL`` Something went wrong which doesn't fall into any of + the other categories. The exact meaning depends + on the call. See documentation. +``ResIO`` An I/O error occurred. Exactly what depends on the + function. +``ResLIMIT`` An internal limitation was reached. For example, + the maximum number of somethings was reached. We + should avoid returning this by not including + static limitations in our code, as far as + possible. (See rule.impl.constrain and + rule.impl.limits.) +``ResMEMORY`` Needed memory (committed memory, not address space) + could not be obtained. +``ResPARAM`` An invalid parameter was passed. Normally reserved + for parameters passed from the client. +``ResRESOURCE`` A needed resource could not be obtained. Which + resource depends on the call. See also + ``ResMEMORY``, which is a special case of this. +``ResUNIMPL`` The operation, or some vital part of it, is + unimplemented. This might be returned by + functions which are no longer supported, or by + operations which are included for future + expansion, but not yet supported. 
+=================== =================================================== + +_`.res.use`: ``Res`` should be returned from any function which might +fail. Any other results of the function should be passed back in +"return" parameters (pointers to locations to fill in with the +results). + +.. note:: This is documented elsewhere, I think -- richard + +_`.res.use.spec`: The most specific code should be returned. + + +``typedef unsigned RootMode`` + +_`.rootmode`: ``RootMode`` is an unsigned integral type which is used +to represent an attribute of a root: + +============================= ========================================= +Root mode Description +============================= ========================================= +``RootModeCONSTANT`` Client program will not change the root + after it is registered. +``RootModePROTECTABLE`` Root is protectable: the MPS may place + a barrier on any page containing any + part of the root. +``RootModePROTECTABLE_INNER`` Root is protectable: the MPS may place + a barrier on any page completely covered + by part of the root. +============================= ========================================= + +_`.rootmode.const.unused`: ``RootModeCONSTANT`` has no effect. This +mode was introduced in the hope of being able to maintain a remembered +set for the root without needing a write barrier, but it can't work as +described, since you can't reliably create a valid registered constant +root that contains any references. (If you add the references before +registering the root, they may have become invalid; but you can't add +them afterwards because the root is supposed to be constant.) + +_`.rootmode.conv.c`: ``RootMode`` is converted to ``mps_rm_t`` in the +MPS C Interface. + + +``typedef unsigned RootVar`` + +_`.rootvar`: The type ``RootVar`` is the type of the discriminator for +the union within ``RootStruct``. 
+ + +``typedef unsigned Serial`` + +_`.serial`: A ``Serial`` is a number which is assigned to a structure +when it is initialized. The serial number is taken from a field in the +parent structure, which is incremented. Thus, every instance of a +structure has a unique "name" which is a path of structures from the +global root. For example, "the third arena's fifth pool's second +buffer". + +Why? Consistency checking, debugging, and logging. Not well thought +out. + + +``typedef unsigned Shift`` + +_`.shift`: ``Shift`` is an unsigned integral type which can hold the +amount by which a ``Word`` can be shifted. It is therefore large +enough to hold the word width (in bits). + +_`.shift.use`: ``Shift`` should be used whenever a shift value (the +right-hand operand of the ``<<`` or ``>>`` operators) is intended, to +make the code clear. It should also be used for structure fields which +have this use. + +.. note:: Could ``Shift`` be an ``unsigned short`` or ``unsigned char``? + + +``typedef unsigned long Sig`` + +_`.sig`: ``Sig`` is the type of signatures, which are written into +structures when they are created, and invalidated when they are +destroyed. They provide a limited form of run-time type checking and +dynamic scope checking. See design.mps.sig_. + +.. _design.mps.sig: sig + + +``typedef Word Size`` + +_`.size`: ``Size`` is an unsigned integral type large enough to +hold the size of any object which the MPS might manage. + +_`.size.byte`: ``Size`` should hold a size calculated in bytes. + +.. warning:: + + This is violated by ``GenParams.capacity`` (which is measured in + kilobytes). + +_`.size.use`: ``Size`` should be used whenever the code needs to deal +with the size of managed memory or client objects. It should not be +used for the sizes of the memory manager's own data structures, so +that the memory manager is amenable to working in a separate address +space. Be careful not to confuse it with ``size_t``. 
+ +_`.size.ops`: ``SizeIsAligned()``, ``SizeAlignUp()``, +``SizeAlignDown()`` and ``SizeRoundUp()``. + +_`.size.conv.c`: ``Size`` is converted to ``size_t`` in the MPS C +Interface. This constrains the memory manager to the same address +space as the client data. + + +``typedef unsigned TraceId`` + +_`.traceid`: A ``TraceId`` is an unsigned integer which is less than +``TraceLIMIT``. Each running trace has a different ``TraceId`` which +is used to index into the tables and bitfields that record the state +of that trace. See design.mps.trace.instance.limit_. + +.. _design.mps.trace.instance.limit: trace#.instance.limit + + +``typedef unsigned TraceSet`` + +_`.traceset`: A ``TraceSet`` is a bitset of ``TraceId``, +represented in the obvious way:: + + member(ti, ts) ⇔ ((1<`_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/version-library.txt b/mps/design/version-library.txt new file mode 100644 index 00000000000..36ee133e524 --- /dev/null +++ b/mps/design/version-library.txt @@ -0,0 +1,175 @@ +.. mode: -*- rst -*- + +Library version mechanism +========================= + +:Tag: design.mps.version-library +:Author: David Jones +:Date: 1998-08-19 +:Status: complete document +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: library version mechanism; design + + +Introduction +------------ + +_`.intro`: This describes the design of a mechanism to be used to +determine the version (that is, product, version, and release) of an +MPS library. + + +Readership +---------- + +_`.readership`: Any MPS developer. + + +Source +------ + +_`.source`: Various requirements demand such a mechanism. See +request.epcore.160021_: There is no way to tell which version and +release of the MM one is using. + +.. _request.epcore.160021: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/epcore/160021 + +Overview +-------- + +_`.overview`: This is the design for determining which version of the +library one has linked against. There are two aspects to the design, +allowing humans to determine the version of an MPS library, and +allowing programs to determine the version of an MPS library. Only the +former is currently designed (a method for humans to determine which +version of an MPS library is being used). 
+
+_`.overview.impl`: The overall design is to have a distinctive string
+compiled into the library binary. Various programs and tools will be
+able to extract the string and display it. The string will identify
+the version of the MPS being used.
+
+
+Architecture
+------------
+
+_`.arch.structure`: The design consists of three components:
+
+#. _`.arch.string`: A string embedded into any delivered library
+   binaries (which will encode the necessary information).
+
+#. _`.arch.proc`: Procedures by which the string is modified
+   appropriately whenever releases are made.
+
+#. _`.arch.tool`: A tool and its documentation (it is expected that
+   standard tools can be used). The tool will be used to extract the
+   version string from a delivered library or an executable linked
+   with the library.
+
+The string will contain information to identify the following items:
+
+#. _`.arch.string.platform`: the platform being used.
+
+#. _`.arch.string.product`: the name of the product.
+
+#. _`.arch.string.variety`: the variety of the product.
+
+#. _`.arch.string.version`: the version and release of the product.
+
+
+Implementation
+--------------
+
+_`.impl.file`: The version string itself is a declared C object
+``MPSVersionString`` in the file ``version.c`` (impl.c.version). It
+consists of a concatenation of various strings which are defined in
+other modules.
+
+_`.impl.variety`: The string containing the name of the variety is the
+expansion of the macro ``MPS_VARIETY_STRING`` defined by ``config.h``
+(impl.h.config).
+
+_`.impl.product`: The string containing the name of the product is the
+expansion of the macro ``MPS_PROD_STRING`` defined by ``config.h``
+(impl.h.config). Note that there is now only one product, so this is
+always ``"mps"`` (see design.mps.config.req.prod_).
+
+.. 
_design.mps.config.req.prod: config#.req.prod + +_`.impl.platform`: The string containing the name of the platform is +the expansion of the macro ``MPS_PF_STRING`` defined by ``mpstd.h`` +(impl.h.mpstd). + +_`.impl.date`: The string contains the date and time of compilation by +using the ``__DATE__`` and ``__TIME__`` macros defined by ISO C +§6.8.8. + +_`.impl.version`: The string contains the version and release of the +product. This is by the expansion of the macro ``MPS_RELEASE`` which +is defined in this module (``version.c``). + +_`.impl.proc`: The ``MPS_RELEASE`` macro (see impl.c.version.release) +is edited after making a release so that it contains the name of the +next release to be made from the sources on that branch. For example, after +making version 1.117, the source on the master branch is updated to say:: + + #define MPS_RELEASE "release/1.118.0" + +and after making release 1.117.0, the source on the version/1.117 branch is updated to say:: + + #define MPS_RELEASE "release/1.117.1" + +See the version creation and release build procedures respectively. + +_`.impl.tool`: The version string starts with the characters +``"@(#)"``. This is recognized by the standard Unix utility |what|_. For example:: + + $ what mps.a + mps.a + Ravenbrook MPS, product.mps, release/1.117.0, platform.xci6ll, variety.asserted.logging.nonstats, compiled on Oct 18 2016 13:57:08 + +.. |what| replace:: ``what(1)`` +.. _what: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/what.html + + +Document History +---------------- + +- 1998-08-19 David Jones. Incomplete document. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-03-11 GDR_ Converted to reStructuredText. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/vm.txt b/mps/design/vm.txt new file mode 100644 index 00000000000..457959b83ce --- /dev/null +++ b/mps/design/vm.txt @@ -0,0 +1,421 @@ +.. mode: -*- rst -*- + +Virtual mapping +=============== + +:Tag: design.mps.vm +:Author: richard +:Date: 1998-05-11 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: virtual mapping; design + + +Introduction +------------ + +_`.intro`: This is the design of the virtual mapping module. + +_`.readership`: Any MPS developer; anyone porting the MPS to a new +platform. 
+
+_`.overview`: The virtual mapping module provides a simple, portable,
+low-level interface to address space, with functions for reserving,
+releasing, mapping and unmapping ranges of addresses.
+
+_`.motivation`: The virtual mapping module is heavily used by the VM
+Arena Class (see design.mps.arena.vm_).
+
+.. _design.mps.arena.vm: arenavm
+
+
+Requirements
+------------
+
+_`.req.granularity`: The virtual mapping module must report the
+*granularity* with which address space can be managed. (This is
+necessary for the arena to be able to portably determine its grain
+size; see design.mps.arena.def.grain_.)
+
+.. _design.mps.arena.def.grain: arena#.def.grain
+
+_`.req.reserve`: The *reserve* operation must reserve a chunk of
+address space.
+
+_`.req.reserve.exclusive`: The MPS should have exclusive use of the
+reserved chunk. (None of our supported operating systems can actually
+provide this feature, alas. We rely on co-operation with the client
+program.)
+
+_`.req.reserve.contiguous`: The reserved chunk is a *contiguous*
+portion of address space. (Contiguity is needed for zones to work; see
+design.mps.arena.vm.overview.gc.zone_.)
+
+.. _design.mps.arena.vm.overview.gc.zone: arenavm#overview.gc.zone
+
+_`.req.reserve.size`: The reserved chunk is at least a *specified
+size*. (This is necessary for zones to work.)
+
+_`.req.reserve.align`: The reserved chunk is aligned to a *specified
+alignment*. (This is necessary for the arena to be able to manage
+address space in terms of grains.)
+
+_`.req.reserve.overhead`: The reserved chunk is not much larger than
+specified, preferably with no more than a grain of overhead. (This is
+necessary in order to allow the client program to specify the amount
+of address space the MPS uses, so that it can co-operate with other
+subsystems that use address space.)
+
+.. 
_design.mps.arena.vm.overview.gc.zone: arenavm#overview.gc.zone
+
+_`.req.reserve.address.not`: There is no requirement to be able to
+reserve address space at a particular address. (The zone
+implementation uses bits from the middle of the address, so can cope
+wherever the portion is placed in the address space.)
+
+_`.req.reserve.map.not`: The reserve operation should not map the
+chunk into main memory or swap space. (The zone strategy is most
+efficient if address space is used sparsely, but main memory is a
+limited resource.)
+
+_`.req.release`: The *release* operation should release a previously
+reserved chunk of address space so that it may be used by other
+subsystems of the client program. (This is needed to support client
+programs on systems where address space is tight, and the client's
+subsystems need to co-operate in their use of address space.)
+
+_`.req.reserved`: The virtual mapping module must report the total
+amount of reserved memory in each chunk of address space. (This is
+needed to implement ``mps_arena_reserved()``.)
+
+_`.req.map`: The *map* operation must arrange for a (previously
+reserved) range of address space to be mapped into main memory or swap
+space, so that addresses in the range can be read and written.
+
+_`.req.unmap`: The *unmap* operation should arrange for a previously
+mapped range of address space to no longer be mapped into main memory
+or swap space. (This is needed to support client programs on systems
+where main memory is scarce, and the client's subsystems need to
+co-operate in their use of main memory.)
+
+_`.req.mapped`: The virtual mapping module must maintain the total
+amount of mapped memory in each chunk of address space. (This is
+needed to allow the client program to limit the use of main memory by
+the MPS via the "commit limit" mechanism.)
+
+_`.req.bootstrap`: The virtual mapping module must be usable without
+allocating heap memory. (This is necessary for the VM arena to get off
+the ground.) 
+ +_`.req.params`: The interface should make it possible for MPS to allow +the client program to modify the behaviour of the virtual mapping +implementation. (This is needed to implement the +``MPS_KEY_VMW3_MEM_TOP_DOWN`` keyword argument.) + +_`.req.prot.exec`: The virtual mapping module should allow mutators to +write machine code into memory allocated by the MPS and then execute +that code, for example, to implement just-in-time translation, or +other forms of dynamic compilation. Compare +design.mps.prot.req.prot.exec_. + +.. _design.mps.prot.req.prot.exec: prot#.req.prot.exec + + +Design +------ + +_`.sol.overhead`: To meet `.req.reserve.contiguous`_, +`.req.reserve.align`_ and `.req.reserve.overhead`_, most VM +implementations ask the operating system for ``size + grainSize - +pageSize`` bytes of address space. This ensures that wherever the +operating system places the reserved address space, it contains a +contiguous region of ``size`` bytes aligned to a multiple of +``grainSize``. The overhead is thus ``grainSize - pageSize``, and in +the common case where ``grainSize`` is equal to ``pageSize``, this is +zero. + +_`.sol.bootstrap`: To meet `.req.bootstrap`_, the interface provides +the function ``VMCopy()``. This allows the initialization of a +``VMChunk`` to proceed in four steps. First, allocate space for a +temporary VM descriptor on the stack. Second, call ``VMInit()`` to +reserve address space and initialize the temporary VM descriptor. +Third, call ``VMMap()`` on the new VM to map enough memory to store a +``VMChunk``. Fourth, call ``VMCopy()`` to copy the temporary VM +descriptor into its place in the ``VMChunk``. + +_`.sol.params`: To meet `.req.params`_, the interface provides the +function ``VMParamFromArgs()``, which decodes relevant keyword +arguments into a temporary buffer provided by the caller; this buffer +is then passed to ``VMInit()``. 
The size of the buffer must be +statically determinable so that the caller can allocate it on the +stack: it is given by the constant ``VMParamSize``. Since this is +potentially platform-dependent it is defined in ``config.h``. + +_`.sol.prot.exec`: The virtual mapping module maps memory as +executable, if this is supported by the platform. + + +Interface +--------- + +``typedef VMStruct *VM`` + +_`.if.vm`: ``VM`` is a descriptor for a reserved chunk of address +space. It points to a ``VMStruct`` structure, which is defined in +``vm.h`` so that it can be inlined in the ``VMChunkStruct`` by the VM +arena class. + +``Size PageSize(void)`` + +_`.if.page.size`: Return the "page size": that is, the granularity +with which the operating system can reserve and map address space. + +_`.if.page.size.cache`: On some systems (for example, Windows), +determining the page size requires a system call, so for speed the +page size is cached in each VM descriptor and should be retrieved by +calling the ``VMPageSize()`` function. + +``Res VMParamFromArgs(void *params, size_t paramSize, ArgList args)`` + +_`.if.param.from.args`: Decode the relevant keyword arguments in the +``args`` parameter, and store a description of them in the buffer +pointed to by ``params`` (which is ``paramSize`` bytes long). It is an +error if the buffer is not big enough store the parameters for the VM +implementation. + +``Res VMInit(VM vm, Size size, Size grainSize, void *params)`` + +_`.if.init`: Reserve a chunk of address space that contains at least +``size`` addresses, starting at an address which is a multiple of +``grainSize``. The ``params`` argument points to a parameter block +that was initialized by a call to ``VMParamFromArgs()``. If +successful, update ``vm`` to describe the reserved chunk, and +return ``ResOK``. Otherwise, return ``ResRESOURCE``. + +``void VMFinish(VM vm)`` + +_`.if.finish`: Release the chunk of address space described by ``vm``. 
+Any addresses that were mapped through this VM are now unmapped. + +``Res VMMap(VM vm, Addr base, Addr limit)`` + +_`.if.map`: Map the range of addresses from ``base`` (inclusive) to +``limit`` (exclusive) into main memory. It is an error if the range +does not lie between ``VMBase(vm)`` and ``VMLimit(vm)``, or if +``base`` and ``limit`` are not multiples of ``VMPageSize(vm)``. Return +``ResOK`` if successful, ``ResMEMORY`` otherwise. + +``void VMUnmap(VM vm, Addr base, Addr limit)`` + +_`.if.unmap`: Unmap the range of addresses from ``base`` (inclusive) +to ``limit`` (exclusive). The conditions are the same as for +``VMMap()``. + +``Addr VMBase(VM vm)`` + +_`.if.base`: Return the base address of the VM (the lowest address in +the VM that is a multiple of the grain size). + +``Addr VMLimit(VM vm)`` + +_`.if.limit`: Return the limit address of the VM (the limit of the +last grain that is wholly inside the VM). + +``Size VMReserved(VM vm)`` + +_`.if.reserved`: Return the amount of address space (in bytes) +reserved by the VM. This may include addresses that are not available +for mapping because of the requirement for ``VMBase(vm)`` and +``VMLimit(vm)`` to be multiples of the grain size. + +``Size VMMapped(VM vm)`` + +_`.if.mapped`: Return the amount of address space (in bytes) currently +mapped into memory by the VM. + +``void VMCopy(VM dest, VM src)`` + +_`.if.copy`: Copy the VM descriptor from ``src`` to ``dest``. + + +Implementations +--------------- + + +Generic implementation +...................... + +_`.impl.an`: In ``vman.c``. + +_`.impl.an.page.size`: The generic VM uses a fake page size, given by +the constant ``VMAN_PAGE_SIZE`` in ``config.h``. + +_`.impl.an.param`: Decodes no keyword arguments. + +_`.impl.an.reserve`: Address space is "reserved" by calling +``malloc()``. + +_`.impl.an.release`: Address space is "released" by calling +``free()``. 
+ +_`.impl.an.map`: Mapping (and unmapping) just fills the mapped region +with copies of ``VMJunkBYTE`` to emulate the erasure of freshly mapped +pages by virtual memory systems. + + +Unix implementation +................... + +_`.impl.ix`: In ``vmix.c``. + +_`.impl.ix.page.size`: The page size is given by +``sysconf(_SC_PAGESIZE)``. We avoid ``getpagesize()``, which is a +legacy function in Posix: + + Applications should use the sysconf() function instead. + + — `The Single UNIX ® Specification, Version 2 `__ + +_`.impl.ix.param`: Decodes no keyword arguments. + +_`.impl.ix.reserve`: Address space is reserved by calling |mmap|_, +passing ``PROT_NONE`` and ``MAP_PRIVATE | MAP_ANON``. + +.. |mmap| replace:: ``mmap()`` +.. _mmap: https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html + +_`.impl.ix.anon.trans`: Note that ``MAP_ANON`` ("map anonymous +memory not associated with any specific file") is an extension to +POSIX, but it is supported by FreeBSD, Linux, and macOS. A work-around +that was formerly used on systems lacking ``MAP_ANON`` was to map +the file ``/dev/zero``. + +_`.impl.ix.release`: Address space is released by calling |munmap|_. + +.. |munmap| replace:: ``munmap()`` +.. _munmap: https://pubs.opengroup.org/onlinepubs/9699919799/functions/munmap.html + +_`.impl.ix.map`: Address space is mapped to main memory by calling +|mmap|_, passing ``PROT_READ | PROT_WRITE | PROT_EXEC`` and +``MAP_ANON | MAP_PRIVATE | MAP_FIXED``. + +_`.impl.ix.unmap`: Address space is unmapped from main memory by +calling |mmap|_, passing ``PROT_NONE`` and ``MAP_ANON | MAP_PRIVATE | +MAP_FIXED``. + +_`.impl.xc.prot.exec`: The approach in `.sol.prot.exec`_ of always +making memory executable causes a difficulty on macOS on Apple +Silicon. The virtual mapping module uses the same solution as the +protection module, that is, detecting Apple Hardened Runtime, and +retrying without the request for the memory to be executable. 
See +design.mps.prot.impl.xc.prot.exec_ for details. + +.. _design.mps.prot.impl.xc.prot.exec: prot#.impl.xc.prot.exec + + +Windows implementation +...................... + +_`.impl.w3`: In ``vmw3.c``. + +_`.impl.w3.page.size`: The page size is retrieved by calling +|GetSystemInfo|_ and consulting ``SYSTEMINFO.dwPageSize``. + +.. |GetSystemInfo| replace:: ``GetSystemInfo()`` +.. _GetSystemInfo: https://docs.microsoft.com/en-us/windows/desktop/api/sysinfoapi/nf-sysinfoapi-getsysteminfo + +_`.impl.w3.param`: Decodes the keyword argument +``MPS_KEY_VMW3_MEM_TOP_DOWN``, and if it is set, arranges for +``VMInit()`` to pass the ``MEM_TOP_DOWN`` flag to |VirtualAlloc|_. + +_`.impl.w3.reserve`: Address space is reserved by calling +|VirtualAlloc|_, passing ``MEM_RESERVE`` (and optionally +``MEM_TOP_DOWN``) and ``PAGE_NOACCESS``. + +.. |VirtualAlloc| replace:: ``VirtualAlloc()`` +.. _VirtualAlloc: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366887.aspx + +_`.impl.w3.release`: Address space is released by calling +|VirtualFree|_, passing ``MEM_RELEASE``. + +.. |VirtualFree| replace:: ``VirtualFree()`` +.. _VirtualFree: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366892.aspx + +_`.impl.w3.map`: Address space is mapped to main memory by calling +|VirtualAlloc|_, passing ``MEM_COMMIT`` and +``PAGE_EXECUTE_READWRITE``. + +_`.impl.w3.unmap`: Address space is unmapped from main memory by +calling |VirtualFree|_, passing ``MEM_DECOMMIT``. + + +Testing +------- + +_`.testing`: It is important to test that a VM implementation works in +extreme cases. + +_`.testing.large`: It must be able to reserve a large address space. +Clients will want multi-GB spaces, more than that OSs will allow. If +they ask for too much, ``mps_arena_create()`` (and hence +``VMInit()``) must fail in a predictable way. 
+ +_`.testing.larger`: It must be possible to allocate in a large space; +sometimes committing will fail, because there's not enough space to +replace the "reserve" mapping. See request.epcore.160201_ for details. + +.. _request.epcore.160201: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/epcore/160201 + +_`.testing.lots`: It must be possible to have lots of mappings. The OS +must either combine adjacent mappings or have lots of space in the +kernel tables. See request.epcore.160117_ for ideas on how to test +this. + +.. _request.epcore.160117: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/epcore/160117 + + +Document History +---------------- + +- 1998-05-11 RB_ Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-23 GDR_ Converted to reStructuredText. + +- 2014-06-16 GDR_ Document the whole interface. + +- 2014-10-22 GDR_ Refactor module description into requirements. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/walk.txt b/mps/design/walk.txt new file mode 100644 index 00000000000..e92e6b416a5 --- /dev/null +++ b/mps/design/walk.txt @@ -0,0 +1,157 @@ +.. mode: -*- rst -*- + +Walking formatted objects +========================= + +:Tag: design.mps.walk +:Author: Gareth Rees +:Date: 2020-08-31 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: walk; design + + +Introduction +------------ + +_`.intro`: This is the design of the formatted objects walk interface. +The intended audience is MPS developers. + +_`.source`: Based on [GDR_2020-08-30]_. + + +Use cases +--------- + +_`.case.reload`: A language runtime that offers hot reloading of code +will need to walk all objects belonging to a class (say) in order to +modify the references in the objects so they refer to the updated +class definition. [Strömbäck_2020-08-20]_ + +_`.case.serialize`: A language runtime that offers serialization and +deserialization of the heap will need to walk all formatted objects in +order to identify references to globals (during serialization) and +modify references to refer to the new locations of the globals (after +deserialization). [GDR_2018-08-30]_ + + +Requirements +------------ + +_`.req.walk.all`: It must be possible for the client program to visit +all automatically managed formatted objects using a callback. 
+ +_`.req.walk.assume-format`: The callback should not need to switch on +the format, as this may be awkward in a program which has modules +using different pools with different formats. + +_`.req.walk.examine`: It must be possible for the callback to examine +other automatically managed memory while walking the objects. + +_`.req.walk.modify`: It must be possible for the callback to modify +the references in the objects. + +_`.req.walk.overhead`: The overhead of calling the callback should be +minimized. + +_`.req.walk.perf`: The performance of subsequent collections should +not be affected. + +_`.req.walk.closure`: The callback must have access to arbitrary data +from the caller. + +_`.req.walk.maint`: The interface should be easy to implement and +maintain. + + +Design +------ + +A new public function ``mps_pool_walk()`` visits the live formatted +objects in an automatically managed pool. + +_`.sol.walk.all`: The client program must know which pools it has +created so it can call ``mps_pool_walk()`` for each pool. + +_`.sol.walk.assume-format`: All objects in a pool share the same +format, so the callback does not need to switch on the format. + +_`.sol.walk.examine`: ``mps_pool_walk()`` must only be called when the +arena is parked, and so there is no read barrier on any object. + +_`.sol.walk.modify`: ``mps_pool_walk()`` arranges for write-protection +to be removed from each segment while it is being walked and restored +afterwards if necessary. + +_`.sol.walk.overhead`: The callback is called for contiguous regions +of formatted objects (not just for each object) where possible so that +the per-object function call overhead is minimized. + +_`.sol.walk.perf`: The callback uses the scanning protocol so that +every reference is fixed and the summary is maintained. + +_`.sol.walk.closure`: ``mps_pool_walk()`` takes a closure pointer +which is stored in the ``ScanState`` and passed to the callback. 
+ +_`.sol.walk.maint`: We reuse the scanning protocol and provide a +generic implementation that iterates over the ring of segments in the +pool. We set up an empty white set in the ``ScanState`` so that the +``MPS_FIX1()`` test always fails and ``_mps_fix2()`` is never called. +This avoids any per-pool code to support the interface. + + +References +---------- + +.. [GDR_2018-08-30] + "Save/restore draft proposal"; + Gareth Rees; 2018-08-30; + . + +.. [GDR_2020-08-30] + "Re: Modifying objects during mps_formatted_objects_walk"; + Gareth Rees; 2020-08-30; + . + +.. [Strömbäck_2020-08-20] + "Modifying objects during mps_formatted_objects_walk"; + Filip Strömbäck; 2020-08-20; + . + + +Document History +---------------- + +- 2020-08-31 GDR_ Initial version based on [GDR_2020-08-30]_ + +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2001–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/write-barrier.txt b/mps/design/write-barrier.txt new file mode 100644 index 00000000000..fcd9f5f2ae0 --- /dev/null +++ b/mps/design/write-barrier.txt @@ -0,0 +1,166 @@ +.. mode: -*- rst -*- + +Write barrier +============= + +:Tag: design.mps.write-barrier +:Author: Richard Brooksby +:Date: 2016-03-18 +:Status: incomplete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: write barrier; design + + +Introduction +------------ + +_`.intro`: This document explains the design of the write barrier of the +Memory Pool System (MPS). + +_`.readership`: This document is intended for developers of the MPS. + +_`.source`: This is based on [job003975]_. + + +Overview +-------- + +_`.overview`: The MPS uses a combination of hardware memory protection +and BIBOP techniques to maintain an approximate remembered set. The +remembered set keeps track of areas of memory that refer to each +other, so that the MPS can avoid scanning areas that are irrelevant +during a garbage collection. The MPS write barrier is implemented by +a one-word "summary" of the zones referenced by a segment. That +summary can be compared with the "white set" of a trace by a simple +logical AND operation. + + +Write Barrier Processes +----------------------- + +_`.scan.summary`: As the MPS scans a segment during garbage collection, +it accumulates a summary of references. 
This summary is represented +by single word ``ZoneSet``, derived from the bit patterns of the +references. After the scan the MPS can decide to store the summary +with the segment, and use it in future garbage collections to avoid +future scans. + +If the summary does not intersect any of the zones containing +condemned objects, the MPS does not have to scan them in order to +determine if those objects are live. + +The mutator could update the references in a segment and make the +summary invalid. To avoid this, when the MPS stores a summary, it +raises a write barrier on the segment memory. If the mutator does +update the segment, the barrier is hit, and the MPS resets the +summary, so that the segment will be scanned in future. + + +[At this point I was interrupted by a man from Porlock.] + + +Write barrier deferral +---------------------- + +_`.deferral`: Both scanning and the write barrier cost CPU time, and +these must be balanced. There is no point spending 1000 CPU units +raising a write barrier to avoid 10 CPU units of scanning cost. +Therefore we do not raise the write barrier immediately. + +_`.deferral.heuristic`: We apply a simple heuristic: A segment which was +found to be "interesting" while scanning is likely to be interesting +again, and so raising the write barrier is not worthwhile. If we scan +a segment several times and find it "boring" then we raise the barrier +to avoid future boring scans. + +_`.def.boring`: A scan is "boring" if it was unnecessary for a garbage +collection because it found no references to condemned objects. + +_`.def.interesting`: A scan is "interesting" if it was not boring +(`.def.boring`_). Note that this does not mean it preserved comdemned +objects, only that we would have scanned it even if we had had the +scan summary beforehand. + +_`.deferral.count`: We store a deferral count with the segment. The +count is decremented after each boring scan (`.def.boring`_). 
The write +barrier is raised only when the count reaches zero. + +_`.deferral.reset`: The count is reset after three events: + + 1. segment creation (``WB_DEFER_INIT``) + + 2. an interesting scan (``WB_DEFER_DELAY``) + + 3. a barrier hit (``WB_DEFER_HIT``) + +_`.deferral.dabble`: The set of objects condemned by the garbage +collector changes, and so does what is interesting or boring. For +example, a collection of a nursery space in zone 3 might be followed +by a collection of a top generation in zone 7. This will upset +`.deferral.heuristic`_ somewhat. We assume that the garbage collector +will spend most of its time repeatedly collecting the same zones. + + +Improvements +------------ + +_`.improv.by-os`: The overheads hardware barriers varies widely between +operating systems. On Windows it is very cheap to change memory +protection and to handle protection faults. On macOS it is very +expensive. The balance between barriers and scanning work is +different. We should measure the relative costs and tune the deferral +for each separately. + +_`.improv.balance`: Hardware costs of write barriers vary by OS, but +scanning costs vary depending on many factors including client code. +The MPS could dynamically measure these costs, perhaps using fast +cycle counters such as RDTSC, and use this to dynamically balance the +write barrier deferral. + + +References +---------- + +.. [job003975] "Poor performance due to imbalance between protection + and scanning costs"; Richard Brooksby; Ravenbrook + Limited; 2016-03-11; + . + + +Document History +---------------- + +- 2016-03-19 RB_ Created during preparation of + branch/2016-03-13/defer-write-barrier for [job003975]_. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ + + +Copyright and License +--------------------- + +Copyright © 2016–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/design/writef.txt b/mps/design/writef.txt new file mode 100644 index 00000000000..318543cb87e --- /dev/null +++ b/mps/design/writef.txt @@ -0,0 +1,205 @@ +.. mode: -*- rst -*- + +The WriteF function +=================== + +:Tag: design.mps.writef +:Author: Richard Brooksby +:Date: 1996-10-18 +:Status: complete design +:Revision: $Id$ +:Copyright: See `Copyright and License`_. +:Index terms: pair: WriteF function; design + + +Introduction +------------ + +_`.intro`: This document describes the ``WriteF()`` function, which +allows formatted output in a manner similar to ``printf()`` from the +Standard C library, but allows the Memory Pool Manager (MPM) to +operate in a freestanding environment (see design.mps.exec-env_). + +.. 
_design.mps.exec-env: exec-env + +_`.background`: The documents design.mps.exec-env_ and design.mps.lib_ +describe the design of the library interface and the reason that it +exists. + +.. _design.mps.lib: lib + + +Design +------ + +_`.no-printf`: There is no dependency on ``printf()``. The MPM only +depends on ``mps_io_fputc()`` and ``mps_io_fputs()``, via the library +interface (design.mps.lib_), part of the *plinth*. This makes it much +easier to deploy the MPS in a freestanding environment. This is +achieved by implementing our own output routines. + +_`.writef`: Our output requirements are few, so the code is short. The +only output function which should be used in the rest of the MPM is +``WriteF()``. + +``Res WriteF(mps_lib_FILE *stream, Count depth, ...)`` + +If ``depth`` is greater than zero, then the first output character, +and each output character after a newline in a format string, is +preceded by ``depth`` spaces. + +``WriteF()`` expects a format string followed by zero or more items to +insert into the output, followed by another format string, more items, +and so on, and finally a ``NULL`` format string. For example:: + + res = WriteF(stream, depth, + "Hello: $A\n", (WriteFA)address, + "Spong: $U ($S)\n", (WriteFU)number, (WriteFS)string, + NULL); + if (res != ResOK) + return res; + +This makes ``Describe()`` methods much easier to write. For example, ``BufferDescribe()`` contains the following code:: + + res = WriteF(stream, depth, + "Buffer $P ($U) {\n", + (WriteFP)buffer, (WriteFU)buffer->serial, + " class $P (\"$S\")\n", + (WriteFP)buffer->class, (WriteFS)buffer->class->name, + " Arena $P\n", (WriteFP)buffer->arena, + " Pool $P\n", (WriteFP)buffer->pool, + " ", buffer->isMutator ? "Mutator" : "Internal", " Buffer\n", + " mode $C$C$C$C (TRANSITION, LOGGED, FLIPPED, ATTACHED)\n", + (WriteFC)((buffer->mode & BufferModeTRANSITION) ? 't' : '_'), + (WriteFC)((buffer->mode & BufferModeLOGGED) ? 
'l' : '_'), + (WriteFC)((buffer->mode & BufferModeFLIPPED) ? 'f' : '_'), + (WriteFC)((buffer->mode & BufferModeATTACHED) ? 'a' : '_'), + " fillSize $UKb\n", (WriteFU)(buffer->fillSize / 1024), + " emptySize $UKb\n", (WriteFU)(buffer->emptySize / 1024), + " alignment $W\n", (WriteFW)buffer->alignment, + " base $A\n", (WriteFA)buffer->base, + " initAtFlip $A\n", (WriteFA)buffer->initAtFlip, + " init $A\n", (WriteFA)buffer->ap_s.init, + " alloc $A\n", (WriteFA)buffer->ap_s.alloc, + " limit $A\n", (WriteFA)buffer->ap_s.limit, + " poolLimit $A\n", (WriteFA)buffer->poolLimit, + " alignment $W\n", (WriteFW)buffer->alignment, + " rampCount $U\n", (WriteFU)buffer->rampCount, + NULL); + if (res != ResOK) + return res; + +_`.types`: For each format ``$X`` that ``WriteF()`` supports, there is +a type ``WriteFX`` defined in mpmtypes.h, which is the promoted +version of that type. These types are provided both to ensure +promotion and to avoid any confusion about what type should be used in +a cast. It is easy to check the casts against the formats to ensure +that they correspond. + +_`.types.cast`: Every argument to ``WriteF()`` must be cast, because +in variable-length argument lists the "default argument promotion" +rules apply and this could cause an argument to be read incorrectly on +some platforms: for example on a 64-bit platform the ``$W`` format, +which expects a 64-bit argument, is incompatible with a 32-bit +``unsigned`` argument, which will not be promoted to 64 bits by the +default argument promotion rules. (Note that most of these casts are +unnecessary, but requiring them all makes it easy to check that the +necessary ones are all there.) + +_`.types.future`: It is possibly that this type set or similar may be +used in future in some generalisation of varargs in the MPS. + +_`.formats`: The formats supported are as follows. 
+ +======= =========== ================== ======================================= +Code Name Type Example rendering +======= =========== ================== ======================================= +``$A`` address ``Addr`` ``000000019EF60010`` +``$P`` pointer ``void *`` ``000000019EF60100`` +``$F`` function ``void (*)(void)`` ``0001D69E01000000`` (see `.function`_) +``$S`` string ``char *`` ``hello`` +``$C`` character ``char`` ``x`` +``$W`` word ``ULongest`` ``0000000000109AE0`` +``$U`` decimal ``ULongest`` ``42`` +``$B`` binary ``ULongest`` ``00000000000000001011011110010001`` +``$$`` dollar -- ``$`` +======= =========== ================== ======================================= + +Note that ``WriteFC`` is an ``int``, because that is the default +promotion of a ``char`` (see `.types`_). + +_`.snazzy`: We should resist the temptation to make ``WriteF()`` an +incredible snazzy output engine. We only need it for ``Describe()`` +methods. At the moment it's a simple bit of code -- let's keep it that +way. + +_`.function`: The ``F`` code is used for function pointers. The C +standard [C1999c]_ defines conversion between pointer-to-function +types (§6.3.2.3.8), but it does not define conversion between pointers +to functions and pointers to data. To work around this, the bytes of +their representation are written sequentially, and may have a +different endianness to other pointer types. This output could be +smarter, or even look up function names, but see `.snazzy`_. The +particular type ``void (*)(void)`` is chosen because in GCC +(version 8) this suppresses the warning that we would otherwise get +from ``-Wcast-function-type``. See job004156_ and `GCC Warning +Options`_. + +.. _job004156: https://www.ravenbrook.com/project/mps/issue/job004156/ +.. _GCC Warning Options: https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html + + + +References +---------- + +.. 
[C1999c] + International Standard ISO/IEC 9899:1999; + "Programming languages — C"; + + + +Document History +---------------- + +- 1996-10-18 RB_ Incomplete design. + +- 2002-06-07 RB_ Converted from MMInfo database design document. + +- 2013-05-22 GDR_ Converted to reStructuredText. + +- 2014-04-17 GDR_ ``WriteF()`` now takes a ``depth`` parameter. + +- 2019-03-14 GDR_ Change type of ``WriteFF`` to avoid compiler warning. + +.. _RB: https://www.ravenbrook.com/consultants/rb/ +.. _GDR: https://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2013–2020 `Ravenbrook Limited `_. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mps/example/scheme/.gitignore b/mps/example/scheme/.gitignore new file mode 100644 index 00000000000..e368d2c9887 --- /dev/null +++ b/mps/example/scheme/.gitignore @@ -0,0 +1,12 @@ +Makefile +scheme +scheme.dSYM +scheme-advanced +scheme-advanced.dSYM +scheme-boehm +scheme-boehm.dSYM +scheme-malloc +scheme-malloc.dSYM +a.out +*.o +core diff --git a/mps/example/scheme/.p4ignore b/mps/example/scheme/.p4ignore new file mode 120000 index 00000000000..3e4e48b0b5f --- /dev/null +++ b/mps/example/scheme/.p4ignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/mps/example/scheme/Makefile.in b/mps/example/scheme/Makefile.in new file mode 100644 index 00000000000..77ed1f4f0e6 --- /dev/null +++ b/mps/example/scheme/Makefile.in @@ -0,0 +1,31 @@ +# example/scheme/Makefile -- Makefile for the MPS Scheme example +# +# $Id$ + +CFLAGS = @CFLAGS@ @CPPFLAGS@ @LDFLAGS@ -std=c99 + +MPS = ../../code + +TARGETS = scheme-malloc scheme scheme-advanced + +TESTS = r5rs mps + +all: $(TARGETS) + +$(TARGETS): %: %.c Makefile + $(CC) $(CFLAGS) -o $@ -I $(MPS) $< $(MPS)/mps.c + + +scheme-boehm: %: %.c Makefile + $(CC) $(CFLAGS) -o $@ $< -lgc + +clean: + rm -f $(TARGETS) scheme-boehm + +test: $(TARGETS) + @for TARGET in $(TARGETS); do \ + for TEST in $(TESTS); do \ + echo "$$TARGET $$TEST:"; \ + ./$$TARGET test-$$TEST.scm || exit; \ + done \ + done diff --git a/mps/example/scheme/josephus.scm b/mps/example/scheme/josephus.scm new file mode 100644 index 00000000000..9f9f23fc6ae --- /dev/null +++ b/mps/example/scheme/josephus.scm @@ -0,0 +1,56 @@ +;;; josephus.scm -- A small benchmark for Scheme +;;; $Id$ +;;; Adapted from + +(define (make-person count) + (define person (make-vector 3)) + (vector-set! person 0 count) + person) + +(define (person-shout person shout deadif) + (if (< shout deadif) + (+ shout 1) + (begin + (vector-set! (vector-ref person 2) 1 (vector-ref person 1)) + (vector-set! 
(vector-ref person 1) 2 (vector-ref person 2)) + 1))) + +(define (make-chain size) + (define chain (make-vector 1 #f)) + (define last #f) + (define (loop i) + (if (< i size) + (begin + (define current (make-person i)) + (if (not (vector-ref chain 0)) (vector-set! chain 0 current)) + (if last + (begin + (vector-set! last 1 current) + (vector-set! current 2 last))) + (set! last current) + (loop (+ i 1))))) + (loop 0) + (vector-set! (vector-ref chain 0) 2 last) + (vector-set! last 1 (vector-ref chain 0)) + chain) + +(define (chain-kill chain nth) + (define current (vector-ref chain 0)) + (define shout 1) + (define (loop) + (if (not (eq? (vector-ref current 1) current)) + (begin + (set! shout (person-shout current shout nth)) + (set! current (vector-ref current 1)) + (loop)))) + (loop) + (vector-set! chain 0 current) + current) + +(define (loop i) + (if (< i 10000) + (begin + (define chain (make-chain 40)) + (chain-kill chain 3) + (loop (+ i 1))))) +(loop 0) diff --git a/mps/example/scheme/r4rs.scm b/mps/example/scheme/r4rs.scm new file mode 100644 index 00000000000..0f4f9a9b78b --- /dev/null +++ b/mps/example/scheme/r4rs.scm @@ -0,0 +1,376 @@ +;;; r4rs.scm -- essential procedures from R4RS +;;; $Id$ + +;; (caar pair) +;; (cadr pair) +;; ... +;; (cdddar pair) +;; (cddddr pair) +;; These procedures are compositions of car and cdr. Arbitrary +;; compositions, up to four deep, are provided. There are twenty-eight +;; of these procedures in all. +;; See R4RS 6.3. 
+ +(define (caar x) (car (car x))) +(define (cadr x) (car (cdr x))) +(define (cdar x) (cdr (car x))) +(define (cddr x) (cdr (cdr x))) + +(define (caaar x) (car (car (car x)))) +(define (caadr x) (car (car (cdr x)))) +(define (cadar x) (car (cdr (car x)))) +(define (caddr x) (car (cdr (cdr x)))) +(define (cdaar x) (cdr (car (car x)))) +(define (cdadr x) (cdr (car (cdr x)))) +(define (cddar x) (cdr (cdr (car x)))) +(define (cdddr x) (cdr (cdr (cdr x)))) + +(define (caaaar x) (car (car (car (car x))))) +(define (caaadr x) (car (car (car (cdr x))))) +(define (caadar x) (car (car (cdr (car x))))) +(define (caaddr x) (car (car (cdr (cdr x))))) +(define (cadaar x) (car (cdr (car (car x))))) +(define (cadadr x) (car (cdr (car (cdr x))))) +(define (caddar x) (car (cdr (cdr (car x))))) +(define (cadddr x) (car (cdr (cdr (cdr x))))) +(define (cdaaar x) (cdr (car (car (car x))))) +(define (cdaadr x) (cdr (car (car (cdr x))))) +(define (cdadar x) (cdr (car (cdr (car x))))) +(define (cdaddr x) (cdr (car (cdr (cdr x))))) +(define (cddaar x) (cdr (cdr (car (car x))))) +(define (cddadr x) (cdr (cdr (car (cdr x))))) +(define (cdddar x) (cdr (cdr (cdr (car x))))) +(define (cddddr x) (cdr (cdr (cdr (cdr x))))) + + +;; (memq obj list) +;; (memv obj list) +;; (member obj list) +;; These procedures return the first sublist of list whose car is obj, +;; where the sublists of list are the non-empty lists returned by +;; (list-tail list k) for k less than the length of list. If obj does +;; not occur in list, then #f (not the empty list) is returned. Memq +;; uses eq? to compare obj with the elements of list, while memv uses +;; eqv? and member uses equal?. +;; See R4RS 6.3. + +(define (memq obj list) + (cond ((null? list) #f) + ((eq? obj (car list)) list) + (else (memq obj (cdr list))))) + +(define (memv obj list) + (cond ((null? list) #f) + ((eqv? obj (car list)) list) + (else (memv obj (cdr list))))) + +(define (member obj list) + (cond ((null? list) #f) + ((equal? 
obj (car list)) list) + (else (member obj (cdr list))))) + + +;; (assq obj alist) +;; (assv obj alist) +;; (assoc obj alist) +;; Alist (for "association list") must be a list of pairs. These +;; procedures find the first pair in alist whose car field is obj, and +;; returns that pair. If no pair in alist has obj as its car, then #f +;; (not the empty list) is returned. Assq uses eq? to compare obj with +;; the car fields of the pairs in alist, while assv uses eqv? and +;; assoc uses equal?. +;; See R4RS 6.3. + +(define (assq obj list) + (cond ((null? list) #f) + ((eq? obj (caar list)) (car list)) + (else (assq obj (cdr list))))) + +(define (assv obj list) + (cond ((null? list) #f) + ((eqv? obj (caar list)) (car list)) + (else (assv obj (cdr list))))) + +(define (assoc obj list) + (cond ((null? list) #f) + ((equal? obj (caar list)) (car list)) + (else (assoc obj (cdr list))))) + + +;; (= x1 x2 x3 ...) +;; (<= x1 x2 x3 ...) +;; (>= x1 x2 x3 ...) +;; These procedures return #t if their arguments are (respectively): +;; equal, monotonically nondecreasing, or monotonically nonincreasing. +;; These predicates are required to be transitive. +;; See R4RS 6.5.5. + +(define (no-fold op list) + (cond ((null? list) #t) + ((null? (cdr list)) #t) + ((op (car list) (cadr list)) #f) + (else (no-fold op (cdr list))))) + +(define (= . rest) (and (apply <= rest) (apply >= rest))) +(define (<= . rest) (no-fold > rest)) +(define (>= . rest) (no-fold < rest)) + + +;; (odd? n) +;; (even? n) +;; These numerical predicates test a number for a particular property, +;; returning #t or #f. +;; See R4RS 6.5.5. + +(define (odd? n) (eqv? (remainder n 2) 1)) +(define (even? n) (eqv? (remainder n 2) 0)) + + +;; (max x1 x2 ...) +;; (min x1 x2 ...) +;; These procedures return the maximum or minimum of their arguments. +;; See R4RS 6.5.5. + +(define (extremum op x list) + (if (null? list) x + (extremum op (if (op x (car list)) x (car list)) (cdr list)))) + +(define (max x1 . 
rest) (extremum > x1 rest)) +(define (min x1 . rest) (extremum < x1 rest)) + + +;; (abs x) +;; Abs returns the magnitude of its argument. +;; See R4RS 6.5.5. + +(define (abs x) (if (< x 0) (- x) x)) + + +;; (quotient n1 n2) +;; (remainder n1 n2) +;; These procedures implement number-theoretic (integer) division: For +;; positive integers n1 and n2, if n3 and n4 are integers such that +;; n1=n2n3+n4 and 0<= n4 n3 +;; (remainder n1 n2) ==> n4 +;; +;; For integers n1 and n2 with n2 not equal to 0, +;; +;; (= n1 (+ (* n2 (quotient n1 n2)) +;; (remainder n1 n2))) +;; ==> #t +;; +;; provided all numbers involved in that computation are exact. +;; See R4RS 6.5.5. + +(define quotient /) +(define (remainder n1 n2) (- n1 (* n2 (quotient n1 n2)))) + + +;; (number->string number) +;; (number->string number radix) +;; Radix must be an exact integer, either 2, 8, 10, or 16. If omitted, +;; radix defaults to 10. The procedure number->string takes a number +;; and a radix and returns as a string an external representation of +;; the given number in the given radix. +;; See R4RS 6.5.6. + +(define (number->string . args) + (letrec ((number (car args)) + (radix (if (null? (cdr args)) 10 (cadr args))) + (digits "0123456789ABCDEF") + (n->s (lambda (n list) + (if (zero? n) list + (n->s (quotient n radix) + (cons (string-ref digits (remainder n radix)) + list)))))) + (cond ((or (< radix 2) (> radix 16)) + (error "radix must be in the range 2-16")) + ((negative? number) + (string-append "-" (number->string (abs number) radix))) + ((zero? number) "0") + (else (list->string (n->s number '())))))) + + +;; (string->number string) +;; (string->number string radix) +;; Returns a number of the maximally precise representation expressed +;; by the given string. Radix must be an exact integer, either 2, 8, +;; 10, or 16. If radix is not supplied, then the default radix is 10. +;; If string is not a syntactically valid notation for a number, then +;; string->number returns #f. 
+
+(define (string->number . args)
+  (letrec ((string (car args))
+           (length (string-length string))
+           (radix (if (null? (cdr args)) 10 (cadr args)))
+           (c->d (lambda (c)
+                   (let ((i (char->integer c)))
+                     (cond ((char-numeric? c) (- i (char->integer #\0)))
+                           ((char-upper-case? c) (- i -10 (char->integer #\A)))
+                           ((char-lower-case? c) (- i -10 (char->integer #\a)))
+                           (else #f)))))
+           (s->n (lambda (i a)
+                   (if (>= i length) a
+                       (let ((d (c->d (string-ref string i))))
+                         (cond ((eq? d #f) #f)
+                               ((>= d radix) #f)
+                               (else (s->n (+ i 1) (+ (* a radix) d)))))))))
+    (s->n 0 0)))
+
+
+;; (char=? char1 char2)
+;; (char<? char1 char2)
+;; (char>? char1 char2)
+;; (char<=? char1 char2)
+;; (char>=? char1 char2)
+;; These procedures impose a total ordering on the set of characters.
+;; See R4RS 6.6.
+
+(define (char=? c1 c2) (eqv? (char->integer c1) (char->integer c2)))
+(define (char<? c1 c2) (< (char->integer c1) (char->integer c2)))
+(define (char>? c1 c2) (> (char->integer c1) (char->integer c2)))
+(define (char<=? c1 c2) (<= (char->integer c1) (char->integer c2)))
+(define (char>=? c1 c2) (>= (char->integer c1) (char->integer c2)))
+
+
+;; (char-ci=? char1 char2)
+;; (char-ci<? char1 char2)
+;; (char-ci>? char1 char2)
+;; (char-ci<=? char1 char2)
+;; (char-ci>=? char1 char2)
+;; These procedures are similar to char=? et cetera, but they treat
+;; upper case and lower case letters as the same. For example,
+;; `(char-ci=? #\A #\a)' returns #t.
+;; See R4RS 6.6.
+
+(define (char-ci=? c1 c2) (char=? (char-upcase c1) (char-upcase c2)))
+(define (char-ci<? c1 c2) (char<? (char-upcase c1) (char-upcase c2)))
+(define (char-ci>? c1 c2) (char>? (char-upcase c1) (char-upcase c2)))
+(define (char-ci<=? c1 c2) (char<=? (char-upcase c1) (char-upcase c2)))
+(define (char-ci>=? c1 c2) (char>=? (char-upcase c1) (char-upcase c2)))
+
+
+;; (char-alphabetic? char)
+;; (char-numeric? char)
+;; (char-whitespace? char)
+;; (char-upper-case? letter)
+;; (char-lower-case?
letter) +;; These procedures return #t if their arguments are alphabetic, +;; numeric, whitespace, upper case, or lower case characters, +;; respectively, otherwise they return #f. The following remarks, +;; which are specific to the ASCII character set, are intended only as +;; a guide: The alphabetic characters are the 52 upper and lower case +;; letters. The numeric characters are the ten decimal digits. The +;; whitespace characters are space, tab, line feed, form feed, and +;; carriage return. + +(define (char-alphabetic? c) (or (char-upper-case? c) (char-lower-case? c))) +(define (char-numeric? c) (and (char>=? c #\0) (char<=? c #\9))) +(define (char-whitespace? c) (memv (char->integer c) '(8 10 12 13 32))) +(define (char-upper-case? c) (and (char>=? c #\A) (char<=? c #\Z))) +(define (char-lower-case? c) (and (char>=? c #\a) (char<=? c #\z))) + + +;; (char-upcase char) +;; (char-downcase char) +;; These procedures return a character char2 such that `(char-ci=? +;; char char2)'. In addition, if char is alphabetic, then the result +;; of char-upcase is upper case and the result of char-downcase is +;; lower case. + +(define (char-upcase c) + (if (char-lower-case? c) + (integer->char (- (+ (char->integer c) (char->integer #\A)) + (char->integer #\a))) + c)) + +(define (char-downcase c) + (if (char-upper-case? c) + (integer->char (- (+ (char->integer c) (char->integer #\a)) + (char->integer #\A))) + c)) + + +;; (string-ci=? string1 string2) +;; Returns #t if the two strings are the same length and contain the +;; same characters in the same positions, otherwise returns #f. +;; String-ci=? treats upper and lower case letters as though they were +;; the same character. +;; See R4RS 6.7. 
+ +(define (string-cmp op1 op2 s1 s2 e1 e2) + (letrec ((l1 (string-length s1)) + (l2 (string-length s2)) + (sc (lambda (i) + (cond ((and (>= i l1) (>= i l2)) #t) + ((>= i l1) e1) + ((>= i l2) e2) + ((op1 (string-ref s1 i) (string-ref s2 i)) #t) + ((not (op2 (string-ref s1 i) (string-ref s2 i))) #f) + (else (sc (+ 1 i))))))) + (sc 0))) + +(define (string-ci=? s1 s2) (string-cmp (lambda _ #f) char-ci=? s1 s2 #f #f)) + + +;; (string? string1 string2) +;; (string<=? string1 string2) +;; (string>=? string1 string2) +;; (string-ci? string1 string2) +;; (string-ci<=? string1 string2) +;; (string-ci>=? string1 string2) +;; These procedures are the lexicographic extensions to strings of the +;; corresponding orderings on characters. For example, string=? s1 s2))) +(define (string>? s1 s2) (not (string<=? s1 s2))) +(define (string<=? s1 s2) (string-cmp char=? s1 s2) (string-cmp char>? char>=? s1 s2 #f #t)) +(define (string-ci=? s1 s2))) +(define (string-ci>? s1 s2) (not (string-ci<=? s1 s2))) +(define (string-ci<=? s1 s2) (string-cmp char-ci=? s1 s2) (string-cmp char-ci>? char-ci>=? s1 s2 #f #t)) + + +;; (map proc list1 list2 ...) +;; The lists must be lists, and proc must be a procedure taking as +;; many arguments as there are lists. If more than one list is given, +;; then they must all be the same length. Map applies proc +;; element-wise to the elements of the lists and returns a list of the +;; results, in order from left to right. The dynamic order in which +;; proc is applied to the elements of the lists is unspecified. +;; See R4RS 6.9. + +(define (map proc . args) + (letrec ((map1 (lambda (f l) (if (null? l) '() + (cons (f (car l)) (map1 f (cdr l)))))) + (map2 (lambda (l) (if (null? (car l)) '() + (cons (apply proc (map1 car l)) + (map2 (map1 cdr l))))))) + (map2 args))) + + +;; (for-each proc list1 list2 ...) +;; The arguments to for-each are like the arguments to map, but +;; for-each calls proc for its side effects rather than for its +;; values. 
Unlike map, for-each is guaranteed to call proc on the +;; elements of the lists in order from the first element to the last, +;; and the value returned by for-each is unspecified. +;; See R4RS 6.9. + +(define (for-each proc . args) + (letrec ((map1 (lambda (f l) (if (null? l) '() + (cons (f (car l)) (map1 f (cdr l)))))) + (map2 (lambda (l) (if (null? (car l)) #f + (begin (apply proc (map1 car l)) + (map2 (map1 cdr l))))))) + (map2 args))) diff --git a/mps/example/scheme/scheme-advanced.c b/mps/example/scheme/scheme-advanced.c new file mode 100644 index 00000000000..cdf24590d40 --- /dev/null +++ b/mps/example/scheme/scheme-advanced.c @@ -0,0 +1,4648 @@ +/* scheme.c -- SCHEME INTERPRETER EXAMPLE FOR THE MEMORY POOL SYSTEM + * + * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license. + * + * This is a toy interpreter for a subset of the Scheme programming + * language . + * It is by no means the best or even the right way to implement Scheme, + * but it serves the purpose of showing how the Memory Pool System can be + * used as part of a programming language run-time system. + * + * To try it out, "make scheme-advanced" then + * + * $ ./scheme + * (define (triangle n) (if (eqv? n 0) 0 (+ n (triangle (- n 1))))) + * (define (church n f a) (if (eqv? n 0) a (church (- n 1) f (f a)))) + * (church 1000 triangle 0) + * + * This won't produce interesting results but it will cause garbage + * collection cycles. Note that there's never any waiting for the MPS. + * THAT'S THE POINT. + * + * To find the code that's particularly related to the MPS, search for %%MPS. + * + * + * MPS TO DO LIST + * - make an mps_perror + * + * + * SCHEME TO DO LIST + * - unbounded integers, other number types. + * - named let. + * - quasiquote: vectors; nested; dotted. + * - Lots of library. 
+ * - \#foo unsatisfactory in read and print + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mps.h" +#include "mpsavm.h" +#include "mpscamc.h" +#include "mpscawl.h" + + +/* LANGUAGE EXTENSION */ + +#define unless(c) if(!(c)) +#define LENGTH(array) (sizeof(array) / sizeof(array[0])) +#define UNUSED(var) ((void)var) + + +/* CONFIGURATION PARAMETERS */ + + +#define SYMMAX ((size_t)255) /* max length of a symbol */ +#define MSGMAX ((size_t)255) /* max length of error message */ +#define STRMAX ((size_t)255) /* max length of a string */ + + +/* DATA TYPES */ + + +/* obj_t -- scheme object type + * + * obj_t is a pointer to a union, obj_u, which has members for + * each scheme representation. + * + * The obj_u also has a "type" member. Each representation + * structure also has a "type" field first. ANSI C guarantees + * that these type fields correspond [section?]. + * + * Objects are allocated by allocating one of the representation + * structures and casting the pointer to it to type obj_t. This + * allows objects of different sizes to be represented by the + * same type. + * + * To access an object, check its type by reading TYPE(obj), then + * access the fields of the representation, e.g. 
+ * if(TYPE(obj) == TYPE_PAIR) fiddle_with(CAR(obj)); + */ + +typedef union obj_u *obj_t; + +typedef obj_t (*entry_t)(obj_t env, obj_t op_env, obj_t operator, obj_t rands); + +typedef int type_t; +enum { + TYPE_PAIR, + TYPE_INTEGER, + TYPE_SYMBOL, + TYPE_SPECIAL, + TYPE_OPERATOR, + TYPE_STRING, + TYPE_PORT, + TYPE_PROMISE, + TYPE_CHARACTER, + TYPE_VECTOR, + TYPE_TABLE, + TYPE_FWD2, /* two-word forwarding object */ + TYPE_FWD, /* three words and up forwarding object */ + TYPE_PAD1, /* one-word padding object */ + TYPE_PAD /* two words and up padding object */ +}; + +typedef struct type_s { + type_t type; +} type_s; + +typedef struct pair_s { + type_t type; /* TYPE_PAIR */ + obj_t car, cdr; /* first and second projections */ +} pair_s; + +typedef struct symbol_s { + type_t type; /* TYPE_SYMBOL */ + obj_t name; /* its name (a string) */ +} symbol_s; + +typedef struct integer_s { + type_t type; /* TYPE_INTEGER */ + long integer; /* the integer */ +} integer_s; + +typedef struct special_s { + type_t type; /* TYPE_SPECIAL */ + const char *name; /* printed representation, NUL terminated */ +} special_s; + +typedef struct operator_s { + type_t type; /* TYPE_OPERATOR */ + const char *name; /* printed name, NUL terminated */ + entry_t entry; /* entry point -- see eval() */ + obj_t arguments, body; /* function arguments and code */ + obj_t env, op_env; /* closure environments */ +} operator_s; + +typedef struct string_s { + type_t type; /* TYPE_STRING */ + size_t length; /* number of chars in string */ + char string[1]; /* string, NUL terminated */ +} string_s; + +typedef struct port_s { + type_t type; /* TYPE_PORT */ + obj_t name; /* name of stream */ + FILE *stream; +} port_s; + +typedef struct character_s { + type_t type; /* TYPE_CHARACTER */ + char c; /* the character */ +} character_s; + +typedef struct vector_s { + type_t type; /* TYPE_VECTOR */ + size_t length; /* number of elements */ + obj_t vector[1]; /* vector elements */ +} vector_s; + +/* %%MPS: Objects in AWL 
pools must be formatted so that aligned + * pointers (with bottom bit(s) zero) can be distinguished from other + * data types (with bottom bit(s) non-zero). Here we use a bottom + * bit of 1 for integers. See pool/awl. */ +#define TAG_COUNT(i) (((i) << 1) + 1) +#define UNTAG_COUNT(i) ((i) >> 1) + +typedef struct buckets_s { + struct buckets_s *dependent; /* the dependent object */ + size_t length; /* number of buckets (tagged) */ + size_t used; /* number of buckets in use (tagged) */ + size_t deleted; /* number of deleted buckets (tagged) */ + obj_t bucket[1]; /* hash buckets */ +} buckets_s, *buckets_t; + +typedef unsigned long (*hash_t)(obj_t obj, mps_ld_t ld); +typedef int (*cmp_t)(obj_t obj1, obj_t obj2); + +/* %%MPS: The hash table is address-based, and so depends on the + * location of its keys: when the garbage collector moves the keys, + * the table needs to be re-hashed. The 'ld' structure is used to + * detect this. See topic/location. */ +typedef struct table_s { + type_t type; /* TYPE_TABLE */ + hash_t hash; /* hash function */ + cmp_t cmp; /* comparison function */ + mps_ld_s ld; /* location dependency */ + mps_ap_t key_ap, value_ap; /* allocation points for keys and values */ + buckets_t keys, values; /* hash buckets for keys and values */ +} table_s; + + +/* fwd2, fwd, pad1, pad -- MPS forwarding and padding objects %%MPS + * + * These object types are here to satisfy the MPS Format Protocol. + * See topic/format. + * + * The MPS needs to be able to replace any object with a forwarding + * object or broken heart and since the smallest normal object defined + * above is two words long, we have two kinds of forwarding objects: + * FWD2 is exactly two words long, and FWD stores a size for larger + * objects. There are cleverer ways to do this with bit twiddling, of + * course. + * + * The MPS needs to be able to pad out any area of memory that's a + * multiple of the pool alignment. 
We've chosen a single word alignment + * for this interpreter, so we have to have a special padding object, PAD1, + * for single words. For padding multiple words we use PAD objects with a + * size field. + * + * See obj_pad, obj_fwd etc. to see how these are used. + */ + +typedef struct fwd2_s { + type_t type; /* TYPE_FWD2 */ + obj_t fwd; /* forwarded object */ +} fwd2_s; + +typedef struct fwd_s { + type_t type; /* TYPE_FWD */ + obj_t fwd; /* forwarded object */ + size_t size; /* total size of this object */ +} fwd_s; + +typedef struct pad1_s { + type_t type; /* TYPE_PAD1 */ +} pad1_s; + +typedef struct pad_s { + type_t type; /* TYPE_PAD */ + size_t size; /* total size of this object */ +} pad_s; + + +typedef union obj_u { + type_s type; /* one of TYPE_* */ + pair_s pair; + symbol_s symbol; + integer_s integer; + special_s special; + operator_s operator; + string_s string; + port_s port; + character_s character; + vector_s vector; + table_s table; + fwd2_s fwd2; + fwd_s fwd; + pad_s pad; +} obj_s; + + +/* structure macros */ + +#define TYPE(obj) ((obj)->type.type) +#define CAR(obj) ((obj)->pair.car) +#define CDR(obj) ((obj)->pair.cdr) +#define CAAR(obj) CAR(CAR(obj)) +#define CADR(obj) CAR(CDR(obj)) +#define CDAR(obj) CDR(CAR(obj)) +#define CDDR(obj) CDR(CDR(obj)) +#define CADDR(obj) CAR(CDDR(obj)) +#define CDDDR(obj) CDR(CDDR(obj)) +#define CDDAR(obj) CDR(CDAR(obj)) +#define CADAR(obj) CAR(CDAR(obj)) + + +/* GLOBAL DATA */ + + +/* total -- total allocated bytes */ + +static size_t total; + + +/* symtab -- symbol table %%MPS + * + * The symbol table is a weak-value hashtable mapping objects of + * TYPE_STRING to objects of TYPE_SYMBOL. When a string is "interned" + * it is looked up in the table, and added only if it is not there. + * This guarantees that all symbols which are equal are actually the + * same object.
+ */ + +static obj_t symtab; +static mps_root_t symtab_root; + + +/* special objects %%MPS + * + * These global variables are initialized to point to objects of + * TYPE_SPECIAL by main. They are used as markers for various + * special purposes. + * + * These static global variable refer to object allocated in the `obj_pool` + * and so they must also be declared to the MPS as roots. + * See `globals_scan`. + */ + +static obj_t obj_empty; /* (), the empty list */ +static obj_t obj_eof; /* end of file */ +static obj_t obj_error; /* error indicator */ +static obj_t obj_true; /* #t, boolean true */ +static obj_t obj_false; /* #f, boolean false */ +static obj_t obj_undefined; /* undefined result indicator */ +static obj_t obj_tail; /* tail recursion indicator */ +static obj_t obj_deleted; /* deleted key in hashtable */ +static obj_t obj_unused; /* unused entry in hashtable */ + + +/* predefined symbols + * + * These global variables are initialized to point to interned + * objects of TYPE_SYMBOL. They have special meaning in the + * Scheme language, and are used by the evaluator to parse code. + */ + +static obj_t obj_quote; /* "quote" symbol */ +static obj_t obj_quasiquote; /* "quasiquote" symbol */ +static obj_t obj_lambda; /* "lambda" symbol */ +static obj_t obj_begin; /* "begin" symbol */ +static obj_t obj_else; /* "else" symbol */ +static obj_t obj_unquote; /* "unquote" symbol */ +static obj_t obj_unquote_splic; /* "unquote-splicing" symbol */ + + +/* error handler + * + * The error_handler variable is initialized to point at a + * jmp_buf to which the "error" function longjmps if there is + * any kind of error during evaluation. It can be set up by + * any enclosing function that wants to catch errors. There + * is a default error handler in `start`, in the read-eval-print + * loop. The error function also writes an error message + * into "error_message" before longjmping, and this can be + * displayed to the user when catching the error. 
+ * + * [An error code should also be passed so that the error can + * be decoded by enclosing code.] + */ + +static jmp_buf *error_handler = NULL; +static char error_message[MSGMAX+1]; + + +/* MPS globals %%MPS + * + * These are global variables holding MPS values for use by the + * interpreter. In a more sophisticated integration some of these might + * be thread local. See `main` for where these are set up. + * + * `arena` is the global state of the MPS, and there's usually only one + * per process. See topic/arena. + * + * `obj_pool` is the memory pool in which the Scheme objects are allocated. + * It is an instance of the Automatic Mostly Copying (AMC) pool class, which + * is a general-purpose garbage collector for use when there are formatted + * objects in the pool, but ambiguous references in thread stacks and + * registers. See pool/amc. + * + * `obj_ap` is an Allocation Point that allows fast in-line non-locking + * allocation in a memory pool. This would usually be thread-local, but + * this interpreter is single-threaded. See `make_pair` etc. for how this + * is used with the reserve/commit protocol. + * + * `buckets_pool` is the memory pool for hash table buckets. There are + * two allocation points, one for buckets containing exact (strong) + * references, the other for buckets containing weak references. 
+ */ + +static mps_arena_t arena; /* the arena */ +static mps_pool_t obj_pool; /* pool for ordinary Scheme objects */ +static mps_ap_t obj_ap; /* allocation point used to allocate objects */ +static mps_pool_t leaf_pool; /* pool for leaf objects */ +static mps_ap_t leaf_ap; /* allocation point for leaf objects */ +static mps_pool_t buckets_pool; /* pool for hash table buckets */ +static mps_ap_t strong_buckets_ap; /* allocation point for strong buckets */ +static mps_ap_t weak_buckets_ap; /* allocation point for weak buckets */ + + + +/* SUPPORT FUNCTIONS */ + + +/* error -- throw an error condition + * + * The "error" function takes a printf-style format string + * and arguments, writes the message into error_message and + * longjmps to *error_handler. There must be a setjmp at + * the other end to catch the condition and display the + * message. + */ + +static void error(const char *format, ...) +{ + va_list args; + + va_start(args, format); + vsnprintf(error_message, sizeof error_message, format, args); + va_end(args); + + if (error_handler) { + longjmp(*error_handler, 1); + } else { + fflush(stdout); + fprintf(stderr, "Fatal error during initialization: %s\n", + error_message); + abort(); + } +} + + +/* make_* -- object constructors %%MPS + * + * Each object type has a function here that allocates an instance of + * that type. + * + * These functions illustrate the two-phase MPS Allocation Point + * Protocol with `reserve` and `commit`. This protocol allows very fast + * in-line allocation without locking, but there is a very tiny chance that + * the object must be re-initialized. In nearly all cases, however, it's + * just a pointer bump. See topic/allocation. + * + * NOTE: We could reduce duplicated code here using macros, but we want to + * write these out because this is code to illustrate how to use the + * protocol. + */ + +#define ALIGNMENT sizeof(mps_word_t) + +/* Align size upwards to the next multiple of the word size. 
*/ +#define ALIGN_WORD(size) \ + (((size) + ALIGNMENT - 1) & ~(ALIGNMENT - 1)) + +/* Align size upwards to the next multiple of the word size, and + * additionally ensure that it's big enough to store a forwarding + * pointer. Evaluates its argument twice. */ +#define ALIGN_OBJ(size) \ + (ALIGN_WORD(size) >= ALIGN_WORD(sizeof(fwd_s)) \ + ? ALIGN_WORD(size) \ + : ALIGN_WORD(sizeof(fwd_s))) + +static obj_t make_bool(int condition) +{ + return condition ? obj_true : obj_false; +} + +static obj_t make_pair(obj_t car, obj_t cdr) +{ + obj_t obj; + mps_addr_t addr; + /* When using the allocation point protocol it is up to the client + code to ensure that all requests are for aligned sizes, because in + nearly all cases `mps_reserve` is just an increment to a pointer. */ + size_t size = ALIGN_OBJ(sizeof(pair_s)); + do { + mps_res_t res = mps_reserve(&addr, obj_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_pair"); + obj = addr; + obj->pair.type = TYPE_PAIR; + CAR(obj) = car; + CDR(obj) = cdr; + /* `mps_commit` returns false on very rare occasions (when an MPS epoch + change has happened since reserve) but in those cases the object must + be re-initialized. It's therefore important not to do anything you + don't want to repeat between reserve and commit. Also, the shorter + the time between reserve and commit, the less likely commit is to + return false. 
*/ + } while(!mps_commit(obj_ap, addr, size)); + total += sizeof(pair_s); + return obj; +} + +static obj_t make_integer(long integer) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(integer_s)); + do { + mps_res_t res = mps_reserve(&addr, leaf_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_integer"); + obj = addr; + obj->integer.type = TYPE_INTEGER; + obj->integer.integer = integer; + } while(!mps_commit(leaf_ap, addr, size)); + total += sizeof(integer_s); + return obj; +} + +static obj_t make_symbol(obj_t name) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(symbol_s)); + assert(TYPE(name) == TYPE_STRING); + do { + mps_res_t res = mps_reserve(&addr, obj_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_symbol"); + obj = addr; + obj->symbol.type = TYPE_SYMBOL; + obj->symbol.name = name; + } while(!mps_commit(obj_ap, addr, size)); + total += size; + return obj; +} + +static obj_t make_string(size_t length, const char *string) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(offsetof(string_s, string) + length+1); + do { + mps_res_t res = mps_reserve(&addr, leaf_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_string"); + obj = addr; + obj->string.type = TYPE_STRING; + obj->string.length = length; + if (string) memcpy(obj->string.string, string, length+1); + else memset(obj->string.string, 0, length+1); + } while(!mps_commit(leaf_ap, addr, size)); + total += size; + return obj; +} + +static obj_t make_special(const char *string) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(special_s)); + do { + mps_res_t res = mps_reserve(&addr, leaf_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_special"); + obj = addr; + obj->special.type = TYPE_SPECIAL; + obj->special.name = string; + } while(!mps_commit(leaf_ap, addr, size)); + total += sizeof(special_s); + return obj; +} + +static obj_t make_operator(const char *name, + entry_t entry, 
obj_t arguments, + obj_t body, obj_t env, obj_t op_env) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(operator_s)); + do { + mps_res_t res = mps_reserve(&addr, obj_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_operator"); + obj = addr; + obj->operator.type = TYPE_OPERATOR; + obj->operator.name = name; + obj->operator.entry = entry; + obj->operator.arguments = arguments; + obj->operator.body = body; + obj->operator.env = env; + obj->operator.op_env = op_env; + } while(!mps_commit(obj_ap, addr, size)); + total += sizeof(operator_s); + return obj; +} + +static obj_t make_port(obj_t name, FILE *stream) +{ + mps_addr_t port_ref; + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(port_s)); + do { + mps_res_t res = mps_reserve(&addr, obj_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_port"); + obj = addr; + obj->port.type = TYPE_PORT; + obj->port.name = name; + obj->port.stream = stream; + } while(!mps_commit(obj_ap, addr, size)); + total += sizeof(port_s); + + /* %%MPS: Register the port object for finalization. When the object is + no longer referenced elsewhere, a message will be received in `mps_chat` + so that the file can be closed. See topic/finalization. 
*/ + port_ref = obj; + mps_finalize(arena, &port_ref); + + return obj; +} + +static obj_t make_character(char c) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(sizeof(character_s)); + do { + mps_res_t res = mps_reserve(&addr, leaf_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_character"); + obj = addr; + obj->character.type = TYPE_CHARACTER; + obj->character.c = c; + } while(!mps_commit(leaf_ap, addr, size)); + total += sizeof(character_s); + return obj; +} + +static obj_t make_vector(size_t length, obj_t fill) +{ + obj_t obj; + mps_addr_t addr; + size_t size = ALIGN_OBJ(offsetof(vector_s, vector) + length * sizeof(obj_t)); + do { + mps_res_t res = mps_reserve(&addr, obj_ap, size); + size_t i; + if (res != MPS_RES_OK) error("out of memory in make_vector"); + obj = addr; + obj->vector.type = TYPE_VECTOR; + obj->vector.length = length; + for(i = 0; i < length; ++i) + obj->vector.vector[i] = fill; + } while(!mps_commit(obj_ap, addr, size)); + total += size; + return obj; +} + +static buckets_t make_buckets(size_t length, mps_ap_t ap) +{ + buckets_t buckets; + mps_addr_t addr; + size_t size; + size = ALIGN_OBJ(offsetof(buckets_s, bucket) + length * sizeof(buckets->bucket[0])); + do { + mps_res_t res = mps_reserve(&addr, ap, size); + size_t i; + if (res != MPS_RES_OK) error("out of memory in make_buckets"); + buckets = addr; + buckets->dependent = NULL; + buckets->length = TAG_COUNT(length); + buckets->used = TAG_COUNT(0); + buckets->deleted = TAG_COUNT(0); + for(i = 0; i < length; ++i) { + buckets->bucket[i] = obj_unused; + } + } while(!mps_commit(ap, addr, size)); + total += size; + return buckets; +} + +static obj_t make_table(size_t length, hash_t hashf, cmp_t cmpf, int weak_key, int weak_value) +{ + obj_t obj; + mps_addr_t addr; + size_t l, size = ALIGN_OBJ(sizeof(table_s)); + do { + mps_res_t res = mps_reserve(&addr, obj_ap, size); + if (res != MPS_RES_OK) error("out of memory in make_table"); + obj = addr; + obj->table.type = 
TYPE_TABLE; + obj->table.keys = obj->table.values = NULL; + } while(!mps_commit(obj_ap, addr, size)); + total += size; + obj->table.hash = hashf; + obj->table.cmp = cmpf; + /* round up to next power of 2 */ + for(l = 1; l < length; l *= 2); + obj->table.key_ap = weak_key ? weak_buckets_ap : strong_buckets_ap; + obj->table.value_ap = weak_value ? weak_buckets_ap : strong_buckets_ap; + obj->table.keys = make_buckets(l, obj->table.key_ap); + obj->table.values = make_buckets(l, obj->table.value_ap); + obj->table.keys->dependent = obj->table.values; + obj->table.values->dependent = obj->table.keys; + mps_ld_reset(&obj->table.ld, arena); + return obj; +} + + +/* getnbc -- get next non-blank char from stream */ + +static int getnbc(FILE *stream) +{ + int c; + do { + c = getc(stream); + if(c == ';') { + do + c = getc(stream); + while(c != EOF && c != '\n'); + } + } while(isspace(c)); + return c; +} + + +/* isealpha -- test for "extended alphabetic" char + * + * Scheme symbols may contain any "extended alphabetic" + * character (see section 2.1 of R4RS). This function + * returns non-zero if a character is in the set of + * extended characters. + */ + +static int isealpha(int c) +{ + return strchr("+-.*/<=>!?:$%_&~^", c) != NULL; +} + + +/* hash -- hash a string to an unsigned long + * + * This hash function was derived (with permission) from + * Paul Haahr's hash in the most excellent rc 1.4. 
+ */ + +static unsigned long hash(const char *s, size_t length) { + unsigned long c, h=0; + size_t i = 0; + switch(length % 4) { + do { + c=(unsigned long)s[i++]; h+=(c<<17)^(c<<11)^(c<<5)^(c>>1); + case 3: + c=(unsigned long)s[i++]; h^=(c<<14)+(c<<7)+(c<<4)+c; + case 2: + c=(unsigned long)s[i++]; h^=(~c<<11)|((c<<3)^(c>>1)); + case 1: + c=(unsigned long)s[i++]; h-=(c<<16)|(c<<9)|(c<<2)|(c&3); + case 0: + ; + } while(i < length); + } + return h; +} + + +/* Hash table implementation */ + +/* %%MPS: When taking the hash of an address, we record the dependency + * on its location by calling mps_ld_add. See topic/location. + */ +static unsigned long eq_hash(obj_t obj, mps_ld_t ld) +{ + union {char s[sizeof(obj_t)]; obj_t addr;} u; + if (ld) mps_ld_add(ld, arena, obj); + u.addr = obj; + return hash(u.s, sizeof(obj_t)); +} + +static int eqp(obj_t obj1, obj_t obj2) +{ + return obj1 == obj2; +} + +static unsigned long eqv_hash(obj_t obj, mps_ld_t ld) +{ + switch(TYPE(obj)) { + case TYPE_INTEGER: + return (unsigned long)obj->integer.integer; + case TYPE_CHARACTER: + return (unsigned long)obj->character.c; + default: + return eq_hash(obj, ld); + } +} + +static int eqvp(obj_t obj1, obj_t obj2) +{ + if (obj1 == obj2) + return 1; + if (TYPE(obj1) != TYPE(obj2)) + return 0; + switch(TYPE(obj1)) { + case TYPE_INTEGER: + return obj1->integer.integer == obj2->integer.integer; + case TYPE_CHARACTER: + return obj1->character.c == obj2->character.c; + default: + return 0; + } +} + +static unsigned long string_hash(obj_t obj, mps_ld_t ld) +{ + UNUSED(ld); + unless(TYPE(obj) == TYPE_STRING) + error("string-hash: argument must be a string"); + return hash(obj->string.string, obj->string.length); +} + +static int string_equalp(obj_t obj1, obj_t obj2) +{ + return obj1 == obj2 || + (TYPE(obj1) == TYPE_STRING && + TYPE(obj2) == TYPE_STRING && + obj1->string.length == obj2->string.length && + 0 == strcmp(obj1->string.string, obj2->string.string)); +} + +static int buckets_find(obj_t tbl, 
buckets_t buckets, obj_t key, int add, size_t *b) +{ + unsigned long i, h, probe; + unsigned long l = UNTAG_COUNT(buckets->length) - 1; + int result = 0; + assert(TYPE(tbl) == TYPE_TABLE); + h = tbl->table.hash(key, add ? &tbl->table.ld : NULL); + probe = (h >> 8) | 1; + h &= l; + i = h; + do { + obj_t k = buckets->bucket[i]; + if(k == obj_unused || tbl->table.cmp(k, key)) { + *b = i; + return 1; + } + if(result == 0 && k == obj_deleted) { + *b = i; + result = 1; + } + i = (i+probe) & l; + } while(i != h); + return result; +} + +static size_t table_size(obj_t tbl) +{ + size_t used, deleted; + assert(TYPE(tbl) == TYPE_TABLE); + used = UNTAG_COUNT(tbl->table.keys->used); + deleted = UNTAG_COUNT(tbl->table.keys->deleted); + assert(used >= deleted); + return used - deleted; +} + +/* Rehash 'tbl' so that it has 'new_length' buckets. If 'key' is found + * during this process, update 'key_bucket' to be the index of the + * bucket containing 'key' and return true, otherwise return false. + * + * %%MPS: When re-hashing the table we reset the associated location + * dependency and re-add a dependency on each object in the table. + * This is because the table gets re-hashed when the locations of + * objects have changed. See topic/location. 
 */
/* table_rehash -- rebuild a hash table's buckets
 *
 * Allocate fresh key and value bucket vectors of new_length and
 * re-insert every live entry.  The table's location dependency is
 * reset because the new buckets hash keys at their current addresses
 * (see topic/location in the MPS manual).  If key is non-NULL and an
 * entry matching it (by the table's cmp function) is copied across,
 * its new bucket index is stored in *key_bucket and 1 is returned;
 * otherwise 0 is returned.
 *
 * NOTE(review): make_buckets may allocate (and so the collector may
 * run) between mps_ld_reset and the insertion loop; presumably
 * buckets_find re-adds dependencies as it hashes -- confirm.
 */
static int table_rehash(obj_t tbl, size_t new_length, obj_t key, size_t *key_bucket)
{
  size_t i, length;
  buckets_t new_keys, new_values;
  int result = 0;

  assert(TYPE(tbl) == TYPE_TABLE);
  length = UNTAG_COUNT(tbl->table.keys->length);
  new_keys = make_buckets(new_length, tbl->table.key_ap);
  new_values = make_buckets(new_length, tbl->table.value_ap);
  new_keys->dependent = new_values;
  new_values->dependent = new_keys;
  mps_ld_reset(&tbl->table.ld, arena);

  for (i = 0; i < length; ++i) {
    obj_t old_key = tbl->table.keys->bucket[i];
    if (old_key != obj_unused && old_key != obj_deleted) {
      int found;
      size_t b;
      found = buckets_find(tbl, new_keys, old_key, 1, &b);
      assert(found);            /* new table shouldn't be full */
      assert(new_keys->bucket[b] == obj_unused); /* shouldn't be in new table */
      new_keys->bucket[b] = old_key;
      new_values->bucket[b] = tbl->table.values->bucket[i];
      if (key != NULL && tbl->table.cmp(old_key, key)) {
        *key_bucket = b;
        result = 1;
      }
      new_keys->used = TAG_COUNT(UNTAG_COUNT(new_keys->used) + 1);
    }
  }

  assert(UNTAG_COUNT(new_keys->used) == table_size(tbl));
  tbl->table.keys = new_keys;
  tbl->table.values = new_values;
  return result;
}

/* %%MPS: If we fail to find 'key' in the table, and if mps_ld_isstale
 * returns true, then some of the keys in the table might have been
 * moved by the garbage collector: in this case we need to re-hash the
 * table. See topic/location.
 */
static int table_find(obj_t tbl, obj_t key, int add, size_t *b)
{
  if (!buckets_find(tbl, tbl->table.keys, key, add, b)) {
    return 0;
  } else if ((tbl->table.keys->bucket[*b] == obj_unused
              || tbl->table.keys->bucket[*b] == obj_deleted)
             && mps_ld_isstale(&tbl->table.ld, arena, key)) {
    /* Key not found but the location dependency is stale: rehash at
       the same size, which both fixes up the addresses and tells us
       whether the key was really present. */
    return table_rehash(tbl, UNTAG_COUNT(tbl->table.keys->length), key, b);
  } else {
    return 1;
  }
}

/* table_ref -- look up a key; returns the bound value, or NULL if
 * the key is not present. */
static obj_t table_ref(obj_t tbl, obj_t key)
{
  size_t b;
  assert(TYPE(tbl) == TYPE_TABLE);
  if (table_find(tbl, key, 0, &b)) {
    obj_t k = tbl->table.keys->bucket[b];
    if (k != obj_unused && k != obj_deleted)
      return tbl->table.values->bucket[b];
  }
  return NULL;
}

/* table_try_set -- bind key to value if a bucket is available;
 * returns 0 if the table needs rehashing first, 1 on success.
 * Maintains the tagged used/deleted counts. */
static int table_try_set(obj_t tbl, obj_t key, obj_t value)
{
  size_t b;
  assert(TYPE(tbl) == TYPE_TABLE);
  if (!table_find(tbl, key, 1, &b))
    return 0;
  if (tbl->table.keys->bucket[b] == obj_unused) {
    /* brand-new entry */
    tbl->table.keys->bucket[b] = key;
    tbl->table.keys->used = TAG_COUNT(UNTAG_COUNT(tbl->table.keys->used) + 1);
  } else if (tbl->table.keys->bucket[b] == obj_deleted) {
    /* reusing a tombstone */
    tbl->table.keys->bucket[b] = key;
    assert(tbl->table.keys->deleted > TAG_COUNT(0));
    tbl->table.keys->deleted
      = TAG_COUNT(UNTAG_COUNT(tbl->table.keys->deleted) - 1);
  }
  tbl->table.values->bucket[b] = value;
  return 1;
}

/* table_full -- is the table at or above its load-factor limit?
 * NOTE(review): used and length are stored as tagged counts (see
 * TAG_COUNT above); this comparison operates on the tagged values,
 * which approximates a load factor of 1/2 -- confirm intentional. */
static int table_full(obj_t tbl)
{
  assert(TYPE(tbl) == TYPE_TABLE);
  return tbl->table.keys->used >= tbl->table.keys->length / 2;
}

/* table_set -- bind key to value, growing the table if needed */
static void table_set(obj_t tbl, obj_t key, obj_t value)
{
  assert(TYPE(tbl) == TYPE_TABLE);
  if (table_full(tbl) || !table_try_set(tbl, key, value)) {
    int res;
    table_rehash(tbl, UNTAG_COUNT(tbl->table.keys->length) * 2, NULL, NULL);
    res = table_try_set(tbl, key, value);
    assert(res);                /* rehash should have made room */
  }
}

/* table_delete -- remove a key's binding, leaving a tombstone
 * (obj_deleted) so that probe chains stay intact. No-op if the key
 * is absent. */
static void table_delete(obj_t tbl, obj_t key)
{
  size_t b;
  assert(TYPE(tbl) == TYPE_TABLE);
  if(table_find(tbl, key, 0, &b)
     && tbl->table.keys->bucket[b] != obj_unused
     && tbl->table.keys->bucket[b] != obj_deleted)
  {
    tbl->table.keys->bucket[b] = obj_deleted;
    tbl->table.keys->deleted
      = TAG_COUNT(UNTAG_COUNT(tbl->table.keys->deleted) + 1);
    tbl->table.values->bucket[b] = NULL; /* drop the value reference */
  }
}


/* intern_string -- return the unique symbol for a string, creating
 * and registering it in symtab on first sight. */
static obj_t intern_string(obj_t name)
{
  obj_t symbol;
  assert(TYPE(name) == TYPE_STRING);
  symbol = table_ref(symtab, name);
  if(symbol == NULL) {
    symbol = make_symbol(name);
    table_set(symtab, name, symbol);
  }
  return symbol;
}


/* intern -- intern a C string as a symbol */
static obj_t intern(const char *string)
{
  return intern_string(make_string(strlen(string), string));
}


/* symbol_name -- the C string naming a symbol */
static char *symbol_name(obj_t symbol)
{
  assert(TYPE(symbol) == TYPE_SYMBOL);
  assert(TYPE(symbol->symbol.name) == TYPE_STRING);
  return symbol->symbol.name->string.string;
}


/* port_close -- close a port's stream if still open (idempotent) */
static void port_close(obj_t port)
{
  assert(TYPE(port) == TYPE_PORT);
  if(port->port.stream != NULL) {
    fclose(port->port.stream);
    port->port.stream = NULL;
  }
}


/* print -- print an object to a stream
 *
 * depth limits recursion into nested structure: at depth 0 nested
 * contents print as "...".  A negative depth is normalized to -1,
 * which never reaches 0 on decrement, i.e. unlimited depth.
 */
static void print(obj_t obj, long depth, FILE *stream)
{
  if (depth < 0) {
    depth = -1;                 /* -1 means "unlimited" */
  }
  switch(TYPE(obj)) {
  case TYPE_INTEGER: {
    fprintf(stream, "%ld", obj->integer.integer);
  } break;

  case TYPE_SYMBOL: {
    fputs(symbol_name(obj), stream);
  } break;

  case TYPE_SPECIAL: {
    fputs(obj->special.name, stream);
  } break;

  case TYPE_PORT: {
    assert(TYPE(obj->port.name) == TYPE_STRING);
    fprintf(stream, "#[port \"%s\"]",
            obj->port.name->string.string);
  } break;

  case TYPE_STRING: {
    /* escape backslash and double-quote so output re-reads */
    size_t i;
    putc('"', stream);
    for(i = 0; i < obj->string.length; ++i) {
      char c = obj->string.string[i];
      switch(c) {
      case '\\': fputs("\\\\", stream); break;
      case '"': fputs("\\\"", stream); break;
      default: putc(c, stream); break;
      }
    }
    putc('"', stream);
  } break;

  case TYPE_PROMISE: {
    /* CAR is the evaluated flag, CDR the value or thunk */
    assert(CAR(obj) == obj_true || CAR(obj) == obj_false);
    fprintf(stream, "#[%sevaluated promise ",
            CAR(obj) == obj_false ? "un" : "");
    print(CDR(obj), depth - 1, stream);
    putc(']', stream);
  } break;

  case TYPE_PAIR: {
    /* Render (quote x) etc. using reader shorthand 'x `x ,x ,@x */
    if(TYPE(CAR(obj)) == TYPE_SYMBOL &&
       TYPE(CDR(obj)) == TYPE_PAIR &&
       CDDR(obj) == obj_empty) {
      if(CAR(obj) == obj_quote) {
        putc('\'', stream);
        if(depth == 0)
          fputs("...", stream);
        else
          print(CADR(obj), depth - 1, stream);
        break;
      }
      if(CAR(obj) == obj_quasiquote) {
        putc('`', stream);
        if(depth == 0)
          fputs("...", stream);
        else
          print(CADR(obj), depth - 1, stream);
        break;
      }
      if(CAR(obj) == obj_unquote) {
        putc(',', stream);
        if(depth == 0)
          fputs("...", stream);
        else
          print(CADR(obj), depth - 1, stream);
        break;
      }
      if(CAR(obj) == obj_unquote_splic) {
        fputs(",@", stream);
        if(depth == 0)
          fputs("...", stream);
        else
          print(CADR(obj), depth - 1, stream);
        break;
      }
    }
    putc('(', stream);
    if(depth == 0)
      fputs("...", stream);
    else {
      for(;;) {
        print(CAR(obj), depth - 1, stream);
        obj = CDR(obj);
        if(TYPE(obj) != TYPE_PAIR) break;
        putc(' ', stream);
      }
      if(obj != obj_empty) {
        /* improper list: print the dotted tail */
        fputs(" . ", stream);
        print(obj, depth - 1, stream);
      }
    }
    putc(')', stream);
  } break;

  case TYPE_VECTOR: {
    fputs("#(", stream);
    if(depth == 0)
      fputs("...", stream);
    else {
      size_t i;
      for(i = 0; i < obj->vector.length; ++i) {
        if(i > 0) putc(' ', stream);
        print(obj->vector.vector[i], depth - 1, stream);
      }
    }
    putc(')', stream);
  } break;

  case TYPE_TABLE: {
    /* print live (key value) entries, skipping unused/tombstone */
    size_t i, length = UNTAG_COUNT(obj->table.keys->length);
    fputs("#[hashtable", stream);
    for(i = 0; i < length; ++i) {
      obj_t k = obj->table.keys->bucket[i];
      if(k != obj_unused && k != obj_deleted) {
        fputs(" (", stream);
        print(k, depth - 1, stream);
        putc(' ', stream);
        print(obj->table.values->bucket[i], depth - 1, stream);
        putc(')', stream);
      }
    }
    putc(']', stream);
  } break;

  case TYPE_OPERATOR: {
    fprintf(stream, "#[operator \"%s\" %p ",
            obj->operator.name,
            (void *)obj);
    if(depth == 0)
      fputs("...", stream);
    else {
      print(obj->operator.arguments, depth - 1, stream);
      putc(' ', stream);
      print(obj->operator.body, depth - 1, stream);
      putc(' ', stream);
      print(obj->operator.env, depth - 1, stream);
      putc(' ', stream);
      print(obj->operator.op_env, depth - 1, stream);
    }
    putc(']', stream);
  } break;

  case TYPE_CHARACTER: {
    fprintf(stream, "#\\%c", obj->character.c);
  } break;

  default:
    assert(0);
    abort();
  }
}


/* read_integer -- read a run of digits as a decimal integer
 *
 * c is the first digit (already read).  The first non-digit is pushed
 * back.  NOTE(review): no overflow check; a literal exceeding LONG_MAX
 * overflows signed long (undefined behaviour) -- confirm acceptable
 * for this interpreter.
 */
static obj_t read_integer(FILE *stream, int c)
{
  long integer = 0;

  do {
    integer = integer*10 + c-'0';
    c = getc(stream);
  } while(isdigit(c));
  ungetc(c, stream);

  return make_integer(integer);
}


/* read_symbol -- read a symbol of alphanumeric/extended-alphabetic
 * characters, case-folded to lower case, up to SYMMAX characters.
 * c is the first character (already read); the terminator is pushed
 * back.  Errors out rather than truncating an over-long symbol. */
static obj_t read_symbol(FILE *stream, int c)
{
  size_t length = 0;
  char string[SYMMAX+1];

  do {
    string[length++] = (char)tolower(c);
    c = getc(stream);
  } while(length < SYMMAX && (isalnum(c) || isealpha(c)));

  if(isalnum(c) || isealpha(c))
    error("read: symbol too long");

  string[length] = '\0';

  ungetc(c, stream);

  return intern(string);
}


static obj_t read_string(FILE
*stream, int c) +{ + size_t length = 0; + char string[STRMAX+1]; + + for(;;) { + c = getc(stream); + if(c == EOF) + error("read: end of file during string"); + if(c == '"') break; + if(length >= STRMAX) + error("read: string too long"); + if(c == '\\') { + c = getc(stream); + switch(c) { + case '\\': break; + case '"': break; + case 'n': c = '\n'; break; + case 't': c = '\t'; break; + case EOF: + error("read: end of file in escape sequence in string"); + default: + error("read: unknown escape '%c'", c); + } + } + string[length++] = (char)c; + } + + string[length] = '\0'; + + return make_string(length, string); +} + + +static obj_t read_(FILE *stream); + + +static obj_t read_quote(FILE *stream, int c) +{ + UNUSED(c); + return make_pair(obj_quote, make_pair(read_(stream), obj_empty)); +} + + +static obj_t read_quasiquote(FILE *stream, int c) +{ + UNUSED(c); + return make_pair(obj_quasiquote, make_pair(read_(stream), obj_empty)); +} + + +static obj_t read_unquote(FILE *stream, int c) +{ + c = getc(stream); + if(c == '@') + return make_pair(obj_unquote_splic, make_pair(read_(stream), obj_empty)); + ungetc(c, stream); + return make_pair(obj_unquote, make_pair(read_(stream), obj_empty)); +} + + +static obj_t read_list(FILE *stream, int c) +{ + obj_t list, new, end; + + list = obj_empty; + end = NULL; /* suppress "uninitialized" warning in GCC */ + + for(;;) { + c = getnbc(stream); + if(c == ')' || c == '.' 
|| c == EOF) break; + ungetc(c, stream); + new = make_pair(read_(stream), obj_empty); + if(list == obj_empty) { + list = new; + end = new; + } else { + CDR(end) = new; + end = new; + } + } + + if(c == '.') { + if(list == obj_empty) + error("read: unexpected dot"); + CDR(end) = read_(stream); + c = getnbc(stream); + } + + if(c != ')') + error("read: expected close parenthesis"); + + return list; +} + + +static obj_t list_to_vector(obj_t list) +{ + size_t i; + obj_t l, vector; + i = 0; + l = list; + while(TYPE(l) == TYPE_PAIR) { + ++i; + l = CDR(l); + } + if(l != obj_empty) + return obj_error; + vector = make_vector(i, obj_undefined); + i = 0; + l = list; + while(TYPE(l) == TYPE_PAIR) { + vector->vector.vector[i] = CAR(l); + ++i; + l = CDR(l); + } + return vector; +} + + +static obj_t read_special(FILE *stream, int c) +{ + c = getnbc(stream); + switch(tolower(c)) { + case 't': return obj_true; + case 'f': return obj_false; + case '\\': { /* character (R4RS 6.6) */ + c = getc(stream); + if(c == EOF) + error("read: end of file reading character literal"); + return make_character((char)c); + } + case '(': { /* vector (R4RS 6.8) */ + obj_t list = read_list(stream, c); + obj_t vector = list_to_vector(list); + if(vector == obj_error) + error("read: illegal vector syntax"); + return vector; + } + } + error("read: unknown special '%c'", c); + return obj_error; +} + + +static obj_t read_(FILE *stream) +{ + int c; + + c = getnbc(stream); + if(c == EOF) return obj_eof; + + if(isdigit(c)) + return read_integer(stream, c); + + switch(c) { + case '\'': return read_quote(stream, c); + case '`': return read_quasiquote(stream, c); + case ',': return read_unquote(stream, c); + case '(': return read_list(stream, c); + case '#': return read_special(stream, c); + case '"': return read_string(stream, c); + case '-': case '+': { + int next = getc(stream); + if(isdigit(next)) { + obj_t integer = read_integer(stream, next); + if(c == '-') + integer->integer.integer = 
-integer->integer.integer; + return integer; + } + ungetc(next, stream); + } break; /* fall through to read as symbol */ + } + + if(isalpha(c) || isealpha(c)) + return read_symbol(stream, c); + + error("read: illegal char '%c'", c); + return obj_error; +} + + +/* lookup_in_frame -- look up a symbol in single frame + * + * Search a single frame of the environment for a symbol binding. + */ + +static obj_t lookup_in_frame(obj_t frame, obj_t symbol) +{ + while(frame != obj_empty) { + assert(TYPE(frame) == TYPE_PAIR); + assert(TYPE(CAR(frame)) == TYPE_PAIR); + assert(TYPE(CAAR(frame)) == TYPE_SYMBOL); + if(CAAR(frame) == symbol) + return CAR(frame); + frame = CDR(frame); + } + return obj_undefined; +} + + +/* lookup -- look up symbol in environment + * + * Search an entire environment for a binding of a symbol. + */ + +static obj_t lookup(obj_t env, obj_t symbol) +{ + obj_t binding; + while(env != obj_empty) { + assert(TYPE(env) == TYPE_PAIR); + binding = lookup_in_frame(CAR(env), symbol); + if(binding != obj_undefined) + return binding; + env = CDR(env); + } + return obj_undefined; +} + + +/* define -- define symbol in environment + * + * In Scheme, define will actually rebind (i.e. set) a symbol in the + * same frame of the environment, or add a binding if it wasn't already + * set. This has the effect of making bindings local to functions + * (see how entry_interpret adds an empty frame to the environments), + * allowing recursion, and allowing redefinition at the top level. + * See R4R2 section 5.2 for details. 
 */

static void define(obj_t env, obj_t symbol, obj_t value)
{
  obj_t binding;
  assert(TYPE(env) == TYPE_PAIR); /* always at least one frame */
  binding = lookup_in_frame(CAR(env), symbol);
  if(binding != obj_undefined)
    CDR(binding) = value;       /* rebind in place */
  else
    CAR(env) = make_pair(make_pair(symbol, value), CAR(env));
}


/* eval -- evaluate an expression
 *
 * Evaluates exp in value environment env and operator environment
 * op_env.  Runs as a trampoline: when an operator returns the
 * (obj_tail env op_env exp) form built by eval_tail, the loop below
 * unpacks it and iterates instead of recursing, so tail calls do not
 * consume C stack.
 */
static obj_t eval(obj_t env, obj_t op_env, obj_t exp)
{
  for(;;) {
    obj_t operator;
    obj_t result;

    /* self-evaluating */
    if(TYPE(exp) == TYPE_INTEGER ||
       (TYPE(exp) == TYPE_SPECIAL && exp != obj_empty) ||
       TYPE(exp) == TYPE_STRING ||
       TYPE(exp) == TYPE_CHARACTER ||
       TYPE(exp) == TYPE_OPERATOR)
      return exp;

    /* symbol lookup */
    if(TYPE(exp) == TYPE_SYMBOL) {
      obj_t binding = lookup(env, exp);
      if(binding == obj_undefined)
        error("eval: unbound symbol \"%s\"", symbol_name(exp));
      return CDR(binding);
    }

    if(TYPE(exp) != TYPE_PAIR) {
      error("eval: unknown syntax");
      return obj_error;
    }

    /* apply operator or function */
    /* A symbol in operator position is first looked up in the
       operator environment (special forms and macros). */
    if(TYPE(CAR(exp)) == TYPE_SYMBOL) {
      obj_t binding = lookup(op_env, CAR(exp));
      if(binding != obj_undefined) {
        operator = CDR(binding);
        assert(TYPE(operator) == TYPE_OPERATOR);
        result = (*operator->operator.entry)(env, op_env, operator, CDR(exp));
        goto found;
      }
    }
    operator = eval(env, op_env, CAR(exp));
    unless(TYPE(operator) == TYPE_OPERATOR)
      error("eval: application of non-function");
    result = (*operator->operator.entry)(env, op_env, operator, CDR(exp));

  found:
    if (!(TYPE(result) == TYPE_PAIR && CAR(result) == obj_tail))
      return result;

    /* tail call: unpack (obj_tail env op_env exp) and loop */
    env = CADR(result);
    op_env = CADDR(result);
    exp = CAR(CDDDR(result));
  }
}


static void mps_chat(void);

/* load -- read and evaluate every expression in a file
 *
 * Returns the value of the last expression read, or obj_undefined for
 * an empty file.  NOTE(review): if read_ or eval signals via error()
 * the stream is not closed here -- presumably error() performs a
 * non-local exit; confirm cleanup is handled by the caller.
 */
static obj_t load(obj_t env, obj_t op_env, obj_t filename) {
  obj_t port, result = obj_undefined;
  FILE *stream;
  assert(TYPE(filename) == TYPE_STRING);
  stream = fopen(filename->string.string, "r");
  if(stream == NULL)
    error("load: cannot open %s: %s", filename->string.string, strerror(errno));
  port = make_port(filename, stream);
  for(;;) {
    obj_t obj;
    mps_chat();                 /* see mps_chat, defined later */
    obj = read_(stream);
    if(obj == obj_eof) break;
    result = eval(env, op_env, obj);
  }
  port_close(port);
  return result;
}


/* OPERATOR UTILITIES */


/* eval_list -- evaluate list of expressions giving list of results
 *
 * eval_list evaluates a list of expressions and yields a list of their
 * results, in order. If the list is badly formed, an error is thrown
 * using the message given.
 */
static obj_t eval_list(obj_t env, obj_t op_env, obj_t list, const char *message)
{
  obj_t result, end, pair;
  result = obj_empty;
  end = NULL;                   /* suppress "uninitialized" warning in GCC */
  while(list != obj_empty) {
    if(TYPE(list) != TYPE_PAIR)
      error(message);
    pair = make_pair(eval(env, op_env, CAR(list)), obj_empty);
    if(result == obj_empty)
      result = pair;
    else
      CDR(end) = pair;
    end = pair;
    list = CDR(list);
  }
  return result;
}


/* eval_args1 -- evaluate some operator arguments
 *
 * See eval_args and eval_args_rest for usage.  Evaluates the first n
 * operands, storing each result through the next obj_t * in args, and
 * returns the unevaluated remainder of the operand list.
 */
static obj_t eval_args1(const char *name, obj_t env, obj_t op_env,
                        obj_t operands, unsigned n, va_list args)
{
  unsigned i;
  for(i = 0; i < n; ++i) {
    unless(TYPE(operands) == TYPE_PAIR)
      error("eval: too few arguments to %s", name);
    *va_arg(args, obj_t *) = eval(env, op_env, CAR(operands));
    operands = CDR(operands);
  }
  return operands;
}


/* eval_args -- evaluate operator arguments without rest list
 *
 * eval_args evaluates the first "n" expressions from the list of
 * expressions in "operands", returning the rest of the operands
 * unevaluated. It puts the results of evaluation in the addresses
 * passed in the vararg list. If the operands list is badly formed
 * an error is thrown using the operator name passed. For example:
 *
 *   eval_args("foo", env, op_env, operands, 2, &arg1, &arg2);
 */
static void eval_args(const char *name, obj_t env, obj_t op_env,
                      obj_t operands, unsigned n, ...)
{
  va_list args;
  va_start(args, n);
  operands = eval_args1(name, env, op_env, operands, n, args);
  unless(operands == obj_empty)
    error("eval: too many arguments to %s", name);
  va_end(args);
}


/* eval_args_rest -- evaluate operator arguments with rest list
 *
 * eval_args_rest evaluates the first "n" expressions from the list of
 * expressions in "operands", then evaluates the rest of the operands
 * using eval_list and puts the result at *restp. It puts the results
 * of evaluating the first "n" operands in the addresses
 * passed in the vararg list. If the operands list is badly formed
 * an error is thrown using the operator name passed. For example:
 *
 *   eval_args_rest("foo", env, op_env, operands, &rest, 2, &arg1, &arg2);
 */
static void eval_args_rest(const char *name, obj_t env, obj_t op_env,
                           obj_t operands, obj_t *restp, unsigned n, ...)
{
  va_list args;
  va_start(args, n);
  operands = eval_args1(name, env, op_env, operands, n, args);
  va_end(args);
  *restp = eval_list(env, op_env, operands, "eval: badly formed argument list");
}


/* eval_tail -- return an object that will cause eval to loop
 *
 * Rather than calling `eval` an operator can return a special object that
 * causes a calling `eval` to loop, avoiding using up a C stack frame.
 * This implements tail recursion (in a simple way).
 */
static obj_t eval_tail(obj_t env, obj_t op_env, obj_t exp)
{
  return make_pair(obj_tail,
                   make_pair(env,
                             make_pair(op_env,
                                       make_pair(exp,
                                                 obj_empty))));
}


/* eval_body -- evaluate a list of expressions, returning last result
 *
 * This is used for the bodies of forms such as let, begin, etc. where
 * a list of expressions is allowed.
 */

static obj_t eval_body(obj_t env, obj_t op_env, obj_t operator, obj_t body)
{
  for (;;) {
    if (TYPE(body) != TYPE_PAIR)
      error("%s: illegal expression list", operator->operator.name);
    if (CDR(body) == obj_empty)
      /* last expression is in tail position */
      return eval_tail(env, op_env, CAR(body));
    (void)eval(env, op_env, CAR(body)); /* evaluate for effect only */
    body = CDR(body);
  }
}


/* BUILT-IN OPERATORS */


/* entry_interpret -- interpreted function entry point
 *
 * When a function is made using lambda (see entry_lambda) an operator
 * is created with entry_interpret as its entry point, and the arguments
 * and body of the function. The entry_interpret function evaluates
 * the operands of the function and binds them to the argument names
 * in a new frame added to the lambda's closure environment. It then
 * evaluates the body in that environment, executing the function.
 */
static obj_t entry_interpret(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t arguments, fun_env, fun_op_env;

  assert(TYPE(operator) == TYPE_OPERATOR);

  /* Make a new frame so that bindings are local to the function. */
  /* Arguments will be bound in this new frame. */
  fun_env = make_pair(obj_empty, operator->operator.env);
  fun_op_env = make_pair(obj_empty, operator->operator.op_env);

  arguments = operator->operator.arguments;
  while(operands != obj_empty) {
    if(arguments == obj_empty)
      error("eval: function applied to too many arguments");
    if(TYPE(arguments) == TYPE_SYMBOL) {
      /* rest-argument: bind the symbol to the remaining operands,
         evaluated as a list */
      define(fun_env, arguments,
             eval_list(env, op_env, operands, "eval: badly formed argument list"));
      operands = obj_empty;
      arguments = obj_empty;
    } else {
      assert(TYPE(arguments) == TYPE_PAIR &&
             TYPE(CAR(arguments)) == TYPE_SYMBOL);
      define(fun_env,
             CAR(arguments),
             eval(env, op_env, CAR(operands)));
      operands = CDR(operands);
      arguments = CDR(arguments);
    }
  }
  if(arguments != obj_empty)
    error("eval: function applied to too few arguments");

  /* body is in tail position with respect to the call */
  return eval_tail(fun_env, fun_op_env, operator->operator.body);
}


/* entry_quote -- return operands unevaluated
 *
 * In Scheme, (quote foo) evaluates to foo (i.e. foo is not evaluated).
 * See R4RS 4.1.2. The reader expands "'x" to "(quote x)".
 */
static obj_t entry_quote(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  UNUSED(env);
  UNUSED(op_env);
  unless(TYPE(operands) == TYPE_PAIR &&
         CDR(operands) == obj_empty)
    error("%s: illegal syntax", operator->operator.name);
  return CAR(operands);
}


/* entry_define -- bind a symbol in the top frame of the environment
 *
 * In Scheme, "(define <symbol> <expression>)" evaluates <expression>
 * and binds it to <symbol> in the top frame of the environment (see
 * R4RS 5.2). This code also allows the non-essential syntax for
 * define, "(define (<symbol> <formals>) <body>)" as a short-hand for
 * "(define <symbol> (lambda (<formals>) <body>))".
 */
static obj_t entry_define(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t symbol = NULL, value = NULL;
  unless(TYPE(operands) == TYPE_PAIR &&
         TYPE(CDR(operands)) == TYPE_PAIR)
    error("%s: illegal syntax", operator->operator.name);
  if(TYPE(CAR(operands)) == TYPE_SYMBOL) {
    unless(CDDR(operands) == obj_empty)
      error("%s: too many arguments", operator->operator.name);
    symbol = CAR(operands);
    value = eval(env, op_env, CADR(operands));
  } else if(TYPE(CAR(operands)) == TYPE_PAIR &&
            TYPE(CAAR(operands)) == TYPE_SYMBOL) {
    /* short-hand function definition: rewrite as a lambda */
    symbol = CAAR(operands);
    value = eval(env, op_env,
                 make_pair(obj_lambda,
                           make_pair(CDAR(operands), CDR(operands))));
  } else
    error("%s: applied to binder", operator->operator.name);
  define(env, symbol, value);
  return symbol;
}


/* entry_if -- one- or two-armed conditional
 *
 * "(if <test> <consequent>)" and "(if <test> <consequent> <alternate>)".
 * See R4RS 4.1.5.
 */
static obj_t entry_if(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t test;
  unless(TYPE(operands) == TYPE_PAIR &&
         TYPE(CDR(operands)) == TYPE_PAIR &&
         (CDDR(operands) == obj_empty ||
          (TYPE(CDDR(operands)) == TYPE_PAIR &&
           CDDDR(operands) == obj_empty)))
    error("%s: illegal syntax", operator->operator.name);
  test = eval(env, op_env, CAR(operands));
  /* Anything which is not #f counts as true [R4RS 6.1]. */
  if(test != obj_false)
    return eval_tail(env, op_env, CADR(operands));
  if(TYPE(CDDR(operands)) == TYPE_PAIR)
    return eval_tail(env, op_env, CADDR(operands));
  return obj_undefined;
}


/* entry_cond -- general conditional
 *
 * "(cond (<test1> <exp1.1> ...) (<test2> <exp2.1> ...) ...
 *  [(else <expelse.1> ...)])"
 */
static obj_t entry_cond(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  unless(TYPE(operands) == TYPE_PAIR)
    error("%s: illegal syntax", operator->operator.name);
  while(TYPE(operands) == TYPE_PAIR) {
    obj_t clause = CAR(operands);
    obj_t result;
    unless(TYPE(clause) == TYPE_PAIR &&
           TYPE(CDR(clause)) == TYPE_PAIR)
      error("%s: illegal clause syntax", operator->operator.name);
    if(CAR(clause) == obj_else) {
      unless(CDR(operands) == obj_empty)
        error("%s: else clause must come last", operator->operator.name);
      result = obj_true;
    } else
      result = eval(env, op_env, CAR(clause));
    if(result != obj_false) {
      /* a clause with no expressions yields the test's value */
      if (CDR(clause) == obj_empty)
        return result;
      return eval_body(env, op_env, operator, CDR(clause));
    }
    operands = CDR(operands);
  }
  return obj_undefined;
}


/* entry_and -- (and <test1> ...)
 *
 * Evaluates tests left to right, returning the first false value, or
 * the last value; the last test is in tail position.  See R4RS 4.2.1.
 */
static obj_t entry_and(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t test;
  if (operands == obj_empty)
    return obj_true;
  do {
    if (TYPE(operands) != TYPE_PAIR)
      error("%s: illegal syntax", operator->operator.name);
    if (CDR(operands) == obj_empty)
      return eval_tail(env, op_env, CAR(operands));
    test = eval(env, op_env, CAR(operands));
    operands = CDR(operands);
  } while (test != obj_false);
  return test;
}


/* entry_or -- (or <test1> ...)
 *
 * Evaluates tests left to right, returning the first true value, or
 * the last value; the last test is in tail position.  See R4RS 4.2.1.
 */
static obj_t entry_or(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t test;
  if (operands == obj_empty)
    return obj_false;
  do {
    if (TYPE(operands) != TYPE_PAIR)
      error("%s: illegal syntax", operator->operator.name);
    if (CDR(operands) == obj_empty)
      return eval_tail(env, op_env, CAR(operands));
    test = eval(env, op_env, CAR(operands));
    operands = CDR(operands);
  } while (test == obj_false);
  return test;
}


/* entry_let -- (let <bindings> <body>)
 *
 * Binds each <variable> to the value of its <init>, evaluated in the
 * enclosing environment, then evaluates the body.  See R4RS 4.2.2. */
/* TODO: Too much common code with let* */
static obj_t entry_let(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t inner_env, bindings;
  unless(TYPE(operands) == TYPE_PAIR &&
         TYPE(CDR(operands)) == TYPE_PAIR)
    error("%s: illegal syntax", operator->operator.name);
  inner_env = make_pair(obj_empty, env); /* TODO: common with interpret */
  bindings = CAR(operands);
  while(TYPE(bindings) == TYPE_PAIR) {
    obj_t binding = CAR(bindings);
    unless(TYPE(binding) == TYPE_PAIR &&
           TYPE(CAR(binding)) == TYPE_SYMBOL &&
           TYPE(CDR(binding)) == TYPE_PAIR &&
           CDDR(binding) == obj_empty)
      error("%s: illegal binding", operator->operator.name);
    /* note: inits are evaluated in the OUTER environment */
    define(inner_env, CAR(binding), eval(env, op_env, CADR(binding)));
    bindings = CDR(bindings);
  }
  if(bindings != obj_empty)
    error("%s: illegal bindings list", operator->operator.name);
  return eval_body(inner_env, op_env, operator, CDR(operands));
}


/* entry_let_star -- (let* <bindings> <body>)
 *
 * Like let, but each <init> is evaluated in an environment where the
 * earlier bindings are already visible.  See R4RS 4.2.2. */
/* TODO: Too much common code with let */
static obj_t entry_let_star(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t inner_env, bindings;
  unless(TYPE(operands) == TYPE_PAIR &&
         TYPE(CDR(operands)) == TYPE_PAIR)
    error("%s: illegal syntax", operator->operator.name);
  inner_env = make_pair(obj_empty, env); /* TODO: common with interpret */
  bindings = CAR(operands);
  while(TYPE(bindings) == TYPE_PAIR) {
    obj_t binding = CAR(bindings);
    unless(TYPE(binding) == TYPE_PAIR &&
           TYPE(CAR(binding)) == TYPE_SYMBOL &&
           TYPE(CDR(binding)) == TYPE_PAIR &&
           CDDR(binding) == obj_empty)
      error("%s: illegal binding", operator->operator.name);
    /* note: inits are evaluated in the INNER environment */
    define(inner_env, CAR(binding), eval(inner_env, op_env, CADR(binding)));
    bindings = CDR(bindings);
  }
  if(bindings != obj_empty)
    error("%s: illegal bindings list", operator->operator.name);
  return eval_body(inner_env, op_env, operator, CDR(operands));
}


/* entry_letrec -- (letrec <bindings> <body>)
 *
 * First binds every <variable> to obj_undefined, then evaluates the
 * <init>s in the new environment so they can refer to each other
 * (mutual recursion).  See R4RS 4.2.2. */
/* TODO: Too much common code with let and let* */
static obj_t entry_letrec(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t inner_env, bindings;
  unless(TYPE(operands) == TYPE_PAIR &&
         TYPE(CDR(operands)) == TYPE_PAIR)
    error("%s: illegal syntax", operator->operator.name);
  inner_env = make_pair(obj_empty, env); /* TODO: common with interpret */
  bindings = CAR(operands);
  while(TYPE(bindings) == TYPE_PAIR) {
    obj_t binding = CAR(bindings);
    unless(TYPE(binding) == TYPE_PAIR &&
           TYPE(CAR(binding)) == TYPE_SYMBOL &&
           TYPE(CDR(binding)) == TYPE_PAIR &&
           CDDR(binding) == obj_empty)
      error("%s: illegal binding", operator->operator.name);
    define(inner_env, CAR(binding), obj_undefined); /* placeholder */
    bindings = CDR(bindings);
  }
  if(bindings != obj_empty)
    error("%s: illegal bindings list", operator->operator.name);
  bindings = CAR(operands);
  while(TYPE(bindings) == TYPE_PAIR) {
    obj_t binding = CAR(bindings);
    define(inner_env, CAR(binding), eval(inner_env, op_env, CADR(binding)));
    bindings = CDR(bindings);
  }
  return eval_body(inner_env, op_env, operator, CDR(operands));
}


/* entry_do -- (do ((<var> <init> <step>) ...) (<test> <exp> ...) <command> ...)
 * Do is an iteration construct. It specifies a set of variables to be
 * bound, how they are to be initialized at the start, and how they
 * are to be updated on each iteration. When a termination condition
 * is met, the loop exits with a specified result value.
 * See R4RS 4.2.4.
 */
static obj_t entry_do(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t inner_env, next_env, bindings;
  unless(TYPE(operands) == TYPE_PAIR &&
         TYPE(CDR(operands)) == TYPE_PAIR &&
         TYPE(CADR(operands)) == TYPE_PAIR)
    error("%s: illegal syntax", operator->operator.name);
  inner_env = make_pair(obj_empty, env);

  /* Do expressions are evaluated as follows: The <init> expressions
     are evaluated (in some unspecified order), the <variable>s are
     bound to fresh locations, the results of the <init> expressions
     are stored in the bindings of the <variable>s, and then the
     iteration phase begins. */
  bindings = CAR(operands);
  while(TYPE(bindings) == TYPE_PAIR) {
    obj_t binding = CAR(bindings);
    unless(TYPE(binding) == TYPE_PAIR &&
           TYPE(CAR(binding)) == TYPE_SYMBOL &&
           TYPE(CDR(binding)) == TYPE_PAIR &&
           (CDDR(binding) == obj_empty ||
            (TYPE(CDDR(binding)) == TYPE_PAIR &&
             CDDDR(binding) == obj_empty)))
      error("%s: illegal binding", operator->operator.name);
    define(inner_env, CAR(binding), eval(env, op_env, CADR(binding)));
    bindings = CDR(bindings);
  }
  for(;;) {
    /* Each iteration begins by evaluating <test>; */
    obj_t test = CADR(operands);
    if(eval(inner_env, op_env, CAR(test)) == obj_false) {
      /* if the result is false (see section 6.1 Booleans), then the
         <command> expressions are evaluated in order for effect, */
      obj_t commands = CDDR(operands);
      while(TYPE(commands) == TYPE_PAIR) {
        eval(inner_env, op_env, CAR(commands));
        commands = CDR(commands);
      }
      unless(commands == obj_empty)
        error("%s: illegal syntax", operator->operator.name);

      /* the <step> expressions are evaluated in some unspecified
         order, the <variable>s are bound to fresh locations, the
         results of the <step>s are stored in the bindings of the
         <variable>s, and the next iteration begins. */
      /* A variable without a <step> is not re-bound in next_env, so
         its previous binding remains visible via the parent chain. */
      bindings = CAR(operands);
      next_env = make_pair(obj_empty, inner_env);
      while(TYPE(bindings) == TYPE_PAIR) {
        obj_t binding = CAR(bindings);
        unless(CDDR(binding) == obj_empty)
          define(next_env, CAR(binding), eval(inner_env, op_env, CADDR(binding)));
        bindings = CDR(bindings);
      }
      inner_env = next_env;
    } else {
      /* If <test> evaluates to a true value, then the <expression>s
         are evaluated from left to right and the value of the last
         <expression> is returned as the value of the do expression.
         If no <expression>s are present, then the value of the do
         expression is unspecified. */
      obj_t result = obj_undefined;
      test = CDR(test);
      while(TYPE(test) == TYPE_PAIR) {
        result = eval(inner_env, op_env, CAR(test));
        test = CDR(test);
      }
      unless(test == obj_empty)
        error("%s: illegal syntax", operator->operator.name);
      return result;
    }
  }
}


/* entry_delay -- (delay <expression>)
 *
 * Builds a promise: a pair whose CAR is the evaluated flag (initially
 * obj_false) and whose CDR is a zero-argument closure capturing the
 * expression, retagged as TYPE_PROMISE.  See R4RS 4.2.6. */
static obj_t entry_delay(obj_t env, obj_t op_env, obj_t operator, obj_t operands)
{
  obj_t promise;
  unless(TYPE(operands) == TYPE_PAIR &&
         CDR(operands) == obj_empty)
    error("%s: illegal syntax", operator->operator.name);
  promise = make_pair(obj_false,
                      make_operator("anonymous promise",
                                    entry_interpret, obj_empty,
                                    CAR(operands), env, op_env));
  TYPE(promise) = TYPE_PROMISE;
  return promise;
}


/* quasiquote -- recursive worker for entry_quasiquote
 *
 * Walks a template, copying it while evaluating (unquote x) to its
 * value and splicing the list value of (unquote-splicing x) into the
 * result.  Non-pair templates are returned unchanged.
 * NOTE(review): nested quasiquote levels are not tracked here --
 * confirm whether that matches the intended subset of R4RS 4.2.6. */
static obj_t quasiquote(obj_t env, obj_t op_env, obj_t operator, obj_t arg)
{
  obj_t result = obj_empty, end = NULL, insert;
  unless(TYPE(arg) == TYPE_PAIR)
    return arg;
  while(TYPE(arg) == TYPE_PAIR) {
    if(TYPE(CAR(arg)) == TYPE_PAIR &&
       TYPE(CAAR(arg)) == TYPE_SYMBOL &&
       (CAAR(arg) == obj_unquote ||
        CAAR(arg) == obj_unquote_splic)) {
      unless(TYPE(CDAR(arg)) == TYPE_PAIR &&
             CDDAR(arg) == obj_empty)
        error("%s: illegal %s syntax", operator->operator.name,
              symbol_name(CAAR(arg)));
      insert = eval(env, op_env, CADAR(arg));
      if(CAAR(arg) == obj_unquote) {
        obj_t pair = make_pair(insert, obj_empty);
        if(result == obj_empty)
          result = pair;
        if(end)
          CDR(end) = pair;
        end = pair;
      } else if(CAAR(arg) == obj_unquote_splic) {
        /* splice each element of the evaluated list */
        while(TYPE(insert) == TYPE_PAIR) {
          obj_t pair = make_pair(CAR(insert), obj_empty);
          if(result == obj_empty)
            result = pair;
          if(end)
            CDR(end) = pair;
          end = pair;
          insert = CDR(insert);
        }
        if(insert != obj_empty)
          error("%s: %s expression must return list",
                operator->operator.name, symbol_name(CAAR(arg)));
      }
    } else {
      obj_t pair = make_pair(quasiquote(env, op_env, operator, CAR(arg)), obj_empty);
      if(result == obj_empty)
        result = pair;
      if(end)
        CDR(end) = pair;
      end = pair;
    }
    arg = CDR(arg);
  }
  return result;
}


/* entry_quasiquote -- (quasiquote