diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h index 4a7d96b197e..f11e0e0d2c7 100644 --- a/include/hermes/VM/AlignedHeapSegment.h +++ b/include/hermes/VM/AlignedHeapSegment.h @@ -36,9 +36,9 @@ class StorageProvider; // TODO (T25527350): Debug Dump // TODO (T25527350): Heap Moving -/// An \c AlignedHeapSegment is a contiguous chunk of memory aligned to its own -/// storage size (which is a fixed power of two number of bytes). The storage -/// is further split up according to the diagram below: +/// An \c AlignedHeapSegmentBase manages a contiguous chunk of memory aligned to +/// kSegmentUnitSize. The storage is further split up according to the diagram +/// below: /// /// +----------------------------------------+ /// | (1) Card Table | @@ -52,83 +52,23 @@ class StorageProvider; /// | (End) | /// +----------------------------------------+ /// -/// The tables in (1), and (2) cover the contiguous allocation space (3) -/// into which GCCells are bump allocated. -class AlignedHeapSegment { +/// The tables in (1), and (2) cover the contiguous allocation space (3) into +/// which GCCells are bump allocated. They have fixed size computed from +/// kSegmentUnitSize. For segments with larger size (which must be multiples of +/// kSegmentUnitSize), card table allocates its internal arrays separately +/// instead. Any segment size smaller than kSegmentUnitSize is not supported. +class AlignedHeapSegmentBase { public: - /// @name Constants and utility functions for the aligned storage of \c - /// AlignedHeapSegment. - /// - /// @{ - /// The size and the alignment of the storage, in bytes. - static constexpr unsigned kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; - static constexpr size_t kSize{1 << kLogSize}; - /// Mask for isolating the offset into a storage for a pointer. - static constexpr size_t kLowMask{kSize - 1}; - /// Mask for isolating the storage being pointed into by a pointer. - static constexpr size_t kHighMask{~kLowMask}; - - /// Returns the storage size, in bytes, of an \c AlignedHeapSegment. - static constexpr size_t storageSize() { - return kSize; - } - - /// Returns the pointer to the beginning of the storage containing \p ptr - /// (inclusive). Assuming such a storage exists. Note that - /// - /// storageStart(seg.hiLim()) != seg.lowLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageStart(const void *ptr) { - return reinterpret_cast( - reinterpret_cast(ptr) & kHighMask); - } - - /// Returns the pointer to the end of the storage containing \p ptr - /// (exclusive). Assuming such a storage exists. Note that - /// - /// storageEnd(seg.hiLim()) != seg.hiLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageEnd(const void *ptr) { - return reinterpret_cast(storageStart(ptr)) + kSize; - } - - /// Returns the offset in bytes to \p ptr from the start of its containing - /// storage. Assuming such a storage exists. Note that - /// - /// offset(seg.hiLim()) != seg.size() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static size_t offset(const char *ptr) { - return reinterpret_cast(ptr) & kLowMask; - } - /// @} - - /// Construct a null AlignedHeapSegment (one that does not own memory). 
- AlignedHeapSegment() = default; - /// \c AlignedHeapSegment is movable and assignable, but not copyable. - AlignedHeapSegment(AlignedHeapSegment &&); - AlignedHeapSegment &operator=(AlignedHeapSegment &&); - AlignedHeapSegment(const AlignedHeapSegment &) = delete; - - ~AlignedHeapSegment(); - - /// Create a AlignedHeapSegment by allocating memory with \p provider. - static llvh::ErrorOr create(StorageProvider *provider); - static llvh::ErrorOr create( - StorageProvider *provider, - const char *name); + static constexpr size_t kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; + static constexpr size_t kSegmentUnitSize = (1 << kLogSize); /// Contents of the memory region managed by this segment. class Contents { public: /// The number of bits representing the total number of heap-aligned /// addresses in the segment storage. - static constexpr size_t kMarkBitArraySize = kSize >> LogHeapAlign; + static constexpr size_t kMarkBitArraySize = + kSegmentUnitSize >> LogHeapAlign; /// BitArray for marking allocation region of a segment. using MarkBitArray = BitArray; @@ -138,6 +78,11 @@ class AlignedHeapSegment { private: friend class AlignedHeapSegment; + friend class AlignedHeapSegmentBase; + + /// Pass segment size to CardTable constructor to allocate its data + /// separately if \p sz > kSegmentUnitSize. + Contents(size_t segmentSize) : cardTable_(segmentSize) {} /// Note that because of the Contents object, the first few bytes of the /// card table are unused, we instead use them to store a small @@ -179,10 +124,11 @@ class AlignedHeapSegment { "SHSegmentInfo does not fit in available unused CardTable space."); /// The offset from the beginning of a segment of the allocatable region. - static constexpr size_t offsetOfAllocRegion{offsetof(Contents, allocRegion_)}; + static constexpr size_t kOffsetOfAllocRegion{ + offsetof(Contents, allocRegion_)}; static_assert( - isSizeHeapAligned(offsetOfAllocRegion), + isSizeHeapAligned(kOffsetOfAllocRegion), "Allocation region must start at a heap aligned offset"); static_assert( @@ -215,6 +161,219 @@ class AlignedHeapSegment { GCCell *cell_{nullptr}; }; + /// Returns the address that is the lower bound of the segment. + /// \post The returned pointer is guaranteed to be aligned to + /// kSegmentUnitSize. + char *lowLim() const { + return lowLim_; + } + + /// Read storage size from SHSegmentInfo. + size_t storageSize() const { + auto *segmentInfo = reinterpret_cast(lowLim_); + return segmentInfo->segmentSize; + } + + /// Returns the address that is the upper bound of the segment. + /// This is only used in debugging code and computing memory footprint, so + /// just read the segment size from SHSegmentInfo. + char *hiLim() const { + return lowLim_ + storageSize(); + } + + /// Returns the address at which the first allocation in this segment would + /// occur. + /// Disable UB sanitization because 'this' may be null during the tests. + char *start() const LLVM_NO_SANITIZE("undefined") { + return contents()->allocRegion_; + } + + /// Return a reference to the card table covering the memory region managed by + /// this segment. + CardTable &cardTable() const { + return contents()->cardTable_; + } + + /// Given a \p cell into the memory region of some valid segment \c s, returns + /// a pointer to the CardTable covering the segment containing the cell. + /// + /// \pre There exists a currently alive heap in which \p cell is allocated. 
+  static CardTable *cardTableCovering(const GCCell *cell) {
+    return &contents(alignedStorageStart(cell))->cardTable_;
+  }
+
+  /// Return a reference to the mark bit array covering the memory region
+  /// managed by this segment.
+  Contents::MarkBitArray &markBitArray() const {
+    return contents()->markBitArray_;
+  }
+
+  /// Mark the given \p cell. Assumes the given address is a valid heap object.
+  static void setCellMarkBit(const GCCell *cell) {
+    auto *markBits = markBitArrayCovering(cell);
+    size_t ind = addressToMarkBitArrayIndex(cell);
+    markBits->set(ind, true);
+  }
+
+  /// Return whether the given \p cell is marked. Assumes the given address is
+  /// a valid heap object.
+  static bool getCellMarkBit(const GCCell *cell) {
+    auto *markBits = markBitArrayCovering(cell);
+    size_t ind = addressToMarkBitArrayIndex(cell);
+    return markBits->at(ind);
+  }
+
+  /// Return true if objects \p a and \p b live in the same segment. This is
+  /// used to check if a pointer field in \p a may point to an object in the
+  /// same segment (so that we don't need to dirty the cards).
+  static bool containedInSame(const GCCell *a, const GCCell *b) {
+    return ((reinterpret_cast<uintptr_t>(a) ^ reinterpret_cast<uintptr_t>(b)) <
+            kSegmentUnitSize) ||
+        a == b;
+  }
+
+#ifndef NDEBUG
+  /// Get the storage end of the segment that \p cell resides in.
+  static char *storageEnd(const GCCell *cell) {
+    auto *start = alignedStorageStart(cell);
+    auto *segmentInfo = reinterpret_cast<SHSegmentInfo *>(start);
+    return start + segmentInfo->segmentSize;
+  }
+#endif
+
+ protected:
+  AlignedHeapSegmentBase() = default;
+
+  /// Construct Contents() at the address of \p lowLim.
+  AlignedHeapSegmentBase(void *lowLim, size_t segmentSize)
+      : lowLim_(reinterpret_cast<char *>(lowLim)) {
+    new (contents()) Contents(segmentSize);
+    contents()->protectGuardPage(oscompat::ProtectMode::None);
+  }
+
+  /// Return a pointer to the contents of the memory region managed by this
+  /// segment.
+  Contents *contents() const {
+    return reinterpret_cast<Contents *>(lowLim_);
+  }
+
+  /// Given the \p lowLim of some valid segment's memory region, returns a
+  /// pointer to the Contents laid out in the storage, assuming it exists.
+  static Contents *contents(void *lowLim) {
+    return reinterpret_cast<Contents *>(lowLim);
+  }
+
+  /// The start of the aligned segment.
+  char *lowLim_{nullptr};
+
+ private:
+  /// Return the starting address of the aligned region of size
+  /// kSegmentUnitSize that \p cell resides in. If \c cell resides in a
+  /// JumboHeapSegment, it is the only cell there, so this essentially returns
+  /// that segment's start address.
+  static char *alignedStorageStart(const GCCell *cell) {
+    return reinterpret_cast<char *>(
+        reinterpret_cast<uintptr_t>(cell) & ~(kSegmentUnitSize - 1));
+  }
+
+  /// Given a \p cell, returns a pointer to the MarkBitArray covering the
+  /// segment that \p cell resides in.
+  ///
+  /// \pre There exists a currently alive heap that claims to contain \c ptr.
+  static Contents::MarkBitArray *markBitArrayCovering(const GCCell *cell) {
+    auto *segStart = alignedStorageStart(cell);
+    return &contents(segStart)->markBitArray_;
+  }
+
+  /// Translate the given address to a 0-based index in the MarkBitArray of its
+  /// segment. The base address is the start of the storage of this segment.
+  /// For a JumboHeapSegment, this should always return the constant index
+  /// kOffsetOfAllocRegion >> LogHeapAlign.
+ static size_t addressToMarkBitArrayIndex(const GCCell *cell) { + auto *cp = reinterpret_cast(cell); + auto *base = reinterpret_cast(alignedStorageStart(cell)); + return (cp - base) >> LogHeapAlign; + } +}; + +/// JumboHeapSegment has custom storage size that must be a multiple of +/// kSegmentUnitSize. Each such segment can only allocate a single object that +/// occupies the entire allocation space. Therefore, the inline MarkBitArray is +/// large enough, while the CardTable is stored separately. +class JumboHeapSegment : public AlignedHeapSegmentBase {}; + +/// AlignedHeapSegment has fixed storage size kSegmentUnitSize. Its CardTable +/// and MarkBitArray are stored inline right before the allocation space. This +/// is used for all normal object allcations in YoungGen and OldGen. +class AlignedHeapSegment : public AlignedHeapSegmentBase { + public: + /// @name Constants and utility functions for the aligned storage of \c + /// AlignedHeapSegment. + /// + /// @{ + /// The size and the alignment of the storage, in bytes. + static constexpr size_t kSize = kSegmentUnitSize; + /// Mask for isolating the offset into a storage for a pointer. + static constexpr size_t kLowMask{kSize - 1}; + /// Mask for isolating the storage being pointed into by a pointer. + static constexpr size_t kHighMask{~kLowMask}; + + /// Returns the storage size, in bytes, of an \c AlignedHeapSegment. This + /// replaces AlignedHeapSegmentBase::storageSize, which reads the size from + /// SHSegmentInfo. + static constexpr size_t storageSize() { + return kSize; + } + + /// Returns the pointer to the beginning of the storage containing \p ptr + /// (inclusive). Assuming such a storage exists. Note that + /// + /// storageStart(seg.hiLim()) != seg.lowLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static void *storageStart(const void *ptr) { + return reinterpret_cast( + reinterpret_cast(ptr) & kHighMask); + } + + /// Returns the pointer to the end of the storage containing \p ptr + /// (exclusive). Assuming such a storage exists. Note that + /// + /// storageEnd(seg.hiLim()) != seg.hiLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static void *storageEnd(const void *ptr) { + return reinterpret_cast(storageStart(ptr)) + kSize; + } + + /// Returns the offset in bytes to \p ptr from the start of its containing + /// storage. Assuming such a storage exists. Note that + /// + /// offset(seg.hiLim()) != seg.size() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static size_t offset(const char *ptr) { + return reinterpret_cast(ptr) & kLowMask; + } + /// @} + + /// Construct a null AlignedHeapSegment (one that does not own memory). + AlignedHeapSegment() = default; + /// \c AlignedHeapSegment is movable and assignable, but not copyable. + AlignedHeapSegment(AlignedHeapSegment &&); + AlignedHeapSegment &operator=(AlignedHeapSegment &&); + AlignedHeapSegment(const AlignedHeapSegment &) = delete; + + ~AlignedHeapSegment(); + + /// Create a AlignedHeapSegment by allocating memory with \p provider. + static llvh::ErrorOr create(StorageProvider *provider); + static llvh::ErrorOr create( + StorageProvider *provider, + const char *name); + /// Returns the index of the segment containing \p lowLim, which is required /// to be the start of its containing segment. 
(This can allow extra /// efficiency, in cases where the segment start has already been computed.) @@ -238,40 +397,12 @@ class AlignedHeapSegment { /// space, returns {nullptr, false}. inline AllocResult alloc(uint32_t size); - /// Given the \p lowLim of some valid segment's memory region, returns a - /// pointer to the AlignedHeapSegment::Contents laid out in that storage, - /// assuming it exists. - inline static Contents *contents(void *lowLim); - inline static const Contents *contents(const void *lowLim); - /// Given a \p ptr into the memory region of some valid segment \c s, returns /// a pointer to the CardTable covering the segment containing the pointer. /// /// \pre There exists a currently alive heap that claims to contain \c ptr. inline static CardTable *cardTableCovering(const void *ptr); - /// Given a \p ptr into the memory region of some valid segment \c s, returns - /// a pointer to the MarkBitArray covering the segment containing the - /// pointer. - /// - /// \pre There exists a currently alive heap that claims to contain \c ptr. - inline static Contents::MarkBitArray *markBitArrayCovering(const void *ptr); - - /// Translate the given address to a 0-based index in the MarkBitArray of its - /// segment. The base address is the start of the storage of this segment. - static size_t addressToMarkBitArrayIndex(const void *ptr) { - auto *cp = reinterpret_cast(ptr); - auto *base = reinterpret_cast(storageStart(cp)); - return (cp - base) >> LogHeapAlign; - } - - /// Mark the given \p cell. Assumes the given address is a valid heap object. - inline static void setCellMarkBit(const GCCell *cell); - - /// Return whether the given \p cell is marked. Assumes the given address is - /// a valid heap object. - inline static bool getCellMarkBit(const GCCell *cell); - /// Find the head of the first cell that extends into the card at index /// \p cardIdx. /// \return A cell such that @@ -294,23 +425,6 @@ class AlignedHeapSegment { /// The number of bytes in the segment that are available for allocation. inline size_t available() const; - /// Returns the address that is the lower bound of the segment. - /// \post The returned pointer is guaranteed to be aligned to a segment - /// boundary. - char *lowLim() const { - return lowLim_; - } - - /// Returns the address that is the upper bound of the segment. - char *hiLim() const { - return lowLim() + storageSize(); - } - - /// Returns the address at which the first allocation in this segment would - /// occur. - /// Disable UB sanitization because 'this' may be null during the tests. - inline char *start() const LLVM_NO_SANITIZE("undefined"); - /// Returns the first address after the region in which allocations can occur, /// taking external memory credits into a account (they decrease the effective /// end). @@ -340,15 +454,6 @@ class AlignedHeapSegment { /// AlignedHeapSegment. inline static bool containedInSame(const void *a, const void *b); - /// Return a reference to the card table covering the memory region managed by - /// this segment. - /// Disable sanitization because 'this' may be null in the tests. - inline CardTable &cardTable() const LLVM_NO_SANITIZE("null"); - - /// Return a reference to the mark bit array covering the memory region - /// managed by this segment. - inline Contents::MarkBitArray &markBitArray() const; - explicit operator bool() const { return lowLim(); } @@ -390,20 +495,11 @@ class AlignedHeapSegment { /// Set the contents of the segment to a dead value. 
void clear(); - /// Set the given range [start, end) to a dead value. - static void clear(char *start, char *end); /// Checks that dead values are present in the [start, end) range. static void checkUnwritten(char *start, char *end); #endif - protected: - /// Return a pointer to the contents of the memory region managed by this - /// segment. - inline Contents *contents() const; - - /// The start of the aligned segment. - char *lowLim_{nullptr}; - + private: /// The provider that created this segment. It will be used to properly /// destroy this. StorageProvider *provider_{nullptr}; @@ -419,7 +515,6 @@ class AlignedHeapSegment { /// and swap idiom. friend void swap(AlignedHeapSegment &a, AlignedHeapSegment &b); - private: AlignedHeapSegment(StorageProvider *provider, void *lowLim); }; @@ -459,26 +554,6 @@ AllocResult AlignedHeapSegment::alloc(uint32_t size) { return {cell, true}; } -/*static*/ -AlignedHeapSegment::Contents::MarkBitArray * -AlignedHeapSegment::markBitArrayCovering(const void *ptr) { - return &contents(storageStart(ptr))->markBitArray_; -} - -/*static*/ -void AlignedHeapSegment::setCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - markBits->set(ind, true); -} - -/*static*/ -bool AlignedHeapSegment::getCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - return markBits->at(ind); -} - GCCell *AlignedHeapSegment::getFirstCellHead(size_t cardIdx) { CardTable &cards = cardTable(); GCCell *cell = cards.firstObjForCard(cardIdx); @@ -499,16 +574,6 @@ void AlignedHeapSegment::setCellHead(const GCCell *cellStart, const size_t sz) { } } -/* static */ AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - void *lowLim) { - return reinterpret_cast(lowLim); -} - -/* static */ const AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - const void *lowLim) { - return reinterpret_cast(lowLim); -} - /* static */ CardTable *AlignedHeapSegment::cardTableCovering(const void *ptr) { return &AlignedHeapSegment::contents(storageStart(ptr))->cardTable_; } @@ -529,10 +594,6 @@ size_t AlignedHeapSegment::available() const { return effectiveEnd() - level(); } -char *AlignedHeapSegment::start() const { - return contents()->allocRegion_; -} - char *AlignedHeapSegment::effectiveEnd() const { return effectiveEnd_; } @@ -558,19 +619,6 @@ bool AlignedHeapSegment::containedInSame(const void *a, const void *b) { storageSize(); } -CardTable &AlignedHeapSegment::cardTable() const { - return contents()->cardTable_; -} - -AlignedHeapSegment::Contents::MarkBitArray &AlignedHeapSegment::markBitArray() - const { - return contents()->markBitArray_; -} - -AlignedHeapSegment::Contents *AlignedHeapSegment::contents() const { - return contents(lowLim()); -} - } // namespace vm } // namespace hermes diff --git a/include/hermes/VM/ArrayStorage.h b/include/hermes/VM/ArrayStorage.h index 15d90f83e3b..adc796d0cba 100644 --- a/include/hermes/VM/ArrayStorage.h +++ b/include/hermes/VM/ArrayStorage.h @@ -151,7 +151,7 @@ class ArrayStorageBase final template void set(size_type index, HVType val, GC &gc) { assert(index < size() && "index out of range"); - data()[index].set(val, gc); + data()[index].set(val, gc, this); } /// \return the element at index \p index @@ -185,7 +185,7 @@ class ArrayStorageBase final assert(sz < capacity()); // Use the constructor of GCHermesValue to use the correct write barrier // for uninitialized memory. 
- new (&data()[sz]) GCHVType(value, runtime.getHeap()); + new (&data()[sz]) GCHVType(value, runtime.getHeap(), this); size_.store(sz + 1, std::memory_order_release); } @@ -237,7 +237,7 @@ class ArrayStorageBase final auto *fromStart = other->data(); auto *fromEnd = fromStart + otherSz; GCHVType::uninitialized_copy( - fromStart, fromEnd, data() + sz, runtime.getHeap()); + fromStart, fromEnd, data() + sz, runtime.getHeap(), this); size_.store(sz + otherSz, std::memory_order_release); } diff --git a/include/hermes/VM/Callable.h b/include/hermes/VM/Callable.h index 407583edbc1..72afe2c7fe1 100644 --- a/include/hermes/VM/Callable.h +++ b/include/hermes/VM/Callable.h @@ -92,14 +92,19 @@ class Environment final Runtime &runtime, Handle parentEnvironment, uint32_t size) - : parentEnvironment_(runtime, parentEnvironment.get(), runtime.getHeap()), + : parentEnvironment_( + runtime, + parentEnvironment.get(), + runtime.getHeap(), + this), size_(size) { // Initialize all slots to 'undefined'. GCHermesValue::uninitialized_fill( getSlots(), getSlots() + size, HermesValue::encodeUndefinedValue(), - runtime.getHeap()); + runtime.getHeap(), + this); } /// Create an environment using the given function to retrieve the parent @@ -344,7 +349,7 @@ class Callable : public JSObject { HiddenClass *clazz, Handle env) : JSObject(runtime, parent, clazz), - environment_(runtime, *env, runtime.getHeap()) {} + environment_(runtime, *env, runtime.getHeap(), this) {} Callable(Runtime &runtime, JSObject *parent, HiddenClass *clazz) : JSObject(runtime, parent, clazz), environment_() {} @@ -374,14 +379,16 @@ Environment::Environment( runtime, // TODO: Consider keeping the parent as a compressed pointer. parentFn->getEnvironment(runtime), - runtime.getHeap()), + runtime.getHeap(), + this), size_(size) { // Initialize all slots to 'undefined'. GCHermesValue::uninitialized_fill( getSlots(), getSlots() + size, HermesValue::encodeUndefinedValue(), - runtime.getHeap()); + runtime.getHeap(), + this); } /// A function produced by Function.prototype.bind(). It packages a function @@ -451,8 +458,8 @@ class BoundFunction final : public Callable { Handle target, Handle argStorage) : Callable(runtime, *parent, *clazz), - target_(runtime, *target, runtime.getHeap()), - argStorage_(runtime, *argStorage, runtime.getHeap()) {} + target_(runtime, *target, runtime.getHeap(), this), + argStorage_(runtime, *argStorage, runtime.getHeap(), this) {} private: /// Return a pointer to the stored arguments, including \c this. \c this is @@ -1093,7 +1100,7 @@ class JSFunction : public Callable { CodeBlock *codeBlock) : Callable(runtime, *parent, *clazz, environment), codeBlock_(codeBlock), - domain_(runtime, *domain, runtime.getHeap()) { + domain_(runtime, *domain, runtime.getHeap(), this) { assert( !vt.finalize_ == (kHasFinalizer != HasFinalizer::Yes) && "kHasFinalizer invalid value"); diff --git a/include/hermes/VM/CardTableNC.h b/include/hermes/VM/CardTableNC.h index 5bfa40f2102..f332954a5bb 100644 --- a/include/hermes/VM/CardTableNC.h +++ b/include/hermes/VM/CardTableNC.h @@ -22,10 +22,16 @@ namespace hermes { namespace vm { /// The card table optimizes young gen collections by restricting the amount of -/// heap belonging to the old gen that must be scanned. The card table expects -/// to be constructed inside an AlignedHeapSegment's storage, at some position -/// before the allocation region, and covers the extent of that storage's -/// memory. +/// heap belonging to the old gen that must be scanned. 
The card table expects
+/// to be constructed at the beginning of a segment's storage, and covers the
+/// extent of that storage's memory. There are two cases:
+/// 1. For AlignedHeapSegment, the inline CardStatus array and Boundary array
+///    in the card table are large enough.
+/// 2. For JumboHeapSegment, the two arrays are allocated separately.
+/// In either case, the pointers to the CardStatus array and Boundary array are
+/// stored in the \c cards and \c boundaries fields of SHSegmentInfo, which
+/// occupies the prefix bytes of the card table that are mapped to the
+/// auxiliary data structures of a segment.
 ///
 /// Also supports the following query: Given a card in the heap that intersects
 /// with the used portion of its segment, find its "crossing object" -- the
@@ -58,16 +64,19 @@ class CardTable {
     const char *address_{nullptr};
   };
 
+  enum class CardStatus : char { Clean = 0, Dirty = 1 };
+
   /// The size (and base-two log of the size) of cards used in the card table.
   static constexpr size_t kLogCardSize = 9; // ==> 512-byte cards.
   static constexpr size_t kCardSize = 1 << kLogCardSize; // ==> 512-byte cards.
 
-  static constexpr size_t kSegmentSize = 1 << HERMESVM_LOG_HEAP_SEGMENT_SIZE;
-
-  /// The number of valid indices into the card table.
-  static constexpr size_t kValidIndices = kSegmentSize >> kLogCardSize;
+  /// Maximum size of a segment that can use the inline cards and boundaries
+  /// arrays.
+  static constexpr size_t kSegmentUnitSize = 1
+      << HERMESVM_LOG_HEAP_SEGMENT_SIZE;
 
-  /// The size of the card table.
-  static constexpr size_t kCardTableSize = kValidIndices;
+  /// The size of the maximum inline card table. The CardStatus and boundary
+  /// arrays for larger segments are larger and are stored separately.
+  static constexpr size_t kInlineCardTableSize =
+      kSegmentUnitSize >> kLogCardSize;
 
   /// For convenience, this is a conversion factor to determine how many bytes
   /// in the heap correspond to a single byte in the card table. This is
@@ -77,29 +86,57 @@ class CardTable {
   /// guaranteed by a static_assert below.
   static constexpr size_t kHeapBytesPerCardByte = kCardSize;
 
-  /// A prefix of every segment is occupied by auxilary data
-  /// structures. The card table is the first such data structure.
-  /// The card table maps to the segment. Only the suffix of the card
-  /// table that maps to the suffix of entire segment that is used for
-  /// allocation is ever used; the prefix that maps to the card table
-  /// itself is not used. (Nor is the portion that of the card table
-  /// that maps to the other auxiliary data structure, the mark bit
-  /// array, but we don't attempt to calculate that here.)
-  /// It is useful to know the size of this unused region of
-  /// the card table, so it can be used for other purposes.
-  /// Note that the total size of the card table is 2 times
-  /// kCardTableSize, since the CardTable contains two byte arrays of
-  /// that size (cards_ and _boundaries_).
-  static constexpr size_t kFirstUsedIndex =
-      (2 * kCardTableSize) >> kLogCardSize;
-
-  CardTable() = default;
+  /// A prefix of every segment is occupied by auxiliary data structures. The
+  /// card table is the first such data structure. The card table maps to the
+  /// segment. Only the suffix of the card table that maps to the suffix of the
+  /// entire segment that is used for allocation is ever used; the prefix that
+  /// maps to the card table itself is not used, nor is the portion of the card
+  /// table that maps to the other auxiliary data structures: the mark bit
+  /// array and the guard pages. This small space can be used for other
+  /// purposes, such as storing the SHSegmentInfo. The actual first used index
+  /// should take this into account. Here we only account for the CardTable
+  /// itself and the size of SHSegmentInfo. It's only used as the starting
+  /// index for clearing/dirtying a range of bits.
+  /// Note that the total size of the card table is 2 times kCardTableSize,
+  /// since the CardTable contains two byte arrays of that size (cards_ and
+  /// boundaries_).
+  static constexpr size_t kFirstUsedIndex = std::max(
+      sizeof(SHSegmentInfo),
+      (2 * kInlineCardTableSize) >> kLogCardSize);
+
+  CardTable(size_t segmentSize) {
+    assert(
+        segmentSize && segmentSize % kSegmentUnitSize == 0 &&
+        "segmentSize must be a multiple of kSegmentUnitSize");
+
+    segmentInfo_.segmentSize = segmentSize;
+    if (segmentSize == kSegmentUnitSize) {
+      // Just use the inline storage.
+      setCards(inlineCardStatusArray);
+      setBoundaries(inlineBoundaryArray_);
+    } else {
+      size_t cardTableSize = segmentSize >> kLogCardSize;
+      // CardStatus is clean by default, so must zero-initialize it.
+      setCards(new AtomicIfConcurrentGC<CardStatus>[cardTableSize] {});
+      setBoundaries(new int8_t[cardTableSize]);
+    }
+  }
 
   /// CardTable is not copyable or movable: It must be constructed in-place.
   CardTable(const CardTable &) = delete;
   CardTable(CardTable &&) = delete;
   CardTable &operator=(const CardTable &) = delete;
   CardTable &operator=(CardTable &&) = delete;
 
+  ~CardTable() {
+    // If the CardStatus/Boundary arrays were allocated separately, free them.
+    if (cards() != inlineCardStatusArray) {
+      delete[] cards();
    }
+    if (boundaries() != inlineBoundaryArray_) {
+      delete[] boundaries();
+    }
+  }
+
   /// Returns the card table index corresponding to a byte at the given address.
   /// \pre \p addr must be within the bounds of the segment owning this card
   /// table or at most 1 card after it, that is to say
@@ -112,8 +149,7 @@
   /// of how this is used.
   inline size_t addressToIndex(const void *addr) const LLVM_NO_SANITIZE("null");
 
-  /// Returns the address corresponding to the given card table
-  /// index.
+  /// Returns the address corresponding to the given card table index.
   ///
   /// \pre \p index is bounded:
   ///
@@ -143,7 +179,7 @@
   inline OptValue<size_t> findNextDirtyCard(size_t fromIndex, size_t endIndex) const;
 
-  /// If there is a card card at or after \p fromIndex, at an index less than
+  /// If there is a card at or after \p fromIndex, at an index less than
   /// \p endIndex, returns the index of the clean card, else returns none.
   inline OptValue<size_t> findNextCleanCard(size_t fromIndex, size_t endIndex) const;
 
@@ -184,12 +220,17 @@
   /// is the first object.)
   GCCell *firstObjForCard(unsigned index) const;
 
+  /// The end index of the card table (all valid indices should be smaller).
+  size_t getEndIndex() const {
+    return getSegmentSize() >> kLogCardSize;
+  }
+
 #ifdef HERMES_EXTRA_DEBUG
   /// Temporary debugging hack: yield the numeric value of the boundaries_ array
   /// for the given \p index.
   /// TODO(T48709128): remove this when the problem is diagnosed.
   int8_t cardObjectTableValue(unsigned index) const {
-    return boundaries_[index];
+    return boundaries()[index];
   }
 
   /// These methods protect and unprotect, respectively, the memory
@@ -214,13 +255,33 @@
 #endif // HERMES_SLOW_DEBUG
 
  private:
+  unsigned getSegmentSize() const {
+    return segmentInfo_.segmentSize;
+  }
+
 #ifndef NDEBUG
-  /// Returns the pointer to the end of the storage containing \p ptr
-  /// (exclusive).
-  static void *storageEnd(const void *ptr);
+  /// Returns the pointer to the end of the storage starting at \p lowLim.
+  void *storageEnd(const void *lowLim) const {
+    return reinterpret_cast<void *>(
+        reinterpret_cast<uintptr_t>(lowLim) + getSegmentSize());
+  }
 #endif
 
-  enum class CardStatus : char { Clean = 0, Dirty = 1 };
+  void setCards(AtomicIfConcurrentGC<CardStatus> *cards) {
+    segmentInfo_.cards = cards;
+  }
+
+  AtomicIfConcurrentGC<CardStatus> *cards() const {
+    return static_cast<AtomicIfConcurrentGC<CardStatus> *>(segmentInfo_.cards);
+  }
+
+  void setBoundaries(int8_t *boundaries) {
+    segmentInfo_.boundaries = boundaries;
+  }
+
+  int8_t *boundaries() const {
+    return segmentInfo_.boundaries;
+  }
 
   /// \return The lowest address whose card can be dirtied in this array. i.e.
   /// The smallest address such that
@@ -255,14 +316,27 @@
   void cleanOrDirtyRange(size_t from, size_t to, CardStatus cleanOrDirty);
 
-  /// This needs to be atomic so that the background thread in Hades can safely
-  /// dirty cards when compacting.
-  std::array<AtomicIfConcurrentGC<CardStatus>, kCardTableSize> cards_{};
+  union {
+    /// The bytes occupied by segmentInfo_ are guaranteed not to be overwritten
+    /// by writes to the inline card status array. See the static assertions in
+    /// AlignedHeapSegmentBase. Pointers to the underlying CardStatus array and
+    /// boundary array are stored in it. Note that we could also store the
+    /// boundary array in a union along with inlineBoundaryArray_, since that
+    /// array has unused prefix bytes as well. That would save 8 bytes here,
+    /// but it makes the size check more complex, as we need to ensure that the
+    /// segment size is large enough so that inlineBoundaryArray_ has enough
+    /// unused prefix bytes to store the pointer.
+    SHSegmentInfo segmentInfo_;
+    /// This needs to be atomic so that the background thread in Hades can
+    /// safely dirty cards when compacting.
+    AtomicIfConcurrentGC<CardStatus>
+        inlineCardStatusArray[kInlineCardTableSize]{};
+  };
 
   /// See the comment at kHeapBytesPerCardByte above to see why this is
   /// necessary.
   static_assert(
-      sizeof(cards_[0]) == 1,
+      sizeof(inlineCardStatusArray[0]) == 1,
       "Validate assumption that card table entries are one byte");
 
   /// Each card has a corresponding signed byte in the boundaries_ table. A
@@ -275,7 +349,7 @@
   /// time: If we allocate a large object that crosses many cards, the first
   /// crossed cards gets a non-negative value, and each subsequent one uses the
   /// maximum exponent that stays within the card range for the object.
-  int8_t boundaries_[kCardTableSize];
+  int8_t inlineBoundaryArray_[kInlineCardTableSize];
 };
 
 /// Implementations of inlines.
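For orientation, the card arithmetic the class above relies on can be sketched outside the patch as follows. This is a standalone illustration, not part of the diff; the constants mirror the declarations above, and the 4 MB segment unit is only an example value.

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kLogCardSize = 9;                   // 512-byte cards
    constexpr size_t kExampleSegmentUnitSize = 1 << 22;  // example 4 MB unit

    // A segment of N * unit bytes needs N times as many card bytes; only the
    // single-unit case fits the inline arrays, while larger (jumbo) segments
    // get heap-allocated arrays whose pointers live in SHSegmentInfo.
    constexpr size_t cardTableSize(size_t segmentSize) {
      return segmentSize >> kLogCardSize;
    }

    // Map a heap address to its card index, relative to the segment start.
    inline size_t addressToCardIndex(const char *segStart, const void *addr) {
      return (reinterpret_cast<uintptr_t>(addr) -
              reinterpret_cast<uintptr_t>(segStart)) >> kLogCardSize;
    }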
@@ -305,7 +379,7 @@ inline size_t CardTable::addressToIndex(const void *addr) const { } inline const char *CardTable::indexToAddress(size_t index) const { - assert(index <= kValidIndices && "index must be within the index range"); + assert(index <= getEndIndex() && "index must be within the index range"); const char *res = base() + (index << kLogCardSize); assert( base() <= res && res <= storageEnd(base()) && @@ -314,7 +388,7 @@ inline const char *CardTable::indexToAddress(size_t index) const { } inline void CardTable::dirtyCardForAddress(const void *addr) { - cards_[addressToIndex(addr)].store( + cards()[addressToIndex(addr)].store( CardStatus::Dirty, std::memory_order_relaxed); } @@ -323,8 +397,8 @@ inline bool CardTable::isCardForAddressDirty(const void *addr) const { } inline bool CardTable::isCardForIndexDirty(size_t index) const { - assert(index < kValidIndices && "index is required to be in range."); - return cards_[index].load(std::memory_order_relaxed) == CardStatus::Dirty; + assert(index < getEndIndex() && "index is required to be in range."); + return cards()[index].load(std::memory_order_relaxed) == CardStatus::Dirty; } inline OptValue CardTable::findNextDirtyCard( @@ -348,9 +422,9 @@ inline CardTable::Boundary CardTable::nextBoundary(const char *level) const { } inline const char *CardTable::base() const { - // As we know the card table is laid out inline before the allocation region - // of its aligned heap segment, we can use its own this pointer as the base - // address. + // As we know the card table is laid out inline at the beginning of the + // segment storage, which is before the allocation region, we can use its own + // this pointer as the base address. return reinterpret_cast(this); } diff --git a/include/hermes/VM/GCBase.h b/include/hermes/VM/GCBase.h index c1114809745..a0c0f1d026c 100644 --- a/include/hermes/VM/GCBase.h +++ b/include/hermes/VM/GCBase.h @@ -1152,29 +1152,36 @@ class GCBase { #ifdef HERMESVM_GC_RUNTIME /// Default implementations for read and write barriers: do nothing. 
- void writeBarrier(const GCHermesValue *loc, HermesValue value); - void writeBarrier(const GCSmallHermesValue *loc, SmallHermesValue value); - void writeBarrier(const GCPointerBase *loc, const GCCell *value); - void constructorWriteBarrier(const GCHermesValue *loc, HermesValue value); + template + void writeBarrier( + const GCCell *cell, + const GCHermesValueBase *loc, + HVType value); + void writeBarrier( + const GCCell *cell, + const GCPointerBase *loc, + const GCCell *value); + template void constructorWriteBarrier( - const GCSmallHermesValue *loc, - SmallHermesValue value); - void constructorWriteBarrier(const GCPointerBase *loc, const GCCell *value); - void writeBarrierRange(const GCHermesValue *start, uint32_t numHVs); - void writeBarrierRange(const GCSmallHermesValue *start, uint32_t numHVs); - void constructorWriteBarrierRange( - const GCHermesValue *start, - uint32_t numHVs); + const GCCell *cell, + const GCHermesValueBase *loc, + HVType value); + void constructorWriteBarrier( + const GCCell *cell, + const GCPointerBase *loc, + const GCCell *value); + template void constructorWriteBarrierRange( - const GCSmallHermesValue *start, + const GCCell *cell, + const GCHermesValueBase *start, uint32_t numHVs); - void snapshotWriteBarrier(const GCHermesValue *loc); - void snapshotWriteBarrier(const GCSmallHermesValue *loc); + template + void snapshotWriteBarrier(const GCHermesValueBase *loc); void snapshotWriteBarrier(const GCPointerBase *loc); void snapshotWriteBarrier(const GCSymbolID *symbol); - void snapshotWriteBarrierRange(const GCHermesValue *start, uint32_t numHVs); + template void snapshotWriteBarrierRange( - const GCSmallHermesValue *start, + const GCHermesValueBase *start, uint32_t numHVs); void weakRefReadBarrier(HermesValue value); void weakRefReadBarrier(GCCell *value); diff --git a/include/hermes/VM/GCPointer-inline.h b/include/hermes/VM/GCPointer-inline.h index fa5f7633ed5..810b7ccfee0 100644 --- a/include/hermes/VM/GCPointer-inline.h +++ b/include/hermes/VM/GCPointer-inline.h @@ -20,42 +20,55 @@ GCPointerBase::GCPointerBase( PointerBase &base, GCCell *ptr, GC &gc, + const GCCell *owningObj, NeedsBarriers) : CompressedPointer(CompressedPointer::encode(ptr, base)) { assert( (!ptr || gc.validPointer(ptr)) && "Cannot construct a GCPointer from an invalid pointer"); if (NeedsBarriers::value) { - gc.constructorWriteBarrier(this, ptr); + gc.constructorWriteBarrier(owningObj, this, ptr); } else { assert(!gc.needsWriteBarrier(this, ptr)); } } -inline void GCPointerBase::set(PointerBase &base, GCCell *ptr, GC &gc) { +inline void GCPointerBase::set( + PointerBase &base, + GCCell *ptr, + GC &gc, + const GCCell *owningObj) { assert( (!ptr || gc.validPointer(ptr)) && "Cannot set a GCPointer to an invalid pointer"); // Write barrier must happen before the write. - gc.writeBarrier(this, ptr); + gc.writeBarrier(owningObj, this, ptr); setNoBarrier(CompressedPointer::encode(ptr, base)); } -inline void GCPointerBase::setNonNull(PointerBase &base, GCCell *ptr, GC &gc) { +inline void GCPointerBase::setNonNull( + PointerBase &base, + GCCell *ptr, + GC &gc, + const GCCell *owningObj) { assert( gc.validPointer(ptr) && "Cannot set a GCPointer to an invalid pointer"); // Write barrier must happen before the write. 
- gc.writeBarrier(this, ptr); + gc.writeBarrier(owningObj, this, ptr); setNoBarrier(CompressedPointer::encodeNonNull(ptr, base)); } -inline void -GCPointerBase::set(PointerBase &base, CompressedPointer ptr, GC &gc) { +inline void GCPointerBase::set( + PointerBase &base, + CompressedPointer ptr, + GC &gc, + const GCCell *owningObj) { assert( (!ptr || gc.validPointer(ptr.get(base))) && "Cannot set a GCPointer to an invalid pointer"); // Write barrier must happen before the write. - gc.writeBarrier(this, ptr.get(base)); + (void)owningObj; + gc.writeBarrier(owningObj, this, ptr.get(base)); setNoBarrier(ptr); } diff --git a/include/hermes/VM/GCPointer.h b/include/hermes/VM/GCPointer.h index 09db5f06d87..e7451417e53 100644 --- a/include/hermes/VM/GCPointer.h +++ b/include/hermes/VM/GCPointer.h @@ -25,7 +25,12 @@ class GCPointerBase : public CompressedPointer { explicit GCPointerBase(std::nullptr_t) : CompressedPointer(nullptr) {} template - inline GCPointerBase(PointerBase &base, GCCell *ptr, GC &gc, NeedsBarriers); + inline GCPointerBase( + PointerBase &base, + GCCell *ptr, + GC &gc, + const GCCell *owningObj, + NeedsBarriers); public: // These classes are used as arguments to GCPointer constructors, to @@ -38,9 +43,16 @@ class GCPointerBase : public CompressedPointer { /// \param ptr The memory being pointed to. /// \param base The base of ptr. /// \param gc Used for write barriers. - inline void set(PointerBase &base, GCCell *ptr, GC &gc); - inline void set(PointerBase &base, CompressedPointer ptr, GC &gc); - inline void setNonNull(PointerBase &base, GCCell *ptr, GC &gc); + /// \param owningObj The object that contains this GCPointer. + inline void + set(PointerBase &base, GCCell *ptr, GC &gc, const GCCell *owningObj); + inline void set( + PointerBase &base, + CompressedPointer ptr, + GC &gc, + const GCCell *owningObj); + inline void + setNonNull(PointerBase &base, GCCell *ptr, GC &gc, const GCCell *owningObj); /// Set this pointer to null. This needs a write barrier in some types of /// garbage collectors. @@ -62,14 +74,19 @@ class GCPointer : public GCPointerBase { /// this argument is unused, but its type's boolean value constant indicates /// whether barriers are required.) template - GCPointer(PointerBase &base, T *ptr, GC &gc, NeedsBarriers needsBarriers) - : GCPointerBase(base, ptr, gc, needsBarriers) {} + GCPointer( + PointerBase &base, + T *ptr, + GC &gc, + const GCCell *owningObj, + NeedsBarriers needsBarriers) + : GCPointerBase(base, ptr, gc, owningObj, needsBarriers) {} /// Same as the constructor above, with the default for /// NeedsBarriers as "YesBarriers". (We can't use default template /// arguments with the idiom used above.) - inline GCPointer(PointerBase &base, T *ptr, GC &gc) - : GCPointer(base, ptr, gc, YesBarriers()) {} + inline GCPointer(PointerBase &base, T *ptr, GC &gc, const GCCell *owningObj) + : GCPointer(base, ptr, gc, owningObj, YesBarriers()) {} /// We are not allowed to copy-construct or assign GCPointers. GCPointer(const GCPointerBase &) = delete; @@ -90,16 +107,21 @@ class GCPointer : public GCPointerBase { /// \param base The base of ptr. /// \param ptr The memory being pointed to. /// \param gc Used for write barriers. - void set(PointerBase &base, T *ptr, GC &gc) { - GCPointerBase::set(base, ptr, gc); + /// \param owningObj The object that contains this GCPointer. 
+ void set(PointerBase &base, T *ptr, GC &gc, const GCCell *owningObj) { + GCPointerBase::set(base, ptr, gc, owningObj); } - void setNonNull(PointerBase &base, T *ptr, GC &gc) { - GCPointerBase::setNonNull(base, ptr, gc); + void setNonNull(PointerBase &base, T *ptr, GC &gc, const GCCell *owningObj) { + GCPointerBase::setNonNull(base, ptr, gc, owningObj); } /// Convenience overload of GCPointer::set for other GCPointers. - void set(PointerBase &base, const GCPointer &ptr, GC &gc) { - GCPointerBase::set(base, ptr, gc); + void set( + PointerBase &base, + const GCPointer &ptr, + GC &gc, + const GCCell *owningObj) { + GCPointerBase::set(base, ptr, gc, owningObj); } }; diff --git a/include/hermes/VM/HadesGC.h b/include/hermes/VM/HadesGC.h index 1ff0d7219c8..9f3f1b0b224 100644 --- a/include/hermes/VM/HadesGC.h +++ b/include/hermes/VM/HadesGC.h @@ -152,93 +152,116 @@ class HadesGC final : public GCBase { /// be in the heap). If value is a pointer, execute a write barrier. /// NOTE: The write barrier call must be placed *before* the write to the /// pointer, so that the current value can be fetched. - void writeBarrier(const GCHermesValue *loc, HermesValue value) { + template + void writeBarrier( + const GCCell *cell, + const GCHermesValueBase *loc, + HVType value) { assert( !calledByBackgroundThread() && "Write barrier invoked by background thread."); // A pointer that lives in YG never needs any write barriers. if (LLVM_UNLIKELY(!inYoungGen(loc))) - writeBarrierSlow(loc, value); + writeBarrierSlow(cell, loc, value); } - void writeBarrierSlow(const GCHermesValue *loc, HermesValue value); - - void writeBarrier(const GCSmallHermesValue *loc, SmallHermesValue value) { - assert( - !calledByBackgroundThread() && - "Write barrier invoked by background thread."); - // A pointer that lives in YG never needs any write barriers. - if (LLVM_UNLIKELY(!inYoungGen(loc))) - writeBarrierSlow(loc, value); + template + void writeBarrierSlow( + const GCCell *cell, + const GCHermesValueBase *loc, + HVType value) { + if (ogMarkingBarriers_) { + snapshotWriteBarrierInternal(*loc); + } + if (!value.isPointer()) { + return; + } + relocationWriteBarrier(cell, loc, value.getPointer(getPointerBase())); } - void writeBarrierSlow(const GCSmallHermesValue *loc, SmallHermesValue value); /// The given pointer value is being written at the given loc (required to /// be in the heap). The value may be null. Execute a write barrier. /// NOTE: The write barrier call must be placed *before* the write to the /// pointer, so that the current value can be fetched. - void writeBarrier(const GCPointerBase *loc, const GCCell *value) { + void writeBarrier( + const GCCell *cell, + const GCPointerBase *loc, + const GCCell *value) { assert( !calledByBackgroundThread() && "Write barrier invoked by background thread."); // A pointer that lives in YG never needs any write barriers. if (LLVM_UNLIKELY(!inYoungGen(loc))) - writeBarrierSlow(loc, value); + writeBarrierSlow(cell, loc, value); } - void writeBarrierSlow(const GCPointerBase *loc, const GCCell *value); + void writeBarrierSlow( + const GCCell *cell, + const GCPointerBase *loc, + const GCCell *value); /// Special versions of \p writeBarrier for when there was no previous value /// initialized into the space. - void constructorWriteBarrier(const GCHermesValue *loc, HermesValue value) { - // A pointer that lives in YG never needs any write barriers. 
- if (LLVM_UNLIKELY(!inYoungGen(loc))) - constructorWriteBarrierSlow(loc, value); - } - void constructorWriteBarrierSlow(const GCHermesValue *loc, HermesValue value); - + template void constructorWriteBarrier( - const GCSmallHermesValue *loc, - SmallHermesValue value) { + const GCCell *cell, + const GCHermesValueBase *loc, + HVType value) { // A pointer that lives in YG never needs any write barriers. if (LLVM_UNLIKELY(!inYoungGen(loc))) - constructorWriteBarrierSlow(loc, value); + constructorWriteBarrierSlow(cell, loc, value); } + template void constructorWriteBarrierSlow( - const GCSmallHermesValue *loc, - SmallHermesValue value); + const GCCell *cell, + const GCHermesValueBase *loc, + HVType value) { + // A constructor never needs to execute a SATB write barrier, since its + // previous value was definitely not live. + if (!value.isPointer()) { + return; + } + relocationWriteBarrier(cell, loc, value.getPointer(getPointerBase())); + } - void constructorWriteBarrier(const GCPointerBase *loc, const GCCell *value) { + void constructorWriteBarrier( + const GCCell *cell, + const GCPointerBase *loc, + const GCCell *value) { // A pointer that lives in YG never needs any write barriers. if (LLVM_UNLIKELY(!inYoungGen(loc))) - relocationWriteBarrier(loc, value); + relocationWriteBarrier(cell, loc, value); } + template void constructorWriteBarrierRange( - const GCHermesValue *start, + const GCCell *cell, + const GCHermesValueBase *start, uint32_t numHVs) { // A pointer that lives in YG never needs any write barriers. if (LLVM_UNLIKELY(!inYoungGen(start))) - constructorWriteBarrierRangeSlow(start, numHVs); + constructorWriteBarrierRangeSlow(cell, start, numHVs); } + template void constructorWriteBarrierRangeSlow( - const GCHermesValue *start, - uint32_t numHVs); - - void constructorWriteBarrierRange( - const GCSmallHermesValue *start, + const GCCell *cell, + const GCHermesValueBase *start, uint32_t numHVs) { - // A pointer that lives in YG never needs any write barriers. - if (LLVM_UNLIKELY(!inYoungGen(start))) - constructorWriteBarrierRangeSlow(start, numHVs); + assert( + reinterpret_cast(start + numHVs) < + AlignedHeapSegmentBase::storageEnd(cell) && + "Range must start and end within a heap segment."); + + // Most constructors should be running in the YG, so in the common case, + // we can avoid doing anything for the whole range. If the range is in + // the OG, then just dirty all the cards corresponding to it, and we can + // scan them for pointers later. This is less precise but makes the + // write barrier faster. 
+ + AlignedHeapSegmentBase::cardTableCovering(cell)->dirtyCardsForAddressRange( + start, start + numHVs); } - void constructorWriteBarrierRangeSlow( - const GCSmallHermesValue *start, - uint32_t numHVs); - void snapshotWriteBarrier(const GCHermesValue *loc) { - if (LLVM_UNLIKELY(!inYoungGen(loc) && ogMarkingBarriers_)) - snapshotWriteBarrierInternal(*loc); - } - void snapshotWriteBarrier(const GCSmallHermesValue *loc) { + template + void snapshotWriteBarrier(const GCHermesValueBase *loc) { if (LLVM_UNLIKELY(!inYoungGen(loc) && ogMarkingBarriers_)) snapshotWriteBarrierInternal(*loc); } @@ -252,23 +275,21 @@ class HadesGC final : public GCBase { snapshotWriteBarrierInternal(*loc); } - void snapshotWriteBarrierRange(const GCHermesValue *start, uint32_t numHVs) { - if (LLVM_UNLIKELY(!inYoungGen(start) && ogMarkingBarriers_)) - snapshotWriteBarrierRangeSlow(start, numHVs); - } - void snapshotWriteBarrierRangeSlow( - const GCHermesValue *start, - uint32_t numHVs); - + template void snapshotWriteBarrierRange( - const GCSmallHermesValue *start, + const GCHermesValueBase *start, uint32_t numHVs) { if (LLVM_UNLIKELY(!inYoungGen(start) && ogMarkingBarriers_)) snapshotWriteBarrierRangeSlow(start, numHVs); } + template void snapshotWriteBarrierRangeSlow( - const GCSmallHermesValue *start, - uint32_t numHVs); + const GCHermesValueBase *start, + uint32_t numHVs) { + for (uint32_t i = 0; i < numHVs; ++i) { + snapshotWriteBarrierInternal(start[i]); + } + } /// Add read barrier for \p value. This is only used when reading entry /// value from WeakMap/WeakSet. @@ -990,7 +1011,10 @@ class HadesGC final : public GCBase { /// Common logic for doing the relocation write barrier for detecting /// pointers into YG and for tracking newly created pointers into the /// compactee. - void relocationWriteBarrier(const void *loc, const void *value); + void relocationWriteBarrier( + const GCCell *cell, + const void *loc, + const GCCell *value); /// Finalize all objects in YG that have finalizers. 
void finalizeYoungGenObjects(); diff --git a/include/hermes/VM/HeapRuntime.h b/include/hermes/VM/HeapRuntime.h index c87aed40d76..a6fbbe55a9d 100644 --- a/include/hermes/VM/HeapRuntime.h +++ b/include/hermes/VM/HeapRuntime.h @@ -22,7 +22,7 @@ class HeapRuntime { public: ~HeapRuntime() { runtime_->~RT(); - sp_->deleteStorage(runtime_); + sp_->deleteStorage(runtime_, kHeapRuntimeStorageSize); } /// Allocate a segment and create an aliased shared_ptr that points to the @@ -36,16 +36,17 @@ class HeapRuntime { private: HeapRuntime(std::shared_ptr sp) : sp_{std::move(sp)} { - auto ptrOrError = sp_->newStorage("hermes-rt"); + auto ptrOrError = sp_->newStorage("hermes-rt", kHeapRuntimeStorageSize); if (!ptrOrError) hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError()); - static_assert( - sizeof(RT) < AlignedHeapSegment::storageSize(), "Segments too small."); + static_assert(sizeof(RT) < kHeapRuntimeStorageSize, "Segments too small."); runtime_ = static_cast(*ptrOrError); } std::shared_ptr sp_; RT *runtime_; + static constexpr size_t kHeapRuntimeStorageSize = + AlignedHeapSegment::storageSize(); }; } // namespace vm } // namespace hermes diff --git a/include/hermes/VM/HermesValue-inline.h b/include/hermes/VM/HermesValue-inline.h index d38a9219879..31b7ae6428d 100644 --- a/include/hermes/VM/HermesValue-inline.h +++ b/include/hermes/VM/HermesValue-inline.h @@ -31,10 +31,14 @@ inline PinnedHermesValue &PinnedHermesValue::operator=(PseudoHandle &&hv) { template template -GCHermesValueBase::GCHermesValueBase(HVType hv, GC &gc) : HVType{hv} { +GCHermesValueBase::GCHermesValueBase( + HVType hv, + GC &gc, + const GCCell *cell) + : HVType{hv} { assert(!hv.isPointer() || hv.getPointer()); if (NeedsBarriers::value) - gc.constructorWriteBarrier(this, hv); + gc.constructorWriteBarrier(cell, this, hv); } template @@ -49,7 +53,8 @@ GCHermesValueBase::GCHermesValueBase(HVType hv, GC &gc, std::nullptr_t) template template -inline void GCHermesValueBase::set(HVType hv, GC &gc) { +inline void +GCHermesValueBase::set(HVType hv, GC &gc, const GCCell *owningObj) { if (hv.isPointer()) { HERMES_SLOW_ASSERT( gc.validPointer(hv.getPointer(gc.getPointerBase())) && @@ -57,7 +62,7 @@ inline void GCHermesValueBase::set(HVType hv, GC &gc) { } assert(NeedsBarriers::value || !gc.needsWriteBarrier(this, hv)); if (NeedsBarriers::value) - gc.writeBarrier(this, hv); + gc.writeBarrier(owningObj, this, hv); HVType::setNoBarrier(hv); } @@ -81,10 +86,11 @@ inline void GCHermesValueBase::fill( InputIt start, InputIt end, HVType fill, - GC &gc) { + GC &gc, + const GCCell *owningObj) { if (fill.isPointer()) { for (auto cur = start; cur != end; ++cur) { - cur->set(fill, gc); + cur->set(fill, gc, owningObj); } } else { for (auto cur = start; cur != end; ++cur) { @@ -100,11 +106,12 @@ inline void GCHermesValueBase::uninitialized_fill( InputIt start, InputIt end, HVType fill, - GC &gc) { + GC &gc, + const GCCell *cell) { if (fill.isPointer()) { for (auto cur = start; cur != end; ++cur) { // Use the constructor write barrier. Assume it needs barriers. 
- new (&*cur) GCHermesValueBase(fill, gc); + new (&*cur) GCHermesValueBase(fill, gc, cell); } } else { for (auto cur = start; cur != end; ++cur) { @@ -121,14 +128,13 @@ inline OutputIt GCHermesValueBase::copy( InputIt last, OutputIt result, GC &gc) { -#if !defined(HERMESVM_GC_HADES) && !defined(HERMESVM_GC_RUNTIME) static_assert( !std::is_same::value || !std::is_same::value, "Pointer arguments must invoke pointer overload."); -#endif for (; first != last; ++first, (void)++result) { - result->set(*first, gc); + auto [hv, owningObj] = first.get_cell_value(); + result->set(*first, gc, owningObj); } return result; } @@ -139,42 +145,50 @@ inline OutputIt GCHermesValueBase::uninitialized_copy( InputIt first, InputIt last, OutputIt result, - GC &gc) { + GC &gc, + const GCCell *cell) { static_assert( !std::is_same::value || !std::is_same::value, "Pointer arguments must invoke pointer overload."); for (; first != last; ++first, (void)++result) { - new (&*result) GCHermesValueBase(*first, gc); + new (&*result) GCHermesValueBase(*first, gc, cell); } return result; } -// Specializations using memmove can't be used in Hades, because the concurrent -// write barrier needs strict control over how assignments are done to HV fields -// which need to be atomically updated. -#if !defined(HERMESVM_GC_HADES) && !defined(HERMESVM_GC_RUNTIME) /// Specialization for raw pointers to do a ranged write barrier. template inline GCHermesValueBase *GCHermesValueBase::copy( GCHermesValueBase *first, GCHermesValueBase *last, GCHermesValueBase *result, - GC &gc) { + GC &gc, + const GCCell *owningObj) { +// Specializations using memmove can't be used in Hades, because the concurrent +// write barrier needs strict control over how assignments are done to HV fields +// which need to be atomically updated. +#if !defined(HERMESVM_GC_HADES) && !defined(HERMESVM_GC_RUNTIME) + gc.writeBarrierRange(result, last - first); // We must use "raw" function such as memmove here, rather than a // function like std::copy (or copy_backward) that respects // constructors and operator=. For HermesValue, those require the // contents not to contain pointers. The range write barrier // before the copies ensure that sufficient barriers are // performed. - gc.writeBarrierRange(result, last - first); + (void)owningObj; std::memmove( reinterpret_cast(result), first, (last - first) * sizeof(GCHermesValueBase)); return result + (last - first); -} +#else + for (; first != last; ++first, (void)++result) { + result->set(*first, gc, owningObj); + } + return result; #endif +} /// Specialization for raw pointers to do a ranged write barrier. template @@ -182,7 +196,8 @@ inline GCHermesValueBase *GCHermesValueBase::uninitialized_copy( GCHermesValueBase *first, GCHermesValueBase *last, GCHermesValueBase *result, - GC &gc) { + GC &gc, + const GCCell *owningObj) { #ifndef NDEBUG uintptr_t fromFirst = reinterpret_cast(first), fromLast = reinterpret_cast(last); @@ -194,7 +209,7 @@ inline GCHermesValueBase *GCHermesValueBase::uninitialized_copy( "Uninitialized range cannot overlap with an initialized one."); #endif - gc.constructorWriteBarrierRange(result, last - first); + gc.constructorWriteBarrierRange(owningObj, result, last - first); // memcpy is fine for an uninitialized copy. 
std::memcpy( reinterpret_cast(result), first, (last - first) * sizeof(HVType)); @@ -207,9 +222,10 @@ inline OutputIt GCHermesValueBase::copy_backward( InputIt first, InputIt last, OutputIt result, - GC &gc) { + GC &gc, + const GCCell *owningObj) { while (first != last) { - (--result)->set(*--last, gc); + (--result)->set(*--last, gc, owningObj); } return result; } diff --git a/include/hermes/VM/HermesValue.h b/include/hermes/VM/HermesValue.h index 13bc13bb69c..1314c62fc57 100644 --- a/include/hermes/VM/HermesValue.h +++ b/include/hermes/VM/HermesValue.h @@ -523,7 +523,7 @@ class GCHermesValueBase final : public HVType { GCHermesValueBase() : HVType(HVType::encodeUndefinedValue()) {} /// Initialize a GCHermesValue from another HV. Performs a write barrier. template - GCHermesValueBase(HVType hv, GC &gc); + GCHermesValueBase(HVType hv, GC &gc, const GCCell *cell); /// Initialize a GCHermesValue from a non-pointer HV. Might perform a write /// barrier, depending on the GC. /// NOTE: The last parameter is unused, but acts as an overload selector. @@ -531,10 +531,11 @@ class GCHermesValueBase final : public HVType { GCHermesValueBase(HVType hv, GC &gc, std::nullptr_t); GCHermesValueBase(const HVType &) = delete; - /// The HermesValue \p hv may be an object pointer. Assign the - /// value, and perform any necessary write barriers. + /// The HermesValue \p hv may be an object pointer. Assign the value, and + /// perform any necessary write barriers. \p cell is the object that contains + /// this GCHermesValueBase. It's needed by the write barrier. template - inline void set(HVType hv, GC &gc); + inline void set(HVType hv, GC &gc, const GCCell *owningObj); /// The HermesValue \p hv must not be an object pointer. Assign the /// value. @@ -552,14 +553,23 @@ class GCHermesValueBase final : public HVType { /// value \p fill. If the fill value is an object pointer, must /// provide a non-null \p gc argument, to perform write barriers. template - static inline void fill(InputIt first, InputIt last, HVType fill, GC &gc); + static inline void fill( + InputIt first, + InputIt last, + HVType fill, + GC &gc, + const GCCell *owningObj); /// Same as \p fill except the range expressed by [\p first, \p last) has not /// been previously initialized. Cannot use this on previously initialized /// memory, as it will use an incorrect write barrier. template - static inline void - uninitialized_fill(InputIt first, InputIt last, HVType fill, GC &gc); + static inline void uninitialized_fill( + InputIt first, + InputIt last, + HVType fill, + GC &gc, + const GCCell *cell); /// Copies a range of values and performs a write barrier on each. template @@ -570,17 +580,20 @@ class GCHermesValueBase final : public HVType { /// been previously initialized. Cannot use this on previously initialized /// memory, as it will use an incorrect write barrier. template - static inline OutputIt - uninitialized_copy(InputIt first, InputIt last, OutputIt result, GC &gc); + static inline OutputIt uninitialized_copy( + InputIt first, + InputIt last, + OutputIt result, + GC &gc, + const GCCell *cell); -#if !defined(HERMESVM_GC_HADES) && !defined(HERMESVM_GC_RUNTIME) /// Same as \p copy, but specialized for raw pointers. static inline GCHermesValueBase *copy( GCHermesValueBase *first, GCHermesValueBase *last, GCHermesValueBase *result, - GC &gc); -#endif + GC &gc, + const GCCell *owningObj); /// Same as \p uninitialized_copy, but specialized for raw pointers. 
This is /// unsafe to use if the memory region being copied into (pointed to by @@ -590,12 +603,17 @@ class GCHermesValueBase final : public HVType { GCHermesValueBase *first, GCHermesValueBase *last, GCHermesValueBase *result, - GC &gc); + GC &gc, + const GCCell *owningObj); /// Copies a range of values and performs a write barrier on each. template - static inline OutputIt - copy_backward(InputIt first, InputIt last, OutputIt result, GC &gc); + static inline OutputIt copy_backward( + InputIt first, + InputIt last, + OutputIt result, + GC &gc, + const GCCell *owningObj); /// Same as \c unreachableWriteBarrier, but for a range of values all becoming /// unreachable. diff --git a/include/hermes/VM/HiddenClass.h b/include/hermes/VM/HiddenClass.h index b87a06d8d43..4e9a0eab867 100644 --- a/include/hermes/VM/HiddenClass.h +++ b/include/hermes/VM/HiddenClass.h @@ -326,7 +326,7 @@ class HiddenClass final : public GCCell { } void setForInCache(BigStorage *arr, Runtime &runtime) { - forInCache_.set(runtime, arr, runtime.getHeap()); + forInCache_.set(runtime, arr, runtime.getHeap(), this); } void clearForInCache(Runtime &runtime) { @@ -495,7 +495,7 @@ class HiddenClass final : public GCCell { propertyFlags_(propertyFlags), flags_(flags), numProperties_(numProperties), - parent_(runtime, *parent, runtime.getHeap()) { + parent_(runtime, *parent, runtime.getHeap(), this) { assert(propertyFlags.isValid() && "propertyFlags must be valid"); } diff --git a/include/hermes/VM/JSArray.h b/include/hermes/VM/JSArray.h index a61092bff13..7d1036dbe50 100644 --- a/include/hermes/VM/JSArray.h +++ b/include/hermes/VM/JSArray.h @@ -121,7 +121,7 @@ class ArrayImpl : public JSObject { /// Set the indexed storage of this array to be \p p. The pointer is allowed /// to be null. void setIndexedStorage(PointerBase &base, StorageType *p, GC &gc) { - indexedStorage_.set(base, p, gc); + indexedStorage_.set(base, p, gc, this); } /// @} @@ -430,7 +430,7 @@ class JSArrayIterator : public JSObject { Handle iteratedObject, IterationKind iterationKind) : JSObject(runtime, *parent, *clazz), - iteratedObject_(runtime, *iteratedObject, runtime.getHeap()), + iteratedObject_(runtime, *iteratedObject, runtime.getHeap(), this), iterationKind_(iterationKind) {} private: diff --git a/include/hermes/VM/JSDataView.h b/include/hermes/VM/JSDataView.h index eeb8afe27c7..63050eb4eda 100644 --- a/include/hermes/VM/JSDataView.h +++ b/include/hermes/VM/JSDataView.h @@ -92,7 +92,7 @@ class JSDataView final : public JSObject { assert( offset + length <= buffer->size() && "A DataView cannot be looking outside of the storage"); - buffer_.setNonNull(runtime, buffer, runtime.getHeap()); + buffer_.setNonNull(runtime, buffer, runtime.getHeap(), this); offset_ = offset; length_ = length; } diff --git a/include/hermes/VM/JSMapImpl.h b/include/hermes/VM/JSMapImpl.h index 900b8ffb983..34dd1be6265 100644 --- a/include/hermes/VM/JSMapImpl.h +++ b/include/hermes/VM/JSMapImpl.h @@ -146,7 +146,7 @@ class JSMapIteratorImpl final : public JSObject { Runtime &runtime, Handle::ContainerKind>> data, IterationKind kind) { - data_.set(runtime, data.get(), runtime.getHeap()); + data_.set(runtime, data.get(), runtime.getHeap(), this); iterationKind_ = kind; assert(data_ && "Invalid storage data"); @@ -172,7 +172,8 @@ class JSMapIteratorImpl final : public JSObject { runtime, self->data_.getNonNull(runtime)->iteratorNext( runtime, self->itr_.get(runtime)), - runtime.getHeap()); + runtime.getHeap(), + *self); if (self->itr_) { switch (self->iterationKind_) { case 
IterationKind::Key: diff --git a/include/hermes/VM/JSObject.h b/include/hermes/VM/JSObject.h index e60bb4bec13..34f856506d6 100644 --- a/include/hermes/VM/JSObject.h +++ b/include/hermes/VM/JSObject.h @@ -325,8 +325,8 @@ class JSObject : public GCCell { JSObject *parent, HiddenClass *clazz, NeedsBarriers needsBarriers) - : parent_(runtime, parent, runtime.getHeap(), needsBarriers), - clazz_(runtime, clazz, runtime.getHeap(), needsBarriers), + : parent_(runtime, parent, runtime.getHeap(), this, needsBarriers), + clazz_(runtime, clazz, runtime.getHeap(), this, needsBarriers), propStorage_(nullptr) { // Direct property slots are initialized by initDirectPropStorage. } @@ -337,8 +337,8 @@ class JSObject : public GCCell { Handle parent, Handle clazz, NeedsBarriers needsBarriers) - : parent_(runtime, *parent, runtime.getHeap(), needsBarriers), - clazz_(runtime, *clazz, runtime.getHeap(), needsBarriers), + : parent_(runtime, *parent, runtime.getHeap(), this, needsBarriers), + clazz_(runtime, *clazz, runtime.getHeap(), this, needsBarriers), propStorage_(nullptr) { // Direct property slots are initialized by initDirectPropStorage. } @@ -509,7 +509,7 @@ class JSObject : public GCCell { /// cycle checking. static void unsafeSetParentInternal(JSObject *self, Runtime &runtime, JSObject *parent) { - self->parent_.set(runtime, parent, runtime.getHeap()); + self->parent_.set(runtime, parent, runtime.getHeap(), self); } /// Return the value of an internal property slot. Use getDirectSlotValue if @@ -1686,7 +1686,7 @@ inline ExecutionStatus JSObject::allocatePropStorage( return ExecutionStatus::EXCEPTION; selfHandle->propStorage_.setNonNull( - runtime, vmcast(*res), runtime.getHeap()); + runtime, vmcast(*res), runtime.getHeap(), *selfHandle); return ExecutionStatus::RETURNED; } @@ -1722,7 +1722,8 @@ inline T *JSObject::initDirectPropStorage(Runtime &runtime, T *self) { self->directProps() + numOverlapSlots(), self->directProps() + DIRECT_PROPERTY_SLOTS, SmallHermesValue::encodeUndefinedValue(), - runtime.getHeap()); + runtime.getHeap(), + self); return self; } @@ -1736,7 +1737,7 @@ template inline void JSObject::setDirectSlotValue(JSObject *self, SmallHermesValue value, GC &gc) { static_assert(index < DIRECT_PROPERTY_SLOTS, "Must be a direct property"); - self->directProps()[index].set(value, gc); + self->directProps()[index].set(value, gc, self); } inline SmallHermesValue JSObject::getNamedSlotValueUnsafe( @@ -1839,7 +1840,7 @@ inline void JSObject::setNamedSlotValueDirectUnsafe( // to namedSlotRef(), it is a slight performance regression, which is not // entirely unexpected. 
return self->directProps()[index].set( - value, runtime.getHeap()); + value, runtime.getHeap(), self); } inline void JSObject::setNamedSlotValueIndirectUnsafe( diff --git a/include/hermes/VM/JSRegExp.h b/include/hermes/VM/JSRegExp.h index 94087984050..19b8ab0cf77 100644 --- a/include/hermes/VM/JSRegExp.h +++ b/include/hermes/VM/JSRegExp.h @@ -131,7 +131,8 @@ class JSRegExp final : public JSObject { pattern_( runtime, runtime.getPredefinedString(Predefined::emptyString), - runtime.getHeap()) {} + runtime.getHeap(), + this) {} private: ~JSRegExp(); diff --git a/include/hermes/VM/JSRegExpStringIterator.h b/include/hermes/VM/JSRegExpStringIterator.h index 3258d836be1..1733c4a6b4c 100644 --- a/include/hermes/VM/JSRegExpStringIterator.h +++ b/include/hermes/VM/JSRegExpStringIterator.h @@ -54,8 +54,8 @@ class JSRegExpStringIterator : public JSObject { bool global, bool unicode) : JSObject(runtime, *parent, *clazz), - iteratedRegExp_(runtime, *iteratedRegExp, runtime.getHeap()), - iteratedString_(runtime, *iteratedString, runtime.getHeap()), + iteratedRegExp_(runtime, *iteratedRegExp, runtime.getHeap(), this), + iteratedString_(runtime, *iteratedString, runtime.getHeap(), this), global_(global), unicode_(unicode) {} diff --git a/include/hermes/VM/LimitedStorageProvider.h b/include/hermes/VM/LimitedStorageProvider.h index a060435027b..44d7c8adf39 100644 --- a/include/hermes/VM/LimitedStorageProvider.h +++ b/include/hermes/VM/LimitedStorageProvider.h @@ -29,9 +29,9 @@ class LimitedStorageProvider final : public StorageProvider { : delegate_(std::move(provider)), limit_(limit) {} protected: - llvh::ErrorOr newStorageImpl(const char *name) override; + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override; - void deleteStorageImpl(void *storage) override; + void deleteStorageImpl(void *storage, size_t sz) override; }; } // namespace vm diff --git a/include/hermes/VM/MallocGC.h b/include/hermes/VM/MallocGC.h index 98ed5c523a3..b418df7db1e 100644 --- a/include/hermes/VM/MallocGC.h +++ b/include/hermes/VM/MallocGC.h @@ -233,22 +233,33 @@ class MallocGC final : public GCBase { virtual void creditExternalMemory(GCCell *alloc, uint32_t size) override; virtual void debitExternalMemory(GCCell *alloc, uint32_t size) override; - void writeBarrier(const GCHermesValue *, HermesValue) {} - void writeBarrier(const GCSmallHermesValue *, SmallHermesValue) {} - void writeBarrier(const GCPointerBase *, const GCCell *) {} - void constructorWriteBarrier(const GCHermesValue *, HermesValue) {} - void constructorWriteBarrier(const GCSmallHermesValue *, SmallHermesValue) {} - void constructorWriteBarrier(const GCPointerBase *, const GCCell *) {} - void writeBarrierRange(const GCHermesValue *, uint32_t) {} - void writeBarrierRange(const GCSmallHermesValue *, uint32_t) {} - void constructorWriteBarrierRange(const GCHermesValue *, uint32_t) {} - void constructorWriteBarrierRange(const GCSmallHermesValue *, uint32_t) {} - void snapshotWriteBarrier(const GCHermesValue *) {} - void snapshotWriteBarrier(const GCSmallHermesValue *) {} + template + void + writeBarrier(const GCCell *cell, const GCHermesValueBase *, HVType) {} + void writeBarrier(const GCCell *cell, const GCPointerBase *, const GCCell *) { + } + template + void constructorWriteBarrier( + const GCCell *cell, + const GCHermesValueBase *, + HVType) {} + void constructorWriteBarrier( + const GCCell *cell, + const GCPointerBase *, + const GCCell *) {} + template + void writeBarrierRange(const GCHermesValueBase *, uint32_t) {} + template + void 
constructorWriteBarrierRange( + const GCCell *cell, + const GCHermesValueBase *, + uint32_t) {} + template + void snapshotWriteBarrier(const GCHermesValueBase *) {} void snapshotWriteBarrier(const GCPointerBase *) {} void snapshotWriteBarrier(const GCSymbolID *) {} - void snapshotWriteBarrierRange(const GCHermesValue *, uint32_t) {} - void snapshotWriteBarrierRange(const GCSmallHermesValue *, uint32_t) {} + template + void snapshotWriteBarrierRange(const GCHermesValueBase *, uint32_t) {} void weakRefReadBarrier(HermesValue) {} void weakRefReadBarrier(GCCell *) {} diff --git a/include/hermes/VM/OrderedHashMap.h b/include/hermes/VM/OrderedHashMap.h index 22583d7f17f..6ced778c7a7 100644 --- a/include/hermes/VM/OrderedHashMap.h +++ b/include/hermes/VM/OrderedHashMap.h @@ -207,7 +207,7 @@ class OrderedHashMapBase { return ExecutionStatus::EXCEPTION; } - self->hashTable_.set(runtime, arrRes->get(), runtime.getHeap()); + self->hashTable_.set(runtime, arrRes->get(), runtime.getHeap(), *self); return ExecutionStatus::RETURNED; } diff --git a/include/hermes/VM/PrimitiveBox.h b/include/hermes/VM/PrimitiveBox.h index f823e0cb22c..0de37a2a450 100644 --- a/include/hermes/VM/PrimitiveBox.h +++ b/include/hermes/VM/PrimitiveBox.h @@ -66,7 +66,7 @@ class JSString final : public JSObject { Handle parent, Handle clazz) : JSObject(runtime, *parent, *clazz), - primitiveValue_(runtime, *value, runtime.getHeap()) { + primitiveValue_(runtime, *value, runtime.getHeap(), this) { flags_.indexedStorage = true; flags_.fastIndexProperties = true; } @@ -157,7 +157,7 @@ class JSStringIterator : public JSObject { Handle clazz, Handle iteratedString) : JSObject(runtime, *parent, *clazz), - iteratedString_(runtime, *iteratedString, runtime.getHeap()) {} + iteratedString_(runtime, *iteratedString, runtime.getHeap(), this) {} private: /// [[IteratedString]] @@ -221,7 +221,7 @@ class JSBigInt final : public JSObject { Handle parent, Handle clazz) : JSObject(runtime, *parent, *clazz), - primitiveValue_(runtime, *value, runtime.getHeap()) {} + primitiveValue_(runtime, *value, runtime.getHeap(), this) {} private: GCPointer primitiveValue_; diff --git a/include/hermes/VM/PropertyAccessor.h b/include/hermes/VM/PropertyAccessor.h index 3fe62fa5c64..4a65e224ecc 100644 --- a/include/hermes/VM/PropertyAccessor.h +++ b/include/hermes/VM/PropertyAccessor.h @@ -20,8 +20,8 @@ class PropertyAccessor final : public GCCell { Runtime &runtime, Handle getter, Handle setter) - : getter(runtime, *getter, runtime.getHeap()), - setter(runtime, *setter, runtime.getHeap()) {} + : getter(runtime, *getter, runtime.getHeap(), this), + setter(runtime, *setter, runtime.getHeap(), this) {} static const VTable vt; diff --git a/include/hermes/VM/SegmentedArray.h b/include/hermes/VM/SegmentedArray.h index 8b17b6fdbd6..30149dfb74e 100644 --- a/include/hermes/VM/SegmentedArray.h +++ b/include/hermes/VM/SegmentedArray.h @@ -161,6 +161,8 @@ class SegmentedArrayBase final : public VariableSizeRuntimeCell, using difference_type = ptrdiff_t; using pointer = GCHVType *; using reference = GCHVType &; + // Dereference a value + using cell_value_pair = std::pair; /// The SegmentedArray which owns this iterator. This iterator should never /// be compared against an iterator with a different owner. This is used to @@ -238,6 +240,24 @@ class SegmentedArrayBase final : public VariableSizeRuntimeCell, } } + /// Return the value and the owning cell of that value (either owner_ or the + /// extra Segment allocated for the current index_). 
+ /// Note: this is a temporary workaround to pass the correct owning object + /// pointer to the write barrier. Once we support large allocation, we could + /// get rid of this (and probably the entire iterator here). + cell_value_pair get_cell_value() { + assert( + index_ < owner_->size(base_) && + "Trying to read from an index outside the size"); + // Almost all arrays fit entirely in the inline storage. + if (LLVM_LIKELY(index_ < kValueToSegmentThreshold)) { + return {owner_->inlineStorage()[index_], owner_}; + } else { + auto *segment = owner_->segmentAt(base_, toSegment(index_)); + return {segment->at(toInterior(index_)), segment}; + } + } + pointer operator->() { return &**this; } @@ -288,7 +308,25 @@ class SegmentedArrayBase final : public VariableSizeRuntimeCell, /// Sets the element located at \p index to \p val. template void set(Runtime &runtime, TotalIndex index, HVType val) { - atRef(runtime, index).set(val, runtime.getHeap()); + assert(index < size(runtime) && "Out-of-bound access"); + if constexpr (inl == Inline::Yes) { + assert( + index < kValueToSegmentThreshold && + "Using the inline storage accessor when the index is larger than the " + "inline storage"); + inlineStorage()[index].set(val, runtime.getHeap(), this); + } + // The caller may not set inl to Inline::Yes when the index is actually in + // inline storage. + if (index < kValueToSegmentThreshold) { + inlineStorage()[index].set(val, runtime.getHeap(), this); + } else { + auto segmentNumber = toSegment(index); + auto *segment = segmentAt(runtime, segmentNumber); + auto &elm = segment->at(toInterior(index)); + // elm lives in segment, which is not the same cell as SegmentedArrayBase. + elm.set(val, runtime.getHeap(), segment); + } } template void setNonPtr(Runtime &runtime, TotalIndex index, HVType val) { diff --git a/include/hermes/VM/StorageProvider.h b/include/hermes/VM/StorageProvider.h index 41d87f82ac5..be3887c755a 100644 --- a/include/hermes/VM/StorageProvider.h +++ b/include/hermes/VM/StorageProvider.h @@ -37,20 +37,21 @@ class StorageProvider { /// @} - /// Create a new segment memory space. - llvh::ErrorOr newStorage() { - return newStorage(nullptr); + /// Create a new segment memory space with the given size \p sz. + llvh::ErrorOr newStorage(size_t sz) { + return newStorage(nullptr, sz); } - /// Create a new segment memory space and give this memory the name \p name. - /// \return A pointer to a block of memory that has - /// AlignedHeapSegment::storageSize() bytes, and is aligned on - /// AlignedHeapSegment::storageSize(). - llvh::ErrorOr newStorage(const char *name); + /// \return A pointer to a block of memory that has \p sz bytes, and is + /// aligned on AlignedHeapSegmentBase::kSegmentUnitSize. Note that \p sz + /// must be equal to or a multiple of + /// AlignedHeapSegmentBase::kSegmentUnitSize. + llvh::ErrorOr newStorage(const char *name, size_t sz); /// Delete the given segment's memory space, and make it available for re-use. - /// \post Nothing in the range [storage, storage + - /// AlignedHeapSegment::storageSize()) is valid memory to be read or written. - void deleteStorage(void *storage); + /// Note that \p sz must be the same size that was used to allocate \p storage. + /// \post Nothing in the range [storage, storage + sz) is valid memory to be + /// read or written. + void deleteStorage(void *storage, size_t sz); /// The number of storages this provider has allocated in its lifetime.
size_t numSucceededAllocs() const; @@ -67,8 +68,8 @@ class StorageProvider { size_t numLiveAllocs() const; protected: - virtual llvh::ErrorOr newStorageImpl(const char *name) = 0; - virtual void deleteStorageImpl(void *storage) = 0; + virtual llvh::ErrorOr newStorageImpl(const char *name, size_t sz) = 0; + virtual void deleteStorageImpl(void *storage, size_t sz) = 0; private: size_t numSucceededAllocs_{0}; diff --git a/include/hermes/VM/StringPrimitive.h b/include/hermes/VM/StringPrimitive.h index e98cec3f75e..e3e0a8d72b6 100644 --- a/include/hermes/VM/StringPrimitive.h +++ b/include/hermes/VM/StringPrimitive.h @@ -743,7 +743,7 @@ class BufferedStringPrimitive final : public StringPrimitive { Handle> concatBuffer) : StringPrimitive(length) { concatBufferHV_.set( - HermesValue::encodeObjectValue(*concatBuffer), runtime.getHeap()); + HermesValue::encodeObjectValue(*concatBuffer), runtime.getHeap(), this); assert( concatBuffer->contents_.size() >= length && "length exceeds size of concatenation buffer"); diff --git a/include/hermes/VM/sh_segment_info.h b/include/hermes/VM/sh_segment_info.h index ae4c7ebdf51..7683afc7b9e 100644 --- a/include/hermes/VM/sh_segment_info.h +++ b/include/hermes/VM/sh_segment_info.h @@ -12,6 +12,15 @@ /// contain segment-specific information. typedef struct SHSegmentInfo { unsigned index; + /// The storage size for this segment. We practically don't support segment + /// with size larger than UINT32_MAX. + unsigned segmentSize; + /// Pointer that points to the CardStatus array for this segment. + /// Erase the actual type AtomicIfConcurrent here to avoid using + /// C++ type and forward declaring nested type. + void *cards; + /// Pointer that points to the boundary array for this segment. + int8_t *boundaries; } SHSegmentInfo; #endif diff --git a/lib/VM/ArrayStorage.cpp b/lib/VM/ArrayStorage.cpp index a4fc8acba0c..9e1765ce3fe 100644 --- a/lib/VM/ArrayStorage.cpp +++ b/lib/VM/ArrayStorage.cpp @@ -103,7 +103,8 @@ ExecutionStatus ArrayStorageBase::reallocateToLarger( { GCHVType *from = self->data() + fromFirst; GCHVType *to = newSelf->data() + toFirst; - GCHVType::uninitialized_copy(from, from + copySize, to, runtime.getHeap()); + GCHVType::uninitialized_copy( + from, from + copySize, to, runtime.getHeap(), self); } // Initialize the elements before the first copied element. @@ -111,7 +112,8 @@ ExecutionStatus ArrayStorageBase::reallocateToLarger( newSelf->data(), newSelf->data() + toFirst, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + newSelf); // Initialize the elements after the last copied element and toLast. if (toFirst + copySize < toLast) { @@ -119,7 +121,8 @@ ExecutionStatus ArrayStorageBase::reallocateToLarger( newSelf->data() + toFirst + copySize, newSelf->data() + toLast, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + newSelf); } newSelf->size_.store(toLast, std::memory_order_release); @@ -151,7 +154,8 @@ void ArrayStorageBase::resizeWithinCapacity( self->data() + sz, self->data() + newSize, HVType::encodeEmptyValue(), - gc); + gc, + self); } else if (newSize < sz) { // Execute write barriers on elements about to be conceptually changed to // null. @@ -184,7 +188,8 @@ ExecutionStatus ArrayStorageBase::shift( self->data() + fromFirst, self->data() + fromFirst + copySize, self->data() + toFirst, - runtime.getHeap()); + runtime.getHeap(), + self); } else if (fromFirst < toFirst) { // Copying to the right, need to copy backwards to avoid overwriting what // is being copied. 
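The branch above copies front-to-back, while the branch in the next hunk has to run back-to-front: when the destination range overlaps the source and sits above it, a forward copy would overwrite elements it has not read yet. A minimal illustration of that rule with plain standard algorithms (not the GC-aware helpers in this patch):

  #include <algorithm>
  #include <array>

  int main() {
    std::array<int, 6> a{1, 2, 3, 4, 5, 6};
    // Shift left by two (destination below source): front-to-back is safe.
    std::copy(a.begin() + 2, a.end(), a.begin()); // a = 3 4 5 6 5 6

    std::array<int, 6> b{1, 2, 3, 4, 5, 6};
    // Shift right by two (destination above source): must run back-to-front,
    // otherwise the still-unread tail of the source would be overwritten.
    std::copy_backward(b.begin(), b.begin() + 4, b.end()); // b = 1 2 1 2 3 4
  }
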
@@ -192,7 +197,8 @@ ExecutionStatus ArrayStorageBase::shift( self->data() + fromFirst, self->data() + fromFirst + copySize, self->data() + toFirst + copySize, - runtime.getHeap()); + runtime.getHeap(), + self); } // Initialize the elements which were emptied in front. @@ -200,7 +206,8 @@ ExecutionStatus ArrayStorageBase::shift( self->data(), self->data() + toFirst, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + self); // Initialize the elements between the last copied element and toLast. if (toFirst + copySize < toLast) { @@ -208,7 +215,8 @@ ExecutionStatus ArrayStorageBase::shift( self->data() + toFirst + copySize, self->data() + toLast, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + self); } if (toLast < self->size()) { // Some elements are becoming unreachable, let the GC know. diff --git a/lib/VM/Domain.cpp b/lib/VM/Domain.cpp index c0d2b7b3916..f833950262d 100644 --- a/lib/VM/Domain.cpp +++ b/lib/VM/Domain.cpp @@ -167,7 +167,7 @@ ExecutionStatus Domain::importCJSModuleTable( return ExecutionStatus::EXCEPTION; } - self->throwingRequire_.set(runtime, *requireFn, runtime.getHeap()); + self->throwingRequire_.set(runtime, *requireFn, runtime.getHeap(), *self); } else { cjsModules = self->cjsModules_.get(runtime); } @@ -308,7 +308,7 @@ ExecutionStatus Domain::importCJSModuleTable( } } - self->cjsModules_.set(runtime, cjsModules.get(), runtime.getHeap()); + self->cjsModules_.set(runtime, cjsModules.get(), runtime.getHeap(), *self); return ExecutionStatus::RETURNED; } @@ -343,8 +343,8 @@ Handle RequireContext::create( runtime.getHiddenClassForPrototype( *objProto, numOverlapSlots())); auto self = JSObjectInit::initToHandle(runtime, cell); - self->domain_.set(runtime, *domain, runtime.getHeap()); - self->dirname_.set(runtime, *dirname, runtime.getHeap()); + self->domain_.set(runtime, *domain, runtime.getHeap(), *self); + self->dirname_.set(runtime, *dirname, runtime.getHeap(), *self); return self; } diff --git a/lib/VM/DummyObject.cpp b/lib/VM/DummyObject.cpp index cab35da5c22..244c43e4e5a 100644 --- a/lib/VM/DummyObject.cpp +++ b/lib/VM/DummyObject.cpp @@ -53,7 +53,7 @@ void DummyObject::releaseExtMem(GC &gc) { } void DummyObject::setPointer(GC &gc, DummyObject *obj) { - other.set(gc.getPointerBase(), obj, gc); + other.set(gc.getPointerBase(), obj, gc, this); } /* static */ constexpr CellKind DummyObject::getCellKind() { diff --git a/lib/VM/FastArray.cpp b/lib/VM/FastArray.cpp index 0189596cf9e..e4fd8b262e5 100644 --- a/lib/VM/FastArray.cpp +++ b/lib/VM/FastArray.cpp @@ -101,7 +101,7 @@ CallResult> FastArray::create( return ExecutionStatus::EXCEPTION; self->indexedStorage_.setNonNull( - runtime, vmcast(*arrRes), runtime.getHeap()); + runtime, vmcast(*arrRes), runtime.getHeap(), *self); auto shv = SmallHermesValue::encodeNumberValue(0, runtime); self->setLength(runtime, shv); @@ -119,7 +119,7 @@ FastArray::pushSlow(Handle self, Runtime &runtime, Handle<> val) { ExecutionStatus::EXCEPTION)) return ExecutionStatus::EXCEPTION; - self->indexedStorage_.setNonNull(runtime, *storage, runtime.getHeap()); + self->indexedStorage_.setNonNull(runtime, *storage, runtime.getHeap(), *self); auto newSz = SmallHermesValue::encodeNumberValue(storage->size(), runtime); self->setLength(runtime, newSz); return ExecutionStatus::RETURNED; @@ -138,7 +138,7 @@ ExecutionStatus FastArray::appendSlow( ArrayStorageSmall::append(storage, runtime, otherStorage) == ExecutionStatus::EXCEPTION)) return ExecutionStatus::EXCEPTION; - self->indexedStorage_.setNonNull(runtime, 
*storage, runtime.getHeap()); + self->indexedStorage_.setNonNull(runtime, *storage, runtime.getHeap(), *self); auto newSz = SmallHermesValue::encodeNumberValue(storage->size(), runtime); self->setLength(runtime, newSz); return ExecutionStatus::RETURNED; diff --git a/lib/VM/GCBase.cpp b/lib/VM/GCBase.cpp index 6b29d1979fc..719cce4fcdc 100644 --- a/lib/VM/GCBase.cpp +++ b/lib/VM/GCBase.cpp @@ -965,23 +965,46 @@ bool GCBase::shouldSanitizeHandles() { runtimeGCDispatch([&](auto *gc) { gc->name(arg1, arg2); }); \ } -GCBASE_BARRIER_2(writeBarrier, const GCHermesValue *, HermesValue); -GCBASE_BARRIER_2(writeBarrier, const GCSmallHermesValue *, SmallHermesValue); -GCBASE_BARRIER_2(writeBarrier, const GCPointerBase *, const GCCell *); -GCBASE_BARRIER_2(constructorWriteBarrier, const GCHermesValue *, HermesValue); +GCBASE_BARRIER_2( + writeBarrier, + const GCCell *, + const GCHermesValue *, + HermesValue); +GCBASE_BARRIER_2( + writeBarrier, + const GCCell *, + const GCSmallHermesValue *, + SmallHermesValue); +GCBASE_BARRIER_2( + writeBarrier, + const GCCell *, + const GCPointerBase *, + const GCCell *); +GCBASE_BARRIER_2( + constructorWriteBarrier, + const GCCell *, + const GCHermesValue *, + HermesValue); GCBASE_BARRIER_2( constructorWriteBarrier, + const GCCell *, const GCSmallHermesValue *, SmallHermesValue); GCBASE_BARRIER_2( constructorWriteBarrier, + const GCCell *, const GCPointerBase *, const GCCell *); GCBASE_BARRIER_2(writeBarrierRange, const GCHermesValue *, uint32_t); GCBASE_BARRIER_2(writeBarrierRange, const GCSmallHermesValue *, uint32_t); -GCBASE_BARRIER_2(constructorWriteBarrierRange, const GCHermesValue *, uint32_t); GCBASE_BARRIER_2( constructorWriteBarrierRange, + const GCCell *, + const GCHermesValue *, + uint32_t); +GCBASE_BARRIER_2( + constructorWriteBarrierRange, + const GCCell *, const GCSmallHermesValue *, uint32_t); GCBASE_BARRIER_1(snapshotWriteBarrier, const GCHermesValue *); diff --git a/lib/VM/HiddenClass.cpp b/lib/VM/HiddenClass.cpp index fcd069eb0a5..0ada602aced 100644 --- a/lib/VM/HiddenClass.cpp +++ b/lib/VM/HiddenClass.cpp @@ -213,7 +213,7 @@ Handle HiddenClass::copyToNewDictionary( initializeMissingPropertyMap(selfHandle, runtime); newClassHandle->propertyMap_.set( - runtime, selfHandle->propertyMap_, runtime.getHeap()); + runtime, selfHandle->propertyMap_, runtime.getHeap(), *newClassHandle); selfHandle->propertyMap_.setNull(runtime.getHeap()); LLVM_DEBUG( @@ -436,7 +436,7 @@ CallResult, SlotIndex>> HiddenClass::addProperty( return ExecutionStatus::EXCEPTION; } childHandle->propertyMap_.set( - runtime, selfHandle->propertyMap_, runtime.getHeap()); + runtime, selfHandle->propertyMap_, runtime.getHeap(), *childHandle); } else { LLVM_DEBUG( dbgs() << "Adding property " << runtime.formatSymbolID(name) @@ -512,7 +512,7 @@ CallResult, SlotIndex>> HiddenClass::addProperty( // Move the map to the child class. childHandle->propertyMap_.set( - runtime, selfHandle->propertyMap_, runtime.getHeap()); + runtime, selfHandle->propertyMap_, runtime.getHeap(), *childHandle); selfHandle->propertyMap_.setNull(runtime.getHeap()); if (LLVM_UNLIKELY( @@ -589,7 +589,7 @@ Handle HiddenClass::updateProperty( descPair->second.flags = newFlags; existingChild->propertyMap_.set( - runtime, selfHandle->propertyMap_, runtime.getHeap()); + runtime, selfHandle->propertyMap_, runtime.getHeap(), existingChild); } else { LLVM_DEBUG( dbgs() << "Updating property " << runtime.formatSymbolID(name) @@ -634,7 +634,7 @@ Handle HiddenClass::updateProperty( // Move the updated map to the child class. 
childHandle->propertyMap_.set( - runtime, selfHandle->propertyMap_, runtime.getHeap()); + runtime, selfHandle->propertyMap_, runtime.getHeap(), *childHandle); selfHandle->propertyMap_.setNull(runtime.getHeap()); return childHandle; @@ -826,7 +826,8 @@ ExecutionStatus HiddenClass::addToPropertyMap( return ExecutionStatus::EXCEPTION; } - selfHandle->propertyMap_.setNonNull(runtime, *updatedMap, runtime.getHeap()); + selfHandle->propertyMap_.setNonNull( + runtime, *updatedMap, runtime.getHeap(), *selfHandle); return ExecutionStatus::RETURNED; } @@ -889,7 +890,8 @@ void HiddenClass::initializeMissingPropertyMap( inserted->first->slot = slotIndex++; } - selfHandle->propertyMap_.setNonNull(runtime, *mapHandle, runtime.getHeap()); + selfHandle->propertyMap_.setNonNull( + runtime, *mapHandle, runtime.getHeap(), *selfHandle); } void HiddenClass::stealPropertyMapFromParent( @@ -913,7 +915,8 @@ void HiddenClass::stealPropertyMapFromParent( self->propertyMap_.set( runtime, self->parent_.getNonNull(runtime)->propertyMap_, - runtime.getHeap()); + runtime.getHeap(), + self); self->parent_.getNonNull(runtime)->propertyMap_.setNull(runtime.getHeap()); // Does our class add a new property? diff --git a/lib/VM/Interpreter.cpp b/lib/VM/Interpreter.cpp index 8e00660452b..3179b150a39 100644 --- a/lib/VM/Interpreter.cpp +++ b/lib/VM/Interpreter.cpp @@ -2077,16 +2077,16 @@ CallResult Interpreter::interpretFunction( } CASE(StoreToEnvironment) { - vmcast(O1REG(StoreToEnvironment)) - ->slot(ip->iStoreToEnvironment.op2) - .set(O3REG(StoreToEnvironment), runtime.getHeap()); + auto *environment = vmcast(O1REG(StoreToEnvironment)); + environment->slot(ip->iStoreToEnvironment.op2) + .set(O3REG(StoreToEnvironment), runtime.getHeap(), environment); ip = NEXTINST(StoreToEnvironment); DISPATCH; } CASE(StoreToEnvironmentL) { - vmcast(O1REG(StoreToEnvironmentL)) - ->slot(ip->iStoreToEnvironmentL.op2) - .set(O3REG(StoreToEnvironmentL), runtime.getHeap()); + auto *environment = vmcast(O1REG(StoreToEnvironmentL)); + environment->slot(ip->iStoreToEnvironmentL.op2) + .set(O3REG(StoreToEnvironmentL), runtime.getHeap(), environment); ip = NEXTINST(StoreToEnvironmentL); DISPATCH; } diff --git a/lib/VM/JSCallSite.cpp b/lib/VM/JSCallSite.cpp index 2e6f0c71f5e..677a9f95fba 100644 --- a/lib/VM/JSCallSite.cpp +++ b/lib/VM/JSCallSite.cpp @@ -40,7 +40,7 @@ JSCallSite::JSCallSite( Handle error, size_t stackFrameIndex) : JSObject(runtime, *parent, *clazz), - error_(runtime, *error, runtime.getHeap()), + error_(runtime, *error, runtime.getHeap(), this), stackFrameIndex_(stackFrameIndex) { assert( error_.getNonNull(runtime)->getStackTrace() && diff --git a/lib/VM/JSCallableProxy.cpp b/lib/VM/JSCallableProxy.cpp index 105411dd043..7accc588ae0 100644 --- a/lib/VM/JSCallableProxy.cpp +++ b/lib/VM/JSCallableProxy.cpp @@ -65,8 +65,8 @@ void JSCallableProxy::setTargetAndHandler( Runtime &runtime, Handle target, Handle handler) { - slots_.target.set(runtime, target.get(), runtime.getHeap()); - slots_.handler.set(runtime, handler.get(), runtime.getHeap()); + slots_.target.set(runtime, target.get(), runtime.getHeap(), this); + slots_.handler.set(runtime, handler.get(), runtime.getHeap(), this); } CallResult JSCallableProxy::isConstructor(Runtime &runtime) { diff --git a/lib/VM/JSError.cpp b/lib/VM/JSError.cpp index 0a4867d6a05..1141dc533b4 100644 --- a/lib/VM/JSError.cpp +++ b/lib/VM/JSError.cpp @@ -507,7 +507,8 @@ ExecutionStatus JSError::recordStackTrace( } } } - selfHandle->domains_.set(runtime, domains.get(), runtime.getHeap()); + 
selfHandle->domains_.set( + runtime, domains.get(), runtime.getHeap(), *selfHandle); // Remove the last entry. stack->pop_back(); @@ -521,7 +522,8 @@ ExecutionStatus JSError::recordStackTrace( "Function names and stack trace must have same size."); selfHandle->stacktrace_ = std::move(stack); - selfHandle->funcNames_.set(runtime, *funcNames, runtime.getHeap()); + selfHandle->funcNames_.set( + runtime, *funcNames, runtime.getHeap(), *selfHandle); return ExecutionStatus::RETURNED; } diff --git a/lib/VM/JSGeneratorObject.cpp b/lib/VM/JSGeneratorObject.cpp index 40e7ca07562..65bf8500a07 100644 --- a/lib/VM/JSGeneratorObject.cpp +++ b/lib/VM/JSGeneratorObject.cpp @@ -43,7 +43,7 @@ CallResult> JSGeneratorObject::create( parentHandle, runtime.getHiddenClassForPrototype( *parentHandle, numOverlapSlots())); - cell->innerFunction_.set(runtime, *innerFunction, runtime.getHeap()); + cell->innerFunction_.set(runtime, *innerFunction, runtime.getHeap(), cell); return JSObjectInit::initToPseudoHandle(runtime, cell); } diff --git a/lib/VM/JSObject.cpp b/lib/VM/JSObject.cpp index 86791f701f0..e22aefb02cf 100644 --- a/lib/VM/JSObject.cpp +++ b/lib/VM/JSObject.cpp @@ -101,7 +101,7 @@ PseudoHandle JSObject::create( Runtime &runtime, Handle clazz) { auto obj = JSObject::create(runtime, clazz->getNumProperties()); - obj->clazz_.setNonNull(runtime, *clazz, runtime.getHeap()); + obj->clazz_.setNonNull(runtime, *clazz, runtime.getHeap(), obj.get()); // If the hidden class has index like property, we need to clear the fast path // flag. if (LLVM_UNLIKELY( @@ -115,7 +115,7 @@ PseudoHandle JSObject::create( Handle parentHandle, Handle clazz) { PseudoHandle obj = JSObject::create(runtime, clazz); - obj->parent_.set(runtime, parentHandle.get(), runtime.getHeap()); + obj->parent_.set(runtime, parentHandle.get(), runtime.getHeap(), obj.get()); return obj; } @@ -224,7 +224,7 @@ CallResult JSObject::setParent( } } // 9. - self->parent_.set(runtime, parent, runtime.getHeap()); + self->parent_.set(runtime, parent, runtime.getHeap(), self); // 10. return true; } @@ -237,7 +237,8 @@ void JSObject::allocateNewSlotStorage( // If it is a direct property, just store the value and we are done. if (LLVM_LIKELY(newSlotIndex < DIRECT_PROPERTY_SLOTS)) { auto shv = SmallHermesValue::encodeHermesValue(*valueHandle, runtime); - selfHandle->directProps()[newSlotIndex].set(shv, runtime.getHeap()); + selfHandle->directProps()[newSlotIndex].set( + shv, runtime.getHeap(), *selfHandle); return; } @@ -251,7 +252,7 @@ void JSObject::allocateNewSlotStorage( auto arrRes = runtime.ignoreAllocationFailure( PropStorage::create(runtime, DEFAULT_PROPERTY_CAPACITY)); selfHandle->propStorage_.setNonNull( - runtime, vmcast(arrRes), runtime.getHeap()); + runtime, vmcast(arrRes), runtime.getHeap(), *selfHandle); } else if (LLVM_UNLIKELY( newSlotIndex >= selfHandle->propStorage_.getNonNull(runtime)->capacity())) { @@ -261,7 +262,8 @@ void JSObject::allocateNewSlotStorage( "allocated slot must be at end"); auto hnd = runtime.makeMutableHandle(selfHandle->propStorage_); PropStorage::resize(hnd, runtime, newSlotIndex + 1); - selfHandle->propStorage_.setNonNull(runtime, *hnd, runtime.getHeap()); + selfHandle->propStorage_.setNonNull( + runtime, *hnd, runtime.getHeap(), *selfHandle); } { @@ -1923,7 +1925,8 @@ CallResult JSObject::deleteNamed( // Perform the actual deletion. 
auto newClazz = HiddenClass::deleteProperty( runtime.makeHandle(selfHandle->clazz_), runtime, *pos); - selfHandle->clazz_.setNonNull(runtime, *newClazz, runtime.getHeap()); + selfHandle->clazz_.setNonNull( + runtime, *newClazz, runtime.getHeap(), *selfHandle); return true; } @@ -2023,7 +2026,8 @@ CallResult JSObject::deleteComputed( // Remove the property descriptor. auto newClazz = HiddenClass::deleteProperty( runtime.makeHandle(selfHandle->clazz_), runtime, *pos); - selfHandle->clazz_.setNonNull(runtime, *newClazz, runtime.getHeap()); + selfHandle->clazz_.setNonNull( + runtime, *newClazz, runtime.getHeap(), *selfHandle); } else if (LLVM_UNLIKELY(selfHandle->flags_.proxyObject)) { CallResult> key = toPropertyKey(runtime, nameValPrimitiveHandle); if (key == ExecutionStatus::EXCEPTION) @@ -2612,7 +2616,8 @@ ExecutionStatus JSObject::seal(Handle selfHandle, Runtime &runtime) { auto newClazz = HiddenClass::makeAllNonConfigurable( runtime.makeHandle(selfHandle->clazz_), runtime); - selfHandle->clazz_.setNonNull(runtime, *newClazz, runtime.getHeap()); + selfHandle->clazz_.setNonNull( + runtime, *newClazz, runtime.getHeap(), *selfHandle); selfHandle->flags_.sealed = true; @@ -2637,7 +2642,8 @@ ExecutionStatus JSObject::freeze( auto newClazz = HiddenClass::makeAllReadOnly( runtime.makeHandle(selfHandle->clazz_), runtime); - selfHandle->clazz_.setNonNull(runtime, *newClazz, runtime.getHeap()); + selfHandle->clazz_.setNonNull( + runtime, *newClazz, runtime.getHeap(), *selfHandle); selfHandle->flags_.frozen = true; selfHandle->flags_.sealed = true; @@ -2657,7 +2663,8 @@ void JSObject::updatePropertyFlagsWithoutTransitions( flagsToClear, flagsToSet, props); - selfHandle->clazz_.setNonNull(runtime, *newClazz, runtime.getHeap()); + selfHandle->clazz_.setNonNull( + runtime, *newClazz, runtime.getHeap(), *selfHandle); } CallResult JSObject::isExtensible( @@ -2782,7 +2789,8 @@ ExecutionStatus JSObject::addOwnPropertyImpl( if (LLVM_UNLIKELY(addResult == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } - selfHandle->clazz_.setNonNull(runtime, *addResult->first, runtime.getHeap()); + selfHandle->clazz_.setNonNull( + runtime, *addResult->first, runtime.getHeap(), *selfHandle); allocateNewSlotStorage( selfHandle, runtime, addResult->second, valueOrAccessor); @@ -2825,7 +2833,8 @@ CallResult JSObject::updateOwnProperty( runtime, propertyPos, desc.flags); - selfHandle->clazz_.setNonNull(runtime, *newClazz, runtime.getHeap()); + selfHandle->clazz_.setNonNull( + runtime, *newClazz, runtime.getHeap(), *selfHandle); } if (updateStatus->first == PropertyUpdateStatus::done) @@ -3006,9 +3015,11 @@ JSObject::checkPropertyUpdate( // If not setting the getter or the setter, re-use the current one. 
if (!dpFlags.setGetter) - newAccessor->getter.set(runtime, curAccessor->getter, runtime.getHeap()); + newAccessor->getter.set( + runtime, curAccessor->getter, runtime.getHeap(), newAccessor); if (!dpFlags.setSetter) - newAccessor->setter.set(runtime, curAccessor->setter, runtime.getHeap()); + newAccessor->setter.set( + runtime, curAccessor->setter, runtime.getHeap(), newAccessor); } // 8.12.9 [12] For each attribute field of Desc that is present, set the diff --git a/lib/VM/JSProxy.cpp b/lib/VM/JSProxy.cpp index ad7fc4b516b..cdfea6d0274 100644 --- a/lib/VM/JSProxy.cpp +++ b/lib/VM/JSProxy.cpp @@ -108,8 +108,8 @@ void JSProxy::setTargetAndHandler( Handle target, Handle handler) { auto &slots = detail::slots(*selfHandle); - slots.target.set(runtime, target.get(), runtime.getHeap()); - slots.handler.set(runtime, handler.get(), runtime.getHeap()); + slots.target.set(runtime, target.get(), runtime.getHeap(), *selfHandle); + slots.handler.set(runtime, handler.get(), runtime.getHeap(), *selfHandle); } namespace { diff --git a/lib/VM/JSRegExp.cpp b/lib/VM/JSRegExp.cpp index a8140cfd34c..442753492b9 100644 --- a/lib/VM/JSRegExp.cpp +++ b/lib/VM/JSRegExp.cpp @@ -101,7 +101,7 @@ void JSRegExp::initialize( assert( pattern && flags && "Null pattern and/or flags passed to JSRegExp::initialize"); - selfHandle->pattern_.set(runtime, *pattern, runtime.getHeap()); + selfHandle->pattern_.set(runtime, *pattern, runtime.getHeap(), *selfHandle); DefinePropertyFlags dpf = DefinePropertyFlags::getDefaultNewPropertyFlags(); dpf.enumerable = 0; @@ -220,7 +220,8 @@ ExecutionStatus JSRegExp::initializeGroupNameMappingObj( return ExecutionStatus::EXCEPTION; } - selfHandle->groupNameMappings_.set(runtime, *obj, runtime.getHeap()); + selfHandle->groupNameMappings_.set( + runtime, *obj, runtime.getHeap(), *selfHandle); return ExecutionStatus::RETURNED; } @@ -231,7 +232,7 @@ Handle JSRegExp::getGroupNameMappings(Runtime &runtime) { } void JSRegExp::setGroupNameMappings(Runtime &runtime, JSObject *groupObj) { - groupNameMappings_.set(runtime, groupObj, runtime.getHeap()); + groupNameMappings_.set(runtime, groupObj, runtime.getHeap(), this); } void JSRegExp::initializeBytecode(llvh::ArrayRef bytecode) { diff --git a/lib/VM/JSTypedArray.cpp b/lib/VM/JSTypedArray.cpp index 518ff5d9293..5b21e57c09e 100644 --- a/lib/VM/JSTypedArray.cpp +++ b/lib/VM/JSTypedArray.cpp @@ -305,7 +305,7 @@ void JSTypedArrayBase::setBuffer( assert( self->getByteWidth() == byteWidth && "Cannot set to a buffer of a different byte width"); - self->buffer_.setNonNull(runtime, buf, runtime.getHeap()); + self->buffer_.setNonNull(runtime, buf, runtime.getHeap(), self); self->offset_ = offset; self->length_ = size / byteWidth; } diff --git a/lib/VM/LimitedStorageProvider.cpp b/lib/VM/LimitedStorageProvider.cpp index 90e3e6138b5..e9e0146a809 100644 --- a/lib/VM/LimitedStorageProvider.cpp +++ b/lib/VM/LimitedStorageProvider.cpp @@ -13,20 +13,22 @@ namespace hermes { namespace vm { -llvh::ErrorOr LimitedStorageProvider::newStorageImpl(const char *name) { +llvh::ErrorOr LimitedStorageProvider::newStorageImpl( + const char *name, + size_t sz) { if (limit_ < AlignedHeapSegment::storageSize()) { return make_error_code(OOMError::TestVMLimitReached); } - limit_ -= AlignedHeapSegment::storageSize(); - return delegate_->newStorage(name); + limit_ -= sz; + return delegate_->newStorage(name, sz); } -void LimitedStorageProvider::deleteStorageImpl(void *storage) { +void LimitedStorageProvider::deleteStorageImpl(void *storage, size_t sz) { if (!storage) { return; } - 
delegate_->deleteStorage(storage); - limit_ += AlignedHeapSegment::storageSize(); + delegate_->deleteStorage(storage, sz); + limit_ += sz; } } // namespace vm diff --git a/lib/VM/OrderedHashMap.cpp b/lib/VM/OrderedHashMap.cpp index b1d55758289..b17ee9502f7 100644 --- a/lib/VM/OrderedHashMap.cpp +++ b/lib/VM/OrderedHashMap.cpp @@ -75,7 +75,13 @@ template OrderedHashMapBase::OrderedHashMapBase( Runtime &runtime, Handle hashTableStorage) - : hashTable_(runtime, hashTableStorage.get(), runtime.getHeap()) {} + : hashTable_( + runtime, + hashTableStorage.get(), + runtime.getHeap(), + // The casted pointer is only used to provide the GCCell starting + // address. + static_cast(this)) {} template OrderedHashMapBase::OrderedHashMapBase() {} @@ -99,15 +105,18 @@ void OrderedHashMapBase::removeLinkedListNode( entry != lastIterationEntry_.get(runtime) && "Cannot remove the last entry"); if (entry->prevIterationEntry) { - entry->prevIterationEntry.getNonNull(runtime)->nextIterationEntry.set( - runtime, entry->nextIterationEntry, gc); + auto *prevIterationEntry = entry->prevIterationEntry.getNonNull(runtime); + prevIterationEntry->nextIterationEntry.set( + runtime, entry->nextIterationEntry, gc, prevIterationEntry); } if (entry->nextIterationEntry) { - entry->nextIterationEntry.getNonNull(runtime)->prevIterationEntry.set( - runtime, entry->prevIterationEntry, gc); + auto *nextIterationEntry = entry->nextIterationEntry.getNonNull(runtime); + nextIterationEntry->prevIterationEntry.set( + runtime, entry->prevIterationEntry, gc, nextIterationEntry); } if (entry == firstIterationEntry_.get(runtime)) { - firstIterationEntry_.set(runtime, entry->nextIterationEntry, gc); + firstIterationEntry_.set( + runtime, entry->nextIterationEntry, gc, static_cast(this)); } entry->prevIterationEntry.setNull(runtime.getHeap()); } @@ -207,7 +216,8 @@ ExecutionStatus OrderedHashMapBase::rehash( } rawSelf->deletedCount_ = 0; - rawSelf->hashTable_.setNonNull(runtime, newHashTable, runtime.getHeap()); + rawSelf->hashTable_.setNonNull( + runtime, newHashTable, runtime.getHeap(), *self); assert( rawSelf->hashTable_.getNonNull(runtime)->size(runtime) == rawSelf->capacity_ && @@ -270,7 +280,7 @@ ExecutionStatus OrderedHashMapBase::insert( self->lookupInBucket(runtime, bucket, key.getHermesValue()); if (entry) { // Element for the key already exists, update value and return. - entry->value.set(shv, runtime.getHeap()); + entry->value.set(shv, runtime.getHeap(), entry); return ExecutionStatus::RETURNED; } } @@ -337,11 +347,11 @@ ExecutionStatus OrderedHashMapBase::doInsert( // call it and set to newMapEntry one at a time. auto newMapEntry = runtime.makeHandle(std::move(*crtRes)); auto k = SmallHermesValue::encodeHermesValue(key.getHermesValue(), runtime); - newMapEntry->key.set(k, runtime.getHeap()); + newMapEntry->key.set(k, runtime.getHeap(), *newMapEntry); if constexpr (std::is_same_v) { auto v = SmallHermesValue::encodeHermesValue(value.getHermesValue(), runtime); - newMapEntry->value.set(v, runtime.getHeap()); + newMapEntry->value.set(v, runtime.getHeap(), *newMapEntry); } // After here, no allocation @@ -361,17 +371,17 @@ ExecutionStatus OrderedHashMapBase::doInsert( if (!rawSelf->firstIterationEntry_) { // If we are inserting the first ever element, update // first iteration entry pointer. 
- rawSelf->firstIterationEntry_.set(runtime, newMapEntry.get(), heap); - rawSelf->lastIterationEntry_.set(runtime, newMapEntry.get(), heap); + rawSelf->firstIterationEntry_.set(runtime, newMapEntry.get(), heap, *self); + rawSelf->lastIterationEntry_.set(runtime, newMapEntry.get(), heap, *self); } else { // Connect the new entry with the last entry. - rawSelf->lastIterationEntry_.getNonNull(runtime)->nextIterationEntry.set( - runtime, newMapEntry.get(), heap); + auto *previousLastEntry = rawSelf->lastIterationEntry_.getNonNull(runtime); + previousLastEntry->nextIterationEntry.set( + runtime, newMapEntry.get(), heap, previousLastEntry); newMapEntry->prevIterationEntry.set( - runtime, rawSelf->lastIterationEntry_, heap); + runtime, rawSelf->lastIterationEntry_, heap, *newMapEntry); - BucketType *previousLastEntry = rawSelf->lastIterationEntry_.get(runtime); - rawSelf->lastIterationEntry_.set(runtime, newMapEntry.get(), heap); + rawSelf->lastIterationEntry_.set(runtime, newMapEntry.get(), heap, *self); if (previousLastEntry && previousLastEntry->isDeleted()) { // If the last entry was a deleted entry, we no longer need to keep it. @@ -466,7 +476,11 @@ void OrderedHashMapBase::clear(Runtime &runtime) { // in case there is an iterator out there // pointing to the middle of the iteration chain. We need it to be // able to merge back eventually. - firstIterationEntry_.set(runtime, lastIterationEntry_, runtime.getHeap()); + firstIterationEntry_.set( + runtime, + lastIterationEntry_, + runtime.getHeap(), + static_cast(this)); firstIterationEntry_.getNonNull(runtime)->prevIterationEntry.setNull( runtime.getHeap()); size_ = 0; diff --git a/lib/VM/PrimitiveBox.cpp b/lib/VM/PrimitiveBox.cpp index dc4f620e0a6..bc80dc76fb3 100644 --- a/lib/VM/PrimitiveBox.cpp +++ b/lib/VM/PrimitiveBox.cpp @@ -81,7 +81,8 @@ void JSString::setPrimitiveString( auto shv = SmallHermesValue::encodeNumberValue(string->getStringLength(), runtime); JSObject::setNamedSlotValueUnsafe(*selfHandle, runtime, desc, shv); - selfHandle->primitiveValue_.set(runtime, *string, runtime.getHeap()); + selfHandle->primitiveValue_.set( + runtime, *string, runtime.getHeap(), *selfHandle); } bool JSString::_haveOwnIndexedImpl( diff --git a/lib/VM/SegmentedArray.cpp b/lib/VM/SegmentedArray.cpp index 93c4af99437..c46c7576106 100644 --- a/lib/VM/SegmentedArray.cpp +++ b/lib/VM/SegmentedArray.cpp @@ -62,7 +62,8 @@ void SegmentedArrayBase::Segment::setLength( data_ + len, data_ + newLength, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + this); length_.store(newLength, std::memory_order_release); } else if (newLength < len) { // If length is decreasing a write barrier needs to be done. @@ -193,8 +194,16 @@ ExecutionStatus SegmentedArrayBase::push_back( return ExecutionStatus::EXCEPTION; } const auto shv = HVType::encodeHermesValue(*value, runtime); - auto &elm = self->atRef(runtime, oldSize); - new (&elm) GCHVType(shv, runtime.getHeap()); + if (oldSize < kValueToSegmentThreshold) { + auto &elm = self->inlineStorage()[oldSize]; + new (&elm) GCHVType(shv, runtime.getHeap(), *self); + } else { + auto segmentNumber = toSegment(oldSize); + auto *segment = self->segmentAt(runtime, segmentNumber); + auto &elm = segment->at(toInterior(oldSize)); + // elm lives in segment, which is not the same cell as SegmentedArrayBase. 
+ new (&elm) GCHVType(shv, runtime.getHeap(), segment); + } return ExecutionStatus::RETURNED; } @@ -265,7 +274,7 @@ void SegmentedArrayBase::allocateSegment( "Allocating into a non-empty segment"); PseudoHandle c = Segment::create(runtime); self->segmentAtPossiblyUnallocated(segment)->set( - HVType::encodeObjectValue(c.get(), runtime), runtime.getHeap()); + HVType::encodeObjectValue(c.get(), runtime), runtime.getHeap(), *self); } template @@ -292,7 +301,8 @@ ExecutionStatus SegmentedArrayBase::growRight( self->inlineStorage(), self->inlineStorage() + numSlotsUsed, newSegmentedArray->inlineStorage(), - runtime.getHeap()); + runtime.getHeap(), + *self); // Set the size of the new array to be the same as the old array's size. newSegmentedArray->numSlotsUsed_.store( numSlotsUsed, std::memory_order_release); @@ -347,13 +357,15 @@ void SegmentedArrayBase::growLeftWithinCapacity( self->begin(runtime), self->end(runtime) - amount, self->end(runtime), - runtime.getHeap()); + runtime.getHeap(), + self.get()); // Fill the beginning with empty values. GCHVType::fill( self->begin(runtime), self->begin(runtime) + amount, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + self.get()); } template @@ -392,7 +404,8 @@ void SegmentedArrayBase::increaseSizeWithinCapacity( inlineStorage() + currSize, inlineStorage() + finalSize, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + this); // Set the final size. numSlotsUsed_.store(finalSize, std::memory_order_release); return; @@ -407,7 +420,8 @@ void SegmentedArrayBase::increaseSizeWithinCapacity( inlineStorage() + currSize, inlineStorage() + kValueToSegmentThreshold, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + this); } segmentAt(runtime, segment)->setLength(runtime, segmentLength); } @@ -440,7 +454,8 @@ SegmentedArrayBase::increaseSize( self->inlineStorage() + currSize, self->inlineStorage() + kValueToSegmentThreshold, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + self.get()); // Set the size to the inline storage threshold. self->numSlotsUsed_.store( kValueToSegmentThreshold, std::memory_order_release); @@ -466,7 +481,8 @@ SegmentedArrayBase::increaseSize( self->numSlotsUsed_.load(std::memory_order_relaxed), self->inlineStorage() + newNumSlotsUsed, HVType::encodeEmptyValue(), - runtime.getHeap()); + runtime.getHeap(), + self.get()); self->numSlotsUsed_.store(newNumSlotsUsed, std::memory_order_release); // Allocate a handle to track the current array. 
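The hunks in this file all apply the convention from the HermesValue.h change earlier in the patch: a GC-managed slot is written through set()/fill()/copy() together with the cell that physically contains it, and for elements spilled into a Segment that cell is the Segment rather than the SegmentedArray. A hedged sketch of the convention for a hypothetical cell type (MyPair and its fields are illustrative only, and the usual VTable/metadata boilerplate is omitted):

  class MyPair final : public GCCell {
    GCHermesValue first_;        // slot physically inside this MyPair cell
    GCPointer<JSObject> second_; // likewise

   public:
    void setFirst(HermesValue v, GC &gc) {
      // Pass the cell that contains the slot -- here the pair itself.
      first_.set(v, gc, this);
    }
    void setSecond(PointerBase &base, JSObject *obj, GC &gc) {
      second_.set(base, obj, gc, this);
    }
  };

When a slot lives in a separately allocated cell, as with the array's Segments above, that cell is the one handed to the barrier, not the outer object that reached it.
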
diff --git a/lib/VM/StaticH.cpp b/lib/VM/StaticH.cpp index 5783041cef4..1f0bd718486 100644 --- a/lib/VM/StaticH.cpp +++ b/lib/VM/StaticH.cpp @@ -519,9 +519,10 @@ extern "C" void _sh_ljs_store_to_env( SHLegacyValue env, SHLegacyValue val, uint32_t index) { - vmcast(HermesValue::fromRaw(env.raw)) - ->slot(index) - .set(HermesValue::fromRaw(val.raw), getRuntime(shr).getHeap()); + auto *environment = vmcast(HermesValue::fromRaw(env.raw)); + + environment->slot(index).set( + HermesValue::fromRaw(val.raw), getRuntime(shr).getHeap(), environment); } extern "C" void _sh_ljs_store_np_to_env( diff --git a/lib/VM/StorageProvider.cpp b/lib/VM/StorageProvider.cpp index 67fed1eb8d3..b79cab1325a 100644 --- a/lib/VM/StorageProvider.cpp +++ b/lib/VM/StorageProvider.cpp @@ -7,11 +7,13 @@ #include "hermes/VM/StorageProvider.h" +#include "hermes/ADT/BitArray.h" #include "hermes/Support/CheckedMalloc.h" #include "hermes/Support/Compiler.h" #include "hermes/Support/OSCompat.h" #include "hermes/VM/AlignedHeapSegment.h" +#include "llvh/ADT/BitVector.h" #include "llvh/ADT/DenseMap.h" #include "llvh/Support/ErrorHandling.h" #include "llvh/Support/MathExtras.h" @@ -55,14 +57,18 @@ namespace vm { namespace { +/// Minimum segment storage size. Any larger segment size should be a multiple +/// of it. +static constexpr size_t kSegmentUnitSize = + AlignedHeapSegmentBase::kSegmentUnitSize; + bool isAligned(void *p) { - return (reinterpret_cast(p) & - (AlignedHeapSegment::storageSize() - 1)) == 0; + return (reinterpret_cast(p) & (kSegmentUnitSize - 1)) == 0; } char *alignAlloc(void *p) { - return reinterpret_cast(llvh::alignTo( - reinterpret_cast(p), AlignedHeapSegment::storageSize())); + return reinterpret_cast( + llvh::alignTo(reinterpret_cast(p), kSegmentUnitSize)); } void *getMmapHint() { @@ -78,67 +84,104 @@ void *getMmapHint() { class VMAllocateStorageProvider final : public StorageProvider { public: - llvh::ErrorOr newStorageImpl(const char *name) override; - void deleteStorageImpl(void *storage) override; + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override; + void deleteStorageImpl(void *storage, size_t sz) override; }; class ContiguousVAStorageProvider final : public StorageProvider { public: ContiguousVAStorageProvider(size_t size) - : size_(llvh::alignTo(size)) { - auto result = oscompat::vm_reserve_aligned( - size_, AlignedHeapSegment::storageSize(), getMmapHint()); + : size_(llvh::alignTo(size)), + statusBits_(size_ / kSegmentUnitSize) { + auto result = + oscompat::vm_reserve_aligned(size_, kSegmentUnitSize, getMmapHint()); if (!result) hermes_fatal("Contiguous storage allocation failed.", result.getError()); - level_ = start_ = static_cast(*result); + start_ = static_cast(*result); oscompat::vm_name(start_, size_, kFreeRegionName); } ~ContiguousVAStorageProvider() override { oscompat::vm_release_aligned(start_, size_); } - llvh::ErrorOr newStorageImpl(const char *name) override { + private: + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override { + // No available space to use. 
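+ // firstFreeBit_ is -1 only when every kSegmentUnitSize unit of the reserved
+ // range is currently allocated.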
+ if (LLVM_UNLIKELY(firstFreeBit_ == -1)) { + return make_error_code(OOMError::MaxStorageReached); + } + + assert( + statusBits_.find_first_unset() == firstFreeBit_ && + "firstFreeBit_ should always be the first unset bit"); + void *storage; - if (!freelist_.empty()) { - storage = freelist_.back(); - freelist_.pop_back(); - } else if (level_ < start_ + size_) { - storage = - std::exchange(level_, level_ + AlignedHeapSegment::storageSize()); - } else { + int numUnits = sz / kSegmentUnitSize; + int nextUsedBit = statusBits_.find_next(firstFreeBit_); + int curFreeBit = firstFreeBit_; + // Search for a large enough continuous bit range. + while (nextUsedBit != -1 && (nextUsedBit - curFreeBit < numUnits)) { + curFreeBit = statusBits_.find_next_unset(nextUsedBit); + if (curFreeBit == -1) { + return make_error_code(OOMError::MaxStorageReached); + } + nextUsedBit = statusBits_.find_next(curFreeBit); + } + // nextUsedBit could be -1, so check if there is enough space left. + if (nextUsedBit == -1 && curFreeBit + numUnits > (int)statusBits_.size()) { return make_error_code(OOMError::MaxStorageReached); } - auto res = oscompat::vm_commit(storage, AlignedHeapSegment::storageSize()); + + storage = start_ + curFreeBit * kSegmentUnitSize; + statusBits_.set(curFreeBit, curFreeBit + numUnits); + // Reset it to the new leftmost free bit. + firstFreeBit_ = statusBits_.find_first_unset(); + + auto res = oscompat::vm_commit(storage, sz); if (res) { - oscompat::vm_name(storage, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(storage, sz, name); } return res; } - void deleteStorageImpl(void *storage) override { + void deleteStorageImpl(void *storage, size_t sz) override { assert( - !llvh::alignmentAdjustment( - storage, AlignedHeapSegment::storageSize()) && + !llvh::alignmentAdjustment(storage, kSegmentUnitSize) && "Storage not aligned"); - assert(storage >= start_ && storage < level_ && "Storage not in region"); - oscompat::vm_name( - storage, AlignedHeapSegment::storageSize(), kFreeRegionName); - oscompat::vm_uncommit(storage, AlignedHeapSegment::storageSize()); - freelist_.push_back(storage); + assert( + storage >= start_ && storage < start_ + size_ && + "Storage not in region"); + size_t numUnits = sz / kSegmentUnitSize; + oscompat::vm_name(storage, sz, kFreeRegionName); + oscompat::vm_uncommit(storage, sz); + // Reset all bits for this storage. + int startIndex = (static_cast(storage) - start_) / kSegmentUnitSize; + statusBits_.reset(startIndex, startIndex + numUnits); + if (startIndex < firstFreeBit_) + firstFreeBit_ = startIndex; } private: static constexpr const char *kFreeRegionName = "hermes-free-heap"; size_t size_; char *start_; - char *level_; - llvh::SmallVector freelist_; + /// First free bit in \c statusBits_. We always make new allocation from the + /// leftmost free bit, based on heuristics: + /// 1. Usually the reserved address space is not full. + /// 2. Storage with size kSegmentUnitSize is allocated and deleted more + /// frequently than larger storage. + /// 3. Likely small storage will find space available from leftmost free bit, + /// leaving enough space at the right side for large storage. + int firstFreeBit_{0}; + /// One bit for each kSegmentUnitSize space in the entire reserved virtual + /// address space. A bit is set if the corresponding space is used. 
+ llvh::BitVector statusBits_; }; class MallocStorageProvider final : public StorageProvider { public: - llvh::ErrorOr newStorageImpl(const char *name) override; - void deleteStorageImpl(void *storage) override; + llvh::ErrorOr newStorageImpl(const char *name, size_t sz) override; + void deleteStorageImpl(void *storage, size_t sz) override; private: /// Map aligned starts to actual starts for freeing. @@ -148,13 +191,12 @@ class MallocStorageProvider final : public StorageProvider { }; llvh::ErrorOr VMAllocateStorageProvider::newStorageImpl( - const char *name) { - assert(AlignedHeapSegment::storageSize() % oscompat::page_size() == 0); + const char *name, + size_t sz) { + assert(kSegmentUnitSize % oscompat::page_size() == 0); // Allocate the space, hoping it will be the correct alignment. - auto result = oscompat::vm_allocate_aligned( - AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageSize(), - getMmapHint()); + auto result = + oscompat::vm_allocate_aligned(sz, kSegmentUnitSize, getMmapHint()); if (!result) { return result; } @@ -162,32 +204,36 @@ llvh::ErrorOr VMAllocateStorageProvider::newStorageImpl( assert(isAligned(mem)); (void)&isAligned; #ifdef HERMESVM_ALLOW_HUGE_PAGES - oscompat::vm_hugepage(mem, AlignedHeapSegment::storageSize()); + oscompat::vm_hugepage(mem, sz); #endif // Name the memory region on platforms that support naming. - oscompat::vm_name(mem, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(mem, sz, name); return mem; } -void VMAllocateStorageProvider::deleteStorageImpl(void *storage) { +void VMAllocateStorageProvider::deleteStorageImpl(void *storage, size_t sz) { if (!storage) { return; } - oscompat::vm_free_aligned(storage, AlignedHeapSegment::storageSize()); + oscompat::vm_free_aligned(storage, sz); } -llvh::ErrorOr MallocStorageProvider::newStorageImpl(const char *name) { +llvh::ErrorOr MallocStorageProvider::newStorageImpl( + const char *name, + size_t sz) { // name is unused, can't name malloc memory. (void)name; - void *mem = checkedMalloc2(AlignedHeapSegment::storageSize(), 2u); + void *mem = checkedMalloc2(2u, sz); void *lowLim = alignAlloc(mem); assert(isAligned(lowLim) && "New storage should be aligned"); lowLimToAllocHandle_[lowLim] = mem; return lowLim; } -void MallocStorageProvider::deleteStorageImpl(void *storage) { +void MallocStorageProvider::deleteStorageImpl(void *storage, size_t sz) { + // free() does not need the memory size. 
+ (void)sz; if (!storage) { return; } @@ -217,8 +263,11 @@ std::unique_ptr StorageProvider::mallocProvider() { return std::unique_ptr(new MallocStorageProvider); } -llvh::ErrorOr StorageProvider::newStorage(const char *name) { - auto res = newStorageImpl(name); +llvh::ErrorOr StorageProvider::newStorage(const char *name, size_t sz) { + assert( + sz && (sz % kSegmentUnitSize == 0) && + "Allocated storage size must be multiples of kSegmentUnitSize"); + auto res = newStorageImpl(name, sz); if (res) { numSucceededAllocs_++; @@ -229,13 +278,13 @@ llvh::ErrorOr StorageProvider::newStorage(const char *name) { return res; } -void StorageProvider::deleteStorage(void *storage) { +void StorageProvider::deleteStorage(void *storage, size_t sz) { if (!storage) { return; } numDeletedAllocs_++; - deleteStorageImpl(storage); + return deleteStorageImpl(storage, sz); } llvh::ErrorOr> diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp index 1509168194d..a1220b60d1c 100644 --- a/lib/VM/gcs/AlignedHeapSegment.cpp +++ b/lib/VM/gcs/AlignedHeapSegment.cpp @@ -22,6 +22,17 @@ namespace hermes { namespace vm { +#ifndef NDEBUG +/// Set the given range [start, end) to a dead value. +static void clearRange(char *start, char *end) { +#if LLVM_ADDRESS_SANITIZER_BUILD + __asan_poison_memory_region(start, end - start); +#else + std::memset(start, kInvalidHeapValue, end - start); +#endif +} +#endif + void AlignedHeapSegment::Contents::protectGuardPage( oscompat::ProtectMode mode) { char *begin = &paddedGuardPage_[kGuardPagePadding]; @@ -41,15 +52,16 @@ llvh::ErrorOr AlignedHeapSegment::create( llvh::ErrorOr AlignedHeapSegment::create( StorageProvider *provider, const char *name) { - auto result = provider->newStorage(name); + auto result = provider->newStorage(name, storageSize()); if (!result) { return result.getError(); } + assert(*result && "Heap segment storage allocation failure"); return AlignedHeapSegment{provider, *result}; } AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) - : lowLim_(static_cast(lowLim)), provider_(provider) { + : AlignedHeapSegmentBase(lowLim, kSize), provider_(provider) { assert( storageStart(lowLim_) == lowLim_ && "The lower limit of this storage must be aligned"); @@ -58,13 +70,9 @@ AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) assert( reinterpret_cast(hiLim()) % oscompat::page_size() == 0 && "The higher limit must be page aligned"); - if (*this) { - new (contents()) Contents(); - contents()->protectGuardPage(oscompat::ProtectMode::None); #ifndef NDEBUG - clear(); + clear(); #endif - } } void swap(AlignedHeapSegment &a, AlignedHeapSegment &b) { @@ -95,7 +103,7 @@ AlignedHeapSegment::~AlignedHeapSegment() { __asan_unpoison_memory_region(start(), end() - start()); if (provider_) { - provider_->deleteStorage(lowLim_); + provider_->deleteStorage(lowLim_, storageSize()); } } @@ -120,7 +128,7 @@ void AlignedHeapSegment::setLevel(char *lvl) { assert(dbgContainsLevel(lvl)); if (lvl < level_) { #ifndef NDEBUG - clear(lvl, level_); + clearRange(lvl, level_); #else if (MU == AdviseUnused::Yes) { const size_t PS = oscompat::page_size(); @@ -172,15 +180,7 @@ bool AlignedHeapSegment::validPointer(const void *p) const { } void AlignedHeapSegment::clear() { - clear(start(), end()); -} - -/* static */ void AlignedHeapSegment::clear(char *start, char *end) { -#if LLVM_ADDRESS_SANITIZER_BUILD - __asan_poison_memory_region(start, end - start); -#else - std::memset(start, kInvalidHeapValue, end - start); -#endif + 
clearRange(start(), end()); } /* static */ void AlignedHeapSegment::checkUnwritten(char *start, char *end) { diff --git a/lib/VM/gcs/CardTableNC.cpp b/lib/VM/gcs/CardTableNC.cpp index ec94d5e5710..10937e15192 100644 --- a/lib/VM/gcs/CardTableNC.cpp +++ b/lib/VM/gcs/CardTableNC.cpp @@ -20,12 +20,6 @@ namespace hermes { namespace vm { -#ifndef NDEBUG -/* static */ void *CardTable::storageEnd(const void *ptr) { - return AlignedHeapSegment::storageEnd(ptr); -} -#endif - void CardTable::dirtyCardsForAddressRange(const void *low, const void *high) { // If high is in the middle of some card, ensure that we dirty that card. high = reinterpret_cast(high) + kCardSize - 1; @@ -37,26 +31,26 @@ OptValue CardTable::findNextCardWithStatus( size_t fromIndex, size_t endIndex) const { for (size_t idx = fromIndex; idx < endIndex; idx++) - if (cards_[idx].load(std::memory_order_relaxed) == status) + if (cards()[idx].load(std::memory_order_relaxed) == status) return idx; return llvh::None; } void CardTable::clear() { - cleanRange(kFirstUsedIndex, kValidIndices); + cleanRange(kFirstUsedIndex, getEndIndex()); } void CardTable::updateAfterCompaction(const void *newLevel) { const char *newLevelPtr = static_cast(newLevel); size_t firstCleanCardIndex = addressToIndex(newLevelPtr + kCardSize - 1); assert( - firstCleanCardIndex <= kValidIndices && + firstCleanCardIndex <= getEndIndex() && firstCleanCardIndex >= kFirstUsedIndex && "Invalid index."); // Dirty the occupied cards (below the level), and clean the cards above the // level. dirtyRange(kFirstUsedIndex, firstCleanCardIndex); - cleanRange(firstCleanCardIndex, kValidIndices); + cleanRange(firstCleanCardIndex, getEndIndex()); } void CardTable::cleanRange(size_t from, size_t to) { @@ -72,7 +66,7 @@ void CardTable::cleanOrDirtyRange( size_t to, CardStatus cleanOrDirty) { for (size_t index = from; index < to; index++) { - cards_[index].store(cleanOrDirty, std::memory_order_relaxed); + cards()[index].store(cleanOrDirty, std::memory_order_relaxed); } } @@ -93,7 +87,7 @@ void CardTable::updateBoundaries( "Precondition: must have crossed boundary."); // The object may be large, and may cross multiple cards, but first // handle the first card. - boundaries_[boundary->index()] = + boundaries()[boundary->index()] = (boundary->address() - start) >> LogHeapAlign; boundary->bump(); @@ -106,7 +100,7 @@ void CardTable::updateBoundaries( unsigned currentIndexDelta = 1; unsigned numWithCurrentExp = 0; while (boundary->address() < end) { - boundaries_[boundary->index()] = encodeExp(currentExp); + boundaries()[boundary->index()] = encodeExp(currentExp); numWithCurrentExp++; if (numWithCurrentExp == currentIndexDelta) { numWithCurrentExp = 0; @@ -120,14 +114,14 @@ void CardTable::updateBoundaries( } GCCell *CardTable::firstObjForCard(unsigned index) const { - int8_t val = boundaries_[index]; + int8_t val = boundaries()[index]; // If val is negative, it means skip backwards some number of cards. // In general, for an object crossing 2^N cards, a query for one of // those cards will examine at most N entries in the table. 
while (val < 0) { index -= 1 << decodeExp(val); - val = boundaries_[index]; + val = boundaries()[index]; } char *boundary = const_cast(indexToAddress(index)); @@ -147,12 +141,12 @@ protectBoundaryTableWork(void *table, size_t sz, oscompat::ProtectMode mode) { void CardTable::protectBoundaryTable() { protectBoundaryTableWork( - &boundaries_[0], kValidIndices, oscompat::ProtectMode::None); + boundaries(), getEndIndex(), oscompat::ProtectMode::None); } void CardTable::unprotectBoundaryTable() { protectBoundaryTableWork( - &boundaries_[0], kValidIndices, oscompat::ProtectMode::ReadWrite); + boundaries(), getEndIndex(), oscompat::ProtectMode::ReadWrite); } #endif // HERMES_EXTRA_DEBUG @@ -160,7 +154,7 @@ void CardTable::unprotectBoundaryTable() { void CardTable::verifyBoundaries(char *start, char *level) const { // Start should be card-aligned. assert(isCardAligned(start)); - for (unsigned index = addressToIndex(start); index < kValidIndices; index++) { + for (unsigned index = addressToIndex(start); index < getEndIndex(); index++) { const char *boundary = indexToAddress(index); if (level <= boundary) { break; diff --git a/lib/VM/gcs/HadesGC.cpp b/lib/VM/gcs/HadesGC.cpp index e9bf33f4b28..2f4b71495f0 100644 --- a/lib/VM/gcs/HadesGC.cpp +++ b/lib/VM/gcs/HadesGC.cpp @@ -1899,98 +1899,15 @@ void HadesGC::debitExternalMemory(GCCell *cell, uint32_t sz) { } } -void HadesGC::writeBarrierSlow(const GCHermesValue *loc, HermesValue value) { - if (ogMarkingBarriers_) { - snapshotWriteBarrierInternal(*loc); - } - if (!value.isPointer()) { - return; - } - relocationWriteBarrier(loc, value.getPointer()); -} - void HadesGC::writeBarrierSlow( - const GCSmallHermesValue *loc, - SmallHermesValue value) { - if (ogMarkingBarriers_) { - snapshotWriteBarrierInternal(*loc); - } - if (!value.isPointer()) { - return; - } - relocationWriteBarrier(loc, value.getPointer(getPointerBase())); -} - -void HadesGC::writeBarrierSlow(const GCPointerBase *loc, const GCCell *value) { + const GCCell *cell, + const GCPointerBase *loc, + const GCCell *value) { if (*loc && ogMarkingBarriers_) snapshotWriteBarrierInternal(*loc); // Always do the non-snapshot write barrier in order for YG to be able to // scan cards. - relocationWriteBarrier(loc, value); -} - -void HadesGC::constructorWriteBarrierSlow( - const GCHermesValue *loc, - HermesValue value) { - // A constructor never needs to execute a SATB write barrier, since its - // previous value was definitely not live. - if (!value.isPointer()) { - return; - } - relocationWriteBarrier(loc, value.getPointer()); -} - -void HadesGC::constructorWriteBarrierSlow( - const GCSmallHermesValue *loc, - SmallHermesValue value) { - // A constructor never needs to execute a SATB write barrier, since its - // previous value was definitely not live. - if (!value.isPointer()) { - return; - } - relocationWriteBarrier(loc, value.getPointer(getPointerBase())); -} - -void HadesGC::constructorWriteBarrierRangeSlow( - const GCHermesValue *start, - uint32_t numHVs) { - assert( - AlignedHeapSegment::containedInSame(start, start + numHVs) && - "Range must start and end within a heap segment."); - - // Most constructors should be running in the YG, so in the common case, we - // can avoid doing anything for the whole range. If the range is in the OG, - // then just dirty all the cards corresponding to it, and we can scan them for - // pointers later. This is less precise but makes the write barrier faster. 
- - AlignedHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( - start, start + numHVs); -} - -void HadesGC::constructorWriteBarrierRangeSlow( - const GCSmallHermesValue *start, - uint32_t numHVs) { - assert( - AlignedHeapSegment::containedInSame(start, start + numHVs) && - "Range must start and end within a heap segment."); - AlignedHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( - start, start + numHVs); -} - -void HadesGC::snapshotWriteBarrierRangeSlow( - const GCHermesValue *start, - uint32_t numHVs) { - for (uint32_t i = 0; i < numHVs; ++i) { - snapshotWriteBarrierInternal(start[i]); - } -} - -void HadesGC::snapshotWriteBarrierRangeSlow( - const GCSmallHermesValue *start, - uint32_t numHVs) { - for (uint32_t i = 0; i < numHVs; ++i) { - snapshotWriteBarrierInternal(start[i]); - } + relocationWriteBarrier(cell, loc, value); } void HadesGC::snapshotWriteBarrierInternal(GCCell *oldValue) { @@ -2043,12 +1960,15 @@ void HadesGC::snapshotWriteBarrierInternal(SymbolID symbol) { oldGenMarker_->markSymbol(symbol); } -void HadesGC::relocationWriteBarrier(const void *loc, const void *value) { +void HadesGC::relocationWriteBarrier( + const GCCell *cell, + const void *loc, + const GCCell *value) { assert(!inYoungGen(loc) && "Pre-condition from other callers"); // Do not dirty cards for compactee->compactee, yg->yg, or yg->compactee // pointers. But do dirty cards for compactee->yg pointers, since compaction // may not happen in the next YG. - if (AlignedHeapSegment::containedInSame(loc, value)) { + if (AlignedHeapSegmentBase::containedInSame(cell, value)) { return; } if (inYoungGen(value) || compactee_.contains(value)) { @@ -2057,7 +1977,7 @@ void HadesGC::relocationWriteBarrier(const void *loc, const void *value) { // allocation. // Note that this *only* applies since the boundaries are updated separately // from the card table being marked itself. - AlignedHeapSegment::cardTableCovering(loc)->dirtyCardForAddress(loc); + AlignedHeapSegmentBase::cardTableCovering(cell)->dirtyCardForAddress(loc); } } diff --git a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp index 6362b80d6f1..29a34106d85 100644 --- a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp +++ b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp @@ -115,7 +115,8 @@ TEST_F(AlignedHeapSegmentTest, AdviseUnused) { // We can't use the storage of s here since it contains guard pages and also // s.start() may not align to actual page boundary. 
- void *storage = provider_->newStorage().get(); + void *storage = + provider_->newStorage(AlignedHeapSegment::storageSize()).get(); char *start = reinterpret_cast(storage); char *end = start + AlignedHeapSegment::storageSize(); @@ -139,7 +140,7 @@ TEST_F(AlignedHeapSegmentTest, AdviseUnused) { EXPECT_EQ(*initial + TOTAL_PAGES, *touched); EXPECT_EQ(*touched - FREED_PAGES, *marked); - provider_->deleteStorage(storage); + provider_->deleteStorage(storage, AlignedHeapSegment::storageSize()); #endif } diff --git a/unittests/VMRuntime/CardTableNCTest.cpp b/unittests/VMRuntime/CardTableNCTest.cpp index adaffe0651d..c5bdf04e4a6 100644 --- a/unittests/VMRuntime/CardTableNCTest.cpp +++ b/unittests/VMRuntime/CardTableNCTest.cpp @@ -22,7 +22,11 @@ using namespace hermes::vm; namespace { -struct CardTableNCTest : public ::testing::Test { +struct CardTableParam { + size_t segmentSize; +}; + +struct CardTableNCTest : public ::testing::TestWithParam { CardTableNCTest(); /// Run a test scenario whereby we dirty [dirtyStart, dirtyEnd], and then test @@ -38,7 +42,7 @@ struct CardTableNCTest : public ::testing::Test { std::unique_ptr provider{StorageProvider::mmapProvider()}; AlignedHeapSegment seg{ std::move(AlignedHeapSegment::create(provider.get()).get())}; - CardTable *table{new (seg.lowLim()) CardTable()}; + CardTable *table{nullptr}; // Addresses in the aligned storage to interact with during the tests. std::vector addrs; @@ -57,10 +61,14 @@ void CardTableNCTest::dirtyRangeTest( } CardTableNCTest::CardTableNCTest() { + auto ¶m = GetParam(); + table = new (seg.lowLim()) CardTable(param.segmentSize); + // For purposes of this test, we'll assume the first writeable byte of - // the segment comes just after the card table (which is at the - // start of the segment). - auto first = seg.lowLim() + sizeof(CardTable); + // the segment comes just after the memory region that can be mapped by + // kFirstUsedIndex bytes. + auto first = seg.lowLim() + + CardTable::kFirstUsedIndex * CardTable::kHeapBytesPerCardByte; auto last = reinterpret_cast(llvh::alignDown( reinterpret_cast(seg.hiLim() - 1), CardTable::kCardSize)); @@ -76,10 +84,10 @@ CardTableNCTest::CardTableNCTest() { EXPECT_TRUE(std::is_sorted(addrs.begin(), addrs.end())); } -TEST_F(CardTableNCTest, AddressToIndex) { +TEST_P(CardTableNCTest, AddressToIndex) { // Expected indices in the card table corresponding to the probe // addresses into the storage. - const size_t lastIx = CardTable::kValidIndices - 1; + const size_t lastIx = table->getEndIndex() - 1; std::vector indices{ CardTable::kFirstUsedIndex, CardTable::kFirstUsedIndex + 1, @@ -99,18 +107,18 @@ TEST_F(CardTableNCTest, AddressToIndex) { } } -TEST_F(CardTableNCTest, AddressToIndexBoundary) { +TEST_P(CardTableNCTest, AddressToIndexBoundary) { // This test only works if the card table is laid out at the very beginning of // the storage. ASSERT_EQ(seg.lowLim(), reinterpret_cast(table)); - const size_t hiLim = CardTable::kValidIndices; + const size_t hiLim = table->getEndIndex(); EXPECT_EQ(0, table->addressToIndex(seg.lowLim())); EXPECT_EQ(hiLim, table->addressToIndex(seg.hiLim())); } -TEST_F(CardTableNCTest, DirtyAddress) { - const size_t lastIx = CardTable::kValidIndices - 1; +TEST_P(CardTableNCTest, DirtyAddress) { + const size_t lastIx = table->getEndIndex() - 1; for (char *addr : addrs) { size_t ind = table->addressToIndex(addr); @@ -134,14 +142,15 @@ TEST_F(CardTableNCTest, DirtyAddress) { } /// Dirty an emtpy range. 
-TEST_F(CardTableNCTest, DirtyAddressRangeEmpty) { +TEST_P(CardTableNCTest, DirtyAddressRangeEmpty) { char *addr = addrs.at(0); table->dirtyCardsForAddressRange(addr, addr); - EXPECT_FALSE(table->findNextDirtyCard(0, CardTable::kValidIndices)); + EXPECT_FALSE(table->findNextDirtyCard( + CardTable::kFirstUsedIndex, table->getEndIndex())); } /// Dirty an address range smaller than a single card. -TEST_F(CardTableNCTest, DirtyAddressRangeSmall) { +TEST_P(CardTableNCTest, DirtyAddressRangeSmall) { char *addr = addrs.at(0); dirtyRangeTest( /* expectedStart */ addr, @@ -151,7 +160,7 @@ TEST_F(CardTableNCTest, DirtyAddressRangeSmall) { } /// Dirty an address range corresponding exactly to a card. -TEST_F(CardTableNCTest, DirtyAddressRangeCard) { +TEST_P(CardTableNCTest, DirtyAddressRangeCard) { char *addr = addrs.at(0); dirtyRangeTest( /* expectedStart */ addr, @@ -162,7 +171,7 @@ TEST_F(CardTableNCTest, DirtyAddressRangeCard) { /// Dirty an address range the width of a card but spread across a card /// boundary. -TEST_F(CardTableNCTest, DirtyAddressRangeCardOverlapping) { +TEST_P(CardTableNCTest, DirtyAddressRangeCardOverlapping) { char *addr = addrs.at(0); char *start = addr + CardTable::kCardSize / 2; dirtyRangeTest( @@ -174,7 +183,7 @@ TEST_F(CardTableNCTest, DirtyAddressRangeCardOverlapping) { /// Dirty an address range spanning multiple cards, with overhang on either /// side. -TEST_F(CardTableNCTest, DirtyAddressRangeLarge) { +TEST_P(CardTableNCTest, DirtyAddressRangeLarge) { char *addr = addrs.at(0); char *start = addr + CardTable::kCardSize / 2; dirtyRangeTest( @@ -184,13 +193,13 @@ TEST_F(CardTableNCTest, DirtyAddressRangeLarge) { /* expectedEnd */ addr + 4 * CardTable::kCardSize); } -TEST_F(CardTableNCTest, Initial) { +TEST_P(CardTableNCTest, Initial) { for (char *addr : addrs) { EXPECT_FALSE(table->isCardForAddressDirty(addr)); } } -TEST_F(CardTableNCTest, Clear) { +TEST_P(CardTableNCTest, Clear) { for (char *addr : addrs) { ASSERT_FALSE(table->isCardForAddressDirty(addr)); } @@ -209,22 +218,23 @@ TEST_F(CardTableNCTest, Clear) { } } -TEST_F(CardTableNCTest, NextDirtyCardImmediate) { +TEST_P(CardTableNCTest, NextDirtyCardImmediate) { char *addr = addrs.at(addrs.size() / 2); size_t ind = table->addressToIndex(addr); table->dirtyCardForAddress(addr); - auto dirty = table->findNextDirtyCard(ind, CardTable::kValidIndices); + auto dirty = table->findNextDirtyCard(ind, table->getEndIndex()); ASSERT_TRUE(dirty); EXPECT_EQ(ind, *dirty); } -TEST_F(CardTableNCTest, NextDirtyCard) { +TEST_P(CardTableNCTest, NextDirtyCard) { /// Empty case: No dirty cards - EXPECT_FALSE(table->findNextDirtyCard(0, CardTable::kValidIndices)); + EXPECT_FALSE(table->findNextDirtyCard( + CardTable::kFirstUsedIndex, table->getEndIndex())); - size_t from = 0; + size_t from = CardTable::kFirstUsedIndex; for (char *addr : addrs) { table->dirtyCardForAddress(addr); @@ -232,7 +242,7 @@ TEST_F(CardTableNCTest, NextDirtyCard) { EXPECT_FALSE(table->findNextDirtyCard(from, ind)); auto atEnd = table->findNextDirtyCard(from, ind + 1); - auto inMiddle = table->findNextDirtyCard(from, CardTable::kValidIndices); + auto inMiddle = table->findNextDirtyCard(from, table->getEndIndex()); ASSERT_TRUE(atEnd); EXPECT_EQ(ind, *atEnd); @@ -243,6 +253,14 @@ TEST_F(CardTableNCTest, NextDirtyCard) { } } +INSTANTIATE_TEST_CASE_P( + CardTableNCTests, + CardTableNCTest, + ::testing::Values( + CardTableParam{AlignedHeapSegmentBase::kSegmentUnitSize}, + CardTableParam{AlignedHeapSegmentBase::kSegmentUnitSize * 8}, + 
CardTableParam{AlignedHeapSegmentBase::kSegmentUnitSize * 128})); + } // namespace #endif // HERMESVM_GC_MALLOC diff --git a/unittests/VMRuntime/MarkBitArrayNCTest.cpp b/unittests/VMRuntime/MarkBitArrayNCTest.cpp index 455c1996fb1..a46536155d2 100644 --- a/unittests/VMRuntime/MarkBitArrayNCTest.cpp +++ b/unittests/VMRuntime/MarkBitArrayNCTest.cpp @@ -27,6 +27,13 @@ namespace { struct MarkBitArrayTest : public ::testing::Test { MarkBitArrayTest(); + static size_t addressToMarkBitArrayIndex(const void *addr) { + auto *cp = reinterpret_cast(addr); + auto *base = + reinterpret_cast(AlignedHeapSegment::storageStart(addr)); + return (cp - base) >> LogHeapAlign; + } + protected: std::unique_ptr provider; AlignedHeapSegment seg; @@ -66,7 +73,7 @@ TEST_F(MarkBitArrayTest, AddressToIndex) { char *addr = addrs.at(i); size_t ind = indices.at(i); - EXPECT_EQ(ind, AlignedHeapSegment::addressToMarkBitArrayIndex(addr)) + EXPECT_EQ(ind, addressToMarkBitArrayIndex(addr)) << "0x" << std::hex << (void *)addr << " -> " << ind; char *toAddr = seg.lowLim() + (ind << LogHeapAlign); EXPECT_EQ(toAddr, addr) @@ -78,7 +85,7 @@ TEST_F(MarkBitArrayTest, MarkGet) { const size_t lastIx = mba.size() - 1; for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(ind > 0 && mba.at(ind - 1)) << "initial " << ind << " - 1"; EXPECT_FALSE(mba.at(ind)) << "initial " << ind; @@ -97,37 +104,37 @@ TEST_F(MarkBitArrayTest, MarkGet) { TEST_F(MarkBitArrayTest, Initial) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, Clear) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_FALSE(mba.at(ind)); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_TRUE(mba.at(ind)); } mba.reset(); for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, NextMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findNextSetBitFrom(ind)); @@ -140,7 +147,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextSetBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); indices.push(ind); } @@ -154,7 +161,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { TEST_F(MarkBitArrayTest, NextUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findNextZeroBitFrom(ind)); @@ -167,7 +174,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextZeroBitFrom(0)); std::queue indices; for (char *addr 
: addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, false); indices.push(ind); } @@ -182,7 +189,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { TEST_F(MarkBitArrayTest, PrevMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findPrevSetBitFrom(ind + 1)); } @@ -196,7 +203,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, true); indices.push(ind); } @@ -209,7 +216,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { TEST_F(MarkBitArrayTest, PrevUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findPrevZeroBitFrom(ind + 1)); @@ -225,7 +232,7 @@ TEST_F(MarkBitArrayTest, PrevUnmarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, false); indices.push(ind); } diff --git a/unittests/VMRuntime/StorageProviderTest.cpp b/unittests/VMRuntime/StorageProviderTest.cpp index e189bcabce0..5de5b8a0b69 100644 --- a/unittests/VMRuntime/StorageProviderTest.cpp +++ b/unittests/VMRuntime/StorageProviderTest.cpp @@ -12,8 +12,6 @@ #include "hermes/VM/AlignedHeapSegment.h" #include "hermes/VM/LimitedStorageProvider.h" -#include "llvh/ADT/STLExtras.h" - using namespace hermes; using namespace hermes::vm; @@ -24,8 +22,8 @@ struct NullStorageProvider : public StorageProvider { static std::unique_ptr create(); protected: - llvh::ErrorOr newStorageImpl(const char *) override; - void deleteStorageImpl(void *) override; + llvh::ErrorOr newStorageImpl(const char *, size_t sz) override; + void deleteStorageImpl(void *, size_t sz) override; }; /* static */ @@ -33,7 +31,9 @@ std::unique_ptr NullStorageProvider::create() { return std::make_unique(); } -llvh::ErrorOr NullStorageProvider::newStorageImpl(const char *) { +llvh::ErrorOr NullStorageProvider::newStorageImpl( + const char *, + size_t sz) { // Doesn't matter what code is returned here. return make_error_code(OOMError::TestVMLimitReached); } @@ -43,33 +43,43 @@ enum StorageProviderType { ContiguousVAProvider, }; +struct StorageProviderParam { + StorageProviderType providerType; + size_t storageSize; + size_t vaSize; +}; + static std::unique_ptr GetStorageProvider( - StorageProviderType type) { + StorageProviderType type, + size_t vaSize) { switch (type) { case MmapProvider: return StorageProvider::mmapProvider(); case ContiguousVAProvider: - return StorageProvider::contiguousVAProvider( - AlignedHeapSegment::storageSize()); + return StorageProvider::contiguousVAProvider(vaSize); default: return nullptr; } } class StorageProviderTest - : public ::testing::TestWithParam {}; + : public ::testing::TestWithParam {}; -void NullStorageProvider::deleteStorageImpl(void *) {} +void NullStorageProvider::deleteStorageImpl(void *, size_t sz) {} + +/// Minimum segment storage size. 
+static constexpr size_t SIZE = AlignedHeapSegment::storageSize(); TEST_P(StorageProviderTest, StorageProviderSucceededAllocsLogCount) { - auto provider{GetStorageProvider(GetParam())}; + auto ¶ms = GetParam(); + auto provider{GetStorageProvider(params.providerType, params.vaSize)}; ASSERT_EQ(0, provider->numSucceededAllocs()); ASSERT_EQ(0, provider->numFailedAllocs()); ASSERT_EQ(0, provider->numDeletedAllocs()); ASSERT_EQ(0, provider->numLiveAllocs()); - auto result = provider->newStorage("Test"); + auto result = provider->newStorage("Test", params.storageSize); ASSERT_TRUE(result); void *s = result.get(); @@ -78,7 +88,7 @@ TEST_P(StorageProviderTest, StorageProviderSucceededAllocsLogCount) { EXPECT_EQ(0, provider->numDeletedAllocs()); EXPECT_EQ(1, provider->numLiveAllocs()); - provider->deleteStorage(s); + provider->deleteStorage(s, params.storageSize); EXPECT_EQ(1, provider->numSucceededAllocs()); EXPECT_EQ(0, provider->numFailedAllocs()); @@ -94,7 +104,7 @@ TEST(StorageProviderTest, StorageProviderFailedAllocsLogCount) { ASSERT_EQ(0, provider->numDeletedAllocs()); ASSERT_EQ(0, provider->numLiveAllocs()); - auto result = provider->newStorage("Test"); + auto result = provider->newStorage("Test", SIZE); ASSERT_FALSE(result); EXPECT_EQ(0, provider->numSucceededAllocs()); @@ -107,20 +117,20 @@ TEST(StorageProviderTest, LimitedStorageProviderEnforce) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + SIZE * LIM, }; void *live[LIM]; for (size_t i = 0; i < LIM; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage("Live", SIZE); ASSERT_TRUE(result); live[i] = result.get(); } - EXPECT_FALSE(provider.newStorage("Dead")); + EXPECT_FALSE(provider.newStorage("Dead", SIZE)); // Clean-up for (auto s : live) { - provider.deleteStorage(s); + provider.deleteStorage(s, SIZE); } } @@ -128,16 +138,16 @@ TEST(StorageProviderTest, LimitedStorageProviderTrackDelete) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + SIZE * LIM, }; // If the storage gets deleted, we should be able to re-allocate it, even if // the total number of allocations exceeds the limit. for (size_t i = 0; i < LIM + 1; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage("Live", SIZE); ASSERT_TRUE(result); auto *s = result.get(); - provider.deleteStorage(s); + provider.deleteStorage(s, SIZE); } } @@ -145,13 +155,13 @@ TEST(StorageProviderTest, LimitedStorageProviderDeleteNull) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + SIZE * LIM, }; void *live[LIM]; for (size_t i = 0; i < LIM; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage("Live", SIZE); ASSERT_TRUE(result); live[i] = result.get(); } @@ -159,27 +169,25 @@ TEST(StorageProviderTest, LimitedStorageProviderDeleteNull) { // The allocations should fail because we have hit the limit, and the // deletions should not affect the limit, because they are of null storages. 
for (size_t i = 0; i < 2; ++i) { - auto result = provider.newStorage("Live"); + auto result = provider.newStorage("Live", SIZE); EXPECT_FALSE(result); } // Clean-up for (auto s : live) { - provider.deleteStorage(s); + provider.deleteStorage(s, SIZE); } } TEST(StorageProviderTest, StorageProviderAllocsCount) { constexpr size_t LIM = 2; - auto provider = - std::unique_ptr{new LimitedStorageProvider{ - StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM}}; + auto provider = std::unique_ptr{ + new LimitedStorageProvider{StorageProvider::mmapProvider(), SIZE * LIM}}; constexpr size_t FAILS = 3; void *storages[LIM]; for (size_t i = 0; i < LIM; ++i) { - auto result = provider->newStorage(); + auto result = provider->newStorage(SIZE); ASSERT_TRUE(result); storages[i] = result.get(); } @@ -188,7 +196,7 @@ TEST(StorageProviderTest, StorageProviderAllocsCount) { EXPECT_EQ(LIM, provider->numLiveAllocs()); for (size_t i = 0; i < FAILS; ++i) { - auto result = provider->newStorage(); + auto result = provider->newStorage(SIZE); ASSERT_FALSE(result); } @@ -196,21 +204,63 @@ TEST(StorageProviderTest, StorageProviderAllocsCount) { // Clean-up for (auto s : storages) { - provider->deleteStorage(s); + provider->deleteStorage(s, SIZE); } EXPECT_EQ(0, provider->numLiveAllocs()); EXPECT_EQ(LIM, provider->numDeletedAllocs()); } +TEST(StorageProviderTest, ContinuousProviderTest) { + auto provider = + GetStorageProvider(StorageProviderType::ContiguousVAProvider, SIZE * 10); + + size_t sz1 = SIZE * 5; + auto result = provider->newStorage(sz1); + ASSERT_TRUE(result); + auto *s1 = *result; + + size_t sz2 = SIZE * 3; + result = provider->newStorage(sz2); + ASSERT_TRUE(result); + auto *s2 = *result; + + size_t sz3 = SIZE * 3; + result = provider->newStorage(sz3); + ASSERT_FALSE(result); + + provider->deleteStorage(s1, sz1); + + result = provider->newStorage(sz3); + ASSERT_TRUE(result); + auto *s3 = *result; + + size_t sz4 = SIZE * 2; + result = provider->newStorage(sz4); + ASSERT_TRUE(result); + auto *s4 = *result; + + result = provider->newStorage(sz4); + ASSERT_TRUE(result); + auto *s5 = *result; + + provider->deleteStorage(s2, sz2); + provider->deleteStorage(s3, sz3); + provider->deleteStorage(s4, sz4); + provider->deleteStorage(s5, sz4); +} + /// StorageGuard will free storage on scope exit. class StorageGuard final { public: - StorageGuard(std::shared_ptr provider, void *storage) - : provider_(std::move(provider)), storage_(storage) {} + StorageGuard( + std::shared_ptr provider, + void *storage, + size_t sz) + : provider_(std::move(provider)), storage_(storage), sz_(sz) {} ~StorageGuard() { - provider_->deleteStorage(storage_); + provider_->deleteStorage(storage_, sz_); } void *raw() const { @@ -220,6 +270,7 @@ class StorageGuard final { private: std::shared_ptr provider_; void *storage_; + size_t sz_; }; #ifndef NDEBUG @@ -235,8 +286,8 @@ class SetVALimit final { } }; -static const size_t KB = 1 << 10; -static const size_t MB = KB * KB; +static constexpr size_t KB = 1 << 10; +static constexpr size_t MB = KB * KB; TEST(StorageProviderTest, SucceedsWithoutReducing) { // Should succeed without reducing the size at all. 
@@ -261,16 +312,13 @@ TEST(StorageProviderTest, SucceedsAfterReducing) { } { // Test using the aligned storage alignment - SetVALimit limit{50 * AlignedHeapSegment::storageSize()}; - auto result = vmAllocateAllowLess( - 100 * AlignedHeapSegment::storageSize(), - 30 * AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageSize()); + SetVALimit limit{50 * SIZE}; + auto result = vmAllocateAllowLess(100 * SIZE, 30 * SIZE, SIZE); ASSERT_TRUE(result); auto memAndSize = result.get(); EXPECT_TRUE(memAndSize.first != nullptr); - EXPECT_GE(memAndSize.second, 30 * AlignedHeapSegment::storageSize()); - EXPECT_LE(memAndSize.second, 50 * AlignedHeapSegment::storageSize()); + EXPECT_GE(memAndSize.second, 30 * SIZE); + EXPECT_LE(memAndSize.second, 50 * SIZE); } } @@ -282,11 +330,14 @@ TEST(StorageProviderTest, FailsDueToLimitLowerThanMin) { } TEST_P(StorageProviderTest, VirtualMemoryFreed) { - SetVALimit limit{10 * MB}; + SetVALimit limit{25 * MB}; + auto ¶ms = GetParam(); for (size_t i = 0; i < 20; i++) { - std::shared_ptr sp = GetStorageProvider(GetParam()); - StorageGuard sg{sp, *sp->newStorage()}; + std::shared_ptr sp = + GetStorageProvider(params.providerType, params.vaSize); + StorageGuard sg{ + sp, *sp->newStorage(params.storageSize), params.storageSize}; } } @@ -295,6 +346,17 @@ TEST_P(StorageProviderTest, VirtualMemoryFreed) { INSTANTIATE_TEST_CASE_P( StorageProviderTests, StorageProviderTest, - ::testing::Values(MmapProvider, ContiguousVAProvider)); + ::testing::Values( + StorageProviderParam{ + MmapProvider, + SIZE, + 0, + }, + StorageProviderParam{ + ContiguousVAProvider, + SIZE, + SIZE, + }, + StorageProviderParam{ContiguousVAProvider, SIZE * 5, SIZE * 5})); } // namespace
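As a usage note, not part of the patch: after these changes every StorageProvider call is size-aware, and callers must pass the same size to deleteStorage that they passed to newStorage. The following minimal sketch mirrors ContinuousProviderTest above; the function name and the StorageProvider.h header path are assumptions, and the size constraints come from the assert added to StorageProvider::newStorage.

// Sketch only: exercising the size-aware StorageProvider API.
#include "hermes/VM/AlignedHeapSegment.h" // for AlignedHeapSegmentBase
#include "hermes/VM/StorageProvider.h" // assumed header for StorageProvider

using namespace hermes::vm;

void sizeAwareProviderExample() {
  constexpr size_t kUnit = AlignedHeapSegmentBase::kSegmentUnitSize;
  // Reserve a contiguous VA range that can hold four segment units.
  auto provider = StorageProvider::contiguousVAProvider(4 * kUnit);

  // Sizes must be non-zero multiples of kSegmentUnitSize.
  auto res = provider->newStorage("example-large-segment", 3 * kUnit);
  if (!res) {
    // May fail with OOMError::MaxStorageReached if no contiguous run of
    // three free units remains in the reservation.
    return;
  }
  void *storage = *res;

  // The same size must be passed back when releasing the storage.
  provider->deleteStorage(storage, 3 * kUnit);
}

Passing the size explicitly is what lets the contiguous provider return the exact run of units to its status bitmap on deletion, and it is why newStorage insists on multiples of kSegmentUnitSize.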