Changeset 77025 in webkit


Timestamp:
Jan 28, 2011 4:35:17 PM
Author:
barraclough@apple.com
Message:

https://bugs.webkit.org/show_bug.cgi?id=53352
Heavy external fragmentation in FixedVMPoolAllocator can lead to a CRASH().

Reviewed by Geoff Garen.

The FixedVMPoolAllocator currently uses a best fit policy;
switch to first fit, which is less prone to external fragmentation.
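
Below is a minimal standalone sketch (illustrative names such as firstFitAllocate and notFoundIndex; not code from this patch) of the first-fit scan that the new AllocationTableLeaf performs: each bit of a 64-bit field marks one fixed-size block, and the allocator claims the first suitably aligned run of free blocks it finds.

    #include <cstddef>
    #include <cstdint>

    // Sentinel mirroring WTF::notFound.
    static const size_t notFoundIndex = static_cast<size_t>(-1);

    // First fit over a 64-bit occupancy bitmap: 'count' is the number of blocks
    // needed, 'alignment' the power-of-two slot size it is placed in
    // (assumes 0 < count <= alignment <= 64 and count < 64).
    size_t firstFitAllocate(uint64_t& allocated, size_t count, size_t alignment)
    {
        // Mask of 'count' bits, positioned at the top of an alignment-sized slot.
        uint64_t mask = ((1ull << count) - 1) << (alignment - count);
        for (size_t i = 0; i < 64; i += alignment) {
            if (!(allocated & mask)) {
                allocated |= mask;               // claim the span
                return i + (alignment - count);  // index of its first block
            }
            if (i + alignment < 64)
                mask <<= alignment;              // next aligned slot (guard keeps the shift < 64)
        }
        return notFoundIndex;                    // no free span in this leaf
    }

Freeing clears the same mask again, so free space coalesces implicitly in the bitmap; the old AVL-tree free list and its periodic coalesceFreeSpace() sweep are no longer needed.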

  • jit/ExecutableAllocatorFixedVMPool.cpp:

(JSC::AllocationTableSizeClass::AllocationTableSizeClass):
(JSC::AllocationTableSizeClass::blockSize):
(JSC::AllocationTableSizeClass::blockCount):
(JSC::AllocationTableSizeClass::blockAlignment):
(JSC::AllocationTableSizeClass::size):
(JSC::AllocationTableLeaf::AllocationTableLeaf):
(JSC::AllocationTableLeaf::~AllocationTableLeaf):
(JSC::AllocationTableLeaf::allocate):
(JSC::AllocationTableLeaf::free):
(JSC::AllocationTableLeaf::isEmpty):
(JSC::AllocationTableLeaf::isFull):
(JSC::AllocationTableLeaf::size):
(JSC::AllocationTableLeaf::classForSize):
(JSC::AllocationTableLeaf::dump):
(JSC::LazyAllocationTable::LazyAllocationTable):
(JSC::LazyAllocationTable::~LazyAllocationTable):
(JSC::LazyAllocationTable::allocate):
(JSC::LazyAllocationTable::free):
(JSC::LazyAllocationTable::isEmpty):
(JSC::LazyAllocationTable::isFull):
(JSC::LazyAllocationTable::size):
(JSC::LazyAllocationTable::dump):
(JSC::LazyAllocationTable::classForSize):
(JSC::AllocationTableDirectory::AllocationTableDirectory):
(JSC::AllocationTableDirectory::~AllocationTableDirectory):
(JSC::AllocationTableDirectory::allocate):
(JSC::AllocationTableDirectory::free):
(JSC::AllocationTableDirectory::isEmpty):
(JSC::AllocationTableDirectory::isFull):
(JSC::AllocationTableDirectory::size):
(JSC::AllocationTableDirectory::classForSize):
(JSC::AllocationTableDirectory::dump):
(JSC::FixedVMPoolAllocator::FixedVMPoolAllocator):
(JSC::FixedVMPoolAllocator::alloc):
(JSC::FixedVMPoolAllocator::free):
(JSC::FixedVMPoolAllocator::allocated):
(JSC::FixedVMPoolAllocator::isValid):
(JSC::FixedVMPoolAllocator::classForSize):
(JSC::FixedVMPoolAllocator::offsetToPointer):
(JSC::FixedVMPoolAllocator::pointerToOffset):
(JSC::ExecutableAllocator::committedByteCount):
(JSC::ExecutableAllocator::isValid):
(JSC::ExecutableAllocator::underMemoryPressure):
(JSC::ExecutablePool::systemAlloc):
(JSC::ExecutablePool::systemRelease):

  • wtf/PageReservation.h:

(WTF::PageReservation::PageReservation):
(WTF::PageReservation::commit):
(WTF::PageReservation::decommit):
(WTF::PageReservation::committed):

Location:
trunk/Source/JavaScriptCore
Files:
3 edited

  • trunk/Source/JavaScriptCore/ChangeLog

    r77006 r77025  
     12011-01-28  Gavin Barraclough  <barraclough@apple.com>
     2
     3        Reviewed by Geoff Garen.
     4
     5        https://bugs.webkit.org/show_bug.cgi?id=53352
     6        Heavy external fragmentation in FixedVMPoolAllocator can lead to a CRASH().
     7
     8        The FixedVMPoolAllocator currently uses a best fit policy;
     9        switch to first fit, which is less prone to external fragmentation.
     10
     11        * jit/ExecutableAllocatorFixedVMPool.cpp:
     12        (JSC::AllocationTableSizeClass::AllocationTableSizeClass):
     13        (JSC::AllocationTableSizeClass::blockSize):
     14        (JSC::AllocationTableSizeClass::blockCount):
     15        (JSC::AllocationTableSizeClass::blockAlignment):
     16        (JSC::AllocationTableSizeClass::size):
     17        (JSC::AllocationTableLeaf::AllocationTableLeaf):
     18        (JSC::AllocationTableLeaf::~AllocationTableLeaf):
     19        (JSC::AllocationTableLeaf::allocate):
     20        (JSC::AllocationTableLeaf::free):
     21        (JSC::AllocationTableLeaf::isEmpty):
     22        (JSC::AllocationTableLeaf::isFull):
     23        (JSC::AllocationTableLeaf::size):
     24        (JSC::AllocationTableLeaf::classForSize):
     25        (JSC::AllocationTableLeaf::dump):
     26        (JSC::LazyAllocationTable::LazyAllocationTable):
     27        (JSC::LazyAllocationTable::~LazyAllocationTable):
     28        (JSC::LazyAllocationTable::allocate):
     29        (JSC::LazyAllocationTable::free):
     30        (JSC::LazyAllocationTable::isEmpty):
     31        (JSC::LazyAllocationTable::isFull):
     32        (JSC::LazyAllocationTable::size):
     33        (JSC::LazyAllocationTable::dump):
     34        (JSC::LazyAllocationTable::classForSize):
     35        (JSC::AllocationTableDirectory::AllocationTableDirectory):
     36        (JSC::AllocationTableDirectory::~AllocationTableDirectory):
     37        (JSC::AllocationTableDirectory::allocate):
     38        (JSC::AllocationTableDirectory::free):
     39        (JSC::AllocationTableDirectory::isEmpty):
     40        (JSC::AllocationTableDirectory::isFull):
     41        (JSC::AllocationTableDirectory::size):
     42        (JSC::AllocationTableDirectory::classForSize):
     43        (JSC::AllocationTableDirectory::dump):
     44        (JSC::FixedVMPoolAllocator::FixedVMPoolAllocator):
     45        (JSC::FixedVMPoolAllocator::alloc):
     46        (JSC::FixedVMPoolAllocator::free):
     47        (JSC::FixedVMPoolAllocator::allocated):
     48        (JSC::FixedVMPoolAllocator::isValid):
     49        (JSC::FixedVMPoolAllocator::classForSize):
     50        (JSC::FixedVMPoolAllocator::offsetToPointer):
     51        (JSC::FixedVMPoolAllocator::pointerToOffset):
     52        (JSC::ExecutableAllocator::committedByteCount):
     53        (JSC::ExecutableAllocator::isValid):
     54        (JSC::ExecutableAllocator::underMemoryPressure):
     55        (JSC::ExecutablePool::systemAlloc):
     56        (JSC::ExecutablePool::systemRelease):
     57        * wtf/PageReservation.h:
     58        (WTF::PageReservation::PageReservation):
     59        (WTF::PageReservation::commit):
     60        (WTF::PageReservation::decommit):
     61        (WTF::PageReservation::committed):
     62
    1632011-01-27  Oliver Hunt  <oliver@apple.com>
    264
  • trunk/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp

    r75992 r77025  
    4343#endif
    4444
    45 static const unsigned vmPoolSizeOvercommit = 2u * 1024u * 1024u * 1024u; // 2Gb
    46 static const unsigned coalesceLimitOvercommit = 16u * 1024u * 1024u; // 16Mb
    47 
    48 static const unsigned vmPoolSizeNoOvercommit = 32u * 1024u * 1024u; // 32Mb
    49 static const unsigned coalesceLimitNoOvercommit = 4u * 1024u * 1024u; // 4Mb
    50 
    51 static const unsigned vmPoolSizeEmbedded = 16u * 1024u * 1024u; // 16Mb
    52 static const unsigned coalesceLimitEmbedded = 4u * 1024u * 1024u; // 4Mb
    53 
    54 #if CPU(X86_64) && !OS(LINUX)
    55 // These limits suitable on 64-bit platforms (particularly x86-64,
    56 // where we require all jumps to have a 2Gb max range). We don't
    57 // enable this by default on Linux, since it needs overcommit and
    58 // distros commonly disable that feature. We'll check the value
    59 // for the overcommit feature at runtime and re-assign the Generic
    60 // values if it's enabled.
    61 static unsigned vmPoolSize = vmPoolSizeOvercommit;
    62 static unsigned coalesceLimit = coalesceLimitOvercommit;
    63 #elif CPU(ARM)
    64 static unsigned vmPoolSize = vmPoolSizeEmbedded;
    65 static unsigned coalesceLimit = coalesceLimitEmbedded;
    66 #else
    67 static unsigned vmPoolSize = vmPoolSizeNoOvercommit;
    68 static unsigned coalesceLimit = coalesceLimitNoOvercommit;
    69 #endif
    70 
    7145using namespace WTF;
    7246
    7347namespace JSC {
    7448   
    75 static size_t committedBytesCount = 0; 
    76 static SpinLock spinlock = SPINLOCK_INITIALIZER;
    77 
    78 // FreeListEntry describes a free chunk of memory, stored in the freeList.
    79 struct FreeListEntry {
    80     FreeListEntry(void* pointer, size_t size)
    81         : pointer(pointer)
    82         , size(size)
    83         , nextEntry(0)
    84         , less(0)
    85         , greater(0)
    86         , balanceFactor(0)
    87     {
    88     }
    89 
    90     // All entries of the same size share a single entry
    91     // in the AVLTree, and are linked together in a linked
    92     // list, using nextEntry.
    93     void* pointer;
    94     size_t size;
    95     FreeListEntry* nextEntry;
    96 
    97     // These fields are used by AVLTree.
    98     FreeListEntry* less;
    99     FreeListEntry* greater;
    100     int balanceFactor;
     49#define TwoPow(n) (1ull << n)
     50
     51class AllocationTableSizeClass {
     52public:
     53    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
     54        : m_blockSize(blockSize)
     55    {
     56        ASSERT(blockSize == TwoPow(log2BlockSize));
     57
     58        // Calculate the number of blocks needed to hold size.
     59        size_t blockMask = blockSize - 1;
     60        m_blockCount = (size + blockMask) >> log2BlockSize;
     61
     62        // Align to the smallest power of two >= m_blockCount.
     63        m_blockAlignment = 1;
     64        while (m_blockAlignment < m_blockCount)
     65            m_blockAlignment += m_blockAlignment;
     66    }
     67
     68    size_t blockSize() const { return m_blockSize; }
     69    size_t blockCount() const { return m_blockCount; }
     70    size_t blockAlignment() const { return m_blockAlignment; }
     71
     72    size_t size()
     73    {
     74        return m_blockSize * m_blockCount;
     75    }
     76
     77private:
     78    size_t m_blockSize;
     79    size_t m_blockCount;
     80    size_t m_blockAlignment;
    10181};
    10282
    103 // Abstractor class for use in AVLTree.
    104 // Nodes in the AVLTree are of type FreeListEntry, keyed on
    105 // (and thus sorted by) their size.
    106 struct AVLTreeAbstractorForFreeList {
    107     typedef FreeListEntry* handle;
    108     typedef int32_t size;
    109     typedef size_t key;
    110 
    111     handle get_less(handle h) { return h->less; }
    112     void set_less(handle h, handle lh) { h->less = lh; }
    113     handle get_greater(handle h) { return h->greater; }
    114     void set_greater(handle h, handle gh) { h->greater = gh; }
    115     int get_balance_factor(handle h) { return h->balanceFactor; }
    116     void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
    117 
    118     static handle null() { return 0; }
    119 
    120     int compare_key_key(key va, key vb) { return va - vb; }
    121     int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
    122     int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
     83template<unsigned log2Entries>
     84class AllocationTableLeaf {
     85    typedef uint64_t BitField;
     86
     87public:
     88    static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
     89    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
     90
     91    static const size_t subregionSize = TwoPow(log2SubregionSize);
     92    static const size_t regionSize = TwoPow(log2RegionSize);
     93    static const unsigned entries = TwoPow(log2Entries);
     94    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);
     95
     96    AllocationTableLeaf()
     97        : m_allocated(0)
     98    {
     99    }
     100
     101    ~AllocationTableLeaf()
     102    {
     103        ASSERT(isEmpty());
     104    }
     105
     106    size_t allocate(AllocationTableSizeClass& sizeClass)
     107    {
     108        ASSERT(sizeClass.blockSize() == subregionSize);
     109        ASSERT(!isFull());
     110
     111        size_t alignment = sizeClass.blockAlignment();
     112        size_t count = sizeClass.blockCount();
     113        // Use this mask to check for spans of free blocks.
     114        BitField mask = ((1ull << count) - 1) << (alignment - count);
     115
     116        // Step in units of alignment size.
     117        for (unsigned i = 0; i < entries; i += alignment) {
     118            if (!(m_allocated & mask)) {
     119                m_allocated |= mask;
     120                return (i + (alignment - count)) << log2SubregionSize;
     121            }
     122            mask <<= alignment;
     123        }
     124        return notFound;
     125    }
     126
     127    void free(size_t location, AllocationTableSizeClass& sizeClass)
     128    {
     129        ASSERT(sizeClass.blockSize() == subregionSize);
     130
     131        size_t entry = location >> log2SubregionSize;
     132        size_t count = sizeClass.blockCount();
     133        BitField mask = ((1ull << count) - 1) << entry;
     134
     135        ASSERT((m_allocated & mask) == mask);
     136        m_allocated &= ~mask;
     137    }
     138
     139    bool isEmpty()
     140    {
     141        return !m_allocated;
     142    }
     143
     144    bool isFull()
     145    {
     146        return !~m_allocated;
     147    }
     148
     149    static size_t size()
     150    {
     151        return regionSize;
     152    }
     153
     154    static AllocationTableSizeClass classForSize(size_t size)
     155    {
     156        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
     157    }
     158
     159#ifndef NDEBUG
     160    void dump(size_t parentOffset = 0, unsigned indent = 0)
     161    {
     162        for (unsigned i = 0; i < indent; ++i)
     163            fprintf(stderr, "    ");
     164        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
     165    }
     166#endif
     167
     168private:
     169    BitField m_allocated;
    123170};
    124171
    125 // Used to reverse sort an array of FreeListEntry pointers.
    126 static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
    127 {
    128     FreeListEntry* left = *(FreeListEntry**)leftPtr;
    129     FreeListEntry* right = *(FreeListEntry**)rightPtr;
    130 
    131     return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
    132 }
    133 
    134 // Used to reverse sort an array of pointers.
    135 static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
    136 {
    137     void* left = *(void**)leftPtr;
    138     void* right = *(void**)rightPtr;
    139 
    140     return (intptr_t)right - (intptr_t)left;
    141 }
     172
     173template<class NextLevel>
     174class LazyAllocationTable {
     175public:
     176    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
     177    static const unsigned entries = NextLevel::entries;
     178
     179    LazyAllocationTable()
     180        : m_ptr(0)
     181    {
     182    }
     183
     184    ~LazyAllocationTable()
     185    {
     186        ASSERT(isEmpty());
     187    }
     188
     189    size_t allocate(AllocationTableSizeClass& sizeClass)
     190    {
     191        if (!m_ptr)
     192            m_ptr = new NextLevel();
     193        return m_ptr->allocate(sizeClass);
     194    }
     195
     196    void free(size_t location, AllocationTableSizeClass& sizeClass)
     197    {
     198        ASSERT(m_ptr);
     199        m_ptr->free(location, sizeClass);
     200        if (m_ptr->isEmpty()) {
     201            delete m_ptr;
     202            m_ptr = 0;
     203        }
     204    }
     205
     206    bool isEmpty()
     207    {
     208        return !m_ptr;
     209    }
     210
     211    bool isFull()
     212    {
     213        return m_ptr && m_ptr->isFull();
     214    }
     215
     216    static size_t size()
     217    {
     218        return NextLevel::size();
     219    }
     220
     221#ifndef NDEBUG
     222    void dump(size_t parentOffset = 0, unsigned indent = 0)
     223    {
     224        ASSERT(m_ptr);
     225        m_ptr->dump(parentOffset, indent);
     226    }
     227#endif
     228
     229    static AllocationTableSizeClass classForSize(size_t size)
     230    {
     231        return NextLevel::classForSize(size);
     232    }
     233
     234private:
     235    NextLevel* m_ptr;
     236};
     237
     238template<class NextLevel, unsigned log2Entries>
     239class AllocationTableDirectory {
     240    typedef uint64_t BitField;
     241
     242public:
     243    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
     244    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
     245
     246    static const size_t subregionSize = TwoPow(log2SubregionSize);
     247    static const size_t regionSize = TwoPow(log2RegionSize);
     248    static const unsigned entries = TwoPow(log2Entries);
     249    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);
     250
     251    AllocationTableDirectory()
     252        : m_full(0)
     253        , m_hasSuballocation(0)
     254    {
     255    }
     256
     257    ~AllocationTableDirectory()
     258    {
     259        ASSERT(isEmpty());
     260    }
     261
     262    size_t allocate(AllocationTableSizeClass& sizeClass)
     263    {
     264        ASSERT(sizeClass.blockSize() <= subregionSize);
     265        ASSERT(!isFull());
     266
     267        if (sizeClass.blockSize() < subregionSize) {
     268            BitField bit = 1;
     269            for (unsigned i = 0; i < entries; ++i, bit += bit) {
     270                if (m_full & bit)
     271                    continue;
     272                size_t location = m_suballocations[i].allocate(sizeClass);
     273                if (location != notFound) {
     274                    // If this didn't already have a subregion, it does now!
     275                    m_hasSuballocation |= bit;
     276                    // Mirror the suballocation's full bit.
     277                    if (m_suballocations[i].isFull())
     278                        m_full |= bit;
     279                    return (i * subregionSize) | location;
     280                }
     281            }
     282            return notFound;
     283        }
     284
     285        // A block is allocated if either it is fully allocated or contains suballocations.
     286        BitField allocated = m_full | m_hasSuballocation;
     287
     288        size_t alignment = sizeClass.blockAlignment();
     289        size_t count = sizeClass.blockCount();
     290        // Use this mask to check for spans of free blocks.
     291        BitField mask = ((1ull << count) - 1) << (alignment - count);
     292
     293        // Step in units of alignment size.
     294        for (unsigned i = 0; i < entries; i += alignment) {
     295            if (!(allocated & mask)) {
     296                m_full |= mask;
     297                return (i + (alignment - count)) << log2SubregionSize;
     298            }
     299            mask <<= alignment;
     300        }
     301        return notFound;
     302    }
     303
     304    void free(size_t location, AllocationTableSizeClass& sizeClass)
     305    {
     306        ASSERT(sizeClass.blockSize() <= subregionSize);
     307
     308        size_t entry = location >> log2SubregionSize;
     309
     310        if (sizeClass.blockSize() < subregionSize) {
     311            BitField bit = 1ull << entry;
     312            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
     313            // Check if the suballocation is now empty.
     314            if (m_suballocations[entry].isEmpty())
     315                m_hasSuballocation &= ~bit;
     316            // No need to check, it clearly isn't full any more!
     317            m_full &= ~bit;
     318        } else {
     319            size_t count = sizeClass.blockCount();
     320            BitField mask = ((1ull << count) - 1) << entry;
     321            ASSERT((m_full & mask) == mask);
     322            ASSERT(!(m_hasSuballocation & mask));
     323            m_full &= ~mask;
     324        }
     325    }
     326
     327    bool isEmpty()
     328    {
     329        return !(m_full | m_hasSuballocation);
     330    }
     331
     332    bool isFull()
     333    {   
     334        return !~m_full;
     335    }
     336
     337    static size_t size()
     338    {
     339        return regionSize;
     340    }
     341
     342    static AllocationTableSizeClass classForSize(size_t size)
     343    {
     344        if (size < subregionSize) {
     345            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
     346            if (sizeClass.size() < NextLevel::size())
     347                return sizeClass;
     348        }
     349        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
     350    }
     351
     352#ifndef NDEBUG
     353    void dump(size_t parentOffset = 0, unsigned indent = 0)
     354    {
     355        for (unsigned i = 0; i < indent; ++i)
     356            fprintf(stderr, "    ");
     357        fprintf(stderr, "%08x: [", (int)parentOffset);
     358        for (unsigned i = 0; i < entries; ++i) {
     359            BitField bit = 1ull << i;
     360            char c = m_hasSuballocation & bit
     361                ? (m_full & bit ? 'N' : 'n')
     362                : (m_full & bit ? 'F' : '-');
     363            fprintf(stderr, "%c", c);
     364        }
     365        fprintf(stderr, "]\n");
     366
     367        for (unsigned i = 0; i < entries; ++i) {
     368            BitField bit = 1ull << i;
     369            size_t offset = parentOffset | (subregionSize * i);
     370            if (m_hasSuballocation & bit)
     371                m_suballocations[i].dump(offset, indent + 1);
     372        }
     373    }
     374#endif
     375
     376private:
     377    NextLevel m_suballocations[entries];
     378    // Subregions exist in one of four states:
     379    // (1) empty (both bits clear)
     380    // (2) fully allocated as a single allocation (m_full set)
     381    // (3) partially allocated through suballocations (m_hasSuballocation set)
     382    // (4) fully allocated through suballocations (both bits set)
     383    BitField m_full;
     384    BitField m_hasSuballocation;
     385};
     386
     387
     388typedef AllocationTableLeaf<6> PageTables256KB;
     389typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
     390typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
     391typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
     392
     393#if CPU(ARM)
     394typedef PageTables16MB FixedVMPoolPageTables;
     395#elif CPU(X86_64) && !OS(LINUX)
     396typedef PageTables1GB FixedVMPoolPageTables;
     397#else
     398typedef PageTables32MB FixedVMPoolPageTables;
     399#endif
     400
    142401
    143402class FixedVMPoolAllocator
    144403{
    145     // The free list is stored in a sorted tree.
    146     typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;
    147 
    148     void release(void* position, size_t size)
    149     {
    150         m_allocation.decommit(position, size);
    151         addToCommittedByteCount(-static_cast<long>(size));
    152     }
    153 
    154     void reuse(void* position, size_t size)
    155     {
    156         m_allocation.commit(position, size);
    157         addToCommittedByteCount(static_cast<long>(size));
    158     }
    159 
    160     // All addition to the free list should go through this method, rather than
    161     // calling insert directly, to avoid multiple entries being added with the
    162     // same key.  All nodes being added should be singletons, they should not
    163     // already be a part of a chain.
    164     void addToFreeList(FreeListEntry* entry)
    165     {
    166         ASSERT(!entry->nextEntry);
    167 
    168         if (entry->size == m_commonSize) {
    169             m_commonSizedAllocations.append(entry->pointer);
    170             delete entry;
    171         } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
    172             // m_freeList already contain an entry for this size - insert this node into the chain.
    173             entry->nextEntry = entryInFreeList->nextEntry;
    174             entryInFreeList->nextEntry = entry;
    175         } else
    176             m_freeList.insert(entry);
    177     }
    178 
    179     // We do not attempt to coalesce addition, which may lead to fragmentation;
    180     // instead we periodically perform a sweep to try to coalesce neighboring
    181     // entries in m_freeList.  Presently this is triggered at the point 16MB
    182     // of memory has been released.
    183     void coalesceFreeSpace()
    184     {
    185         Vector<FreeListEntry*> freeListEntries;
    186         SizeSortedFreeTree::Iterator iter;
    187         iter.start_iter_least(m_freeList);
    188 
    189         // Empty m_freeList into a Vector.
    190         for (FreeListEntry* entry; (entry = *iter); ++iter) {
    191             // Each entry in m_freeList might correspond to multiple
    192             // free chunks of memory (of the same size).  Walk the chain
    193             // (this is likely of course only be one entry long!) adding
    194             // each entry to the Vector (at reseting the next in chain
    195             // pointer to separate each node out).
    196             FreeListEntry* next;
    197             do {
    198                 next = entry->nextEntry;
    199                 entry->nextEntry = 0;
    200                 freeListEntries.append(entry);
    201             } while ((entry = next));
    202         }
    203         // All entries are now in the Vector; purge the tree.
    204         m_freeList.purge();
    205 
    206         // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
    207         // We reverse-sort so that we can logically work forwards through memory,
    208         // whilst popping items off the end of the Vectors using last() and removeLast().
    209         qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
    210         qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);
    211 
    212         // The entries from m_commonSizedAllocations that cannot be
    213         // coalesced into larger chunks will be temporarily stored here.
    214         Vector<void*> newCommonSizedAllocations;
    215 
    216         // Keep processing so long as entries remain in either of the vectors.
    217         while (freeListEntries.size() || m_commonSizedAllocations.size()) {
    218             // We're going to try to find a FreeListEntry node that we can coalesce onto.
    219             FreeListEntry* coalescionEntry = 0;
    220 
    221             // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
    222             if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
    223                 // Pop an item from the m_commonSizedAllocations vector - this is the lowest
    224                 // addressed free chunk.  Find out the begin and end addresses of the memory chunk.
    225                 void* begin = m_commonSizedAllocations.last();
    226                 void* end = (void*)((intptr_t)begin + m_commonSize);
    227                 m_commonSizedAllocations.removeLast();
    228 
    229                 // Try to find another free chunk abutting onto the end of the one we have already found.
    230                 if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
    231                     // There is an existing FreeListEntry for the next chunk of memory!
    232                     // we can reuse this.  Pop it off the end of m_freeList.
    233                     coalescionEntry = freeListEntries.last();
    234                     freeListEntries.removeLast();
    235                     // Update the existing node to include the common-sized chunk that we also found.
    236                     coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
    237                     coalescionEntry->size += m_commonSize;
    238                 } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
    239                     // There is a second common-sized chunk that can be coalesced.
    240                     // Allocate a new node.
    241                     m_commonSizedAllocations.removeLast();
    242                     coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
    243                 } else {
    244                     // Nope - this poor little guy is all on his own. :-(
    245                     // Add him into the newCommonSizedAllocations vector for now, we're
    246                     // going to end up adding him back into the m_commonSizedAllocations
    247                     // list when we're done.
    248                     newCommonSizedAllocations.append(begin);
    249                     continue;
    250                 }
    251             } else {
    252                 ASSERT(freeListEntries.size());
    253                 ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
    254                 // The lowest addressed item is from m_freeList; pop it from the Vector.
    255                 coalescionEntry = freeListEntries.last();
    256                 freeListEntries.removeLast();
    257             }
    258            
    259             // Right, we have a FreeListEntry, we just need check if there is anything else
    260             // to coalesce onto the end.
    261             ASSERT(coalescionEntry);
    262             while (true) {
    263                 // Calculate the end address of the chunk we have found so far.
    264                 void* end = (void*)((intptr_t)coalescionEntry->pointer - coalescionEntry->size);
    265 
    266                 // Is there another chunk adjacent to the one we already have?
    267                 if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
    268                     // Yes - another FreeListEntry -pop it from the list.
    269                     FreeListEntry* coalescee = freeListEntries.last();
    270                     freeListEntries.removeLast();
    271                     // Add it's size onto our existing node.
    272                     coalescionEntry->size += coalescee->size;
    273                     delete coalescee;
    274                 } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
    275                     // We can coalesce the next common-sized chunk.
    276                     m_commonSizedAllocations.removeLast();
    277                     coalescionEntry->size += m_commonSize;
    278                 } else
    279                     break; // Nope, nothing to be added - stop here.
    280             }
    281 
    282             // We've coalesced everything we can onto the current chunk.
    283             // Add it back into m_freeList.
    284             addToFreeList(coalescionEntry);
    285         }
    286 
    287         // All chunks of free memory larger than m_commonSize should be
    288         // back in m_freeList by now.  All that remains to be done is to
    289         // copy the contents on the newCommonSizedAllocations back into
    290         // the m_commonSizedAllocations Vector.
    291         ASSERT(m_commonSizedAllocations.size() == 0);
    292         m_commonSizedAllocations.append(newCommonSizedAllocations);
    293     }
    294 
    295404public:
    296 
    297     FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
    298         : m_commonSize(commonSize)
    299         , m_countFreedSinceLastCoalesce(0)
    300     {
    301         m_allocation = PageReservation::reserve(totalHeapSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
    302 
    303         if (!!m_allocation)
    304             m_freeList.insert(new FreeListEntry(m_allocation.base(), m_allocation.size()));
     405    FixedVMPoolAllocator()
     406    {
     407        ASSERT(PageTables256KB::size() == 256 * 1024);
     408        ASSERT(PageTables16MB::size() == 16 * 1024 * 1024);
     409        ASSERT(PageTables32MB::size() == 32 * 1024 * 1024);
     410        ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024);
     411
     412        m_reservation = PageReservation::reserve(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
    305413#if !ENABLE(INTERPRETER)
    306         else
     414        if (!isValid())
    307415            CRASH();
    308416#endif
    309417    }
    310 
    311     ExecutablePool::Allocation alloc(size_t size)
    312     {
    313         return ExecutablePool::Allocation(allocInternal(size), size);
     418 
     419    ExecutablePool::Allocation alloc(size_t requestedSize)
     420    {
     421        ASSERT(requestedSize);
     422        AllocationTableSizeClass sizeClass = classForSize(requestedSize);
     423        size_t size = sizeClass.size();
     424        ASSERT(size);
     425
     426        if (size >= FixedVMPoolPageTables::size())
     427            CRASH();
     428        if (m_pages.isFull())
     429            CRASH();
     430
     431        size_t offset = m_pages.allocate(sizeClass);
     432        if (offset == notFound)
     433            CRASH();
     434
     435        void* pointer = offsetToPointer(offset);
     436        m_reservation.commit(pointer, size);
     437        return ExecutablePool::Allocation(pointer, size);
    314438    }
    315439
     
    318442        void* pointer = allocation.base();
    319443        size_t size = allocation.size();
    320 
    321         ASSERT(!!m_allocation);
    322         // Call release to report to the operating system that this
    323         // memory is no longer in use, and need not be paged out.
    324         ASSERT(isWithinVMPool(pointer, size));
    325         release(pointer, size);
    326 
    327         // Common-sized allocations are stored in the m_commonSizedAllocations
    328         // vector; all other freed chunks are added to m_freeList.
    329         if (size == m_commonSize)
    330             m_commonSizedAllocations.append(pointer);
    331         else
    332             addToFreeList(new FreeListEntry(pointer, size));
    333 
    334         // Do some housekeeping.  Every time we reach a point that
    335         // 16MB of allocations have been freed, sweep m_freeList
    336         // coalescing any neighboring fragments.
    337         m_countFreedSinceLastCoalesce += size;
    338         if (m_countFreedSinceLastCoalesce >= coalesceLimit) {
    339             m_countFreedSinceLastCoalesce = 0;
    340             coalesceFreeSpace();
    341         }
    342     }
    343 
    344     bool isValid() const { return !!m_allocation; }
     444        ASSERT(size);
     445
     446        m_reservation.decommit(pointer, size);
     447
     448        AllocationTableSizeClass sizeClass = classForSize(size);
     449        ASSERT(sizeClass.size() == size);
     450        m_pages.free(pointerToOffset(pointer), sizeClass);
     451    }
     452
     453    size_t allocated()
     454    {
     455        return m_reservation.committed();
     456    }
     457
     458    bool isValid() const
     459    {
     460        return !!m_reservation;
     461    }
    345462
    346463private:
    347     void* allocInternal(size_t size)
    348     {
    349 #if ENABLE(INTERPRETER)
    350         if (!m_allocation)
    351             return 0;
    352 #else
    353         ASSERT(!!m_allocation);
    354 #endif
    355         void* result;
    356 
    357         // Freed allocations of the common size are not stored back into the main
    358         // m_freeList, but are instead stored in a separate vector.  If the request
    359         // is for a common sized allocation, check this list.
    360         if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
    361             result = m_commonSizedAllocations.last();
    362             m_commonSizedAllocations.removeLast();
    363         } else {
    364             // Search m_freeList for a suitable sized chunk to allocate memory from.
    365             FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
    366 
    367             // This would be bad news.
    368             if (!entry) {
    369                 // Errk!  Lets take a last-ditch desperation attempt at defragmentation...
    370                 coalesceFreeSpace();
    371                 // Did that free up a large enough chunk?
    372                 entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
    373                 // No?...  *BOOM!*
    374                 if (!entry)
    375                     CRASH();
    376             }
    377             ASSERT(entry->size != m_commonSize);
    378 
    379             // Remove the entry from m_freeList.  But! -
    380             // Each entry in the tree may represent a chain of multiple chunks of the
    381             // same size, and we only want to remove one on them.  So, if this entry
    382             // does have a chain, just remove the first-but-one item from the chain.
    383             if (FreeListEntry* next = entry->nextEntry) {
    384                 // We're going to leave 'entry' in the tree; remove 'next' from its chain.
    385                 entry->nextEntry = next->nextEntry;
    386                 next->nextEntry = 0;
    387                 entry = next;
    388             } else
    389                 m_freeList.remove(entry->size);
    390 
    391             // Whoo!, we have a result!
    392             ASSERT(entry->size >= size);
    393             result = entry->pointer;
    394 
    395             // If the allocation exactly fits the chunk we found in the,
    396             // m_freeList then the FreeListEntry node is no longer needed.
    397             if (entry->size == size)
    398                 delete entry;
    399             else {
    400                 // There is memory left over, and it is not of the common size.
    401                 // We can reuse the existing FreeListEntry node to add this back
    402                 // into m_freeList.
    403                 entry->pointer = (void*)((intptr_t)entry->pointer + size);
    404                 entry->size -= size;
    405                 addToFreeList(entry);
    406             }
    407         }
    408 
    409         // Call reuse to report to the operating system that this memory is in use.
    410         ASSERT(isWithinVMPool(result, size));
    411         reuse(result, size);
    412         return result;
    413     }
    414 
    415 #ifndef NDEBUG
    416     bool isWithinVMPool(void* pointer, size_t size)
    417     {
    418         return pointer >= m_allocation.base() && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_allocation.base()) + m_allocation.size());
    419     }
    420 #endif
    421 
    422     void addToCommittedByteCount(long byteCount)
    423     {
    424         ASSERT(spinlock.IsHeld());
    425         ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
    426         committedBytesCount += byteCount;
    427     }
    428 
    429     // Freed space from the most common sized allocations will be held in this list, ...
    430     const size_t m_commonSize;
    431     Vector<void*> m_commonSizedAllocations;
    432 
    433     // ... and all other freed allocations are held in m_freeList.
    434     SizeSortedFreeTree m_freeList;
    435 
    436     // This is used for housekeeping, to trigger defragmentation of the freed lists.
    437     size_t m_countFreedSinceLastCoalesce;
    438 
    439     PageReservation m_allocation;
     464    AllocationTableSizeClass classForSize(size_t size)
     465    {
     466        return FixedVMPoolPageTables::classForSize(size);
     467    }
     468
     469    void* offsetToPointer(size_t offset)
     470    {
     471        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset);
     472    }
     473
     474    size_t pointerToOffset(void* pointer)
     475    {
     476        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base());
     477    }
     478
     479    PageReservation m_reservation;
     480    FixedVMPoolPageTables m_pages;
    440481};
    441482
     483
     484static SpinLock spinlock = SPINLOCK_INITIALIZER;
     485static FixedVMPoolAllocator* allocator = 0;
     486
     487
    442488size_t ExecutableAllocator::committedByteCount()
    443489{
    444490    SpinLockHolder lockHolder(&spinlock);
    445     return committedBytesCount;
     491    return allocator ? allocator->allocated() : 0;
    446492}   
    447493
     
    451497}
    452498
    453 static FixedVMPoolAllocator* allocator = 0;
    454 static size_t allocatedCount = 0;
    455 
    456 #if OS(LINUX)
    457 static void maybeModifyVMPoolSize()
    458 {
    459     FILE* fp = fopen("/proc/sys/vm/overcommit_memory", "r");
    460     if (!fp)
    461         return;
    462 
    463     unsigned overcommit = 0;
    464     if (fscanf(fp, "%u", &overcommit) == 1) {
    465         if (overcommit == 1) {
    466             vmPoolSize = vmPoolSizeOvercommit;
    467             coalesceLimit = coalesceLimitOvercommit;
    468         }
    469     }
    470 
    471     fclose(fp);
    472 }
    473 #endif
    474 
    475499bool ExecutableAllocator::isValid() const
    476500{
    477501    SpinLockHolder lock_holder(&spinlock);
    478     if (!allocator) {
    479 #if OS(LINUX)
    480         maybeModifyVMPoolSize();
    481 #endif
    482         allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, vmPoolSize);
    483     }
     502    if (!allocator)
     503        allocator = new FixedVMPoolAllocator();
    484504    return allocator->isValid();
    485505}
     
    489509    // Technically we should take the spin lock here, but we don't care if we get stale data.
    490510    // This is only really a heuristic anyway.
    491     return allocatedCount > (vmPoolSize / 2);
     511    return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2));
    492512}
    493513
     
    496516    SpinLockHolder lock_holder(&spinlock);
    497517    ASSERT(allocator);
    498     allocatedCount += size;
    499518    return allocator->alloc(size);
    500519}
     
    504523    SpinLockHolder lock_holder(&spinlock);
    505524    ASSERT(allocator);
    506     allocatedCount -= allocation.size();
    507525    allocator->free(allocation);
    508526}
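
As a worked illustration of the size-class rounding done by the AllocationTableSizeClass constructor added above (a standalone sketch with hypothetical values; the program below is not part of the patch): a request is rounded up to whole 4KB blocks and given a power-of-two block alignment, so each bitmap level can step through its table in alignment-sized strides.

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t log2BlockSize = 12;                     // 4KB pages, as in the leaf tables
        const size_t blockSize = size_t(1) << log2BlockSize;

        size_t requested = 20 * 1024;                        // hypothetical 20KB request
        size_t blockCount = (requested + blockSize - 1) >> log2BlockSize; // round up -> 5 blocks
        size_t blockAlignment = 1;
        while (blockAlignment < blockCount)
            blockAlignment += blockAlignment;                // smallest power of two >= count -> 8

        printf("size class: %zu blocks of %zu bytes, aligned to a %zu-block boundary (%zu bytes committed)\n",
               blockCount, blockSize, blockAlignment, blockCount * blockSize);
        return 0;
    }

For a 20KB request this gives 5 blocks with an 8-block alignment, so 20KB is committed inside a 32KB-aligned slot; first fit then takes the earliest such slot with room, which is the behaviour the changeset's message points to as less prone to external fragmentation.
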
  • trunk/Source/JavaScriptCore/wtf/PageReservation.h

    r76409 r77025  
    5858public:
    5959    PageReservation()
    60         : m_writable(false)
     60        : m_committed(0)
     61        , m_writable(false)
    6162        , m_executable(false)
    62 #ifndef NDEBUG
    63         , m_committed(0)
    64 #endif
    6563    {
    6664    }
     
    8482        ASSERT(contains(start, size));
    8583
    86 #ifndef NDEBUG
    8784        m_committed += size;
    88 #endif
    8985        OSAllocator::commit(start, size, m_writable, m_executable);
    9086    }
     
    9793        ASSERT(contains(start, size));
    9894
    99 #ifndef NDEBUG
    10095        m_committed -= size;
    101 #endif
    10296        OSAllocator::decommit(start, size);
     97    }
     98
     99    size_t committed()
     100    {
     101        return m_committed;
    103102    }
    104103
     
    127126    PageReservation(void* base, size_t size, bool writable, bool executable)
    128127        : PageBlock(base, size)
     128        , m_committed(0)
    129129        , m_writable(writable)
    130130        , m_executable(executable)
    131 #ifndef NDEBUG
    132         , m_committed(0)
    133 #endif
    134131    {
    135132    }
    136133
     134    size_t m_committed;
    137135    bool m_writable;
    138136    bool m_executable;
    139 #ifndef NDEBUG
    140     size_t m_committed;
    141 #endif
    142137};
    143138