// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00027 00028 #ifndef V8_SPACES_H_ 00029 #define V8_SPACES_H_ 00030 00031 #include "list-inl.h" 00032 #include "log.h" 00033 00034 namespace v8 { namespace internal { 00035 00036 // ----------------------------------------------------------------------------- 00037 // Heap structures: 00038 // 00039 // A JS heap consists of a young generation, an old generation, and a large 00040 // object space. The young generation is divided into two semispaces. A 00041 // scavenger implements Cheney's copying algorithm. The old generation is 00042 // separated into a map space and an old object space. The map space contains 00043 // all (and only) map objects, the rest of old objects go into the old space. 00044 // The old generation is collected by a mark-sweep-compact collector. 00045 // 00046 // The semispaces of the young generation are contiguous. The old and map 00047 // spaces consists of a list of pages. A page has a page header, a remembered 00048 // set area, and an object area. A page size is deliberately chosen as 8K 00049 // bytes. The first word of a page is an opaque page header that has the 00050 // address of the next page and its ownership information. The second word may 00051 // have the allocation top address of this page. The next 248 bytes are 00052 // remembered sets. Heap objects are aligned to the pointer size (4 bytes). A 00053 // remembered set bit corresponds to a pointer in the object area. 00054 // 00055 // There is a separate large object space for objects larger than 00056 // Page::kMaxHeapObjectSize, so that they do not have to move during 00057 // collection. The large object space is paged and uses the same remembered 00058 // set implementation. Pages in large object space may be larger than 8K. 00059 // 00060 // NOTE: The mark-compact collector rebuilds the remembered set after a 00061 // collection. It reuses first a few words of the remembered set for 00062 // bookkeeping relocation information. 
00063 00064 00065 // Some assertion macros used in the debugging mode. 00066 00067 #define ASSERT_PAGE_ALIGNED(address) \ 00068 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) 00069 00070 #define ASSERT_OBJECT_ALIGNED(address) \ 00071 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) 00072 00073 #define ASSERT_OBJECT_SIZE(size) \ 00074 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) 00075 00076 #define ASSERT_PAGE_OFFSET(offset) \ 00077 ASSERT((Page::kObjectStartOffset <= offset) \ 00078 && (offset <= Page::kPageSize)) 00079 00080 #define ASSERT_MAP_PAGE_INDEX(index) \ 00081 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) 00082 00083 00084 class PagedSpace; 00085 class MemoryAllocator; 00086 class AllocationInfo; 00087 00088 // ----------------------------------------------------------------------------- 00089 // A page normally has 8K bytes. Large object pages may be larger. A page 00090 // address is always aligned to the 8K page size. A page is divided into 00091 // three areas: the first two words are used for bookkeeping, the next 248 00092 // bytes are used as remembered set, and the rest of the page is the object 00093 // area. 00094 // 00095 // Pointers are aligned to the pointer size (4 bytes), only 1 bit is needed 00096 // for a pointer in the remembered set. Given an address, its remembered set 00097 // bit position (offset from the start of the page) is calculated by dividing 00098 // its page offset by 32. Therefore, the object area in a page starts at the 00099 // 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that 00100 // the first two words (64 bits) in a page can be used for other purposes. 00101 // 00102 // The mark-compact collector transforms a map pointer into a page index and a 00103 // page offset. The map space can have up to 1024 pages, and 8M bytes (1024 * 00104 // 8K) in total. 
Because a map pointer is aligned to the pointer size (4 00105 // bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the 00106 // page index + 11 for the offset in the page) are required to encode a map 00107 // pointer. 00108 // 00109 // The only way to get a page pointer is by calling factory methods: 00110 // Page* p = Page::FromAddress(addr); or 00111 // Page* p = Page::FromAllocationTop(top); 00112 class Page { 00113 public: 00114 // Returns the page containing a given address. The address ranges 00115 // from [page_addr .. page_addr + kPageSize[ 00116 // 00117 // Note that this function only works for addresses in normal paged 00118 // spaces and addresses in the first 8K of large object pages (ie, 00119 // the start of large objects but not necessarily derived pointers 00120 // within them). 00121 INLINE(static Page* FromAddress(Address a)) { 00122 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); 00123 } 00124 00125 // Returns the page containing an allocation top. Because an allocation 00126 // top address can be the upper bound of the page, we need to subtract 00127 // it with kPointerSize first. The address ranges from 00128 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. 00129 INLINE(static Page* FromAllocationTop(Address top)) { 00130 Page* p = FromAddress(top - kPointerSize); 00131 ASSERT_PAGE_OFFSET(p->Offset(top)); 00132 return p; 00133 } 00134 00135 // Returns the start address of this page. 00136 Address address() { return reinterpret_cast<Address>(this); } 00137 00138 // Checks whether this is a valid page address. 00139 bool is_valid() { return address() != NULL; } 00140 00141 // Returns the next page of this page. 00142 inline Page* next_page(); 00143 00144 // Return the end of allocation in this page. Undefined for unused pages. 00145 inline Address AllocationTop(); 00146 00147 // Returns the start address of the object area in this page. 
00148 Address ObjectAreaStart() { return address() + kObjectStartOffset; } 00149 00150 // Returns the end address (exclusive) of the object area in this page. 00151 Address ObjectAreaEnd() { return address() + Page::kPageSize; } 00152 00153 // Returns the start address of the remembered set area. 00154 Address RSetStart() { return address() + kRSetStartOffset; } 00155 00156 // Returns the end address of the remembered set area (exclusive). 00157 Address RSetEnd() { return address() + kRSetEndOffset; } 00158 00159 // Checks whether an address is page aligned. 00160 static bool IsAlignedToPageSize(Address a) { 00161 return 0 == (OffsetFrom(a) & kPageAlignmentMask); 00162 } 00163 00164 // True if this page is a large object page. 00165 bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; } 00166 00167 // Returns the offset of a given address to this page. 00168 INLINE(int Offset(Address a)) { 00169 int offset = a - address(); 00170 ASSERT_PAGE_OFFSET(offset); 00171 return offset; 00172 } 00173 00174 // Returns the address for a given offset to the this page. 00175 Address OffsetToAddress(int offset) { 00176 ASSERT_PAGE_OFFSET(offset); 00177 return address() + offset; 00178 } 00179 00180 // --------------------------------------------------------------------- 00181 // Remembered set support 00182 00183 // Clears remembered set in this page. 00184 inline void ClearRSet(); 00185 00186 // Return the address of the remembered set word corresponding to an 00187 // object address/offset pair, and the bit encoded as a single-bit 00188 // mask in the output parameter 'bitmask'. 00189 INLINE(static Address ComputeRSetBitPosition(Address address, int offset, 00190 uint32_t* bitmask)); 00191 00192 // Sets the corresponding remembered set bit for a given address. 00193 INLINE(static void SetRSet(Address address, int offset)); 00194 00195 // Clears the corresponding remembered set bit for a given address. 
00196 static inline void UnsetRSet(Address address, int offset); 00197 00198 // Checks whether the remembered set bit for a given address is set. 00199 static inline bool IsRSetSet(Address address, int offset); 00200 00201 #ifdef DEBUG 00202 // Use a state to mark whether remembered set space can be used for other 00203 // purposes. 00204 enum RSetState { IN_USE, NOT_IN_USE }; 00205 static bool is_rset_in_use() { return rset_state_ == IN_USE; } 00206 static void set_rset_state(RSetState state) { rset_state_ = state; } 00207 #endif 00208 00209 // 8K bytes per page. 00210 static const int kPageSizeBits = 13; 00211 00212 // Page size in bytes. This must be a multiple of the OS page size. 00213 static const int kPageSize = 1 << kPageSizeBits; 00214 00215 // Page size mask. 00216 static const int kPageAlignmentMask = (1 << kPageSizeBits) - 1; 00217 00218 // The end offset of the remembered set in a page 00219 // (heaps are aligned to pointer size). 00220 static const int kRSetEndOffset= kPageSize / kBitsPerPointer; 00221 00222 // The start offset of the remembered set in a page. 00223 static const int kRSetStartOffset = kRSetEndOffset / kBitsPerPointer; 00224 00225 // The start offset of the object area in a page. 00226 static const int kObjectStartOffset = kRSetEndOffset; 00227 00228 // Object area size in bytes. 00229 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; 00230 00231 // Maximum object size that fits in a page. 00232 static const int kMaxHeapObjectSize = kObjectAreaSize; 00233 00234 //--------------------------------------------------------------------------- 00235 // Page header description. 00236 // 00237 // If a page is not in the large object space, the first word, 00238 // opaque_header, encodes the next page address (aligned to kPageSize 8K) 00239 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use 00240 // opaque_header. 
The value range of the opaque_header is [0..kPageSize[, 00241 // or [next_page_start, next_page_end[. It cannot point to a valid address 00242 // in the current page. If a page is in the large object space, the first 00243 // word *may* (if the page start and large object chunk start are the 00244 // same) contain the address of the next large object chunk. 00245 int opaque_header; 00246 00247 // If the page is not in the large object space, the low-order bit of the 00248 // second word is set. If the page is in the large object space, the 00249 // second word *may* (if the page start and large object chunk start are 00250 // the same) contain the large object chunk size. In either case, the 00251 // low-order bit for large object pages will be cleared. 00252 int is_normal_page; 00253 00254 // The following fields overlap with remembered set, they can only 00255 // be used in the mark-compact collector when remembered set is not 00256 // used. 00257 00258 // The allocation pointer after relocating objects to this page. 00259 Address mc_relocation_top; 00260 00261 // The index of the page in its owner space. 00262 int mc_page_index; 00263 00264 // The forwarding address of the first live object in this page. 00265 Address mc_first_forwarded; 00266 00267 #ifdef DEBUG 00268 private: 00269 static RSetState rset_state_; // state of the remembered set 00270 #endif 00271 }; 00272 00273 00274 // ---------------------------------------------------------------------------- 00275 // Space is the abstract superclass for all allocation spaces. 00276 class Space : public Malloced { 00277 public: 00278 Space(AllocationSpace id, Executability executable) 00279 : id_(id), executable_(executable) {} 00280 virtual ~Space() {} 00281 // Does the space need executable memory? 00282 Executability executable() { return executable_; } 00283 // Identity used in error reporting. 
00284 AllocationSpace identity() { return id_; } 00285 virtual int Size() = 0; 00286 #ifdef DEBUG 00287 virtual void Verify() = 0; 00288 virtual void Print() = 0; 00289 #endif 00290 private: 00291 AllocationSpace id_; 00292 Executability executable_; 00293 }; 00294 00295 00296 // ---------------------------------------------------------------------------- 00297 // A space acquires chunks of memory from the operating system. The memory 00298 // allocator manages chunks for the paged heap spaces (old space and map 00299 // space). A paged chunk consists of pages. Pages in a chunk have contiguous 00300 // addresses and are linked as a list. 00301 // 00302 // The allocator keeps an initial chunk which is used for the new space. The 00303 // leftover regions of the initial chunk are used for the initial chunks of 00304 // old space and map space if they are big enough to hold at least one page. 00305 // The allocator assumes that there is one old space and one map space, each 00306 // expands the space by allocating kPagesPerChunk pages except the last 00307 // expansion (before running out of space). The first chunk may contain fewer 00308 // than kPagesPerChunk pages as well. 00309 // 00310 // The memory allocator also allocates chunks for the large object space, but 00311 // they are managed by the space itself. The new space does not expand. 00312 00313 class MemoryAllocator : public AllStatic { 00314 public: 00315 // Initializes its internal bookkeeping structures. 00316 // Max capacity of the total space. 00317 static bool Setup(int max_capacity); 00318 00319 // Deletes valid chunks. 00320 static void TearDown(); 00321 00322 // Reserves an initial address range of virtual memory to be split between 00323 // the two new space semispaces, the old space, and the map space. The 00324 // memory is not yet committed or assigned to spaces and split into pages. 00325 // The initial chunk is unmapped when the memory allocator is torn down. 
00326 // This function should only be called when there is not already a reserved 00327 // initial chunk (initial_chunk_ should be NULL). It returns the start 00328 // address of the initial chunk if successful, with the side effect of 00329 // setting the initial chunk, or else NULL if unsuccessful and leaves the 00330 // initial chunk NULL. 00331 static void* ReserveInitialChunk(const size_t requested); 00332 00333 // Commits pages from an as-yet-unmanaged block of virtual memory into a 00334 // paged space. The block should be part of the initial chunk reserved via 00335 // a call to ReserveInitialChunk. The number of pages is always returned in 00336 // the output parameter num_pages. This function assumes that the start 00337 // address is non-null and that it is big enough to hold at least one 00338 // page-aligned page. The call always succeeds, and num_pages is always 00339 // greater than zero. 00340 static Page* CommitPages(Address start, size_t size, PagedSpace* owner, 00341 int* num_pages); 00342 00343 // Commit a contiguous block of memory from the initial chunk. Assumes that 00344 // the address is not NULL, the size is greater than zero, and that the 00345 // block is contained in the initial chunk. Returns true if it succeeded 00346 // and false otherwise. 00347 static bool CommitBlock(Address start, size_t size, Executability executable); 00348 00349 // Attempts to allocate the requested (non-zero) number of pages from the 00350 // OS. Fewer pages might be allocated than requested. If it fails to 00351 // allocate memory for the OS or cannot allocate a single page, this 00352 // function returns an invalid page pointer (NULL). The caller must check 00353 // whether the returned page is valid (by calling Page::is_valid()). It is 00354 // guaranteed that allocated pages have contiguous addresses. The actual 00355 // number of allocated page is returned in the output parameter 00356 // allocated_pages. 
00357 static Page* AllocatePages(int requested_pages, int* allocated_pages, 00358 PagedSpace* owner); 00359 00360 // Frees pages from a given page and after. If 'p' is the first page 00361 // of a chunk, pages from 'p' are freed and this function returns an 00362 // invalid page pointer. Otherwise, the function searches a page 00363 // after 'p' that is the first page of a chunk. Pages after the 00364 // found page are freed and the function returns 'p'. 00365 static Page* FreePages(Page* p); 00366 00367 // Allocates and frees raw memory of certain size. 00368 // These are just thin wrappers around OS::Allocate and OS::Free, 00369 // but keep track of allocated bytes as part of heap. 00370 static void* AllocateRawMemory(const size_t requested, 00371 size_t* allocated, 00372 Executability executable); 00373 static void FreeRawMemory(void* buf, size_t length); 00374 00375 // Returns the maximum available bytes of heaps. 00376 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } 00377 00378 // Returns maximum available bytes that the old space can have. 00379 static int MaxAvailable() { 00380 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; 00381 } 00382 00383 // Links two pages. 00384 static inline void SetNextPage(Page* prev, Page* next); 00385 00386 // Returns the next page of a given page. 00387 static inline Page* GetNextPage(Page* p); 00388 00389 // Checks whether a page belongs to a space. 00390 static inline bool IsPageInSpace(Page* p, PagedSpace* space); 00391 00392 // Returns the space that owns the given page. 00393 static inline PagedSpace* PageOwner(Page* page); 00394 00395 // Finds the first/last page in the same chunk as a given page. 00396 static Page* FindFirstPageInSameChunk(Page* p); 00397 static Page* FindLastPageInSameChunk(Page* p); 00398 00399 #ifdef DEBUG 00400 // Reports statistic info of the space. 
00401 static void ReportStatistics(); 00402 #endif 00403 00404 // Due to encoding limitation, we can only have 8K chunks. 00405 static const int kMaxNofChunks = 1 << Page::kPageSizeBits; 00406 // If a chunk has at least 32 pages, the maximum heap size is about 00407 // 8 * 1024 * 32 * 8K = 2G bytes. 00408 static const int kPagesPerChunk = 64; 00409 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; 00410 00411 private: 00412 // Maximum space size in bytes. 00413 static int capacity_; 00414 00415 // Allocated space size in bytes. 00416 static int size_; 00417 00418 // The initial chunk of virtual memory. 00419 static VirtualMemory* initial_chunk_; 00420 00421 // Allocated chunk info: chunk start address, chunk size, and owning space. 00422 class ChunkInfo BASE_EMBEDDED { 00423 public: 00424 ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {} 00425 void init(Address a, size_t s, PagedSpace* o) { 00426 address_ = a; 00427 size_ = s; 00428 owner_ = o; 00429 } 00430 Address address() { return address_; } 00431 size_t size() { return size_; } 00432 PagedSpace* owner() { return owner_; } 00433 00434 private: 00435 Address address_; 00436 size_t size_; 00437 PagedSpace* owner_; 00438 }; 00439 00440 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids. 00441 static List<ChunkInfo> chunks_; 00442 static List<int> free_chunk_ids_; 00443 static int max_nof_chunks_; 00444 static int top_; 00445 00446 // Push/pop a free chunk id onto/from the stack. 00447 static void Push(int free_chunk_id); 00448 static int Pop(); 00449 static bool OutOfChunkIds() { return top_ == 0; } 00450 00451 // Frees a chunk. 00452 static void DeleteChunk(int chunk_id); 00453 00454 // Basic check whether a chunk id is in the valid range. 00455 static inline bool IsValidChunkId(int chunk_id); 00456 00457 // Checks whether a chunk id identifies an allocated chunk. 
00458 static inline bool IsValidChunk(int chunk_id); 00459 00460 // Returns the chunk id that a page belongs to. 00461 static inline int GetChunkId(Page* p); 00462 00463 // Initializes pages in a chunk. Returns the first page address. 00464 // This function and GetChunkId() are provided for the mark-compact 00465 // collector to rebuild page headers in the from space, which is 00466 // used as a marking stack and its page headers are destroyed. 00467 static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, 00468 PagedSpace* owner); 00469 }; 00470 00471 00472 // ----------------------------------------------------------------------------- 00473 // Interface for heap object iterator to be implemented by all object space 00474 // object iterators. 00475 // 00476 // NOTE: The space specific object iterators also implements the own has_next() 00477 // and next() methods which are used to avoid using virtual functions 00478 // iterating a specific space. 00479 00480 class ObjectIterator : public Malloced { 00481 public: 00482 virtual ~ObjectIterator() { } 00483 00484 virtual bool has_next_object() = 0; 00485 virtual HeapObject* next_object() = 0; 00486 }; 00487 00488 00489 // ----------------------------------------------------------------------------- 00490 // Heap object iterator in new/old/map spaces. 00491 // 00492 // A HeapObjectIterator iterates objects from a given address to the 00493 // top of a space. The given address must be below the current 00494 // allocation pointer (space top). If the space top changes during 00495 // iteration (because of allocating new objects), the iterator does 00496 // not iterate new objects. The caller function must create a new 00497 // iterator starting from the old top in order to visit these new 00498 // objects. Heap::Scavenage() is such an example. 00499 00500 class HeapObjectIterator: public ObjectIterator { 00501 public: 00502 // Creates a new object iterator in a given space. 
If a start 00503 // address is not given, the iterator starts from the space bottom. 00504 // If the size function is not given, the iterator calls the default 00505 // Object::Size(). 00506 explicit HeapObjectIterator(PagedSpace* space); 00507 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); 00508 HeapObjectIterator(PagedSpace* space, Address start); 00509 HeapObjectIterator(PagedSpace* space, 00510 Address start, 00511 HeapObjectCallback size_func); 00512 00513 inline bool has_next(); 00514 inline HeapObject* next(); 00515 00516 // implementation of ObjectIterator. 00517 virtual bool has_next_object() { return has_next(); } 00518 virtual HeapObject* next_object() { return next(); } 00519 00520 private: 00521 Address cur_addr_; // current iteration point 00522 Address end_addr_; // end iteration point 00523 Address cur_limit_; // current page limit 00524 HeapObjectCallback size_func_; // size function 00525 Page* end_page_; // caches the page of the end address 00526 00527 // Slow path of has_next, checks whether there are more objects in 00528 // the next page. 00529 bool HasNextInNextPage(); 00530 00531 // Initializes fields. 00532 void Initialize(Address start, Address end, HeapObjectCallback size_func); 00533 00534 #ifdef DEBUG 00535 // Verifies whether fields have valid values. 00536 void Verify(); 00537 #endif 00538 }; 00539 00540 00541 // ----------------------------------------------------------------------------- 00542 // A PageIterator iterates pages in a space. 00543 // 00544 // The PageIterator class provides three modes for iterating pages in a space: 00545 // PAGES_IN_USE iterates pages that are in use by the allocator; 00546 // PAGES_USED_BY_GC iterates pages that hold relocated objects during a 00547 // mark-compact collection; 00548 // ALL_PAGES iterates all pages in the space. 
00549 00550 class PageIterator BASE_EMBEDDED { 00551 public: 00552 enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES}; 00553 00554 PageIterator(PagedSpace* space, Mode mode); 00555 00556 inline bool has_next(); 00557 inline Page* next(); 00558 00559 private: 00560 Page* cur_page_; // next page to return 00561 Page* stop_page_; // page where to stop 00562 }; 00563 00564 00565 // ----------------------------------------------------------------------------- 00566 // A space has a list of pages. The next page can be accessed via 00567 // Page::next_page() call. The next page of the last page is an 00568 // invalid page pointer. A space can expand and shrink dynamically. 00569 00570 // An abstraction of allocation and relocation pointers in a page-structured 00571 // space. 00572 class AllocationInfo { 00573 public: 00574 Address top; // current allocation top 00575 Address limit; // current allocation limit 00576 00577 #ifdef DEBUG 00578 bool VerifyPagedAllocation() { 00579 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) 00580 && (top <= limit); 00581 } 00582 #endif 00583 }; 00584 00585 00586 // An abstraction of the accounting statistics of a page-structured space. 00587 // The 'capacity' of a space is the number of object-area bytes (ie, not 00588 // including page bookkeeping structures) currently in the space. The 'size' 00589 // of a space is the number of allocated bytes, the 'waste' in the space is 00590 // the number of bytes that are not allocated and not available to 00591 // allocation without reorganizing the space via a GC (eg, small blocks due 00592 // to internal fragmentation, top of page areas in map space), and the bytes 00593 // 'available' is the number of unallocated bytes that are not waste. The 00594 // capacity is the sum of size, waste, and available. 00595 // 00596 // The stats are only set by functions that ensure they stay balanced. 
These 00597 // functions increase or decrease one of the non-capacity stats in 00598 // conjunction with capacity, or else they always balance increases and 00599 // decreases to the non-capacity stats. 00600 class AllocationStats BASE_EMBEDDED { 00601 public: 00602 AllocationStats() { Clear(); } 00603 00604 // Zero out all the allocation statistics (ie, no capacity). 00605 void Clear() { 00606 capacity_ = 0; 00607 available_ = 0; 00608 size_ = 0; 00609 waste_ = 0; 00610 } 00611 00612 // Reset the allocation statistics (ie, available = capacity with no 00613 // wasted or allocated bytes). 00614 void Reset() { 00615 available_ = capacity_; 00616 size_ = 0; 00617 waste_ = 0; 00618 } 00619 00620 // Accessors for the allocation statistics. 00621 int Capacity() { return capacity_; } 00622 int Available() { return available_; } 00623 int Size() { return size_; } 00624 int Waste() { return waste_; } 00625 00626 // Grow the space by adding available bytes. 00627 void ExpandSpace(int size_in_bytes) { 00628 capacity_ += size_in_bytes; 00629 available_ += size_in_bytes; 00630 } 00631 00632 // Shrink the space by removing available bytes. 00633 void ShrinkSpace(int size_in_bytes) { 00634 capacity_ -= size_in_bytes; 00635 available_ -= size_in_bytes; 00636 } 00637 00638 // Allocate from available bytes (available -> size). 00639 void AllocateBytes(int size_in_bytes) { 00640 available_ -= size_in_bytes; 00641 size_ += size_in_bytes; 00642 } 00643 00644 // Free allocated bytes, making them available (size -> available). 00645 void DeallocateBytes(int size_in_bytes) { 00646 size_ -= size_in_bytes; 00647 available_ += size_in_bytes; 00648 } 00649 00650 // Waste free bytes (available -> waste). 00651 void WasteBytes(int size_in_bytes) { 00652 available_ -= size_in_bytes; 00653 waste_ += size_in_bytes; 00654 } 00655 00656 // Consider the wasted bytes to be allocated, as they contain filler 00657 // objects (waste -> size). 
  // Move previously-wasted bytes back into the allocated count: the bytes are
  // handed out as live allocation, so waste_ shrinks and size_ grows by the
  // same amount.
  void FillWastedBytes(int size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  int capacity_;   // Current capacity: Size() + Available() + Waste().
  int available_;  // Bytes available for allocation without growing.
  int size_;       // Allocated bytes.
  int waste_;      // Bytes lost to fragmentation, unrecoverable until GC.
};


// -----------------------------------------------------------------------------
// Abstract superclass of the page-structured old-generation spaces (the old
// object space and the map space).  Allocation is linear ("bump pointer") in
// the top page; space-specific behavior is supplied by the pure virtuals.

class PagedSpace : public Space {
  friend class PageIterator;
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible.  If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not.  The implementation
  // iterates over objects in the page containing the address, so the cost is
  // linear in the number of objects in the page.  It may be slow.
  Object* FindObject(Address addr);

  // Checks whether page is currently in use by this space.
  bool IsUsed(Page* page);

  // Clears remembered sets of pages in this space.
  void ClearRSet();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact) = 0;

  // The allocation limit for the given page.  Space-dependent (see the
  // OldSpace and MapSpace overrides).
  virtual Address PageAllocationTop(Page* page) = 0;

  // Current capacity without growing (Size() + Available() + Waste()).
  int Capacity() { return accounting_stats_.Capacity(); }

  // Available bytes without growing.
  int Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual int Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation, not recoverable until the next GC of
  // this space.
  int Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  inline Object* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  inline Object* MCAllocateRaw(int size_in_bytes);


  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
  }

  // Computes the offset of a given address in this space from the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'.  Returns false on
  // failure.
  bool EnsureCapacity(int capacity);

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

 protected:
  // Maximum capacity of this space.
  int max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Sets allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  // Expands the space by allocating a fixed number of pages.  Returns false
  // if it cannot allocate the requested number of pages from the OS.  Newly
  // allocated pages are appended to last_page.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'.  Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);

  // During normal allocation or deserialization, roll to the next page in
  // the space (there is assumed to be one) and allocate there.  This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw.  This function is space-dependent.
  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  // Dump the remembered sets in this space to stdout, labeled space_name.
  void DoPrintRSet(const char* space_name);
#endif

 private:
  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

#ifdef DEBUG
  // Returns the number of total pages in this space.
  int CountTotalPages();
#endif
};


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// HistogramInfo class for recording a single "bar" of a histogram.  This
// class is used for collecting statistics to print to stdout (when compiled
// with DEBUG) or to the log file (when compiled with
// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo BASE_EMBEDDED {
 public:
  // NOTE(review): name_ is not initialized here; it is presumably set via
  // set_name() before name() is read -- confirm at call sites.
  HistogramInfo() : number_(0), bytes_(0) {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

  int number() { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  // Clear the number of objects and size fields, but not the name.
  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  const char* name_;
  int number_;
  int bytes_;
};
#endif


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory.  The mark-compact collector
// uses the memory in the from space as a marking stack when tracing live
// objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
    start_ = NULL;
    age_mark_ = NULL;
  }

  // Sets up the semispace using the given chunk.
  bool Setup(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() { return start_ != NULL; }

  // Double the size of the semispace by committing extra virtual memory.
  // Assumes that the caller has checked that the semispace has not reached
  // its maximum capacity (and thus there is space available in the reserved
  // address range to grow).
  bool Double();

  // Returns the start address of the space.
  Address low() { return start_; }
  // Returns one past the end address of the space.
  Address high() { return low() + capacity_; }

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark) { age_mark_ = mark; }

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  // NOTE: the reinterpret_cast to uint32_t assumes 32-bit pointers.
  bool Contains(Address a) {
    return (reinterpret_cast<uint32_t>(a) & address_mask_)
        == reinterpret_cast<uint32_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uint32_t>(o) & object_mask_) == object_expected_;
  }

  // The offset of an address from the beginning of the space.
  int SpaceOffsetForAddress(Address addr) { return addr - low(); }

  // If we don't have this here then SemiSpace will be abstract.  However
  // it should never be called.
  virtual int Size() {
    UNREACHABLE();
    return 0;
  }

#ifdef DEBUG
  virtual void Print();
  virtual void Verify();
#endif

 private:
  // The current and maximum capacity of the space.
  int capacity_;
  int maximum_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uint32_t address_mask_;
  uint32_t object_mask_;
  uint32_t object_expected_;

 public:
  TRACK_MEMORY("SemiSpace")
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space.  It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace.  New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space.  If no start
  // address is given, the iterator starts from the bottom of the space.  If
  // no size function is given, the iterator calls Object::Size().
  explicit SemiSpaceIterator(NewSpace* space);
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  SemiSpaceIterator(NewSpace* space, Address start);

  bool has_next() { return current_ < limit_; }

  HeapObject* next() {
    ASSERT(has_next());

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
    ASSERT_OBJECT_SIZE(size);

    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  void Initialize(NewSpace* space, Address start, Address end,
                  HeapObjectCallback size_func);

  // The semispace.
  SemiSpace* space_;
  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces.  It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flip the pair of spaces.
  void Flip();

  // Doubles the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.  Returns a flag indicating success or failure.
  bool Double();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uint32_t>(a) & address_mask_)
        == reinterpret_cast<uint32_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uint32_t>(o) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual int Size() { return top() - bottom(); }
  // Return the current capacity of a semispace.
  int Capacity() { return capacity_; }
  // Return the available bytes without growing in the active semispace.
  int Available() { return Capacity() - Size(); }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() { return maximum_capacity_; }

  // Return the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask.  Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uint32_t mask() { return address_mask_; }

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  // Allocate the requested number of bytes in the active semispace if
  // possible, return a failure object if not.
  Object* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  Object* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

 private:
  // The current and maximum capacities of a semispace.
  int capacity_;
  int maximum_capacity_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uint32_t address_mask_;
  uint32_t object_mask_;
  uint32_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  inline Object* AllocateRawInternal(int size_in_bytes,
                                     AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap.  They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object).  They have a size and a next pointer.  The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode : public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(int size_in_bytes);

  // Accessors for the next field.
  inline Address next();
  inline void set_next(Address next);

 private:
  static const int kNextOffset = Array::kHeaderSize;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};


// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  explicit OldSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list.  The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list.  The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block.  Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed.  The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list.  The block
  // is uninitialized.  A failure is returned if no block is available.  The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
  Object* Allocate(int size_in_bytes, int* wasted_bytes);

 private:
  // The size range of blocks, in bytes.  (Smaller allocations are allowed,
  // but will always result in waste.)
  static const int kMinBlockSize = Array::kHeaderSize + kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in an increasingly ordered list.  Entries
  // corresponding to sizes < kMinBlockSize always have an empty free list
  // (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];

  // Sentinel elements for the size list.  Real elements are strictly between
  // kHead and kEnd, i.e. in ]kHead..kEnd[.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd.  Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    free_[prev].next_size_ = free_[cur].next_size_;
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of
  // 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};


// The free list for the map space.
01314 class MapSpaceFreeList BASE_EMBEDDED { 01315 public: 01316 explicit MapSpaceFreeList(AllocationSpace owner); 01317 01318 // Clear the free list. 01319 void Reset(); 01320 01321 // Return the number of bytes available on the free list. 01322 int available() { return available_; } 01323 01324 // Place a node on the free list. The block starting at 'start' (assumed to 01325 // have size Map::kSize) is placed on the free list. Bookkeeping 01326 // information will be written to the block, ie, its contents will be 01327 // destroyed. The start address should be word aligned. 01328 void Free(Address start); 01329 01330 // Allocate a map-sized block from the free list. The block is unitialized. 01331 // A failure is returned if no block is available. 01332 Object* Allocate(); 01333 01334 private: 01335 // Available bytes on the free list. 01336 int available_; 01337 01338 // The head of the free list. 01339 Address head_; 01340 01341 // The identity of the owning space, for building allocation Failure 01342 // objects. 01343 AllocationSpace owner_; 01344 01345 DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList); 01346 }; 01347 01348 01349 // ----------------------------------------------------------------------------- 01350 // Old object space (excluding map objects) 01351 01352 class OldSpace : public PagedSpace { 01353 public: 01354 // Creates an old space object with a given maximum capacity. 01355 // The constructor does not allocate pages from OS. 01356 explicit OldSpace(int max_capacity, 01357 AllocationSpace id, 01358 Executability executable) 01359 : PagedSpace(max_capacity, id, executable), free_list_(id) { 01360 } 01361 01362 // The bytes available on the free list (ie, not above the linear allocation 01363 // pointer). 01364 int AvailableFree() { return free_list_.available(); } 01365 01366 // The top of allocation in a page in this space. Undefined if page is unused. 
01367 virtual Address PageAllocationTop(Page* page) { 01368 return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd(); 01369 } 01370 01371 // Give a block of memory to the space's free list. It might be added to 01372 // the free list or accounted as waste. 01373 void Free(Address start, int size_in_bytes) { 01374 int wasted_bytes = free_list_.Free(start, size_in_bytes); 01375 accounting_stats_.DeallocateBytes(size_in_bytes); 01376 accounting_stats_.WasteBytes(wasted_bytes); 01377 } 01378 01379 // Prepare for full garbage collection. Resets the relocation pointer and 01380 // clears the free list. 01381 virtual void PrepareForMarkCompact(bool will_compact); 01382 01383 // Adjust the top of relocation pointer to point to the end of the object 01384 // given by 'address' and 'size_in_bytes'. Move it to the next page if 01385 // necessary, ensure that it points to the address, then increment it by the 01386 // size. 01387 void MCAdjustRelocationEnd(Address address, int size_in_bytes); 01388 01389 // Updates the allocation pointer to the relocation top after a mark-compact 01390 // collection. 01391 virtual void MCCommitRelocationInfo(); 01392 01393 #ifdef DEBUG 01394 // Verify integrity of this space. 01395 virtual void Verify(); 01396 01397 // Reports statistics for the space 01398 void ReportStatistics(); 01399 // Dump the remembered sets in the space to stdout. 01400 void PrintRSet(); 01401 #endif 01402 01403 protected: 01404 // Virtual function in the superclass. Slow path of AllocateRaw. 01405 HeapObject* SlowAllocateRaw(int size_in_bytes); 01406 01407 // Virtual function in the superclass. Allocate linearly at the start of 01408 // the page after current_page (there is assumed to be one). 01409 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); 01410 01411 private: 01412 // The space's free list. 
01413 OldSpaceFreeList free_list_; 01414 01415 // During relocation, we keep a pointer to the most recently relocated 01416 // object in order to know when to move to the next page. 01417 Address mc_end_of_relocation_; 01418 01419 public: 01420 TRACK_MEMORY("OldSpace") 01421 }; 01422 01423 01424 // ----------------------------------------------------------------------------- 01425 // Old space for all map objects 01426 01427 class MapSpace : public PagedSpace { 01428 public: 01429 // Creates a map space object with a maximum capacity. 01430 explicit MapSpace(int max_capacity, AllocationSpace id) 01431 : PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { } 01432 01433 // The top of allocation in a page in this space. Undefined if page is unused. 01434 virtual Address PageAllocationTop(Page* page) { 01435 return page == TopPageOf(allocation_info_) ? top() 01436 : page->ObjectAreaEnd() - kPageExtra; 01437 } 01438 01439 // Give a map-sized block of memory to the space's free list. 01440 void Free(Address start) { 01441 free_list_.Free(start); 01442 accounting_stats_.DeallocateBytes(Map::kSize); 01443 } 01444 01445 // Given an index, returns the page address. 01446 Address PageAddress(int page_index) { return page_addresses_[page_index]; } 01447 01448 // Prepares for a mark-compact GC. 01449 virtual void PrepareForMarkCompact(bool will_compact); 01450 01451 // Updates the allocation pointer to the relocation top after a mark-compact 01452 // collection. 01453 virtual void MCCommitRelocationInfo(); 01454 01455 #ifdef DEBUG 01456 // Verify integrity of this space. 01457 virtual void Verify(); 01458 01459 // Reports statistic info of the space 01460 void ReportStatistics(); 01461 // Dump the remembered sets in the space to stdout. 01462 void PrintRSet(); 01463 #endif 01464 01465 // Constants. 
01466 static const int kMapPageIndexBits = 10; 01467 static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1; 01468 01469 static const int kPageExtra = Page::kObjectAreaSize % Map::kSize; 01470 01471 protected: 01472 // Virtual function in the superclass. Slow path of AllocateRaw. 01473 HeapObject* SlowAllocateRaw(int size_in_bytes); 01474 01475 // Virtual function in the superclass. Allocate linearly at the start of 01476 // the page after current_page (there is assumed to be one). 01477 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); 01478 01479 private: 01480 // The space's free list. 01481 MapSpaceFreeList free_list_; 01482 01483 // An array of page start address in a map space. 01484 Address page_addresses_[kMaxMapPageIndex]; 01485 01486 public: 01487 TRACK_MEMORY("MapSpace") 01488 }; 01489 01490 01491 // ----------------------------------------------------------------------------- 01492 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by 01493 // the large object space. A large object is allocated from OS heap with 01494 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). 01495 // A large object always starts at Page::kObjectStartOffset to a page. 01496 // Large objects do not move during garbage collections. 01497 01498 // A LargeObjectChunk holds exactly one large object page with exactly one 01499 // large object. 01500 class LargeObjectChunk { 01501 public: 01502 // Allocates a new LargeObjectChunk that contains a large object page 01503 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large 01504 // object and possibly extra remembered set words) bytes after the object 01505 // area start of that page. The allocated chunk size is set in the output 01506 // parameter chunk_size. 01507 static LargeObjectChunk* New(int size_in_bytes, 01508 size_t* chunk_size, 01509 Executability executable); 01510 01511 // Interpret a raw address as a large object chunk. 
01512 static LargeObjectChunk* FromAddress(Address address) { 01513 return reinterpret_cast<LargeObjectChunk*>(address); 01514 } 01515 01516 // Returns the address of this chunk. 01517 Address address() { return reinterpret_cast<Address>(this); } 01518 01519 // Accessors for the fields of the chunk. 01520 LargeObjectChunk* next() { return next_; } 01521 void set_next(LargeObjectChunk* chunk) { next_ = chunk; } 01522 01523 size_t size() { return size_; } 01524 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; } 01525 01526 // Returns the object in this chunk. 01527 inline HeapObject* GetObject(); 01528 01529 // Given a requested size (including any extra remembereed set words), 01530 // returns the physical size of a chunk to be allocated. 01531 static int ChunkSizeFor(int size_in_bytes); 01532 01533 // Given a chunk size, returns the object size it can accomodate (not 01534 // including any extra remembered set words). Used by 01535 // LargeObjectSpace::Available. Note that this can overestimate the size 01536 // of object that will fit in a chunk---if the object requires extra 01537 // remembered set words (eg, for large fixed arrays), the actual object 01538 // size for the chunk will be smaller than reported by this function. 01539 static int ObjectSizeFor(int chunk_size) { 01540 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; 01541 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; 01542 } 01543 01544 private: 01545 // A pointer to the next large object chunk in the space or NULL. 01546 LargeObjectChunk* next_; 01547 01548 // The size of this chunk. 01549 size_t size_; 01550 01551 public: 01552 TRACK_MEMORY("LargeObjectChunk") 01553 }; 01554 01555 01556 class LargeObjectSpace : public Space { 01557 friend class LargeObjectIterator; 01558 public: 01559 explicit LargeObjectSpace(AllocationSpace id); 01560 virtual ~LargeObjectSpace() {} 01561 01562 // Initializes internal data structures. 
01563 bool Setup(); 01564 01565 // Releases internal resources, frees objects in this space. 01566 void TearDown(); 01567 01568 // Allocates a (non-FixedArray, non-Code) large object. 01569 Object* AllocateRaw(int size_in_bytes); 01570 // Allocates a large Code object. 01571 Object* AllocateRawCode(int size_in_bytes); 01572 // Allocates a large FixedArray. 01573 Object* AllocateRawFixedArray(int size_in_bytes); 01574 01575 // Available bytes for objects in this space, not including any extra 01576 // remembered set words. 01577 int Available() { 01578 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available()); 01579 } 01580 01581 virtual int Size() { 01582 return size_; 01583 } 01584 01585 int PageCount() { 01586 return page_count_; 01587 } 01588 01589 // Finds an object for a given address, returns Failure::Exception() 01590 // if it is not found. The function iterates through all objects in this 01591 // space, may be slow. 01592 Object* FindObject(Address a); 01593 01594 // Clears remembered sets. 01595 void ClearRSet(); 01596 01597 // Iterates objects whose remembered set bits are set. 01598 void IterateRSet(ObjectSlotCallback func); 01599 01600 // Frees unmarked objects. 01601 void FreeUnmarkedObjects(); 01602 01603 // Checks whether a heap object is in this space; O(1). 01604 bool Contains(HeapObject* obj); 01605 01606 // Checks whether the space is empty. 01607 bool IsEmpty() { return first_chunk_ == NULL; } 01608 01609 #ifdef DEBUG 01610 virtual void Verify(); 01611 virtual void Print(); 01612 void ReportStatistics(); 01613 void CollectCodeStatistics(); 01614 // Dump the remembered sets in the space to stdout. 01615 void PrintRSet(); 01616 #endif 01617 // Checks whether an address is in the object area in this space. It 01618 // iterates all objects in the space. May be slow. 01619 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } 01620 01621 private: 01622 // The head of the linked list of large object chunks. 
01623 LargeObjectChunk* first_chunk_; 01624 int size_; // allocated bytes 01625 int page_count_; // number of chunks 01626 01627 01628 // Shared implementation of AllocateRaw, AllocateRawCode and 01629 // AllocateRawFixedArray. 01630 Object* AllocateRawInternal(int requested_size, 01631 int object_size, 01632 Executability executable); 01633 01634 // Returns the number of extra bytes (rounded up to the nearest full word) 01635 // required for extra_object_bytes of extra pointers (in bytes). 01636 static inline int ExtraRSetBytesFor(int extra_object_bytes); 01637 01638 public: 01639 TRACK_MEMORY("LargeObjectSpace") 01640 }; 01641 01642 01643 class LargeObjectIterator: public ObjectIterator { 01644 public: 01645 explicit LargeObjectIterator(LargeObjectSpace* space); 01646 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); 01647 01648 bool has_next() { return current_ != NULL; } 01649 HeapObject* next(); 01650 01651 // implementation of ObjectIterator. 01652 virtual bool has_next_object() { return has_next(); } 01653 virtual HeapObject* next_object() { return next(); } 01654 01655 private: 01656 LargeObjectChunk* current_; 01657 HeapObjectCallback size_func_; 01658 }; 01659 01660 01661 } } // namespace v8::internal 01662 01663 #endif // V8_SPACES_H_