#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 { namespace internal {

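// Checks that an AllocationInfo (top/limit pair) is consistent with the
// semispace it allocates from: top must lie within the space and limit must
// be the space's high end.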
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  ASSERT((space).low() <= (info).top                  \
         && (info).top <= (space).high()              \
         && (info).limit == (space).high())


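// HeapObjectIterator: walks the live objects of a paged space, page by page,
// from the space's bottom (or a given start address) up to the allocation top.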
HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
                                       HeapObjectCallback size_func) {
  Initialize(start, space->top(), size_func);
}


void HeapObjectIterator::Initialize(Address cur, Address end,
                                    HeapObjectCallback size_f) {
  cur_addr_ = cur;
  end_addr_ = end;
  end_page_ = Page::FromAllocationTop(end);
  size_func_ = size_f;
  Page* p = Page::FromAllocationTop(cur_addr_);
  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

#ifdef DEBUG
  Verify();
#endif
}


bool HeapObjectIterator::HasNextInNextPage() {
  if (cur_addr_ == end_addr_) return false;

  Page* cur_page = Page::FromAllocationTop(cur_addr_);
  cur_page = cur_page->next_page();
  ASSERT(cur_page->is_valid());

  cur_addr_ = cur_page->ObjectAreaStart();
  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

  ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
  Verify();
#endif
  return true;
}


#ifdef DEBUG
void HeapObjectIterator::Verify() {
  Page* p = Page::FromAllocationTop(cur_addr_);
  ASSERT(p == Page::FromAllocationTop(cur_limit_));
  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
}
#endif


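// PageIterator: walks the pages of a paged space.  The mode selects how far
// to go: PAGES_IN_USE stops after the page holding the allocation top,
// PAGES_USED_BY_MC stops after the mark-compact relocation top, and
// ALL_PAGES walks every page in the space.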
PageIterator::PageIterator(PagedSpace* space, Mode mode) {
  cur_page_ = space->first_page_;
  switch (mode) {
    case PAGES_IN_USE:
      stop_page_ = space->AllocationTopPage()->next_page();
      break;
    case PAGES_USED_BY_MC:
      stop_page_ = space->MCRelocationTopPage()->next_page();
      break;
    case ALL_PAGES:
      stop_page_ = Page::FromAddress(NULL);
      break;
    default:
      UNREACHABLE();
  }
}


#ifdef DEBUG
Page::RSetState Page::rset_state_ = Page::IN_USE;
#endif


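// MemoryAllocator: bookkeeping for all memory handed out for paged spaces.
// chunks_ is a fixed-capacity table of ChunkInfo entries, and free_chunk_ids_
// is used as a stack (via Push/Pop below) of indices into that table that are
// currently unused.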
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;


const int kEstimatedNumberOfChunks = 270;
List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
    kEstimatedNumberOfChunks);
List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
int MemoryAllocator::max_nof_chunks_ = 0;
int MemoryAllocator::top_ = 0;


void MemoryAllocator::Push(int free_chunk_id) {
  ASSERT(max_nof_chunks_ > 0);
  ASSERT(top_ < max_nof_chunks_);
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


bool MemoryAllocator::Setup(int capacity) {
  capacity_ = RoundUp(capacity, Page::kPageSize);

  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
  if (max_nof_chunks_ > kMaxNofChunks) return false;

  size_ = 0;
  ChunkInfo info;
  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
    chunks_.Add(info);
    free_chunk_ids_.Add(i);
  }
  top_ = max_nof_chunks_;
  return true;
}


void MemoryAllocator::TearDown() {
  for (int i = 0; i < max_nof_chunks_; i++) {
    if (chunks_[i].address() != NULL) DeleteChunk(i);
  }
  chunks_.Clear();
  free_chunk_ids_.Clear();

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);
  top_ = 0;
  capacity_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;

  void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
  int alloced = *allocated;
  size_ += alloced;
  Counters::memory_allocated.Increment(alloced);
  return mem;
}


void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
  OS::Free(mem, length);
  Counters::memory_allocated.Decrement(length);
  size_ -= length;
  ASSERT(size_ >= 0);
}


void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
  ASSERT(initial_chunk_ == NULL);

  initial_chunk_ = new VirtualMemory(requested);
  CHECK(initial_chunk_ != NULL);
  if (!initial_chunk_->IsReserved()) {
    delete initial_chunk_;
    initial_chunk_ = NULL;
    return NULL;
  }

  ASSERT(initial_chunk_->size() == requested);
  LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
  size_ += requested;
  return initial_chunk_->address();
}


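// Returns the number of whole pages that fit in the half-open interval
// [start, start + size): the end is rounded down and the start rounded up to
// a page boundary before shifting by the page size.  For illustration only
// (assuming 8 KB pages): a chunk starting at 0x2a001000 with size 64 KB spans
// the page-aligned range 0x2a002000..0x2a010000, i.e. 7 full pages.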
static int PagesInChunk(Address start, size_t size) {
  return (RoundDown(start + size, Page::kPageSize)
          - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
}


Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
                                     PagedSpace* owner) {
  if (requested_pages <= 0) return Page::FromAddress(NULL);
  size_t chunk_size = requested_pages * Page::kPageSize;

  if (size_ + static_cast<int>(chunk_size) > capacity_) {
    chunk_size = capacity_ - size_;
    requested_pages = chunk_size >> Page::kPageSizeBits;

    if (requested_pages <= 0) return Page::FromAddress(NULL);
  }
  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
  if (chunk == NULL) return Page::FromAddress(NULL);
  LOG(NewEvent("PagedChunk", chunk, chunk_size));

  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
  if (*allocated_pages == 0) {
    FreeRawMemory(chunk, chunk_size);
    LOG(DeleteEvent("PagedChunk", chunk));
    return Page::FromAddress(NULL);
  }

  int chunk_id = Pop();
  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
}


Page* MemoryAllocator::CommitPages(Address start, size_t size,
                                   PagedSpace* owner, int* num_pages) {
  ASSERT(start != NULL);
  *num_pages = PagesInChunk(start, size);
  ASSERT(*num_pages > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(initial_chunk_->address() <= start);
  ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
                             + initial_chunk_->size());
  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
    return Page::FromAddress(NULL);
  }
  Counters::memory_allocated.Increment(size);

  CHECK(!OutOfChunkIds());
  int chunk_id = Pop();
  chunks_[chunk_id].init(start, size, owner);
  return InitializePagesInChunk(chunk_id, *num_pages, owner);
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  ASSERT(start != NULL);
  ASSERT(size > 0);
  ASSERT(initial_chunk_ != NULL);
  ASSERT(initial_chunk_->address() <= start);
  ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
                             + initial_chunk_->size());

  if (!initial_chunk_->Commit(start, size, executable)) return false;
  Counters::memory_allocated.Increment(size);
  return true;
}


Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                              PagedSpace* owner) {
  ASSERT(IsValidChunk(chunk_id));
  ASSERT(pages_in_chunk > 0);

  Address chunk_start = chunks_[chunk_id].address();

  Address low = RoundUp(chunk_start, Page::kPageSize);

#ifdef DEBUG
  size_t chunk_size = chunks_[chunk_id].size();
  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(pages_in_chunk <=
         ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
#endif

  Address page_addr = low;
  for (int i = 0; i < pages_in_chunk; i++) {
    Page* p = Page::FromAddress(page_addr);
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
    p->is_normal_page = 1;
    page_addr += Page::kPageSize;
  }

  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
  last_page->opaque_header = OffsetFrom(0) | chunk_id;

  return Page::FromAddress(low);
}


Page* MemoryAllocator::FreePages(Page* p) {
  if (!p->is_valid()) return p;

  Page* first_page = FindFirstPageInSameChunk(p);
  Page* page_to_return = Page::FromAddress(NULL);

  if (p != first_page) {
    Page* last_page = FindLastPageInSameChunk(p);
    first_page = GetNextPage(last_page);

    SetNextPage(last_page, Page::FromAddress(NULL));
    page_to_return = p;
  }

  while (first_page->is_valid()) {
    int chunk_id = GetChunkId(first_page);
    ASSERT(IsValidChunk(chunk_id));

    first_page = GetNextPage(FindLastPageInSameChunk(first_page));

    DeleteChunk(chunk_id);
  }

  return page_to_return;
}


void MemoryAllocator::DeleteChunk(int chunk_id) {
  ASSERT(IsValidChunk(chunk_id));

  ChunkInfo& c = chunks_[chunk_id];

  bool in_initial_chunk = false;
  if (initial_chunk_ != NULL) {
    Address start = static_cast<Address>(initial_chunk_->address());
    Address end = start + initial_chunk_->size();
    in_initial_chunk = (start <= c.address()) && (c.address() < end);
  }

  if (in_initial_chunk) {
    initial_chunk_->Uncommit(c.address(), c.size());
    Counters::memory_allocated.Decrement(c.size());
  } else {
    LOG(DeleteEvent("PagedChunk", c.address()));
    FreeRawMemory(c.address(), c.size());
  }
  c.init(NULL, 0, NULL);
  Push(chunk_id);
}


Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
  return Page::FromAddress(low);
}


Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
  int chunk_id = GetChunkId(p);
  ASSERT(IsValidChunk(chunk_id));

  Address chunk_start = chunks_[chunk_id].address();
  size_t chunk_size = chunks_[chunk_id].size();

  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
  ASSERT(chunk_start <= p->address() && p->address() < high);

  return Page::FromAddress(high - Page::kPageSize);
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF(" capacity: %d, used: %d, available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


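// PagedSpace: the common implementation of the old, code and map spaces.
// A space is a chain of pages with a linear allocation area
// (allocation_info_) in the last used page, plus a second allocation area
// (mc_forwarding_info_) used while mark-compact relocates objects.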
PagedSpace::PagedSpace(int max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(id, executable) {
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                  * Page::kObjectAreaSize;
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;
}


bool PagedSpace::Setup(Address start, size_t size) {
  if (HasBeenSetup()) return false;

  int num_pages = 0;

  int pages_in_chunk = PagesInChunk(start, size);
  if (pages_in_chunk > 0) {
    first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
                                               Page::kPageSize * pages_in_chunk,
                                               this, &num_pages);
  } else {
    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
                              max_capacity_ / Page::kObjectAreaSize);
    first_page_ =
        MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
    if (!first_page_->is_valid()) return false;
  }

  ASSERT(first_page_->is_valid());
  ASSERT(num_pages > 0);
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    p->ClearRSet();
  }

  SetAllocationInfo(&allocation_info_, first_page_);

  return true;
}


bool PagedSpace::HasBeenSetup() {
  return (Capacity() > 0);
}


void PagedSpace::TearDown() {
  first_page_ = MemoryAllocator::FreePages(first_page_);
  ASSERT(!first_page_->is_valid());

  accounting_stats_.Clear();
}


void PagedSpace::ClearRSet() {
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    it.next()->ClearRSet();
  }
}


Object* PagedSpace::FindObject(Address addr) {
#ifdef DEBUG
  ASSERT(!MarkCompactCollector::in_use());
#endif

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  ASSERT(IsUsed(p));
  Address cur = p->ObjectAreaStart();
  Address end = p->AllocationTop();
  while (cur < end) {
    HeapObject* obj = HeapObject::FromAddress(cur);
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
    cur = next;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::IsUsed(Page* page) {
  PageIterator it(this, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    if (page == it.next()) return true;
  }
  return false;
}


void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
  alloc_info->top = p->ObjectAreaStart();
  alloc_info->limit = p->ObjectAreaEnd();
  ASSERT(alloc_info->VerifyPagedAllocation());
}


void PagedSpace::MCResetRelocationInfo() {
  int i = 0;
  PageIterator it(this, PageIterator::ALL_PAGES);
  while (it.has_next()) {
    Page* p = it.next();
    p->mc_page_index = i++;
  }

  SetAllocationInfo(&mc_forwarding_info_, first_page_);

  accounting_stats_.Reset();
}


int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
#ifdef DEBUG
  if (Page::IsAlignedToPageSize(addr)) {
    ASSERT(Contains(addr - kPointerSize));
  } else {
    ASSERT(Contains(addr));
  }
#endif

  Page* p = Page::IsAlignedToPageSize(addr)
            ? Page::FromAllocationTop(addr)
            : Page::FromAddress(addr);
  int index = p->mc_page_index;
  return (index * Page::kPageSize) + p->Offset(addr);
}


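// Slow case of reserving space during mark-compact relocation: the current
// relocation page is full, so record its final relocation top and advance the
// forwarding allocation area to the next page (expanding the space if there
// is none).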
HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
  Page* current_page = TopPageOf(mc_forwarding_info_);
  if (!current_page->next_page()->is_valid()) {
    if (!Expand(current_page)) {
      return NULL;
    }
  }

  ASSERT(current_page->next_page()->is_valid());

  current_page->mc_relocation_top = mc_forwarding_info_.top;
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}


bool PagedSpace::Expand(Page* last_page) {
  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
  ASSERT(Capacity() % Page::kObjectAreaSize == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
  if (available_pages <= 0) return false;

  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
  Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
  if (!p->is_valid()) return false;

  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
  ASSERT(Capacity() <= max_capacity_);

  MemoryAllocator::SetNextPage(last_page, p);

  while (p->is_valid()) {
    p->ClearRSet();
    p = p->next_page();
  }

  return true;
}


#ifdef DEBUG
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
    count++;
  }
  return count;
}
#endif


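// Shrink releases unused pages past the allocation top.  Roughly every other
// free page is kept, so memory is returned to the allocator gradually rather
// than all at once, and the accounting stats are reduced by the pages
// actually freed.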
void PagedSpace::Shrink() {
  Page* top_page = AllocationTopPage();
  ASSERT(top_page->is_valid());

  int free_pages = 0;
  int pages_to_keep = 0;
  Page* last_page_to_keep = top_page;
  Page* current_page = top_page->next_page();

  while (current_page->is_valid()) {
    if ((free_pages & 0x1) == 1) {
      pages_to_keep++;
      last_page_to_keep = last_page_to_keep->next_page();
    }
    free_pages++;
    current_page = current_page->next_page();
  }

  Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
  MemoryAllocator::SetNextPage(last_page_to_keep, p);

  while (p->is_valid()) {
    pages_to_keep++;
    p = p->next_page();
  }

  ASSERT(pages_to_keep <= free_pages);
  int bytes_freed = (free_pages - pages_to_keep) * Page::kObjectAreaSize;
  accounting_stats_.ShrinkSpace(bytes_freed);

  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
}


bool PagedSpace::EnsureCapacity(int capacity) {
  if (Capacity() >= capacity) return true;

  Page* last_page = AllocationTopPage();
  Page* next_page = last_page->next_page();
  while (next_page->is_valid()) {
    last_page = MemoryAllocator::FindLastPageInSameChunk(next_page);
    next_page = last_page->next_page();
  }

  do {
    if (!Expand(last_page)) return false;
    ASSERT(last_page->next_page()->is_valid());
    last_page =
        MemoryAllocator::FindLastPageInSameChunk(last_page->next_page());
  } while (Capacity() < capacity);

  return true;
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif


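// NewSpace implementation.  The new space is a pair of equally sized
// semispaces (to_space_ and from_space_); objects are bump-allocated in
// to_space_ and the two are swapped by Flip() when the new space is
// collected.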
bool NewSpace::Setup(Address start, int size) {
  int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
  int maximum_semispace_capacity = Heap::SemiSpaceSize();

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));
  maximum_capacity_ = maximum_semispace_capacity;
  capacity_ = initial_semispace_capacity;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
#endif

  ASSERT(size == 2 * maximum_capacity_);
  ASSERT(IsAddressAligned(start, size, 0));

  if (!to_space_.Setup(start, capacity_, maximum_capacity_)) {
    return false;
  }
  if (!from_space_.Setup(start + maximum_capacity_,
                         capacity_,
                         maximum_capacity_)) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(size - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;

  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::TearDown() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }
#endif

  start_ = NULL;
  capacity_ = 0;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;
  mc_forwarding_info_.top = NULL;
  mc_forwarding_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();
}


void NewSpace::Flip() {
  SemiSpace tmp = from_space_;
  from_space_ = to_space_;
  to_space_ = tmp;
}


bool NewSpace::Double() {
  ASSERT(capacity_ <= maximum_capacity_ / 2);

  if (!to_space_.Double() || !from_space_.Double()) return false;
  capacity_ *= 2;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
  return true;
}


void NewSpace::ResetAllocationInfo() {
  allocation_info_.top = to_space_.low();
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::MCResetRelocationInfo() {
  mc_forwarding_info_.top = from_space_.low();
  mc_forwarding_info_.limit = from_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
}


void NewSpace::MCCommitRelocationInfo() {
  allocation_info_.top = mc_forwarding_info_.top;
  allocation_info_.limit = to_space_.high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


#ifdef DEBUG
void NewSpace::Verify() {
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  Address current = to_space_.low();
  while (current < top()) {
    HeapObject* object = HeapObject::FromAddress(current);

    Map* map = object->map();
    ASSERT(map->IsMap());
    ASSERT(Heap::map_space()->Contains(map));

    ASSERT(!object->IsMap());
    ASSERT(!object->IsCode());

    object->Verify();

    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    current += size;
  }

  ASSERT(current == top());
}
#endif


bool SemiSpace::Setup(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  capacity_ = initial_capacity;
  maximum_capacity_ = maximum_capacity;

  if (!MemoryAllocator::CommitBlock(start, capacity_, executable())) {
    return false;
  }

  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTag;
  object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;

  age_mark_ = start_;
  return true;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Double() {
  if (!MemoryAllocator::CommitBlock(high(), capacity_, executable())) {
    return false;
  }
  capacity_ *= 2;
  return true;
}


#ifdef DEBUG
void SemiSpace::Print() { }


void SemiSpace::Verify() { }
#endif


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space, space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space, space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(space, start, space->top(), NULL);
}


void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  ASSERT(space->ToSpaceContains(start));
  ASSERT(space->ToSpaceLow() <= end
         && end <= space->ToSpaceHigh());
  space_ = &space->to_space_;
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
static HistogramInfo heap_histograms[LAST_TYPE+1];
static JSObject::SpillInformation js_spill_information;


static void ClearHistograms() {
#define DEF_TYPE_NAME(name) heap_histograms[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) heap_histograms[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  js_spill_information.Clear();
}


static int code_kind_statistics[Code::NUMBER_OF_KINDS];


static void ClearCodeKindStatistics() {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  const char* table[Code::NUMBER_OF_KINDS];

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
    }
  }

#undef CASE

  PrintF("\n Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i], code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(heap_histograms[type].name() != NULL);
  heap_histograms[type].increment_number(1);
  heap_histograms[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(&js_spill_information);
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  PrintF("\n Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (heap_histograms[i].number() > 0) {
      PrintF(" %-33s%10d (%10d bytes)\n",
             heap_histograms[i].name(),
             heap_histograms[i].number(),
             heap_histograms[i].bytes());
    }
  }
  PrintF("\n");

  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name) \
    string_number += heap_histograms[type].number(); \
    string_bytes += heap_histograms[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF(" %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    js_spill_information.Print();
  }
}
#endif  // DEBUG


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}


void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  while (it.has_next()) RecordAllocation(it.next());
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(HistogramInfo* info, const char* description) {
  LOG(HeapSampleBeginEvent("NewSpace", description));

  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name) \
    string_number += info[type].number(); \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(HeapSampleEndEvent("NewSpace", description));
}
#endif  // ENABLE_LOGGING_AND_PROFILING


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF(" capacity: %d, available: %d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-33s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) {
    DoReportStatistics(allocated_histogram_, "allocated");
    DoReportStatistics(promoted_histogram_, "promoted");
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


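// Free lists for the old object spaces.  A free block is made to look like a
// real heap object so that heap iteration still works: blocks larger than an
// array header become ByteArrays, and one- and two-word blocks use the
// dedicated filler maps.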
void FreeListNode::set_size(int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  if (size_in_bytes > Array::kHeaderSize) {
    set_map(Heap::byte_array_map());
    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::one_word_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::two_word_filler_map());
  } else {
    UNREACHABLE();
  }
  ASSERT(Size() == size_in_bytes);
}


Address FreeListNode::next() {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  return Memory::Address_at(address() + kNextOffset);
}


void FreeListNode::set_next(Address next) {
  ASSERT(map() == Heap::byte_array_map());
  ASSERT(Size() >= kNextOffset + kPointerSize);
  Memory::Address_at(address() + kNextOffset) = next;
}


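// The old-space free list is segregated by size: free_[i] heads a list of
// blocks of exactly i words (i = size_in_bytes >> kPointerSizeLog2).  The
// non-empty size classes are additionally chained through next_size_, and
// finger_ caches a recent position in that chain so searches for a larger
// block do not always restart at the head.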
OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
  Reset();
}


void OldSpaceFreeList::Reset() {
  available_ = 0;
  for (int i = 0; i < kFreeListsLength; i++) {
    free_[i].head_node_ = NULL;
  }
  needs_rebuild_ = false;
  finger_ = kHead;
  free_[kHead].next_size_ = kEnd;
}


void OldSpaceFreeList::RebuildSizeList() {
  ASSERT(needs_rebuild_);
  int cur = kHead;
  for (int i = cur + 1; i < kFreeListsLength; i++) {
    if (free_[i].head_node_ != NULL) {
      free_[cur].next_size_ = i;
      cur = i;
    }
  }
  free_[cur].next_size_ = kEnd;
  needs_rebuild_ = false;
}


int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
#ifdef DEBUG
  for (int i = 0; i < size_in_bytes; i += kPointerSize) {
    Memory::Address_at(start + i) = kZapValue;
  }
#endif
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(size_in_bytes);

  if (size_in_bytes < kMinBlockSize) {
    return size_in_bytes;
  }

  int index = size_in_bytes >> kPointerSizeLog2;
  node->set_next(free_[index].head_node_);
  free_[index].head_node_ = node->address();
  available_ += size_in_bytes;
  needs_rebuild_ = true;
  return 0;
}


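// Allocation first tries an exact-fit size class.  Failing that, it walks the
// size chain (starting from finger_ when possible) to the next non-empty
// larger class, carves the request out of that block and returns the
// remainder to the free list; remainders too small to link are reported back
// as wasted bytes.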
01305 Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
01306 ASSERT(0 < size_in_bytes);
01307 ASSERT(size_in_bytes <= kMaxBlockSize);
01308 ASSERT(IsAligned(size_in_bytes, kPointerSize));
01309
01310 if (needs_rebuild_) RebuildSizeList();
01311 int index = size_in_bytes >> kPointerSizeLog2;
01312
01313 if (free_[index].head_node_ != NULL) {
01314 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
01315
01316 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
01317 available_ -= size_in_bytes;
01318 *wasted_bytes = 0;
01319 return node;
01320 }
01321
01322 int prev = finger_ < index ? finger_ : kHead;
01323 int cur = FindSize(index, &prev);
01324 ASSERT(index < cur);
01325 if (cur == kEnd) {
01326
01327 *wasted_bytes = 0;
01328 return Failure::RetryAfterGC(size_in_bytes, owner_);
01329 }
01330 int rem = cur - index;
01331 int rem_bytes = rem << kPointerSizeLog2;
01332 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
01333 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
01334 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
01335 size_in_bytes);
01336
01337
01338 if (prev < rem) {
01339
01340 finger_ = prev;
01341 free_[prev].next_size_ = rem;
01342
01343 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
01344 free_[rem].next_size_ = free_[cur].next_size_;
01345 } else {
01346 free_[rem].next_size_ = cur;
01347 }
01348
01349 rem_node->set_size(rem_bytes);
01350 rem_node->set_next(free_[rem].head_node_);
01351 free_[rem].head_node_ = rem_node->address();
01352 } else {
01353
01354 if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
01355 finger_ = prev;
01356 free_[prev].next_size_ = free_[cur].next_size_;
01357 }
01358 if (rem_bytes < kMinBlockSize) {
01359
01360 rem_node->set_size(rem_bytes);
01361 available_ -= size_in_bytes + rem_bytes;
01362 *wasted_bytes = rem_bytes;
01363 return cur_node;
01364 }
01365
01366 rem_node->set_size(rem_bytes);
01367 rem_node->set_next(free_[rem].head_node_);
01368 free_[rem].head_node_ = rem_node->address();
01369 if (rem_node->next() == NULL) InsertSize(rem);
01370 }
01371 available_ -= size_in_bytes;
01372 *wasted_bytes = 0;
01373 return cur_node;
01374 }
01375
01376
01377 #ifdef DEBUG
01378 bool OldSpaceFreeList::Contains(FreeListNode* node) {
01379 for (int i = 0; i < kFreeListsLength; i++) {
01380 Address cur_addr = free_[i].head_node_;
01381 while (cur_addr != NULL) {
01382 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
01383 if (cur_node == node) return true;
01384 cur_addr = cur_node->next();
01385 }
01386 }
01387 return false;
01388 }
01389 #endif
01390
01391
01392 MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
01393 owner_ = owner;
01394 Reset();
01395 }
01396
01397
01398 void MapSpaceFreeList::Reset() {
01399 available_ = 0;
01400 head_ = NULL;
01401 }
01402
01403
01404 void MapSpaceFreeList::Free(Address start) {
01405 #ifdef DEBUG
01406 for (int i = 0; i < Map::kSize; i += kPointerSize) {
01407 Memory::Address_at(start + i) = kZapValue;
01408 }
01409 #endif
01410 FreeListNode* node = FreeListNode::FromAddress(start);
01411 node->set_size(Map::kSize);
01412 node->set_next(head_);
01413 head_ = node->address();
01414 available_ += Map::kSize;
01415 }
01416
01417
01418 Object* MapSpaceFreeList::Allocate() {
01419 if (head_ == NULL) {
01420 return Failure::RetryAfterGC(Map::kSize, owner_);
01421 }
01422
01423 FreeListNode* node = FreeListNode::FromAddress(head_);
01424 head_ = node->next();
01425 available_ -= Map::kSize;
01426 return node;
01427 }
01428
01429
01430
01431
01432
01433 void OldSpace::PrepareForMarkCompact(bool will_compact) {
01434 if (will_compact) {
01435
01436
01437
01438 MCResetRelocationInfo();
01439 mc_end_of_relocation_ = bottom();
01440 ASSERT(Available() == Capacity());
01441 } else {
01442
01443
01444
01445
01446 accounting_stats_.AllocateBytes(free_list_.available());
01447 accounting_stats_.FillWastedBytes(Waste());
01448 }
01449
01450
01451 free_list_.Reset();
01452 }
01453
01454
01455 void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
01456 ASSERT(Contains(address));
01457 Address current_top = mc_end_of_relocation_;
01458 Page* current_page = Page::FromAllocationTop(current_top);
01459
01460
01461 ASSERT(current_top <= current_page->mc_relocation_top);
01462 if (current_top == current_page->mc_relocation_top) {
01463
01464 Page* next_page = current_page->next_page();
01465 CHECK(next_page->is_valid());
01466 mc_end_of_relocation_ = next_page->ObjectAreaStart();
01467 }
01468 ASSERT(mc_end_of_relocation_ == address);
01469 mc_end_of_relocation_ += size_in_bytes;
01470 }
01471
01472
01473 void OldSpace::MCCommitRelocationInfo() {
01474
01475 allocation_info_.top = mc_forwarding_info_.top;
01476 allocation_info_.limit = mc_forwarding_info_.limit;
01477 ASSERT(allocation_info_.VerifyPagedAllocation());
01478
01479
01480
01481 ASSERT(Waste() == 0);
01482 ASSERT(AvailableFree() == 0);
01483
01484
01485 int computed_size = 0;
01486 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
01487 while (it.has_next()) {
01488 Page* p = it.next();
01489
01490 computed_size += p->mc_relocation_top - p->ObjectAreaStart();
01491 if (it.has_next()) {
01492
01493
01494
01495 int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
01496 if (extra_size > 0) {
01497 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
01498
01499
01500 accounting_stats_.WasteBytes(wasted_bytes);
01501 }
01502 }
01503 }
01504
01505
01506
01507 ASSERT(computed_size == Size());
01508 }
01509
01510
01511
01512
01513
01514 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
01515
01516
01517
01518
01519 Page* current_page = TopPageOf(allocation_info_);
01520 if (current_page->next_page()->is_valid()) {
01521 return AllocateInNextPage(current_page, size_in_bytes);
01522 }
01523
01524
01525 int wasted_bytes;
01526 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
01527 accounting_stats_.WasteBytes(wasted_bytes);
01528 if (!result->IsFailure()) {
01529 accounting_stats_.AllocateBytes(size_in_bytes);
01530 return HeapObject::cast(result);
01531 }
01532
01533
01534
01535
01536 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
01537 return NULL;
01538 }
01539
01540
01541 ASSERT(!current_page->next_page()->is_valid());
01542 if (Expand(current_page)) {
01543 return AllocateInNextPage(current_page, size_in_bytes);
01544 }
01545
01546
01547 return NULL;
01548 }
01549
01550
01551
01552
01553
01554 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
01555 int size_in_bytes) {
01556 ASSERT(current_page->next_page()->is_valid());
01557
01558 int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
01559 if (free_size > 0) {
01560 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
01561 accounting_stats_.WasteBytes(wasted_bytes);
01562 }
01563 SetAllocationInfo(&allocation_info_, current_page->next_page());
01564 return AllocateLinearly(&allocation_info_, size_in_bytes);
01565 }
01566
01567
01568 #ifdef DEBUG
01569
01570
01571 void OldSpace::Verify() {
01572
01573
01574 ASSERT(allocation_info_.VerifyPagedAllocation());
01575 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
01576 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
01577
01578
01579 bool above_allocation_top = false;
01580 Page* current_page = first_page_;
01581 while (current_page->is_valid()) {
01582 if (above_allocation_top) {
01583
01584 } else {
01585
01586
01587 Address top = current_page->AllocationTop();
01588 if (current_page == top_page) {
01589 ASSERT(top == allocation_info_.top);
01590
01591 above_allocation_top = true;
01592 } else {
01593 ASSERT(top == current_page->ObjectAreaEnd());
01594 }
01595
01596
01597 Address current = current_page->ObjectAreaStart();
01598 while (current < top) {
01599 HeapObject* object = HeapObject::FromAddress(current);
01600
01601
01602
01603 Map* map = object->map();
01604 ASSERT(map->IsMap());
01605 ASSERT(Heap::map_space()->Contains(map));
01606
01607
01608 ASSERT(!object->IsMap());
01609
01610
01611 object->Verify();
01612
01613
01614
01615
01616 VerifyPointersAndRSetVisitor rset_visitor;
01617 VerifyPointersVisitor no_rset_visitor;
01618 int size = object->Size();
01619 if (object->IsCode()) {
01620 Code::cast(object)->ConvertICTargetsFromAddressToObject();
01621 object->IterateBody(map->instance_type(), size, &no_rset_visitor);
01622 Code::cast(object)->ConvertICTargetsFromObjectToAddress();
01623 } else {
01624 object->IterateBody(map->instance_type(), size, &rset_visitor);
01625 }
01626
01627 current += size;
01628 }
01629
01630
01631 ASSERT(current == top);
01632 }
01633
01634 current_page = current_page->next_page();
01635 }
01636 }
01637
01638
01639 struct CommentStatistic {
01640 const char* comment;
01641 int size;
01642 int count;
01643 void Clear() {
01644 comment = NULL;
01645 size = 0;
01646 count = 0;
01647 }
01648 };
01649
01650
01651
01652 const int kMaxComments = 64;
01653 static CommentStatistic comments_statistics[kMaxComments+1];
01654
01655
01656 void PagedSpace::ReportCodeStatistics() {
01657 ReportCodeKindStatistics();
01658 PrintF("Code comment statistics (\" [ comment-txt : size/ "
01659 "count (average)\"):\n");
01660 for (int i = 0; i <= kMaxComments; i++) {
01661 const CommentStatistic& cs = comments_statistics[i];
01662 if (cs.size > 0) {
01663 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
01664 cs.size/cs.count);
01665 }
01666 }
01667 PrintF("\n");
01668 }
01669
01670
01671 void PagedSpace::ResetCodeStatistics() {
01672 ClearCodeKindStatistics();
01673 for (int i = 0; i < kMaxComments; i++) comments_statistics[i].Clear();
01674 comments_statistics[kMaxComments].comment = "Unknown";
01675 comments_statistics[kMaxComments].size = 0;
01676 comments_statistics[kMaxComments].count = 0;
01677 }
01678
01679
01680
01681
01682 static void EnterComment(const char* comment, int delta) {
01683
01684 if (delta <= 0) return;
01685 CommentStatistic* cs = &comments_statistics[kMaxComments];
01686
01687
01688 for (int i = 0; i < kMaxComments; i++) {
01689 if (comments_statistics[i].comment == NULL) {
01690 cs = &comments_statistics[i];
01691 cs->comment = comment;
01692 break;
01693 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
01694 cs = &comments_statistics[i];
01695 break;
01696 }
01697 }
01698
01699 cs->size += delta;
01700 cs->count += 1;
01701 }
01702
01703
01704
01705
01706 static void CollectCommentStatistics(RelocIterator* it) {
01707 ASSERT(!it->done());
01708 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
01709 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
01710 if (tmp[0] != '[') {
01711
01712 return;
01713 }
01714
01715
01716 const char* const comment_txt =
01717 reinterpret_cast<const char*>(it->rinfo()->data());
01718 const byte* prev_pc = it->rinfo()->pc();
01719 int flat_delta = 0;
01720 it->next();
01721 while (true) {
01722
01723
01724 ASSERT(!it->done());
01725 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
01726 const char* const txt =
01727 reinterpret_cast<const char*>(it->rinfo()->data());
01728 flat_delta += it->rinfo()->pc() - prev_pc;
01729 if (txt[0] == ']') break;
01730
01731 CollectCommentStatistics(it);
01732
01733 prev_pc = it->rinfo()->pc();
01734 }
01735 it->next();
01736 }
01737 EnterComment(comment_txt, flat_delta);
01738 }
01739
01740
01741
01742
01743
01744 void PagedSpace::CollectCodeStatistics() {
01745 HeapObjectIterator obj_it(this);
01746 while (obj_it.has_next()) {
01747 HeapObject* obj = obj_it.next();
01748 if (obj->IsCode()) {
01749 Code* code = Code::cast(obj);
01750 code_kind_statistics[code->kind()] += code->Size();
01751 RelocIterator it(code);
01752 int delta = 0;
01753 const byte* prev_pc = code->instruction_start();
01754 while (!it.done()) {
01755 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
01756 delta += it.rinfo()->pc() - prev_pc;
01757 CollectCommentStatistics(&it);
01758 prev_pc = it.rinfo()->pc();
01759 }
01760 it.next();
01761 }
01762
01763 ASSERT(code->instruction_start() <= prev_pc &&
01764 prev_pc <= code->relocation_start());
01765 delta += code->relocation_start() - prev_pc;
01766 EnterComment("NoComment", delta);
01767 }
01768 }
01769 }
01770
01771
01772 void OldSpace::ReportStatistics() {
01773 int pct = Available() * 100 / Capacity();
01774 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
01775 Capacity(), Waste(), Available(), pct);
01776
01777
01778 int rset_marked_pointers = 0;
01779 int rset_marked_arrays = 0;
01780 int rset_marked_array_elements = 0;
01781 int cross_gen_pointers = 0;
01782 int cross_gen_array_elements = 0;
01783
01784 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
01785 while (page_it.has_next()) {
01786 Page* p = page_it.next();
01787
01788 for (Address rset_addr = p->RSetStart();
01789 rset_addr < p->RSetEnd();
01790 rset_addr += kIntSize) {
01791 int rset = Memory::int_at(rset_addr);
01792 if (rset != 0) {
01793
01794 int intoff = rset_addr - p->address();
01795 int bitoff = 0;
01796 for (; bitoff < kBitsPerInt; ++bitoff) {
01797 if ((rset & (1 << bitoff)) != 0) {
01798 int bitpos = intoff*kBitsPerByte + bitoff;
01799 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
01800 Object** obj = reinterpret_cast<Object**>(slot);
01801 if (*obj == Heap::fixed_array_map()) {
01802 rset_marked_arrays++;
01803 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
01804
01805 rset_marked_array_elements += fa->length();
01806
01807 Address elm_start = slot + FixedArray::kHeaderSize;
01808 Address elm_stop = elm_start + fa->length() * kPointerSize;
01809 for (Address elm_addr = elm_start;
01810 elm_addr < elm_stop; elm_addr += kPointerSize) {
01811
01812 Object** elm_p = reinterpret_cast<Object**>(elm_addr);
01813 if (Heap::InNewSpace(*elm_p))
01814 cross_gen_array_elements++;
01815 }
01816 } else {
01817 rset_marked_pointers++;
01818 if (Heap::InNewSpace(*obj))
01819 cross_gen_pointers++;
01820 }
01821 }
01822 }
01823 }
01824 }
01825 }
01826
01827 pct = rset_marked_pointers == 0 ?
01828 0 : cross_gen_pointers * 100 / rset_marked_pointers;
01829 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
01830 rset_marked_pointers, cross_gen_pointers, pct);
01831 PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
01832 PrintF(" elements %d, ", rset_marked_array_elements);
01833 pct = rset_marked_array_elements == 0 ? 0
01834 : cross_gen_array_elements * 100 / rset_marked_array_elements;
01835 PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
01836 PrintF(" total rset-marked bits %d\n",
01837 (rset_marked_pointers + rset_marked_arrays));
01838 pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
01839 : (cross_gen_pointers + cross_gen_array_elements) * 100 /
01840 (rset_marked_pointers + rset_marked_array_elements);
01841 PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
01842 (rset_marked_pointers + rset_marked_array_elements),
01843 (cross_gen_pointers + cross_gen_array_elements),
01844 pct);
01845
01846 ClearHistograms();
01847 HeapObjectIterator obj_it(this);
01848 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
01849 ReportHistogram(true);
01850 }
01851
01852
01853
01854
01855
01856
01857
01858 static void PrintRSetRange(Address start, Address end, Object** object_p,
01859 Address allocation_top) {
01860 Address rset_address = start;
01861
01862
01863
01864 if ((reinterpret_cast<uint32_t>(start) / kIntSize) % 2 == 1) {
01865 PrintF(" ");
01866 }
01867
01868
01869 while (rset_address < end) {
01870 uint32_t rset_word = Memory::uint32_at(rset_address);
01871 int bit_position = 0;
01872
01873
01874 while (bit_position < kBitsPerInt) {
01875 if (object_p == reinterpret_cast<Object**>(allocation_top)) {
01876
01877 PrintF("|");
01878 } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
01879
01880 PrintF("#");
01881 } else if ((rset_word & (1 << bit_position)) == 0) {
01882
01883 PrintF(".");
01884 } else if (Heap::InNewSpace(*object_p)) {
01885
01886 PrintF("X");
01887 } else {
01888
01889 PrintF("o");
01890 }
01891
01892
01893 if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
01894 PrintF(" ");
01895 }
01896
01897
01898 bit_position++;
01899 object_p++;
01900 }
01901
01902
01903 if ((reinterpret_cast<uint32_t>(rset_address) / kIntSize) % 2 == 1) {
01904 PrintF("\n");
01905 } else {
01906 PrintF(" ");
01907 }
01908
01909
01910 rset_address += kIntSize;
01911 }
01912 }
01913
01914
01915 void PagedSpace::DoPrintRSet(const char* space_name) {
01916 PageIterator it(this, PageIterator::PAGES_IN_USE);
01917 while (it.has_next()) {
01918 Page* p = it.next();
01919 PrintF("%s page 0x%x:\n", space_name, p);
01920 PrintRSetRange(p->RSetStart(), p->RSetEnd(),
01921 reinterpret_cast<Object**>(p->ObjectAreaStart()),
01922 p->AllocationTop());
01923 PrintF("\n");
01924 }
01925 }
01926
01927
01928 void OldSpace::PrintRSet() { DoPrintRSet("old"); }
01929 #endif
01930
01931
01932
01933
01934 void MapSpace::PrepareForMarkCompact(bool will_compact) {
01935 if (will_compact) {
01936
01937 MCResetRelocationInfo();
01938
01939
01940 int page_count = 0;
01941 PageIterator it(this, PageIterator::ALL_PAGES);
01942 while (it.has_next()) {
01943 ASSERT_MAP_PAGE_INDEX(page_count);
01944
01945 Page* p = it.next();
01946 ASSERT(p->mc_page_index == page_count);
01947
01948 page_addresses_[page_count++] = p->address();
01949 }
01950
01951
01952
01953
01954 ASSERT(Available() == Capacity());
01955 } else {
01956
01957
01958
01959
01960 accounting_stats_.AllocateBytes(free_list_.available());
01961 }
01962
01963
01964 free_list_.Reset();
01965 }
01966
01967
01968 void MapSpace::MCCommitRelocationInfo() {
01969
01970 allocation_info_.top = mc_forwarding_info_.top;
01971 allocation_info_.limit = mc_forwarding_info_.limit;
01972 ASSERT(allocation_info_.VerifyPagedAllocation());
01973
01974
01975 ASSERT(Waste() == 0);
01976
01977
01978 int computed_size = 0;
01979 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
01980 while (it.has_next()) {
01981 Page* page = it.next();
01982 Address page_top = page->AllocationTop();
01983 computed_size += page_top - page->ObjectAreaStart();
01984 if (it.has_next()) {
01985 accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
01986 }
01987 }
01988
01989
01990
01991 ASSERT(computed_size == Size());
01992 }
01993
01994
01995
01996
01997
01998 HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
01999
02000
02001
02002 Page* current_page = TopPageOf(allocation_info_);
02003 if (current_page->next_page()->is_valid()) {
02004 return AllocateInNextPage(current_page, size_in_bytes);
02005 }
02006
02007
02008
02009
02010 if (size_in_bytes == Map::kSize) {
02011 Object* result = free_list_.Allocate();
02012 if (!result->IsFailure()) {
02013 accounting_stats_.AllocateBytes(size_in_bytes);
02014 return HeapObject::cast(result);
02015 }
02016 }
02017
02018
02019
02020
02021 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
02022 return NULL;
02023 }
02024
02025
02026 ASSERT(!current_page->next_page()->is_valid());
02027 if (Expand(current_page)) {
02028 return AllocateInNextPage(current_page, size_in_bytes);
02029 }
02030
02031
02032 return NULL;
02033 }
02034
02035
02036
02037
02038
02039 HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
02040 int size_in_bytes) {
02041 ASSERT(current_page->next_page()->is_valid());
02042 ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
02043 accounting_stats_.WasteBytes(kPageExtra);
02044 SetAllocationInfo(&allocation_info_, current_page->next_page());
02045 return AllocateLinearly(&allocation_info_, size_in_bytes);
02046 }
02047
02048
02049 #ifdef DEBUG
02050
02051
02052 void MapSpace::Verify() {
02053
02054
02055 ASSERT(allocation_info_.VerifyPagedAllocation());
02056 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
02057 ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
02058
02059
02060 bool above_allocation_top = false;
02061 Page* current_page = first_page_;
02062 while (current_page->is_valid()) {
02063 if (above_allocation_top) {
02064
02065 } else {
02066
02067
02068
02069 Address top = current_page->AllocationTop();
02070 if (current_page == top_page) {
02071 ASSERT(top == allocation_info_.top);
02072
02073 above_allocation_top = true;
02074 } else {
02075 ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
02076 }
02077
02078
02079 Address current = current_page->ObjectAreaStart();
02080 while (current < top) {
02081 HeapObject* object = HeapObject::FromAddress(current);
02082
02083
02084
02085 Map* map = object->map();
02086 ASSERT(map->IsMap());
02087 ASSERT(Heap::map_space()->Contains(map));
02088
02089
02090 ASSERT(object->IsMap() || object->IsByteArray());
02091
02092
02093 object->Verify();
02094
02095
02096
02097 VerifyPointersAndRSetVisitor visitor;
02098 int size = object->Size();
02099 object->IterateBody(map->instance_type(), size, &visitor);
02100
02101 current += size;
02102 }
02103
02104
02105 ASSERT(current == top);
02106 }
02107
02108 current_page = current_page->next_page();
02109 }
02110 }
02111
02112
02113 void MapSpace::ReportStatistics() {
02114 int pct = Available() * 100 / Capacity();
02115 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
02116 Capacity(), Waste(), Available(), pct);
02117
02118
02119 int rset_marked_pointers = 0;
02120 int cross_gen_pointers = 0;
02121
02122 PageIterator page_it(this, PageIterator::PAGES_IN_USE);
02123 while (page_it.has_next()) {
02124 Page* p = page_it.next();
02125
02126 for (Address rset_addr = p->RSetStart();
02127 rset_addr < p->RSetEnd();
02128 rset_addr += kIntSize) {
02129 int rset = Memory::int_at(rset_addr);
02130 if (rset != 0) {
02131
02132 int intoff = rset_addr - p->address();
02133 int bitoff = 0;
02134 for (; bitoff < kBitsPerInt; ++bitoff) {
02135 if ((rset & (1 << bitoff)) != 0) {
02136 int bitpos = intoff*kBitsPerByte + bitoff;
02137 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
02138 Object** obj = reinterpret_cast<Object**>(slot);
02139 rset_marked_pointers++;
02140 if (Heap::InNewSpace(*obj))
02141 cross_gen_pointers++;
02142 }
02143 }
02144 }
02145 }
02146 }
02147
02148 pct = rset_marked_pointers == 0 ?
02149 0 : cross_gen_pointers * 100 / rset_marked_pointers;
02150 PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
02151 rset_marked_pointers, cross_gen_pointers, pct);
02152
02153 ClearHistograms();
02154 HeapObjectIterator obj_it(this);
02155 while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
02156 ReportHistogram(false);
02157 }
02158
02159
02160 void MapSpace::PrintRSet() { DoPrintRSet("map"); }
02161 #endif
02162
02163
02164
02165
02166
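// -----------------------------------------------------------------------------
// LargeObjectIterator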
02167 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
02168 current_ = space->first_chunk_;
02169 size_func_ = NULL;
02170 }
02171
02172
02173 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
02174 HeapObjectCallback size_func) {
02175 current_ = space->first_chunk_;
02176 size_func_ = size_func;
02177 }
02178
02179
02180 HeapObject* LargeObjectIterator::next() {
02181 ASSERT(has_next());
02182 HeapObject* object = current_->GetObject();
02183 current_ = current_->next();
02184 return object;
02185 }
02186
02187
02188
02189
02190
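// -----------------------------------------------------------------------------
// LargeObjectChunk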
02191 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
02192 size_t* chunk_size,
02193 Executability executable) {
02194 size_t requested = ChunkSizeFor(size_in_bytes);
02195 void* mem = MemoryAllocator::AllocateRawMemory(requested,
02196 chunk_size,
02197 executable);
02198 if (mem == NULL) return NULL;
02199 LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
02200 if (*chunk_size < requested) {
02201 MemoryAllocator::FreeRawMemory(mem, *chunk_size);
02202 LOG(DeleteEvent("LargeObjectChunk", mem));
02203 return NULL;
02204 }
02205 return reinterpret_cast<LargeObjectChunk*>(mem);
02206 }
02207
02208
02209 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
02210 int os_alignment = OS::AllocateAlignment();
02211 if (os_alignment < Page::kPageSize)
02212 size_in_bytes += (Page::kPageSize - os_alignment);
02213 return size_in_bytes + Page::kObjectStartOffset;
02214 }
02215
02216
02217
02218
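// -----------------------------------------------------------------------------
// LargeObjectSpace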
02219 LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
02220 : Space(id, NOT_EXECUTABLE),
02221 first_chunk_(NULL),
02222 size_(0),
02223 page_count_(0) {}
02224
02225
02226 bool LargeObjectSpace::Setup() {
02227 first_chunk_ = NULL;
02228 size_ = 0;
02229 page_count_ = 0;
02230 return true;
02231 }
02232
02233
02234 void LargeObjectSpace::TearDown() {
02235 while (first_chunk_ != NULL) {
02236 LargeObjectChunk* chunk = first_chunk_;
02237 first_chunk_ = first_chunk_->next();
02238 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
02239 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
02240 }
02241
02242 size_ = 0;
02243 page_count_ = 0;
02244 }
02245
02246
02247 Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
02248 int object_size,
02249 Executability executable) {
02250 ASSERT(0 < object_size && object_size <= requested_size);
02251
02252
02253
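// Unless allocation is being forced, fail once the old generation
// allocation limit has been reached so that a garbage collection can be
// triggered before the space grows further.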
02254 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
02255 return Failure::RetryAfterGC(requested_size, identity());
02256 }
02257
02258 size_t chunk_size;
02259 LargeObjectChunk* chunk =
02260 LargeObjectChunk::New(requested_size, &chunk_size, executable);
02261 if (chunk == NULL) {
02262 return Failure::RetryAfterGC(requested_size, identity());
02263 }
02264
02265 size_ += chunk_size;
02266 page_count_++;
02267 chunk->set_next(first_chunk_);
02268 chunk->set_size(chunk_size);
02269 first_chunk_ = chunk;
02270
02271
02272
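// A large object chunk holds exactly one object, which starts at the
// object area of the chunk's first page.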
02273 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
02274 Address object_address = page->ObjectAreaStart();
02275
02276
02277
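// Flag this as a large object page by clearing the low-order bit of the
// page's is_normal_page word (the chunk size is asserted even, so the bit
// is already clear if that word holds the size), and clear the page's
// remembered set.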
02278 ASSERT((chunk_size & 0x1) == 0);
02279 page->is_normal_page &= ~0x1;
02280 page->ClearRSet();
02281 int extra_bytes = requested_size - object_size;
02282 if (extra_bytes > 0) {
02283
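// Clear the extra bytes requested beyond the object itself; for large
// fixed arrays they hold the extra remembered set.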
02284 memset(object_address + object_size, 0, extra_bytes);
02285 }
02286
02287 return HeapObject::FromAddress(object_address);
02288 }
02289
02290
02291 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
02292 ASSERT(0 < size_in_bytes);
02293 return AllocateRawInternal(size_in_bytes,
02294 size_in_bytes,
02295 EXECUTABLE);
02296 }
02297
02298
02299 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
02300 ASSERT(0 < size_in_bytes);
02301 int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
02302 return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
02303 size_in_bytes,
02304 NOT_EXECUTABLE);
02305 }
02306
02307
02308 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
02309 ASSERT(0 < size_in_bytes);
02310 return AllocateRawInternal(size_in_bytes,
02311 size_in_bytes,
02312 NOT_EXECUTABLE);
02313 }
02314
02315
02316
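// GC support: return the large object containing the given address, or a
// failure if the address is not in this space.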
02317 Object* LargeObjectSpace::FindObject(Address a) {
02318 for (LargeObjectChunk* chunk = first_chunk_;
02319 chunk != NULL;
02320 chunk = chunk->next()) {
02321 Address chunk_address = chunk->address();
02322 if (chunk_address <= a && a < chunk_address + chunk->size()) {
02323 return chunk->GetObject();
02324 }
02325 }
02326 return Failure::Exception();
02327 }
02328
02329
02330 void LargeObjectSpace::ClearRSet() {
02331 ASSERT(Page::is_rset_in_use());
02332
02333 LargeObjectIterator it(this);
02334 while (it.has_next()) {
02335 HeapObject* object = it.next();
02336
02337
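// Of the object kinds kept in large object space, only fixed arrays need
// remembered set support.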
02338 if (object->IsFixedArray()) {
02339
02340 Page* page = Page::FromAddress(object->address());
02341 page->ClearRSet();
02342
02343
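// Clear the extra remembered set stored immediately after the object.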
02344 int size = object->Size();
02345 int extra_rset_bytes = ExtraRSetBytesFor(size);
02346 memset(object->address() + size, 0, extra_rset_bytes);
02347 }
02348 }
02349 }
02350
02351
02352 void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
02353 ASSERT(Page::is_rset_in_use());
02354
02355 LargeObjectIterator it(this);
02356 while (it.has_next()) {
02357
02358
02359
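// Only large fixed arrays have remembered sets to iterate; other large
// objects are skipped.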
02360 HeapObject* object = it.next();
02361 if (object->IsFixedArray()) {
02362
02363 Page* page = Page::FromAddress(object->address());
02364 Address object_end = object->address() + object->Size();
02365 Heap::IterateRSetRange(page->ObjectAreaStart(),
02366 Min(page->ObjectAreaEnd(), object_end),
02367 page->RSetStart(),
02368 copy_object_func);
02369
02370
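// Iterate the extra remembered set, stored immediately after the object,
// which covers array elements beyond the first page's object area.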
02371 if (object_end > page->ObjectAreaEnd()) {
02372 Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
02373 object_end, copy_object_func);
02374 }
02375 }
02376 }
02377 }
02378
02379
02380 void LargeObjectSpace::FreeUnmarkedObjects() {
02381 LargeObjectChunk* previous = NULL;
02382 LargeObjectChunk* current = first_chunk_;
02383 while (current != NULL) {
02384 HeapObject* object = current->GetObject();
02385 if (object->IsMarked()) {
02386 object->ClearMark();
02387 MarkCompactCollector::tracer()->decrement_marked_count();
02388 previous = current;
02389 current = current->next();
02390 } else {
02391 Address chunk_address = current->address();
02392 size_t chunk_size = current->size();
02393
02394
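// Cut the unmarked chunk out of the chunk list.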
02395 current = current->next();
02396 if (previous == NULL) {
02397 first_chunk_ = current;
02398 } else {
02399 previous->set_next(current);
02400 }
02401
02402
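// Free the chunk and update the space's accounting.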
02403 if (object->IsCode()) {
02404 LOG(CodeDeleteEvent(object->address()));
02405 }
02406 size_ -= chunk_size;
02407 page_count_--;
02408 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
02409 LOG(DeleteEvent("LargeObjectChunk", chunk_address));
02410 }
02411 }
02412 }
02413
02414
02415 bool LargeObjectSpace::Contains(HeapObject* object) {
02416 Address address = object->address();
02417 Page* page = Page::FromAddress(address);
02418
02419 SLOW_ASSERT(!page->IsLargeObjectPage()
02420 || !FindObject(address)->IsFailure());
02421
02422 return page->IsLargeObjectPage();
02423 }
02424
02425
02426 #ifdef DEBUG
02427
02428
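// Verify the large object space by walking the chunk list directly.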
02429 void LargeObjectSpace::Verify() {
02430 for (LargeObjectChunk* chunk = first_chunk_;
02431 chunk != NULL;
02432 chunk = chunk->next()) {
02433
02434
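// Each chunk contains exactly one object, which starts at the object area
// of the chunk's first page.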
02435 HeapObject* object = chunk->GetObject();
02436 Page* page = Page::FromAddress(object->address());
02437 ASSERT(object->address() == page->ObjectAreaStart());
02438
02439
02440
02441 Map* map = object->map();
02442 ASSERT(map->IsMap());
02443 ASSERT(Heap::map_space()->Contains(map));
02444
02445
02446
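// Only code, sequential strings, fixed arrays, and byte arrays are kept in
// large object space.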
02447 ASSERT(object->IsCode() || object->IsSeqString()
02448 || object->IsFixedArray() || object->IsByteArray());
02449
02450
02451 object->Verify();
02452
02453
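// Only code objects and fixed arrays have interior pointers to verify;
// byte arrays and strings do not.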
02454 if (object->IsCode()) {
02455 VerifyPointersVisitor code_visitor;
02456 Code::cast(object)->ConvertICTargetsFromAddressToObject();
02457 object->IterateBody(map->instance_type(),
02458 object->Size(),
02459 &code_visitor);
02460 Code::cast(object)->ConvertICTargetsFromObjectToAddress();
02461 } else if (object->IsFixedArray()) {
02462
02463
02464
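// Walk the array elements by hand so that the remembered set bit can be
// checked for each element that points into new space.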
02465 FixedArray* array = FixedArray::cast(object);
02466 for (int j = 0; j < array->length(); j++) {
02467 Object* element = array->get(j);
02468 if (element->IsHeapObject()) {
02469 HeapObject* element_object = HeapObject::cast(element);
02470 ASSERT(Heap::Contains(element_object));
02471 ASSERT(element_object->map()->IsMap());
02472 if (Heap::InNewSpace(element_object)) {
02473 ASSERT(Page::IsRSetSet(object->address(),
02474 FixedArray::kHeaderSize + j * kPointerSize));
02475 }
02476 }
02477 }
02478 }
02479 }
02480 }
02481
02482
02483 void LargeObjectSpace::Print() {
02484 LargeObjectIterator it(this);
02485 while (it.has_next()) {
02486 it.next()->Print();
02487 }
02488 }
02489
02490
02491 void LargeObjectSpace::ReportStatistics() {
02492 PrintF(" size: %d\n", size_);
02493 int num_objects = 0;
02494 ClearHistograms();
02495 LargeObjectIterator it(this);
02496 while (it.has_next()) {
02497 num_objects++;
02498 CollectHistogramInfo(it.next());
02499 }
02500
02501 PrintF(" number of objects %d\n", num_objects);
02502 if (num_objects > 0) ReportHistogram(false);
02503 }
02504
02505
02506 void LargeObjectSpace::CollectCodeStatistics() {
02507 LargeObjectIterator obj_it(this);
02508 while (obj_it.has_next()) {
02509 HeapObject* obj = obj_it.next();
02510 if (obj->IsCode()) {
02511 Code* code = Code::cast(obj);
02512 code_kind_statistics[code->kind()] += code->Size();
02513 }
02514 }
02515 }
02516
02517
02518 void LargeObjectSpace::PrintRSet() {
02519 LargeObjectIterator it(this);
02520 while (it.has_next()) {
02521 HeapObject* object = it.next();
02522 if (object->IsFixedArray()) {
02523 Page* page = Page::FromAddress(object->address());
02524
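// The extra remembered set, if present, is stored immediately after the
// object, i.e. at its allocation top.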
02525 Address allocation_top = object->address() + object->Size();
02526 PrintF("large page 0x%x:\n", page);
02527 PrintRSetRange(page->RSetStart(), page->RSetEnd(),
02528 reinterpret_cast<Object**>(object->address()),
02529 allocation_top);
02530 int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
02531 int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
02532 kBitsPerInt);
02533 PrintF("------------------------------------------------------------"
02534 "-----------\n");
02535 PrintRSetRange(allocation_top,
02536 allocation_top + extra_rset_bits / kBitsPerByte,
02537 reinterpret_cast<Object**>(object->address()
02538 + Page::kObjectAreaSize),
02539 allocation_top);
02540 PrintF("\n");
02541 }
02542 }
02543 }
02544 #endif // DEBUG
02545
02546 } }  // namespace v8::internal