00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028 #ifndef V8_SPACES_INL_H_
00029 #define V8_SPACES_INL_H_
00030
00031 #include "memory.h"
00032 #include "spaces.h"
00033
00034 namespace v8 { namespace internal {
00035
00036
00037
00038
00039
00040 bool HeapObjectIterator::has_next() {
00041 if (cur_addr_ < cur_limit_) {
00042 return true;
00043 }
00044 ASSERT(cur_addr_ == cur_limit_);
00045 return HasNextInNextPage();
00046 }
00047
00048
00049 HeapObject* HeapObjectIterator::next() {
00050 ASSERT(has_next());
00051
00052 HeapObject* obj = HeapObject::FromAddress(cur_addr_);
00053 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
00054 ASSERT_OBJECT_SIZE(obj_size);
00055
00056 cur_addr_ += obj_size;
00057 ASSERT(cur_addr_ <= cur_limit_);
00058
00059 return obj;
00060 }
00061
00062
00063
00064
00065
00066 bool PageIterator::has_next() {
00067 return cur_page_ != stop_page_;
00068 }
00069
00070
00071 Page* PageIterator::next() {
00072 ASSERT(has_next());
00073 Page* result = cur_page_;
00074 cur_page_ = cur_page_->next_page();
00075 return result;
00076 }
00077
00078
00079
00080
00081
00082 Page* Page::next_page() {
00083 return MemoryAllocator::GetNextPage(this);
00084 }
00085
00086
00087 Address Page::AllocationTop() {
00088 PagedSpace* owner = MemoryAllocator::PageOwner(this);
00089 return owner->PageAllocationTop(this);
00090 }
00091
00092
00093 void Page::ClearRSet() {
00094
00095 memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
00096 }
00097
00098
00099
00100
00101
00102
00103
// Translates a (slot address, byte offset) pair into its remembered-set
// position: returns the address of the 32-bit RSet word holding the bit
// and stores the single-bit mask for it through *bitmask.
Address Page::ComputeRSetBitPosition(Address address, int offset,
                                     uint32_t* bitmask) {
  ASSERT(Page::is_rset_in_use());

  Page* page = Page::FromAddress(address);
  // One RSet bit per object-aligned slot: the slot's page offset shifted
  // down by the alignment gives the bit index within the page's RSet.
  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
                                             kObjectAlignmentBits);
  *bitmask = 1 << (bit_offset % kBitsPerInt);

  // Word address = page base + (bit index / bits-per-word) words.
  Address rset_address =
      page->address() + (bit_offset / kBitsPerInt) * kIntSize;

  // For regular pages the word must fall inside the page's RSet area;
  // only large object pages may need words beyond it.
  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
         || page->IsLargeObjectPage());

  if (rset_address >= page->RSetEnd()) {
    // Slot lies past the in-page RSet.  The ASSERT establishes the large
    // object here is a FixedArray; the extra RSet words for it appear to
    // be laid out immediately after the array body, so skip over the
    // array (its length is read from the start of the object area).
    // NOTE(review): layout of the overflow RSet is defined elsewhere
    // (LargeObjectSpace) -- confirm it matches this computation.
    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
    rset_address +=
        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
                                           + Array::kLengthOffset));
  }
  return rset_address;
}
00137
00138
00139 void Page::SetRSet(Address address, int offset) {
00140 uint32_t bitmask = 0;
00141 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
00142 Memory::uint32_at(rset_address) |= bitmask;
00143
00144 ASSERT(IsRSetSet(address, offset));
00145 }
00146
00147
00148
00149 void Page::UnsetRSet(Address address, int offset) {
00150 uint32_t bitmask = 0;
00151 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
00152 Memory::uint32_at(rset_address) &= ~bitmask;
00153
00154 ASSERT(!IsRSetSet(address, offset));
00155 }
00156
00157
00158 bool Page::IsRSetSet(Address address, int offset) {
00159 uint32_t bitmask = 0;
00160 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
00161 return (Memory::uint32_at(rset_address) & bitmask) != 0;
00162 }
00163
00164
00165
00166
00167
00168 bool MemoryAllocator::IsValidChunk(int chunk_id) {
00169 if (!IsValidChunkId(chunk_id)) return false;
00170
00171 ChunkInfo& c = chunks_[chunk_id];
00172 return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
00173 }
00174
00175
00176 bool MemoryAllocator::IsValidChunkId(int chunk_id) {
00177 return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
00178 }
00179
00180
00181 bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
00182 ASSERT(p->is_valid());
00183
00184 int chunk_id = GetChunkId(p);
00185 if (!IsValidChunkId(chunk_id)) return false;
00186
00187 ChunkInfo& c = chunks_[chunk_id];
00188 return (c.address() <= p->address()) &&
00189 (p->address() < c.address() + c.size()) &&
00190 (space == c.owner());
00191 }
00192
00193
00194 Page* MemoryAllocator::GetNextPage(Page* p) {
00195 ASSERT(p->is_valid());
00196 int raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
00197 return Page::FromAddress(AddressFrom<Address>(raw_addr));
00198 }
00199
00200
00201 int MemoryAllocator::GetChunkId(Page* p) {
00202 ASSERT(p->is_valid());
00203 return p->opaque_header & Page::kPageAlignmentMask;
00204 }
00205
00206
00207 void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
00208 ASSERT(prev->is_valid());
00209 int chunk_id = prev->opaque_header & Page::kPageAlignmentMask;
00210 ASSERT_PAGE_ALIGNED(next->address());
00211 prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
00212 }
00213
00214
00215 PagedSpace* MemoryAllocator::PageOwner(Page* page) {
00216 int chunk_id = GetChunkId(page);
00217 ASSERT(IsValidChunk(chunk_id));
00218 return chunks_[chunk_id].owner();
00219 }
00220
00221
00222
00223
00224
00225 bool PagedSpace::Contains(Address addr) {
00226 Page* p = Page::FromAddress(addr);
00227 ASSERT(p->is_valid());
00228
00229 return MemoryAllocator::IsPageInSpace(p, this);
00230 }
00231
00232
00233
00234
00235
00236
00237 HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
00238 int size_in_bytes) {
00239 Address current_top = alloc_info->top;
00240 Address new_top = current_top + size_in_bytes;
00241 if (new_top > alloc_info->limit) return NULL;
00242
00243 alloc_info->top = new_top;
00244 ASSERT(alloc_info->VerifyPagedAllocation());
00245 accounting_stats_.AllocateBytes(size_in_bytes);
00246 return HeapObject::FromAddress(current_top);
00247 }
00248
00249
00250
00251 Object* PagedSpace::AllocateRaw(int size_in_bytes) {
00252 ASSERT(HasBeenSetup());
00253 ASSERT_OBJECT_SIZE(size_in_bytes);
00254 HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
00255 if (object != NULL) return object;
00256
00257 object = SlowAllocateRaw(size_in_bytes);
00258 if (object != NULL) return object;
00259
00260 return Failure::RetryAfterGC(size_in_bytes, identity());
00261 }
00262
00263
00264
00265 Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
00266 ASSERT(HasBeenSetup());
00267 ASSERT_OBJECT_SIZE(size_in_bytes);
00268 HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
00269 if (object != NULL) return object;
00270
00271 object = SlowMCAllocateRaw(size_in_bytes);
00272 if (object != NULL) return object;
00273
00274 return Failure::RetryAfterGC(size_in_bytes, identity());
00275 }
00276
00277
00278
00279
00280
00281 HeapObject* LargeObjectChunk::GetObject() {
00282
00283
00284 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
00285 return HeapObject::FromAddress(page->ObjectAreaStart());
00286 }
00287
00288
00289
00290
00291
00292 int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
00293 int extra_rset_bits =
00294 RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
00295 kBitsPerInt);
00296 return extra_rset_bits / kBitsPerByte;
00297 }
00298
00299
00300 Object* NewSpace::AllocateRawInternal(int size_in_bytes,
00301 AllocationInfo* alloc_info) {
00302 Address new_top = alloc_info->top + size_in_bytes;
00303 if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);
00304
00305 Object* obj = HeapObject::FromAddress(alloc_info->top);
00306 alloc_info->top = new_top;
00307 #ifdef DEBUG
00308 SemiSpace* space =
00309 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
00310 ASSERT(space->low() <= alloc_info->top
00311 && alloc_info->top <= space->high()
00312 && alloc_info->limit == space->high());
00313 #endif
00314 return obj;
00315 }
00316
00317 } }
00318
00319 #endif // V8_SPACES_INL_H_