diff --git a/patches/v8/.patches b/patches/v8/.patches
index 1aed70a00fa61..5851ea6875f3f 100644
--- a/patches/v8/.patches
+++ b/patches/v8/.patches
@@ -14,3 +14,4 @@ merged_regexp_reserve_space_for_all_registers_in_interpreter.patch
 cherry-pick-d4ddf645c3ca.patch
 turn_some_dchecks_into_checks_in_schedule_methods.patch
 debugger_allow_termination-on-resume_when_paused_at_a_breakpoint.patch
+backport_1084820.patch
diff --git a/patches/v8/backport_1084820.patch b/patches/v8/backport_1084820.patch
new file mode 100644
index 0000000000000..25ebcd2e2481d
--- /dev/null
+++ b/patches/v8/backport_1084820.patch
@@ -0,0 +1,771 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Cheng Zhao
+Date: Thu, 4 Oct 2018 14:57:02 -0700
+Subject: fix: object materialization
+
+[1084820] [High] [CVE-2020-6512]: DCHECK failure in value.IsHeapObject() in objects
+debug.cc
+Backport https://chromium.googlesource.com/v8/v8.git/+/c16f62d6e943756d8b4170a36f61e666ced82e6d
+
+diff --git a/src/deoptimizer/deoptimizer.cc b/src/deoptimizer/deoptimizer.cc
+index 5655d045c0bca8166aac6176bd6468e55ff0decc..ce6e5ff214313c4298322b74f909631a5ba2ccdc 100644
+--- a/src/deoptimizer/deoptimizer.cc
++++ b/src/deoptimizer/deoptimizer.cc
+@@ -47,7 +47,6 @@ class FrameWriter {
+ 
+   void PushRawValue(intptr_t value, const char* debug_hint) {
+     PushValue(value);
+-
+     if (trace_scope_ != nullptr) {
+       DebugPrintOutputValue(value, debug_hint);
+     }
+@@ -82,13 +81,10 @@ class FrameWriter {
+   void PushTranslatedValue(const TranslatedFrame::iterator& iterator,
+                            const char* debug_hint = "") {
+     Object obj = iterator->GetRawValue();
+-
+     PushRawObject(obj, debug_hint);
+-
+     if (trace_scope_) {
+       PrintF(trace_scope_->file(), " (input #%d)\n", iterator.input_index());
+     }
+-
+     deoptimizer_->QueueValueForMaterialization(output_address(top_offset_), obj,
+                                                iterator);
+   }
+@@ -2394,6 +2390,11 @@ int TranslatedValue::object_index() const {
+ Object TranslatedValue::GetRawValue() const {
+   // If we have a value, return it.
+   if (materialization_state() == kFinished) {
++    int smi;
++    if (storage_->IsHeapNumber() &&
++        DoubleToSmiInteger(storage_->Number(), &smi)) {
++      return Smi::FromInt(smi);
++    }
+     return *storage_;
+   }
+ 
+@@ -2436,6 +2437,22 @@ Object TranslatedValue::GetRawValue() const {
+       }
+     }
+ 
++    case kFloat: {
++      int smi;
++      if (DoubleToSmiInteger(float_value().get_scalar(), &smi)) {
++        return Smi::FromInt(smi);
++      }
++      break;
++    }
++
++    case kDouble: {
++      int smi;
++      if (DoubleToSmiInteger(double_value().get_scalar(), &smi)) {
++        return Smi::FromInt(smi);
++      }
++      break;
++    }
++
+     default:
+       break;
+   }
+@@ -2445,106 +2462,76 @@ Object TranslatedValue::GetRawValue() const {
+   return ReadOnlyRoots(isolate()).arguments_marker();
+ }
+ 
+-void TranslatedValue::set_initialized_storage(Handle<Object> storage) {
++void TranslatedValue::set_initialized_storage(Handle<HeapObject> storage) {
+   DCHECK_EQ(kUninitialized, materialization_state());
+   storage_ = storage;
+   materialization_state_ = kFinished;
+ }
+ 
+ Handle<Object> TranslatedValue::GetValue() {
+-  // If we already have a value, then get it.
+-  if (materialization_state() == kFinished) return storage_;
+-
+-  // Otherwise we have to materialize.
+-  switch (kind()) {
+-    case TranslatedValue::kTagged:
+-    case TranslatedValue::kInt32:
+-    case TranslatedValue::kInt64:
+-    case TranslatedValue::kUInt32:
+-    case TranslatedValue::kBoolBit:
+-    case TranslatedValue::kFloat:
+-    case TranslatedValue::kDouble: {
+-      MaterializeSimple();
+-      return storage_;
+-    }
+-
+-    case TranslatedValue::kCapturedObject:
+-    case TranslatedValue::kDuplicatedObject: {
+-      // We need to materialize the object (or possibly even object graphs).
+-      // To make the object verifier happy, we materialize in two steps.
+-
+-      // 1. Allocate storage for reachable objects. This makes sure that for
+-      //    each object we have allocated space on heap. The space will be
+-      //    a byte array that will be later initialized, or a fully
+-      //    initialized object if it is safe to allocate one that will
+-      //    pass the verifier.
+-      container_->EnsureObjectAllocatedAt(this);
+-
+-      // 2. Initialize the objects. If we have allocated only byte arrays
+-      //    for some objects, we now overwrite the byte arrays with the
+-      //    correct object fields. Note that this phase does not allocate
+-      //    any new objects, so it does not trigger the object verifier.
+-      return container_->InitializeObjectAt(this);
+-    }
+-
+-    case TranslatedValue::kInvalid:
+-      FATAL("unexpected case");
+-      return Handle<Object>::null();
++  Handle<Object> value(GetRawValue(), isolate());
++  if (materialization_state() == kFinished) return value;
++
++  if (value->IsSmi()) {
++    // Even though stored as a Smi, this number might instead be needed as a
++    // HeapNumber when materializing a JSObject with a field of HeapObject
++    // representation. Since we don't have this information available here, we
++    // just always allocate a HeapNumber and later extract the Smi again if we
++    // don't need a HeapObject.
++    set_initialized_storage(
++        isolate()->factory()->NewHeapNumber(value->Number()));
++    return value;
+   }
+ 
+-  FATAL("internal error: value missing");
+-  return Handle<Object>::null();
+-}
+-
+-void TranslatedValue::MaterializeSimple() {
+-  // If we already have materialized, return.
+-  if (materialization_state() == kFinished) return;
+-
+-  Object raw_value = GetRawValue();
+-  if (raw_value != ReadOnlyRoots(isolate()).arguments_marker()) {
+-    // We can get the value without allocation, just return it here.
+-    set_initialized_storage(Handle<Object>(raw_value, isolate()));
+-    return;
++  if (*value != ReadOnlyRoots(isolate()).arguments_marker()) {
++    set_initialized_storage(Handle<HeapObject>::cast(value));
++    return storage_;
+   }
+ 
+-  switch (kind()) {
+-    case kInt32:
+-      set_initialized_storage(
+-          Handle<Object>(isolate()->factory()->NewNumber(int32_value())));
+-      return;
++  // Otherwise we have to materialize.
+ 
+-    case kInt64:
+-      set_initialized_storage(Handle<Object>(
+-          isolate()->factory()->NewNumber(static_cast<double>(int64_value()))));
+-      return;
++  if (kind() == TranslatedValue::kCapturedObject ||
++      kind() == TranslatedValue::kDuplicatedObject) {
++    // We need to materialize the object (or possibly even object graphs).
++    // To make the object verifier happy, we materialize in two steps.
+ 
+-    case kUInt32:
+-      set_initialized_storage(
+-          Handle<Object>(isolate()->factory()->NewNumber(uint32_value())));
+-      return;
++    // 1. Allocate storage for reachable objects. This makes sure that for
++    //    each object we have allocated space on heap. The space will be
++    //    a byte array that will be later initialized, or a fully
++    //    initialized object if it is safe to allocate one that will
++    //    pass the verifier.
++    container_->EnsureObjectAllocatedAt(this);
+ 
+-    case kFloat: {
+-      double scalar_value = float_value().get_scalar();
+-      set_initialized_storage(
+-          Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
+-      return;
+-    }
+-
+-    case kDouble: {
+-      double scalar_value = double_value().get_scalar();
+-      set_initialized_storage(
+-          Handle<Object>(isolate()->factory()->NewNumber(scalar_value)));
+-      return;
+-    }
++    // 2. Initialize the objects. If we have allocated only byte arrays
++    //    for some objects, we now overwrite the byte arrays with the
++    //    correct object fields. Note that this phase does not allocate
++    //    any new objects, so it does not trigger the object verifier.
++    return container_->InitializeObjectAt(this);
++  }
+ 
+-    case kCapturedObject:
+-    case kDuplicatedObject:
+-    case kInvalid:
+-    case kTagged:
+-    case kBoolBit:
+-      FATAL("internal error: unexpected materialization.");
++  double number;
++  switch (kind()) {
++    case TranslatedValue::kInt32:
++      number = int32_value();
++      break;
++    case TranslatedValue::kInt64:
++      number = int64_value();
++      break;
++    case TranslatedValue::kUInt32:
++      number = uint32_value();
++      break;
++    case TranslatedValue::kFloat:
++      number = float_value().get_scalar();
+       break;
++    case TranslatedValue::kDouble:
++      number = double_value().get_scalar();
++      break;
++    default:
++      UNREACHABLE();
+   }
++  DCHECK(!IsSmiDouble(number));
++  set_initialized_storage(isolate()->factory()->NewHeapNumber(number));
++  return storage_;
+ }
+ 
+ bool TranslatedValue::IsMaterializedObject() const {
+@@ -2600,8 +2587,9 @@ Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
+ }
+ 
+ void TranslatedValue::Handlify() {
+-  if (kind() == kTagged) {
+-    set_initialized_storage(Handle<Object>(raw_literal(), isolate()));
++  if (kind() == kTagged && raw_literal().IsHeapObject()) {
++    set_initialized_storage(
++        Handle<HeapObject>(HeapObject::cast(raw_literal()), isolate()));
+     raw_literal_ = Object();
+   }
+ }
+@@ -3352,7 +3340,7 @@ TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) {
+   return &(frames_[pos.frame_index_].values_[pos.value_index_]);
+ }
+ 
+-Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
++Handle<HeapObject> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
+   slot = ResolveCapturedObject(slot);
+ 
+   DisallowHeapAllocation no_allocation;
+@@ -3367,7 +3355,7 @@ Handle<Object> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
+       InitializeCapturedObjectAt(index, &worklist, no_allocation);
+     }
+   }
+-  return slot->GetStorage();
++  return slot->storage();
+ }
+ 
+ void TranslatedState::InitializeCapturedObjectAt(
+@@ -3467,11 +3455,17 @@ void TranslatedState::EnsureObjectAllocatedAt(TranslatedValue* slot) {
+   }
+ }
+ 
++int TranslatedValue::GetSmiValue() const {
++  Object value = GetRawValue();
++  CHECK(value.IsSmi());
++  return Smi::cast(value).value();
++}
++
+ void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
+                                                   int* value_index,
+                                                   TranslatedValue* slot,
+                                                   Handle<Map> map) {
+-  int length = Smi::cast(frame->values_[*value_index].GetRawValue()).value();
++  int length = frame->values_[*value_index].GetSmiValue();
+   (*value_index)++;
+   Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast(
+       isolate()->factory()->NewFixedDoubleArray(length));
+@@ -3505,10 +3499,10 @@ void TranslatedState::MaterializeHeapNumber(TranslatedFrame* frame,
+ 
+ namespace {
+ 
+-enum DoubleStorageKind : uint8_t {
++enum StorageKind : uint8_t {
+   kStoreTagged,
+   kStoreUnboxedDouble,
+-  kStoreMutableHeapNumber,
++  kStoreHeapObject
+ };
+ 
+ }  // namespace
+@@ -3580,9 +3574,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
+     case SIMPLE_NUMBER_DICTIONARY_TYPE:
+     case STRING_TABLE_TYPE: {
+       // Check we have the right size.
+-      int array_length =
+-          Smi::cast(frame->values_[value_index].GetRawValue()).value();
+-
++      int array_length = frame->values_[value_index].GetSmiValue();
+       int instance_size = FixedArray::SizeFor(array_length);
+       CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
+ 
+@@ -3601,13 +3593,13 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
+ 
+     case PROPERTY_ARRAY_TYPE: {
+       // Check we have the right size.
+-      int length_or_hash =
+-          Smi::cast(frame->values_[value_index].GetRawValue()).value();
++      int length_or_hash = frame->values_[value_index].GetSmiValue();
+       int array_length = PropertyArray::LengthField::decode(length_or_hash);
+       int instance_size = PropertyArray::SizeFor(array_length);
+       CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
+ 
+       slot->set_storage(AllocateStorageFor(slot));
++
+       // Make sure all the remaining children (after the map) are allocated.
+       return EnsureChildrenAllocated(slot->GetChildrenCount() - 1, frame,
+                                      &value_index, worklist);
+@@ -3652,7 +3644,7 @@ void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame,
+     } else {
+       // Make sure the simple values (heap numbers, etc.) are properly
+       // initialized.
+-      child_slot->MaterializeSimple();
++      child_slot->GetValue();
+     }
+     SkipSlots(1, frame, value_index);
+   }
+@@ -3667,16 +3659,17 @@ void TranslatedState::EnsurePropertiesAllocatedAndMarked(
+   properties_slot->mark_allocated();
+   properties_slot->set_storage(object_storage);
+ 
+-  // Set markers for the double properties.
++  // Set markers for out-of-object properties.
+   Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+   for (InternalIndex i : map->IterateOwnDescriptors()) {
+     FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+-    if (descriptors->GetDetails(i).representation().IsDouble() &&
+-        !index.is_inobject()) {
++    Representation representation = descriptors->GetDetails(i).representation();
++    if (!index.is_inobject() &&
++        (representation.IsDouble() || representation.IsHeapObject())) {
+       CHECK(!map->IsUnboxedDoubleField(index));
+       int outobject_index = index.outobject_array_index();
+       int array_index = outobject_index * kTaggedSize;
+-      object_storage->set(array_index, kStoreMutableHeapNumber);
++      object_storage->set(array_index, kStoreHeapObject);
+     }
+   }
+ }
+@@ -3702,31 +3695,44 @@ void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot,
+   // Now we handle the interesting (JSObject) case.
+   Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ 
+-  // Set markers for the double properties.
++  // Set markers for in-object properties.
+   for (InternalIndex i : map->IterateOwnDescriptors()) {
+     FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+-    if (descriptors->GetDetails(i).representation().IsDouble() &&
+-        index.is_inobject()) {
++    Representation representation = descriptors->GetDetails(i).representation();
++    if (index.is_inobject() &&
++        (representation.IsDouble() || representation.IsHeapObject())) {
+       CHECK_GE(index.index(), FixedArray::kHeaderSize / kTaggedSize);
+       int array_index = index.index() * kTaggedSize - FixedArray::kHeaderSize;
+-      uint8_t marker = map->IsUnboxedDoubleField(index)
+-                           ? kStoreUnboxedDouble
+-                           : kStoreMutableHeapNumber;
++      uint8_t marker = map->IsUnboxedDoubleField(index) ? kStoreUnboxedDouble
++                                                        : kStoreHeapObject;
+       object_storage->set(array_index, marker);
+     }
+   }
+   slot->set_storage(object_storage);
+ }
+ 
+-Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
+-                                                   int* value_index) {
+-  TranslatedValue* slot = frame->ValueAt(*value_index);
+-  SkipSlots(1, frame, value_index);
++TranslatedValue* TranslatedState::GetResolvedSlot(TranslatedFrame* frame,
++                                                  int value_index) {
++  TranslatedValue* slot = frame->ValueAt(value_index);
+   if (slot->kind() == TranslatedValue::kDuplicatedObject) {
+     slot = ResolveCapturedObject(slot);
+   }
+-  CHECK_NE(TranslatedValue::kUninitialized, slot->materialization_state());
+-  return slot->GetStorage();
++  CHECK_NE(slot->materialization_state(), TranslatedValue::kUninitialized);
++  return slot;
++}
++
++TranslatedValue* TranslatedState::GetResolvedSlotAndAdvance(
++    TranslatedFrame* frame, int* value_index) {
++  TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
++  SkipSlots(1, frame, value_index);
++  return slot;
++}
++
++Handle<Object> TranslatedState::GetValueAndAdvance(TranslatedFrame* frame,
++                                                   int* value_index) {
++  TranslatedValue* slot = GetResolvedSlot(frame, *value_index);
++  SkipSlots(1, frame, value_index);
++  return slot->GetValue();
+ }
+ 
+ void TranslatedState::InitializeJSObjectAt(
+@@ -3754,29 +3760,25 @@ void TranslatedState::InitializeJSObjectAt(
+   // marker to see if we store an unboxed double.
+   DCHECK_EQ(kTaggedSize, JSObject::kPropertiesOrHashOffset);
+   for (int i = 2; i < slot->GetChildrenCount(); i++) {
+-    // Initialize and extract the value from its slot.
+-    Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
+-
++    TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
+     // Read out the marker and ensure the field is consistent with
+     // what the markers in the storage say (note that all heap numbers
+     // should be fully initialized by now).
+     int offset = i * kTaggedSize;
+     uint8_t marker = object_storage->ReadField<uint8_t>(offset);
+     if (marker == kStoreUnboxedDouble) {
+-      double double_field_value;
+-      if (field_value->IsSmi()) {
+-        double_field_value = Smi::cast(*field_value).value();
+-      } else {
+-        CHECK(field_value->IsHeapNumber());
+-        double_field_value = HeapNumber::cast(*field_value).value();
+-      }
+-      object_storage->WriteField<double>(offset, double_field_value);
+-    } else if (marker == kStoreMutableHeapNumber) {
++      Handle<Object> field_value = slot->storage();
+       CHECK(field_value->IsHeapNumber());
++      object_storage->WriteField<double>(offset, field_value->Number());
++    } else if (marker == kStoreHeapObject) {
++      Handle<HeapObject> field_value = slot->storage();
+       WRITE_FIELD(*object_storage, offset, *field_value);
+       WRITE_BARRIER(*object_storage, offset, *field_value);
+     } else {
+       CHECK_EQ(kStoreTagged, marker);
++      Handle<Object> field_value = slot->GetValue();
++      DCHECK_IMPLIES(field_value->IsHeapNumber(),
++                     !IsSmiDouble(field_value->Number()));
+       WRITE_FIELD(*object_storage, offset, *field_value);
+       WRITE_BARRIER(*object_storage, offset, *field_value);
+     }
+@@ -3802,15 +3804,18 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
+ 
+   // Write the fields to the object.
+   for (int i = 1; i < slot->GetChildrenCount(); i++) {
+-    Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
++    TranslatedValue* slot = GetResolvedSlotAndAdvance(frame, value_index);
+     int offset = i * kTaggedSize;
+     uint8_t marker = object_storage->ReadField<uint8_t>(offset);
+-    if (i > 1 && marker == kStoreMutableHeapNumber) {
+-      CHECK(field_value->IsHeapNumber());
++    Handle<Object> field_value;
++    if (i > 1 && marker == kStoreHeapObject) {
++      field_value = slot->storage();
+     } else {
+       CHECK(marker == kStoreTagged || i == 1);
++      field_value = slot->GetValue();
++      DCHECK_IMPLIES(field_value->IsHeapNumber(),
++                     !IsSmiDouble(field_value->Number()));
+     }
+-
+     WRITE_FIELD(*object_storage, offset, *field_value);
+     WRITE_BARRIER(*object_storage, offset, *field_value);
+   }
+@@ -3877,10 +3882,7 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
+       // argument (the receiver).
+       static constexpr int kTheContext = 1;
+       const int height = frames_[i].height() + kTheContext;
+-      Object argc_object = frames_[i].ValueAt(height - 1)->GetRawValue();
+-      CHECK(argc_object.IsSmi());
+-      *args_count = Smi::ToInt(argc_object);
+-
++      *args_count = frames_[i].ValueAt(height - 1)->GetSmiValue();
+       DCHECK_EQ(*args_count, 1);
+     } else {
+       *args_count = InternalFormalParameterCountWithReceiver(
+@@ -3922,21 +3924,30 @@ void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
+ 
+     CHECK(value_info->IsMaterializedObject());
+ 
+-    // Skip duplicate objects (i.e., those that point to some
+-    // other object id).
++    // Skip duplicate objects (i.e., those that point to some other object id).
+     if (value_info->object_index() != i) continue;
+ 
++    Handle<Object> previous_value(previously_materialized_objects->get(i),
++                                  isolate_);
+     Handle<Object> value(value_info->GetRawValue(), isolate_);
+ 
+-    if (!value.is_identical_to(marker)) {
+-      if (previously_materialized_objects->get(i) == *marker) {
++    if (value.is_identical_to(marker)) {
++      DCHECK_EQ(*previous_value, *marker);
++    } else {
++      if (*previous_value == *marker) {
++        if (value->IsSmi()) {
++          value = isolate()->factory()->NewHeapNumber(value->Number());
++        }
+         previously_materialized_objects->set(i, *value);
+         value_changed = true;
+       } else {
+-        CHECK(previously_materialized_objects->get(i) == *value);
++        CHECK(*previous_value == *value ||
++              (previous_value->IsHeapNumber() && value->IsSmi() &&
++               previous_value->Number() == value->Number()));
+       }
+     }
+   }
++
+   if (new_store && value_changed) {
+     materialized_store->Set(stack_frame_pointer_,
+                             previously_materialized_objects);
+@@ -3970,8 +3981,10 @@ void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
+     CHECK(value_info->IsMaterializedObject());
+ 
+     if (value_info->kind() == TranslatedValue::kCapturedObject) {
+-      value_info->set_initialized_storage(
+-          Handle<Object>(previously_materialized_objects->get(i), isolate_));
++      Handle<Object> object(previously_materialized_objects->get(i),
++                            isolate_);
++      CHECK(object->IsHeapObject());
++      value_info->set_initialized_storage(Handle<HeapObject>::cast(object));
+     }
+   }
+ }
+@@ -3985,7 +3998,7 @@ void TranslatedState::VerifyMaterializedObjects() {
+     if (slot->kind() == TranslatedValue::kCapturedObject) {
+       CHECK_EQ(slot, GetValueByObjectIndex(slot->object_index()));
+       if (slot->materialization_state() == TranslatedValue::kFinished) {
+-        slot->GetStorage()->ObjectVerify(isolate());
++        slot->storage()->ObjectVerify(isolate());
+       } else {
+         CHECK_EQ(slot->materialization_state(),
+                  TranslatedValue::kUninitialized);
+diff --git a/src/deoptimizer/deoptimizer.h b/src/deoptimizer/deoptimizer.h
+index 6d0a350aaceb59fa6486d41566ad22ee3fbe1bdd..3d8155616cfa4730c1a8665f3180856eaf7f9133 100644
+--- a/src/deoptimizer/deoptimizer.h
++++ b/src/deoptimizer/deoptimizer.h
+@@ -39,13 +39,17 @@ enum class BuiltinContinuationMode;
+ 
+ class TranslatedValue {
+  public:
+-  // Allocation-less getter of the value.
++  // Allocation-free getter of the value.
+   // Returns ReadOnlyRoots::arguments_marker() if allocation would be necessary
+-  // to get the value.
++  // to get the value. In the case of numbers, returns a Smi if possible.
+   Object GetRawValue() const;
+ 
+-  // Getter for the value, takes care of materializing the subgraph
+-  // reachable from this value.
++  // Convenience wrapper around GetRawValue (checked).
++  int GetSmiValue() const;
++
++  // Returns the value, possibly materializing it first (and the whole subgraph
++  // reachable from this value). In the case of numbers, returns a Smi if
++  // possible.
+   Handle<Object> GetValue();
+ 
+   bool IsMaterializedObject() const;
+@@ -102,15 +106,14 @@ class TranslatedValue {
+   static TranslatedValue NewInvalid(TranslatedState* container);
+ 
+   Isolate* isolate() const;
+-  void MaterializeSimple();
+ 
+   void set_storage(Handle<HeapObject> storage) { storage_ = storage; }
+-  void set_initialized_storage(Handle<Object> storage);
++  void set_initialized_storage(Handle<HeapObject> storage);
+   void mark_finished() { materialization_state_ = kFinished; }
+   void mark_allocated() { materialization_state_ = kAllocated; }
+ 
+-  Handle<Object> GetStorage() {
+-    DCHECK_NE(kUninitialized, materialization_state());
++  Handle<HeapObject> storage() {
++    DCHECK_NE(materialization_state(), kUninitialized);
+     return storage_;
+   }
+ 
+@@ -120,9 +123,9 @@ class TranslatedValue {
+   // objects and constructing handles (to get
+   // to the isolate).
+ 
+-  Handle<Object> storage_;  // Contains the materialized value or the
+-                            // byte-array that will be later morphed into
+-                            // the materialized object.
++  Handle<HeapObject> storage_;  // Contains the materialized value or the
++                                // byte-array that will be later morphed into
++                                // the materialized object.
+ 
+   struct MaterializedObjectInfo {
+     int id_;
+@@ -376,7 +379,7 @@ class TranslatedState {
+                                int* value_index, std::stack<int>* worklist);
+   void EnsureCapturedObjectAllocatedAt(int object_index,
+                                        std::stack<int>* worklist);
+-  Handle<Object> InitializeObjectAt(TranslatedValue* slot);
++  Handle<HeapObject> InitializeObjectAt(TranslatedValue* slot);
+   void InitializeCapturedObjectAt(int object_index, std::stack<int>* worklist,
+                                   const DisallowHeapAllocation& no_allocation);
+   void InitializeJSObjectAt(TranslatedFrame* frame, int* value_index,
+@@ -392,6 +395,9 @@ class TranslatedState {
+   TranslatedValue* ResolveCapturedObject(TranslatedValue* slot);
+   TranslatedValue* GetValueByObjectIndex(int object_index);
+   Handle<Object> GetValueAndAdvance(TranslatedFrame* frame, int* value_index);
++  TranslatedValue* GetResolvedSlot(TranslatedFrame* frame, int value_index);
++  TranslatedValue* GetResolvedSlotAndAdvance(TranslatedFrame* frame,
++                                             int* value_index);
+ 
+   static uint32_t GetUInt32Slot(Address fp, int slot_index);
+   static uint64_t GetUInt64Slot(Address fp, int slot_index);
+@@ -772,7 +778,7 @@ class FrameDescription {
+   intptr_t continuation_;
+ 
+   // This must be at the end of the object as the object is allocated larger
+-  // than it's definition indicate to extend this array.
++  // than its definition indicates to extend this array.
+   intptr_t frame_content_[1];
+ 
+   intptr_t* GetFrameSlotPointer(unsigned offset) {
+diff --git a/test/mjsunit/compiler/regress-1084820.js b/test/mjsunit/compiler/regress-1084820.js
+new file mode 100644
+index 0000000000000000000000000000000000000000..beb168b413ff045c5aff8e68d2e6da32b27800d6
+--- /dev/null
++++ b/test/mjsunit/compiler/regress-1084820.js
+@@ -0,0 +1,27 @@
++// Copyright 2020 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// Flags: --allow-natives-syntax
++
++// Create a map where 'my_property' has HeapObject representation.
++const dummy_obj = {};
++dummy_obj.my_property = 'some HeapObject';
++dummy_obj.my_property = 'some other HeapObject';
++
++function gaga() {
++  const obj = {};
++  // Store a HeapNumber and then a Smi.
++  // This must happen in a loop, even if it's only 2 iterations:
++  for (let j = -3_000_000_000; j <= -1_000_000_000; j += 2_000_000_000) {
++    obj.my_property = j;
++  }
++  // Trigger (soft) deopt.
++  if (!%IsBeingInterpreted()) obj + obj;
++}
++
++%PrepareFunctionForOptimization(gaga);
++gaga();
++gaga();
++%OptimizeFunctionOnNextCall(gaga);
++gaga();
+diff --git a/test/mjsunit/compiler/regress-1092650.js b/test/mjsunit/compiler/regress-1092650.js
+new file mode 100644
+index 0000000000000000000000000000000000000000..ba94375aeb8262536e28d5d409d69115e385c3b3
+--- /dev/null
++++ b/test/mjsunit/compiler/regress-1092650.js
+@@ -0,0 +1,23 @@
++// Copyright 2020 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// Flags: --allow-natives-syntax
++
++// Create map with HeapNumber in field 'a'
++({a: 2**30});
++
++function foo() {
++  return foo.arguments[0];
++}
++
++function main() {
++  foo({a: 42});
++}
++
++%PrepareFunctionForOptimization(foo);
++%PrepareFunctionForOptimization(main);
++main();
++main();
++%OptimizeFunctionOnNextCall(main);
++main();
+diff --git a/test/mjsunit/compiler/regress-1094132.js b/test/mjsunit/compiler/regress-1094132.js
+new file mode 100644
+index 0000000000000000000000000000000000000000..418637d86f8c363b9c0c41c450914e758ff73e9c
+--- /dev/null
++++ b/test/mjsunit/compiler/regress-1094132.js
+@@ -0,0 +1,78 @@
++// Copyright 2020 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// Flags: --allow-natives-syntax
++
++function prettyPrinted() {}
++
++function formatFailureText() {
++  if (expectedText.length <= 40 && foundText.length <= 40) {
++    message += ": expected <" + expectedText + "> found <" + foundText + ">";
++    message += ":\nexpected:\n" + expectedText + "\nfound:\n" + foundText;
++  }
++}
++
++function fail(expectedText, found, name_opt) {
++  formatFailureText(expectedText, found, name_opt);
++  if (!a[aProps[i]][aProps[i]]) { }
++}
++
++function deepEquals(a, b) {
++  if (a === 0) return 1 / a === 1 / b;
++  if (typeof a !== typeof a) return false;
++  if (typeof a !== "object" && typeof a !== "function") return false;
++  if (objectClass !== classOf()) return false;
++  if (objectClass === "RegExp") { }
++}
++
++function assertEquals() {
++  if (!deepEquals()) {
++    fail(prettyPrinted(), undefined, undefined);
++  }
++}
++
++({y: {}, x: 0.42});
++
++function gaga() {
++  return {gx: bar.arguments[0], hx: baz.arguments[0]};
++}
++
++function baz() {
++  return gaga();
++}
++
++function bar(obj) {
++  return baz(obj.y);
++}
++
++function foo() {
++  bar({y: {}, x: 42});
++  try { assertEquals() } catch (e) {}
++  try { assertEquals() } catch (e) {}
++  assertEquals();
++}
++
++%PrepareFunctionForOptimization(prettyPrinted);
++%PrepareFunctionForOptimization(formatFailureText);
++%PrepareFunctionForOptimization(fail);
++%PrepareFunctionForOptimization(deepEquals);
++%PrepareFunctionForOptimization(assertEquals);
++%PrepareFunctionForOptimization(gaga);
++%PrepareFunctionForOptimization(baz);
++%PrepareFunctionForOptimization(bar);
++%PrepareFunctionForOptimization(foo);
++try { foo() } catch (e) {}
++%OptimizeFunctionOnNextCall(foo);
++try { foo() } catch (e) {}
++%PrepareFunctionForOptimization(prettyPrinted);
++%PrepareFunctionForOptimization(formatFailureText);
++%PrepareFunctionForOptimization(fail);
++%PrepareFunctionForOptimization(deepEquals);
++%PrepareFunctionForOptimization(assertEquals);
++%PrepareFunctionForOptimization(gaga);
++%PrepareFunctionForOptimization(baz);
++%PrepareFunctionForOptimization(bar);
++%PrepareFunctionForOptimization(foo);
++%OptimizeFunctionOnNextCall(foo);
++try { foo() } catch (e) {}