Revision: 5518
Author: [email protected]
Date: Fri Sep 24 01:18:33 2010
Log: Prevent modification of cached normalized maps.
Finally solves the problem that r5342 attempted to solve.
When adding a stub to a map's code cache we need to make
sure that this map is not used by objects that do not need
this stub.
The existing solution had two flaws:
1. It checked that the map is cached by asking the current context.
If the object escaped into another context, NormalizedMapCache::Contains
returned a false negative.
2. If a map gets evicted from the cache, we should still not modify it,
even though Contains returns false.
This patch implements a much less fragile solution to the same problem:
a map now has a flag (is_shared) that is set once the map is added
to a cache, stays set even after cache eviction, and is cleared
if the object goes back to fast mode.
Added a regression test.
Review URL: http://codereview.chromium.org/3472006
http://code.google.com/p/v8/source/detail?r=5518
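For readers skimming the diff, here is a minimal standalone sketch of the
bit_field2 flag pattern used for is_shared. ToyMap and main below are made
up for this illustration only and are not part of the V8 sources; only the
bit index (kIsShared = 5) and the set/clear/query logic mirror the patch.

  #include <cassert>

  // Toy stand-in for v8::internal::Map, showing only the new flag bit.
  class ToyMap {
   public:
    static const int kIsShared = 5;  // same bit index the patch reserves

    ToyMap() : bit_field2_(0) {}

    void set_is_shared(bool value) {
      if (value) {
        bit_field2_ |= (1 << kIsShared);
      } else {
        bit_field2_ &= ~(1 << kIsShared);
      }
    }

    bool is_shared() const { return (bit_field2_ & (1 << kIsShared)) != 0; }

   private:
    unsigned char bit_field2_;
  };

  int main() {
    ToyMap map;
    map.set_is_shared(true);   // set when the map enters the NormalizedMapCache
    assert(map.is_shared());   // stays set even after the cache evicts the map
    map.set_is_shared(false);  // cleared when the object returns to fast mode
    assert(!map.is_shared());
    return 0;
  }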
Modified:
/branches/bleeding_edge/src/objects-debug.cc
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/objects.h
/branches/bleeding_edge/test/cctest/test-api.cc
=======================================
--- /branches/bleeding_edge/src/objects-debug.cc Wed Aug 25 06:25:54 2010
+++ /branches/bleeding_edge/src/objects-debug.cc Fri Sep 24 01:18:33 2010
@@ -649,8 +649,9 @@
}
-void Map::NormalizedMapVerify() {
+void Map::SharedMapVerify() {
MapVerify();
+ ASSERT(is_shared());
ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
ASSERT_EQ(0, pre_allocated_property_fields());
@@ -1381,7 +1382,7 @@
for (int i = 0; i < length(); i++) {
Object* e = get(i);
if (e->IsMap()) {
- Map::cast(e)->NormalizedMapVerify();
+ Map::cast(e)->SharedMapVerify();
} else {
ASSERT(e->IsUndefined());
}
=======================================
--- /branches/bleeding_edge/src/objects-inl.h Thu Sep 23 05:23:35 2010
+++ /branches/bleeding_edge/src/objects-inl.h Fri Sep 24 01:18:33 2010
@@ -2290,6 +2290,19 @@
bool Map::attached_to_shared_function_info() {
return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
}
+
+
+void Map::set_is_shared(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kIsShared));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kIsShared));
+ }
+}
+
+bool Map::is_shared() {
+ return ((1 << kIsShared) & bit_field2()) != 0;
+}
JSFunction* Map::unchecked_constructor() {
=======================================
--- /branches/bleeding_edge/src/objects.cc Thu Sep 23 04:25:01 2010
+++ /branches/bleeding_edge/src/objects.cc Fri Sep 24 01:18:33 2010
@@ -2097,61 +2097,34 @@
LocalLookup(name, &result);
return GetPropertyAttribute(this, &result, name, false);
}
-
-
-bool NormalizedMapCache::IsCacheable(JSObject* object) {
- // Caching for global objects is not worth it (there are too few of them).
- return !object->IsGlobalObject();
-}
Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
- Object* result;
-
Map* fast = obj->map();
- if (!IsCacheable(obj)) {
- result = fast->CopyNormalized(mode);
- if (result->IsFailure()) return result;
- } else {
- int index = Hash(fast) % kEntries;
- result = get(index);
-
- if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+ int index = Hash(fast) % kEntries;
+ Object* result = get(index);
+ if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Make sure that the new slow map has exactly the same hash as the
- // original fast map. This way we can use hash to check if a slow map
- // is already in the hash (see Contains method).
- ASSERT(Hash(fast) == Hash(Map::cast(result)));
- // The cached map should match newly created normalized map bit-by-bit.
- Object* fresh = fast->CopyNormalized(mode);
- if (!fresh->IsFailure()) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kSize) == 0);
- }
- }
-#endif
- return result;
- }
-
- result = fast->CopyNormalized(mode);
- if (result->IsFailure()) return result;
- set(index, result);
- }
+ if (FLAG_enable_slow_asserts) {
+ // The cached map should match newly created normalized map bit-by-bit.
+ Object* fresh = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (!fresh->IsFailure()) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kSize) == 0);
+ }
+ }
+#endif
+ return result;
+ }
+
+ result = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (result->IsFailure()) return result;
+ set(index, result);
Counters::normalized_maps.Increment();
return result;
}
-
-
-bool NormalizedMapCache::Contains(Map* map) {
- // If the map is present in the cache it can only be at one place:
- // at the index calculated from the hash. We assume that a slow map has the
- // same hash as a fast map it has been generated from.
- int index = Hash(map) % kEntries;
- return get(index) == map;
-}
void NormalizedMapCache::Clear() {
@@ -2184,7 +2157,7 @@
Map* fast,
PropertyNormalizationMode mode) {
#ifdef DEBUG
- slow->NormalizedMapVerify();
+ slow->SharedMapVerify();
#endif
return
slow->constructor() == fast->constructor() &&
@@ -2194,17 +2167,17 @@
fast->inobject_properties()) &&
slow->instance_type() == fast->instance_type() &&
slow->bit_field() == fast->bit_field() &&
- slow->bit_field2() == fast->bit_field2();
+ (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
}
Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
- if (!HasFastProperties() &&
- NormalizedMapCache::IsCacheable(this) &&
- Top::context()->global_context()->normalized_map_cache()->
- Contains(map())) {
- // Replace the map with the identical copy that can be safely modified.
- Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+ if (map()->is_shared()) {
+ // Fast case maps are never marked as shared.
+ ASSERT(!HasFastProperties());
+ // Replace the map with an identical copy that can be safely modified.
+ Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
+ UNIQUE_NORMALIZED_MAP);
if (obj->IsFailure()) return obj;
Counters::normalized_maps.Increment();
@@ -3189,12 +3162,14 @@
}
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_is_shared(false);
Map::cast(result)->ClearCodeCache();
return result;
}
-Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+Object* Map::CopyNormalized(PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing) {
int new_instance_size = instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
new_instance_size -= inobject_properties() * kPointerSize;
@@ -3213,8 +3188,12 @@
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+
#ifdef DEBUG
- Map::cast(result)->NormalizedMapVerify();
+ if (Map::cast(result)->is_shared()) {
+ Map::cast(result)->SharedMapVerify();
+ }
#endif
return result;
=======================================
--- /branches/bleeding_edge/src/objects.h Thu Sep 23 04:25:01 2010
+++ /branches/bleeding_edge/src/objects.h Fri Sep 24 01:18:33 2010
@@ -200,6 +200,14 @@
};
+// NormalizedMapSharingMode is used to specify whether a map may be shared
+// by different objects with normalized properties.
+enum NormalizedMapSharingMode {
+ UNIQUE_NORMALIZED_MAP,
+ SHARED_NORMALIZED_MAP
+};
+
+
// Instance size sentinel for objects of variable size.
static const int kVariableSizeSentinel = 0;
@@ -2509,12 +2517,8 @@
public:
static const int kEntries = 64;
- static bool IsCacheable(JSObject* object);
-
Object* Get(JSObject* object, PropertyNormalizationMode mode);
- bool Contains(Map* map);
-
void Clear();
// Casting
@@ -3176,6 +3180,13 @@
inline bool attached_to_shared_function_info();
+ // Tells whether the map is shared between objects that may have different
+ // behavior. If true, the map should never be modified, instead a clone
+ // should be created and modified.
+ inline void set_is_shared(bool value);
+
+ inline bool is_shared();
+
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
@@ -3197,7 +3208,8 @@
MUST_USE_RESULT Object* CopyDropDescriptors();
- MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode);
+ MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing);
// Returns a copy of the map, with all transitions dropped from the
// instance descriptors.
@@ -3261,7 +3273,7 @@
#ifdef DEBUG
void MapPrint();
void MapVerify();
- void NormalizedMapVerify();
+ void SharedMapVerify();
#endif
inline int visitor_id();
@@ -3325,6 +3337,7 @@
static const int kHasFastElements = 2;
static const int kStringWrapperSafeForDefaultValueOf = 3;
static const int kAttachedToSharedFunctionInfo = 4;
+ static const int kIsShared = 5;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
=======================================
--- /branches/bleeding_edge/test/cctest/test-api.cc Thu Sep 23 04:25:01 2010
+++ /branches/bleeding_edge/test/cctest/test-api.cc Fri Sep 24 01:18:33 2010
@@ -2935,6 +2935,49 @@
"get_x(interceptor_obj)");
CHECK_EQ(result, v8_str("x"));
}
+
+
+THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
+ v8::HandleScope scope;
+
+ v8::Persistent<Context> context1 = Context::New();
+
+ context1->Enter();
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetNamedPropertyHandler(XPropertyGetter);
+ // Create an object with a named interceptor.
+ v8::Local<v8::Object> object = templ->NewInstance();
+ context1->Global()->Set(v8_str("interceptor_obj"), object);
+
+ // Force the object into the slow case.
+ CompileRun("interceptor_obj.y = 0;"
+ "delete interceptor_obj.y;");
+ context1->Exit();
+
+ {
+ // Introduce the object into a different context.
+ // Repeat named loads to exercise ICs.
+ LocalContext context2;
+ context2->Global()->Set(v8_str("interceptor_obj"), object);
+ Local<Value> result =
+ CompileRun("function get_x(o) { return o.x; }"
+ "interceptor_obj.x = 42;"
+ "for (var i=0; i != 10; i++) {"
+ " get_x(interceptor_obj);"
+ "}"
+ "get_x(interceptor_obj)");
+ // Check that the interceptor was actually invoked.
+ CHECK_EQ(result, v8_str("x"));
+ }
+
+ // Return to the original context and force some object to the slow case
+ // to cause the NormalizedMapCache to verify.
+ context1->Enter();
+ CompileRun("var obj = { x : 0 }; delete obj.x;");
+ context1->Exit();
+
+ context1.Dispose();
+}
static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property,