Revision: 21095
Author:   [email protected]
Date:     Wed Apr 30 15:03:18 2014 UTC
Log:      Public interface of KeyedLookupCache handlified.

[email protected]

Review URL: https://codereview.chromium.org/264563003
http://code.google.com/p/v8/source/detail?r=21095

Modified:
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/runtime.cc
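The change is mechanical: Lookup(), Update() and the internal Hash() now take Handle<Map>/Handle<Name> instead of raw Map*/Name* pointers, dereference them up front under a DisallowHeapAllocation scope, and the one call site in runtime.cc wraps receiver->map() in a handle. A minimal sketch of the new calling convention (hypothetical caller; receiver, key and isolate are assumed to be in scope, and full_lookup_offset stands in for the result of the slow-path property lookup):

  Handle<Map> receiver_map(receiver->map(), isolate);  // raw Map* wrapped in a handle
  KeyedLookupCache* cache = isolate->keyed_lookup_cache();
  int offset = cache->Lookup(receiver_map, key);        // key is a Handle<Name>; -1 on miss
  if (offset == -1) {
    // Slow path: perform the full property lookup, then cache the field
    // offset (non-double fields only) so the next access hits the cache.
    cache->Update(receiver_map, key, full_lookup_offset);
  }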

=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Apr 30 12:38:39 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Apr 30 15:03:18 2014 UTC
@@ -6219,19 +6219,21 @@
 }


-int KeyedLookupCache::Hash(Map* map, Name* name) {
+int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
   // Uses only lower 32 bits if pointers are larger.
   uintptr_t addr_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
 }


-int KeyedLookupCache::Lookup(Map* map, Name* name) {
+int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
   int index = (Hash(map, name) & kHashMask);
   for (int i = 0; i < kEntriesPerBucket; i++) {
     Key& key = keys_[index + i];
-    if ((key.map == map) && key.name->Equals(name)) {
+    if ((key.map == *map) && key.name->Equals(*name)) {
       return field_offsets_[index + i];
     }
   }
@@ -6239,18 +6241,21 @@
 }


-void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
+void KeyedLookupCache::Update(Handle<Map> map,
+                              Handle<Name> name,
+                              int field_offset) {
+  DisallowHeapAllocation no_gc;
   if (!name->IsUniqueName()) {
     String* internalized_string;
     if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
-            String::cast(name), &internalized_string)) {
+            String::cast(*name), &internalized_string)) {
       return;
     }
-    name = internalized_string;
+    name = handle(internalized_string);
   }
   // This cache is cleared only between mark compact passes, so we expect the
   // cache to only contain old space names.
-  ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
+  ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name));

   int index = (Hash(map, name) & kHashMask);
   // After a GC there will be free slots, so we use them in order (this may
@@ -6259,8 +6264,8 @@
     Key& key = keys_[index];
     Object* free_entry_indicator = NULL;
     if (key.map == free_entry_indicator) {
-      key.map = map;
-      key.name = name;
+      key.map = *map;
+      key.name = *name;
       field_offsets_[index + i] = field_offset;
       return;
     }
@@ -6276,8 +6281,8 @@

   // Write the new first entry.
   Key& key = keys_[index];
-  key.map = map;
-  key.name = name;
+  key.map = *map;
+  key.name = *name;
   field_offsets_[index] = field_offset;
 }

=======================================
--- /branches/bleeding_edge/src/heap.h  Wed Apr 30 12:25:18 2014 UTC
+++ /branches/bleeding_edge/src/heap.h  Wed Apr 30 15:03:18 2014 UTC
@@ -2413,10 +2413,10 @@
 class KeyedLookupCache {
  public:
   // Lookup field offset for (map, name). If absent, -1 is returned.
-  int Lookup(Map* map, Name* name);
+  int Lookup(Handle<Map> map, Handle<Name> name);

   // Update an element in the cache.
-  void Update(Map* map, Name* name, int field_offset);
+  void Update(Handle<Map> map, Handle<Name> name, int field_offset);

   // Clear the cache.
   void Clear();
@@ -2441,7 +2441,7 @@
     }
   }

-  static inline int Hash(Map* map, Name* name);
+  static inline int Hash(Handle<Map> map, Handle<Name> name);

   // Get the address of the keys and field_offsets arrays.  Used in
   // generated code to perform cache lookups.
=======================================
--- /branches/bleeding_edge/src/runtime.cc      Wed Apr 30 14:51:41 2014 UTC
+++ /branches/bleeding_edge/src/runtime.cc      Wed Apr 30 15:03:18 2014 UTC
@@ -5099,9 +5099,9 @@
       Handle<Name> key = Handle<Name>::cast(key_obj);
       if (receiver->HasFastProperties()) {
         // Attempt to use lookup cache.
-        Map* receiver_map = receiver->map();
+        Handle<Map> receiver_map(receiver->map(), isolate);
         KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
-        int offset = keyed_lookup_cache->Lookup(receiver_map, *key);
+        int offset = keyed_lookup_cache->Lookup(receiver_map, key);
         if (offset != -1) {
           // Doubles are not cached, so raw read the value.
           Object* value = receiver->RawFastPropertyAt(offset);
@@ -5118,7 +5118,7 @@
           // Do not track double fields in the keyed lookup cache. Reading
           // double values requires boxing.
           if (!result.representation().IsDouble()) {
-            keyed_lookup_cache->Update(receiver_map, *key, offset);
+            keyed_lookup_cache->Update(receiver_map, key, offset);
           }
           AllowHeapAllocation allow_allocation;
           return *JSObject::FastPropertyAt(

