Revision: 10541
Author:   [email protected]
Date:     Fri Jan 27 08:54:22 2012
Log:      Some assembler-level optimizations on ARM.

Review URL: https://chromiumcodereview.appspot.com/9223011
http://code.google.com/p/v8/source/detail?r=10541

Modified:
 /branches/bleeding_edge/src/arm/code-stubs-arm.cc
 /branches/bleeding_edge/src/arm/codegen-arm.cc
 /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
 /branches/bleeding_edge/src/arm/macro-assembler-arm.cc
 /branches/bleeding_edge/src/arm/macro-assembler-arm.h
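
The central new primitive in this patch is a pair of MacroAssembler helpers,
UntagAndJumpIfSmi and UntagAndJumpIfNotSmi, which fold the smi check into the
untagging shift: "mov dst, src, ASR #1" with SetCC leaves the smi tag bit in
the shifter carry, so a single conditional branch replaces the old tst/branch
pair plus a separate SmiUntag. A minimal sketch of the 32-bit tagging
convention this relies on (standalone C++, not V8 code; helper names are
illustrative):

  #include <cstdint>
  #include <cstdio>

  // 32-bit V8 tagging: kSmiTag == 0, kSmiTagSize == 1. A smi stores its
  // 31-bit value shifted left by one (low bit clear); heap object
  // pointers carry kHeapObjectTag == 1 in the low bit.
  static uint32_t SmiTagValue(int32_t value) {
    return static_cast<uint32_t>(value) << 1;
  }

  // Emulates "mov dst, src, ASR #1, SetCC": dst receives the untagged
  // value and the bit shifted out (the tag bit) lands in the carry flag.
  static int32_t UntagSettingCarry(uint32_t src, bool* carry) {
    *carry = (src & 1) != 0;                // tag bit -> shifter carry
    return static_cast<int32_t>(src) >> 1;  // arithmetic shift right
  }

  int main() {
    bool carry;
    int32_t v = UntagSettingCarry(SmiTagValue(42), &carry);
    printf("smi: value=%d carry=%d -> b(cc, ...) taken\n", v, carry);
    UntagSettingCarry(0x08000001u, &carry);  // heap pointer: low bit set
    printf("heap object: carry=%d -> b(cs, ...) taken\n", carry);
    return 0;
  }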

=======================================
--- /branches/bleeding_edge/src/arm/code-stubs-arm.cc  Fri Jan 27 05:20:01 2012
+++ /branches/bleeding_edge/src/arm/code-stubs-arm.cc  Fri Jan 27 08:54:22 2012
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -122,7 +122,6 @@
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
   __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));

-
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
   __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
@@ -157,20 +156,18 @@
   __ ldr(r3, MemOperand(sp, 0));

   // Set up the object header.
-  __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
-  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
   __ mov(r2, Operand(Smi::FromInt(length)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
-  // Set up the fixed slots.
+  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(r1, Operand(Smi::FromInt(0)));
   __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
-  // Copy the global object from the previous context.
-  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));

   // Initialize the rest of the slots to undefined.
   __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -229,14 +226,12 @@
   __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
   __ bind(&after_sentinel);

-  // Set up the fixed slots.
+  // Set up the fixed slots, copy the global object from the previous context.
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
   __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
   __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
-
-  // Copy the global object from the previous context.
-  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+  __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));

   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -326,8 +321,7 @@
     Label double_elements, check_fast_elements;
     __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
     __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
-    __ cmp(r0, ip);
+    __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
     __ b(ne, &check_fast_elements);
     GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
@@ -336,8 +330,7 @@
     __ Ret();

     __ bind(&check_fast_elements);
-    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-    __ cmp(r0, ip);
+    __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
     __ b(ne, &double_elements);
     GenerateFastCloneShallowArrayCommon(masm, length_,
                                         CLONE_ELEMENTS, &slow_case);
@@ -590,7 +583,9 @@

   Label is_smi, done;

-  __ JumpIfSmi(object, &is_smi);
+  // Smi check.
+  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
+  // Heap number check.
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

   // Handle loading a double from a heap number.
@@ -612,7 +607,6 @@
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Convert smi to double using VFP instructions.
-    __ SmiUntag(scratch1, object);
     __ vmov(dst.high(), scratch1);
     __ vcvt_f64_s32(dst, dst.high());
     if (destination == kCoreRegisters) {
@@ -647,11 +641,10 @@
                            Heap::kHeapNumberMapRootIndex,
                            "HeapNumberMap register clobbered.");
   }
-  Label is_smi;
   Label done;
   Label not_in_int32_range;

-  __ JumpIfSmi(object, &is_smi);
+  __ UntagAndJumpIfSmi(dst, object, &done);
   __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
   __ cmp(scratch1, heap_number_map);
   __ b(ne, not_number);
@@ -671,10 +664,6 @@
                                  scratch1,
                                  scratch2,
                                  scratch3);
-  __ jmp(&done);
-
-  __ bind(&is_smi);
-  __ SmiUntag(dst, object);
   __ bind(&done);
 }

@@ -847,10 +836,7 @@

   Label done;

-  // Untag the object into the destination register.
-  __ SmiUntag(dst, object);
-  // Just return if the object is a smi.
-  __ JumpIfSmi(object, &done);
+  __ UntagAndJumpIfSmi(dst, object, &done);

   if (FLAG_debug_code) {
     __ AbortIfNotRootValue(heap_number_map,
@@ -3310,8 +3296,7 @@
     // Check if cache matches: Double value is stored in uint32_t[2] array.
     __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
     __ cmp(r2, r4);
-    __ b(ne, &calculate);
-    __ cmp(r3, r5);
+    __ cmp(r3, r5, eq);
     __ b(ne, &calculate);
     // Cache hit. Load result, cleanup and return.
     Counters* counters = masm->isolate()->counters();
@@ -3468,7 +3453,7 @@
   const Register scratch = r9;
   const Register scratch2 = r7;

-  Label call_runtime, done, exponent_not_smi, int_exponent;
+  Label call_runtime, done, int_exponent;
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
@@ -3479,7 +3464,7 @@

     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

-    __ JumpIfSmi(base, &base_is_smi);
+    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
     __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
@@ -3488,16 +3473,12 @@
     __ jmp(&unpack_exponent);

     __ bind(&base_is_smi);
-    __ SmiUntag(base);
-    __ vmov(single_scratch, base);
+    __ vmov(single_scratch, scratch);
     __ vcvt_f64_s32(double_base, single_scratch);
     __ bind(&unpack_exponent);

-    __ JumpIfNotSmi(exponent, &exponent_not_smi);
-    __ SmiUntag(exponent);
-    __ jmp(&int_exponent);
-
-    __ bind(&exponent_not_smi);
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
     __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
@@ -3505,11 +3486,8 @@
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
   } else if (exponent_type_ == TAGGED) {
     // Base is already in double_base.
-    __ JumpIfNotSmi(exponent, &exponent_not_smi);
-    __ SmiUntag(exponent);
-    __ jmp(&int_exponent);
-
-    __ bind(&exponent_not_smi);
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
     __ vldr(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
@@ -3582,13 +3560,19 @@

     __ bind(&int_exponent_convert);
     __ vcvt_u32_f64(single_scratch, double_exponent);
-    __ vmov(exponent, single_scratch);
+    __ vmov(scratch, single_scratch);
   }

   // Calculate power with integer exponent.
   __ bind(&int_exponent);

-  __ mov(scratch, exponent);  // Back up exponent.
+  // Get two copies of exponent in the registers scratch and exponent.
+  if (exponent_type_ == INTEGER) {
+    __ mov(scratch, exponent);
+  } else {
+    // Exponent has previously been stored into scratch as an untagged integer.
+    __ mov(exponent, scratch);
+  }
   __ vmov(double_scratch, double_base);  // Back up base.
   __ vmov(double_result, 1.0);

@@ -4098,11 +4082,9 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
-    __ cmp(function, ip);
+    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ b(ne, &miss);
-    __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
-    __ cmp(map, ip);
+    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
     __ b(ne, &miss);
     __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
     __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4727,8 +4709,7 @@
   __ ldr(last_match_info_elements,
          FieldMemOperand(r0, JSArray::kElementsOffset));
  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r0, ip);
+  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
   __ b(ne, &runtime);
  // Check that the last match info has space for the capture registers and the
   // additional information.
@@ -5082,11 +5063,11 @@

   // Set input, index and length fields from arguments.
   __ ldr(r1, MemOperand(sp, kPointerSize * 0));
+  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
+  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
   __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
-  __ ldr(r1, MemOperand(sp, kPointerSize * 1));
-  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
-  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
-  __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+  __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+  __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));

   // Fill out the elements FixedArray.
   // r0: JSArray, tagged.
@@ -5436,8 +5417,7 @@
   STATIC_ASSERT(kSmiTag == 0);
  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(result_, Operand(ip));
+  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
   __ b(eq, &slow_case_);
   __ bind(&exit_);
 }
@@ -5865,10 +5845,11 @@
   __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
   // If either to or from had the smi tag bit set, then carry is set now.
   __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
-  __ b(mi, &runtime);  // From is negative.
-
+  // We want to bail out to runtime here if "from" is negative.  In that
+  // case, the next instruction is not executed and we fall through to
+  // bailing out to runtime.  pl is the opposite of mi.
   // Both r2 and r3 are untagged integers.
-  __ sub(r2, r2, Operand(r3), SetCC);
+  __ sub(r2, r2, Operand(r3), SetCC, pl);
   __ b(mi, &runtime);  // Fail if from > to.

   // Make sure first argument is a string.
@@ -5941,9 +5922,9 @@

   __ bind(&sliced_string);
   // Sliced string.  Fetch parent and correct start index by offset.
-  __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
-  __ add(r3, r3, Operand(r5, ASR, 1));
+  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
   __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
   // Update instance type.
   __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
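
Two of the branch eliminations in this file lean on ARM conditional execution
rather than the new macros: in the transcendental cache check, the predicated
"__ cmp(r3, r5, eq)" only executes when the preceding cmp set the Z flag, so a
single bne covers both mismatch cases; in the substring code, "__ sub(r2, r2,
Operand(r3), SetCC, pl)" is skipped when the earlier shift left the mi
(negative) condition, so the following b(mi, &runtime) still sees the old
flags and bails out. A rough model of the predicated compare (plain C++,
illustrative only, not V8 code):

  #include <cstdint>
  #include <cstdio>

  // Flags register model: just the Z (equal) flag for this sketch.
  struct Flags { bool z = false; };

  static void cmp(uint32_t a, uint32_t b, Flags& f) { f.z = (a == b); }

  // Predicated compare, mirroring "__ cmp(r3, r5, eq)": a no-op unless
  // the eq condition (Z set) already holds.
  static void cmp_if_eq(uint32_t a, uint32_t b, Flags& f) {
    if (f.z) cmp(a, b, f);
  }

  int main() {
    const uint32_t cache_lo = 0x11111111, cache_hi = 0x22222222;
    Flags f;
    cmp(0x11111111, cache_lo, f);        // low words match: Z set
    cmp_if_eq(0xdeadbeef, cache_hi, f);  // executes; high words differ
    printf("%s\n", f.z ? "cache hit" : "b(ne, &calculate) taken");
    return 0;
  }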
=======================================
--- /branches/bleeding_edge/src/arm/codegen-arm.cc      Thu Nov 24 03:07:39 2011
+++ /branches/bleeding_edge/src/arm/codegen-arm.cc      Fri Jan 27 08:54:22 2012
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -104,10 +104,10 @@
   __ add(lr, lr, Operand(r5, LSL, 2));
   __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
   // r6: destination FixedDoubleArray, not tagged as heap object
+  // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
-  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-  // Set destination FixedDoubleArray's length.
   __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
   // Update receiver's map.

   __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
@@ -155,10 +155,9 @@
   __ bind(&loop);
   __ ldr(r9, MemOperand(r3, 4, PostIndex));
   // r9: current element
-  __ JumpIfNotSmi(r9, &convert_hole);
+  __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);

   // Normal smi, convert to double and store.
-  __ SmiUntag(r9);
   if (vfp3_supported) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, r9);
@@ -181,6 +180,9 @@
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
+    // Restore a "smi-untagged" heap object.
+    __ SmiTag(r9);
+    __ orr(r9, r9, Operand(1));
     __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, "object found in smi-only array");
   }
@@ -208,9 +210,8 @@
   Label entry, loop, convert_hole, gc_required;

   __ push(lr);
-  __ Push(r3, r2, r1, r0);
-
   __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ Push(r3, r2, r1, r0);
   __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
   // r4: source FixedDoubleArray
   // r5: number of elements (smi-tagged)
@@ -220,10 +221,10 @@
   __ add(r0, r0, Operand(r5, LSL, 1));
   __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
   // r6: destination FixedArray, not tagged as heap object
+  // Set destination FixedArray's length and map.
   __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
-  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-  // Set destination FixedDoubleArray's length.
   __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));

   // Prepare for conversion loop.
  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
@@ -325,8 +326,8 @@
   // Handle slices.
   Label indirect_string_loaded;
   __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
-  __ add(index, index, Operand(result, ASR, kSmiTagSize));
   __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ add(index, index, Operand(result, ASR, kSmiTagSize));
   __ jmp(&indirect_string_loaded);

   // Handle cons strings.
@@ -336,8 +337,7 @@
   // the string.
   __ bind(&cons_string);
   __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
-  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(result, ip);
+  __ CompareRoot(result, Heap::kEmptyStringRootIndex);
   __ b(ne, call_runtime);
   // Get the first of the two strings and load its instance type.
   __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
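
The debug-only block in the smi-to-double loop above has a subtlety worth
noting: UntagAndJumpIfNotSmi has already shifted r9 right by one even when r9
held a heap object, so before comparing against the hole value the code
reconstructs the pointer with SmiTag (shift left) plus an orr with 1. The
round trip is lossless because heap object pointers always have the tag bit
set. A quick standalone check (plain C++, illustrative address, not V8 code):

  #include <cstdint>
  #include <cassert>
  #include <cstdio>

  int main() {
    // Heap object pointers have the low (kHeapObjectTag) bit set.
    const uint32_t heap_ptr = 0x0800004Du;  // illustrative, bit 0 set

    // What UntagAndJumpIfNotSmi left in r9: the pointer shifted right.
    const int32_t untagged = static_cast<int32_t>(heap_ptr) >> 1;

    // Debug-path recovery: SmiTag (shift left by one), then orr #1.
    const uint32_t restored = (static_cast<uint32_t>(untagged) << 1) | 1u;

    assert(restored == heap_ptr);  // exact for any odd 32-bit value
    printf("restored=0x%08X\n", restored);
    return 0;
  }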
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc  Fri Jan 27 06:55:20 2012
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc  Fri Jan 27 08:54:22 2012
@@ -3929,7 +3929,7 @@
   Label load_smi, heap_number, done;

   // Smi check.
-  __ JumpIfSmi(input_reg, &load_smi);
+  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

   // Heap number map check.
   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3968,7 +3968,7 @@

   // Smi to double register conversion
   __ bind(&load_smi);
-  __ SmiUntag(scratch, input_reg);  // Untag smi before converting to float.
+  // scratch: untagged value of input_reg
   __ vmov(flt_scratch, scratch);
   __ vcvt_f64_s32(result_reg, flt_scratch);
   __ bind(&done);
@@ -4256,7 +4256,7 @@
   Label is_smi, done, heap_number;

   // Both smi and heap number cases are handled.
-  __ JumpIfSmi(input_reg, &is_smi);
+  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

   // Check for heap number
   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4279,7 +4279,6 @@

   // smi
   __ bind(&is_smi);
-  __ SmiUntag(result_reg, input_reg);
   __ ClampUint8(result_reg, result_reg);

   __ bind(&done);
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc  Thu Jan 26 13:47:57 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc  Fri Jan 27 08:54:22 2012
@@ -2963,6 +2963,22 @@
   tst(reg2, Operand(kSmiTagMask), eq);
   b(ne, on_not_both_smi);
 }
+
+
+void MacroAssembler::UntagAndJumpIfSmi(
+    Register dst, Register src, Label* smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+  b(cc, smi_case);  // Shifter carry is not set for a smi.
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(
+    Register dst, Register src, Label* non_smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
+}


 void MacroAssembler::JumpIfEitherSmi(Register reg1,
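
A note on the contract of the two helpers defined above: the untagging mov
executes unconditionally, so dst holds src >> 1 on the fall-through path too,
and since src is read in full before dst is written, source and destination
may be the same register (as the declarations in macro-assembler-arm.h note
below). A small model of the smi variant (plain C++; the function name and
pointer in/out convention are illustrative, not V8 API):

  #include <cstdint>
  #include <cstdio>

  // Returns true when the caller should take the smi branch (carry
  // clear). *dst is written unconditionally; dst may alias src because
  // src is read before the write, matching the single-mov implementation.
  static bool UntagAndCheckSmi(uint32_t* dst, const uint32_t* src) {
    const uint32_t s = *src;  // read first: aliasing-safe
    *dst = static_cast<uint32_t>(static_cast<int32_t>(s) >> 1);
    return (s & 1) == 0;      // tag bit clear <=> smi
  }

  int main() {
    uint32_t r9 = 42u << 1;   // tagged smi held in "r9"
    // In-place untag, as in "__ UntagAndJumpIfNotSmi(r9, r9, ...)".
    const bool is_smi = UntagAndCheckSmi(&r9, &r9);
    printf("r9=%u is_smi=%d\n", r9, is_smi);
    return 0;
  }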
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.h  Thu Jan 26 13:47:57 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.h  Fri Jan 27 08:54:22 2012
@@ -1148,6 +1148,14 @@
   void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
     mov(dst, Operand(src, ASR, kSmiTagSize), s);
   }
+
+  // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+  // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label) {
