Revision: 6471
Author: [email protected]
Date: Tue Jan 25 06:52:35 2011
Log: ARM: Initial type recording binary operation stub
This implements the type recording binary operation stub for ARM. This
first iteration only supports ADD. Handling of 32-bit integers is not yet
implemented; that case currently just transitions. The generic case for
now delegates to the generic binary operation stub.
Review URL: http://codereview.chromium.org/6342019
http://code.google.com/p/v8/source/detail?r=6471
Modified:
/branches/bleeding_edge/src/arm/code-stubs-arm.cc
/branches/bleeding_edge/src/arm/code-stubs-arm.h
/branches/bleeding_edge/src/arm/full-codegen-arm.cc
/branches/bleeding_edge/src/arm/ic-arm.cc
/branches/bleeding_edge/src/arm/lithium-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.h
/branches/bleeding_edge/src/ic.cc
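
For context, a minimal sketch of the type lattice the type-recording IC
walks through. The TypeInfo names mirror TRBinaryOpIC in src/ic.h; the
Join function is an assumed simplification for illustration, not code from
this patch:

  // Order runs from most specific to most generic; a recorded miss only
  // moves a stub towards GENERIC, never back. In this first ARM iteration
  // INT32 immediately transitions, and GENERIC delegates to
  // GenericBinaryOpStub.
  enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, STRING, GENERIC };

  TypeInfo Join(TypeInfo left, TypeInfo right) {
    return left > right ? left : right;  // assumed: take the coarser type
  }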
=======================================
--- /branches/bleeding_edge/src/arm/code-stubs-arm.cc Tue Jan 25 05:01:45 2011
+++ /branches/bleeding_edge/src/arm/code-stubs-arm.cc Tue Jan 25 06:52:35 2011
@@ -342,6 +342,155 @@
Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
__ Ret();
}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum Destination {
+ kVFPRegisters,
+ kCoreRegisters
+ };
+
+
+ // Loads smis from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values end
+ // up either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
+ // destination is floating point registers VFP3 must be supported. If
+ // core registers are requested when VFP3 is supported d6 and d7 will be
+ // scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+ // Loads objects from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values end
+ // up either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the
+ // destination is floating point registers VFP3 must be supported. If
+ // core registers are requested when VFP3 is supported d6 and d7 will
+ // still be scratched. If either r0 or r1 is not a number (not smi and
+ // not heap number object) the not_number label is jumped to.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, scratch1);
+ __ vcvt_f64_s32(d7, s15);
+ __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, scratch1);
+ __ vcvt_f64_s32(d6, s13);
+ if (destination == kCoreRegisters) {
+ __ vmov(r2, r3, d7);
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(scratch1, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(scratch1, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+}
+
+
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+ // Load right operand (r0) into d7 or r2/r3.
+ LoadNumber(masm, destination,
+ r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (r1) into d6 or r0/r1.
+ LoadNumber(masm, destination,
+ r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+ Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ Label is_smi, done;
+
+ __ BranchOnSmi(object, &is_smi);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+ // Handle loading a double from a heap number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber to double register.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(dst, scratch1, HeapNumber::kValueOffset);
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Load the double from heap number to dst1 and dst2 in double format.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ }
+ __ jmp(&done);
+
+ // Handle loading a double from a smi.
+ __ bind(&is_smi);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi to double.
+ __ SmiUntag(scratch1, object);
+ __ vmov(dst.high(), scratch1);
+ __ vcvt_f64_s32(dst, dst.high());
+ if (destination == kCoreRegisters) {
+ __ vmov(dst1, dst2, dst);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi to dst1 and dst2 double format.
+ __ mov(scratch1, Operand(object));
+ ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
+ __ bind(&done);
+}
// See comment for class.
@@ -1375,7 +1524,7 @@
__ sub(r0, r5, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ add(r0, r0, Operand(kHeapObjectTag));
- __ mov(pc, lr);
+ __ Ret();
} else {
// If we did not inline the operation, then the arguments are in:
// r0: Left value (least significant part of mantissa).
@@ -2207,8 +2356,392 @@
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
+ TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+ return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(r1, r0);
+
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(r2, r1, r0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
UNIMPLEMENTED();
- return Handle<Code>::null();
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operands_type_) {
+ case TRBinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRBinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRBinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case TRBinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRBinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case TRBinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingBinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRBinaryOpIC::GetName(operands_type_));
+ return name_;
+}
+
+
+// Generate the smi code. If the operation on smis is successful a return
+// is generated. If the result is not a smi and heap number allocation is
+// not requested the code falls through. If number allocation is requested
+// but a heap number cannot be allocated the code jumps to the label
+// gc_required.
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Label not_smis;
+
+ ASSERT(op_ == Token::ADD);
+
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ // Perform combined smi check on both operands.
+ __ orr(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(scratch1, Operand(kSmiTagMask));
+ __ b(ne, &not_smis);
+
+ __ add(right, right, Operand(left), SetCC); // Add optimistically.
+
+ // Return smi result if no overflow (r0 is the result).
+ ASSERT(right.is(r0));
+ __ Ret(vc);
+
+ // Result is not a smi. Revert the optimistic add.
+ __ sub(right, right, Operand(left));
+
+ // If heap number results are possible generate the result in an
+ // allocated heap number.
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) && Token::MOD != op_ ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Allocate new heap number for result.
+ Register heap_number = r5;
+ __ AllocateHeapNumber(
+ heap_number, scratch1, scratch2, heap_number_map, gc_required);
+
+ // Load the smis.
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ // Using VFP registers:
+ // d6: Left value
+ // d7: Right value
+ CpuFeatures::Scope scope(VFP3);
+ __ vadd(d5, d6, d7);
+
+ __ sub(r0, heap_number, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+
+ __ push(lr); // For later.
+ __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is
+ // callee saved.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for coprocessor so we
+ // need to subtract the tag from r5.
+ __ sub(scratch1, heap_number, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+#else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+#endif
+ __ mov(r0, Operand(heap_number));
+ // And we are done.
+ __ pop(pc);
+ }
+ }
+ __ bind(&not_smis);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label not_smis, call_runtime;
+
+ ASSERT(op_ == Token::ADD);
+
+ if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+ result_type_ == TRBinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ } else {
+ // Allow heap number result and don't make a transition if a heap
+ // number cannot be allocated.
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ }
+
+ // Code falls through if the result is not returned as either a smi
+ // or heap number.
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // Try to add arguments as strings; otherwise transition to the generic
+ // TRBinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+
+ ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ Label not_number, call_runtime;
+ ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+ // depending on whether VFP3 is available.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &not_number);
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ // Use floating point instructions for the binary operation.
+ CpuFeatures::Scope scope(VFP3);
+ __ vadd(d5, d6, d7);
+
+ // Get a heap number object for the result - might be left or right
+ // if one of these is overwritable.
+ GenerateHeapResultAllocation(
+ masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
+
+ // Fill the result into the allocated heap number and return.
+ __ sub(r0, r4, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+
+ } else {
+ // Call a C function for the binary operation.
+ // r0/r1: Left operand
+ // r2/r3: Right operand
+
+ // Get a heap number object for the result - might be left or right
+ // if one of these is overwritable. Uses a callee-saved register to
+ // keep the value across the C call.
+ GenerateHeapResultAllocation(
+ masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
+
+ __ push(lr); // For returning later (no GC after this point).
+ __ PrepareCallCFunction(4, scratch1); // Two doubles count as 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r4 is callee
+ // saved.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+
+ // Fill the result into the allocated heap number.
+ #if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor register 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for coprocessor so we
+ // need to subtract the tag from r4.
+ __ sub(scratch1, r4, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+ #else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ #endif
+ __ mov(r0, Operand(r4));
+ __ pop(pc); // Return to the pushed lr.
+ }
+
+ __ bind(&not_number);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+
+ Label call_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ // If all else fails, use the runtime system to get the correct
+ // result.
+ __ bind(&call_runtime);
+
+ // Try to add strings before calling runtime.
+ GenerateAddStrings(masm);
+
+ GenericBinaryOpStub stub(op_, mode_, r1, r0);
+ __ TailCallStub(&stub);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ Register left = r1;
+ Register right = r0;
+ Label call_runtime;
+
+ // Check if first argument is a string.
+ __ BranchOnSmi(left, &call_runtime);
+ __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ // First argument is a string, test second.
+ __ BranchOnSmi(right, &call_runtime);
+ __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ // First and second argument are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+
+ // Code below will scratch result if allocation fails. To keep both
+ // arguments intact for the runtime call result cannot be one of these.
+ ASSERT(!result.is(r0) && !result.is(r1));
+
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ BranchOnNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ b(&allocated);
+ __ bind(&skip_allocation);
+ // Use object holding the overwritable operand for result.
+ __ mov(result, Operand(overwritable_operand));
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode_ == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ Push(r1, r0);
}
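
The smi fast path above adds the two tagged values directly and keys the
return off the overflow flag (__ Ret(vc)). A standalone sketch of that
trick, assuming 32-bit smis with tag bit 0 and a GCC/Clang-style overflow
intrinsic (the helper name is illustrative):

  #include <cstdint>

  // Because kSmiTag == 0, the sum of two tagged smis is the tagged sum,
  // so we can add first and only undo on overflow -- the stub's
  // "add optimistically" / Ret(vc) pattern.
  bool SmiAddOptimistic(int32_t left, int32_t right, int32_t* result) {
    if (__builtin_add_overflow(left, right, result)) {
      return false;  // overflow: fall through to the heap number path
    }
    return true;     // *result already holds the tagged smi sum
  }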
=======================================
--- /branches/bleeding_edge/src/arm/code-stubs-arm.h Tue Jan 4 03:02:58 2011
+++ /branches/bleeding_edge/src/arm/code-stubs-arm.h Tue Jan 25 06:52:35 2011
@@ -218,6 +218,115 @@
};
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+ TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(TRBinaryOpIC::UNINITIALIZED),
+ result_type_(TRBinaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ TypeRecordingBinaryOpStub(
+ int key,
+ TRBinaryOpIC::TypeInfo operands_type,
+ TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_vfp3_(VFP3Bits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type),
+ name_(NULL) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_vfp3_;
+
+ // Operand type information determined at runtime.
+ TRBinaryOpIC::TypeInfo operands_type_;
+ TRBinaryOpIC::TypeInfo result_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRBinaryOpIC::GetName(operands_type_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class VFP3Bits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return TypeRecordingBinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | VFP3Bits::encode(use_vfp3_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRBinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_binary_op_type(operands_type_);
+ code->set_type_recording_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
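
As a worked example of the RRRTTTVOOOOOOOMM minor key layout above, a
self-contained round trip through the bit fields. The template mirrors
V8's BitField in src/utils.h; the encoded values are illustrative only:

  #include <cassert>
  #include <cstdint>

  template <class T, int shift, int size>
  struct BitField {
    static uint32_t encode(T value) {
      return static_cast<uint32_t>(value) << shift;
    }
    static T decode(uint32_t value) {
      return static_cast<T>((value >> shift) & ((1U << size) - 1));
    }
  };

  // Same layout as the stub key: RRR TTT V OOOOOOO MM (high to low).
  typedef BitField<int, 0, 2> ModeBits;
  typedef BitField<int, 2, 7> OpBits;
  typedef BitField<bool, 9, 1> VFP3Bits;
  typedef BitField<int, 10, 3> OperandTypeInfoBits;
  typedef BitField<int, 13, 3> ResultTypeInfoBits;

  int main() {
    // Illustrative values; the real key packs Token::Value and
    // TRBinaryOpIC::TypeInfo enums.
    uint32_t key = ModeBits::encode(1)
                 | OpBits::encode(42)
                 | VFP3Bits::encode(true)
                 | OperandTypeInfoBits::encode(1)
                 | ResultTypeInfoBits::encode(2);
    assert(OpBits::decode(key) == 42);
    assert(VFP3Bits::decode(key));
    assert(ResultTypeInfoBits::decode(key) == 2);
    return 0;
  }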
=======================================
--- /branches/bleeding_edge/src/arm/full-codegen-arm.cc Tue Jan 25 05:01:45 2011
+++ /branches/bleeding_edge/src/arm/full-codegen-arm.cc Tue Jan 25 06:52:35 2011
@@ -1548,8 +1548,13 @@
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- GenericBinaryOpStub stub(op, mode, r1, r0);
- __ CallStub(&stub);
+ if (op == Token::ADD) {
+ TypeRecordingBinaryOpStub stub(op, mode);
+ __ CallStub(&stub);
+ } else {
+ GenericBinaryOpStub stub(op, mode, r1, r0);
+ __ CallStub(&stub);
+ }
context()->Plug(r0);
}
=======================================
--- /branches/bleeding_edge/src/arm/ic-arm.cc Fri Jan 21 15:58:00 2011
+++ /branches/bleeding_edge/src/arm/ic-arm.cc Tue Jan 25 06:52:35 2011
@@ -1704,7 +1704,7 @@
void PatchInlinedSmiCode(Address address) {
- UNIMPLEMENTED();
+ // Currently there is no smi inlining in the ARM full code generator.
}
=======================================
--- /branches/bleeding_edge/src/arm/lithium-arm.cc Tue Jan 25 04:17:02 2011
+++ /branches/bleeding_edge/src/arm/lithium-arm.cc Tue Jan 25 06:52:35 2011
@@ -803,6 +803,7 @@
LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
+
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
@@ -1114,7 +1115,7 @@
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Mon Jan 24 23:49:39 2011
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Tue Jan 25 06:52:35 2011
@@ -1939,7 +1939,7 @@
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), eq);
b(ne, on_not_both_smi);
@@ -1949,7 +1949,7 @@
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), ne);
b(eq, on_either_smi);
@@ -1957,17 +1957,28 @@
void MacroAssembler::AbortIfSmi(Register object) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is a smi");
}
void MacroAssembler::AbortIfNotSmi(Register object) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(eq, "Operand is not smi");
}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ cmp(scratch, heap_number_map);
+ b(ne, on_not_heap_number);
+}
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
@@ -1996,7 +2007,7 @@
Register scratch2,
Label* failure) {
// Check that neither is a smi.
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
tst(scratch1, Operand(kSmiTagMask));
b(eq, failure);
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.h Thu Jan 20 06:20:54 2011
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.h Tue Jan 25 06:52:35 2011
@@ -719,6 +719,9 @@
void SmiTag(Register reg, SBit s = LeaveCC) {
add(reg, reg, Operand(reg), s);
}
+ void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
+ add(dst, src, Operand(src), s);
+ }
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
@@ -733,6 +736,9 @@
void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize));
}
+ void SmiUntag(Register dst, Register src) {
+ mov(dst, Operand(src, ASR, kSmiTagSize));
+ }
// Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
@@ -743,6 +749,14 @@
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities
+
+ void JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number);
+
// ---------------------------------------------------------------------------
// String utilities
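
The new two-register SmiTag/SmiUntag overloads rely on the 32-bit smi
encoding (kSmiTag == 0, kSmiTagSize == 1): add(dst, src, Operand(src))
doubles the value, which equals shifting left by one. A minimal sketch of
that encoding:

  #include <cassert>
  #include <cstdint>

  // A smi is the integer shifted left by one; the low (tag) bit is zero.
  int32_t SmiTag(int32_t value) { return value + value; }  // == value << 1
  int32_t SmiUntag(int32_t smi) { return smi >> 1; }       // ASR kSmiTagSize

  int main() {
    assert(SmiTag(5) == 10);
    assert(SmiUntag(SmiTag(-7)) == -7);
    assert((SmiTag(123) & 1) == 0);  // kSmiTagMask selects the tag bit
    return 0;
  }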
=======================================
--- /branches/bleeding_edge/src/ic.cc Fri Jan 21 15:58:00 2011
+++ /branches/bleeding_edge/src/ic.cc Tue Jan 25 06:52:35 2011
@@ -2098,8 +2098,6 @@
Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
if (!code.is_null()) {
- TRBinaryOpIC ic;
- ic.patch(*code);
if (FLAG_trace_ic) {
PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
TRBinaryOpIC::GetName(previous_type),
@@ -2107,6 +2105,8 @@
TRBinaryOpIC::GetName(result_type),
Token::Name(op));
}
+ TRBinaryOpIC ic;
+ ic.patch(*code);
// Activate inlined smi code.
if (previous_type == TRBinaryOpIC::UNINITIALIZED) {