Revision: 18953
Author: [email protected]
Date: Thu Jan 30 16:31:19 2014 UTC
Log: A64: Implement patchable inline code for InstanceOfKnownGlobal.
In contrast to the ARM version, the address of the location to patch is
passed directly in a register rather than in a safepoint slot.
[email protected]
Review URL: https://codereview.chromium.org/136203005
http://code.google.com/p/v8/source/detail?r=18953
Modified:
/branches/experimental/a64/src/a64/assembler-a64.h
/branches/experimental/a64/src/a64/code-stubs-a64.cc
/branches/experimental/a64/src/a64/lithium-codegen-a64.cc
/branches/experimental/a64/src/a64/lithium-codegen-a64.h
/branches/experimental/a64/src/a64/macro-assembler-a64.cc
/branches/experimental/a64/src/a64/macro-assembler-a64.h
=======================================
--- /branches/experimental/a64/src/a64/assembler-a64.h Mon Jan 27 14:32:55
2014 UTC
+++ /branches/experimental/a64/src/a64/assembler-a64.h Thu Jan 30 16:31:19
2014 UTC
@@ -1694,6 +1694,10 @@
  // Check if it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Available for constrained code generation scopes. Prefer
+ // MacroAssembler::Mov() when possible.
+ inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
+
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg)
const;
@@ -1750,8 +1754,6 @@
AddSubOp op);
static bool IsImmAddSub(int64_t immediate);
- inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
-
static bool IsImmFP32(float imm);
static bool IsImmFP64(double imm);
=======================================
--- /branches/experimental/a64/src/a64/code-stubs-a64.cc Thu Jan 30
12:09:30 2014 UTC
+++ /branches/experimental/a64/src/a64/code-stubs-a64.cc Thu Jan 30
16:31:19 2014 UTC
@@ -3002,25 +3002,39 @@
// Returns result in x0. Zero indicates instanceof, smi 1 indicates not
// instanceof.
- // Instanceof supports the kArgsInRegisters flag but not the others, ie.
- // No call site inlining.
- // No return of true/false objects.
- ASSERT((flags_ == kNoFlags) || (flags_ == kArgsInRegisters));
-
Register result = x0;
Register function = right();
Register object = left();
+ Register scratch1 = x6;
+ Register scratch2 = x7;
+ Register res_true = x8;
+ Register res_false = x9;
+ // Only used if there was an inline map check site. (See
+ // LCodeGen::DoInstanceOfKnownGlobal().)
+ Register map_check_site = x4;
+  // Delta for the instructions generated between the inline map check and the
+  // instruction setting the result.
+ const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
+
Label not_js_object, slow;
if (!HasArgsInRegisters()) {
__ Pop(function, object);
}
+
+ if (ReturnTrueFalseObject()) {
+ __ LoadTrueFalseRoots(res_true, res_false);
+ } else {
+ // This is counter-intuitive, but correct.
+ __ Mov(res_true, Operand(Smi::FromInt(0)));
+ __ Mov(res_false, Operand(Smi::FromInt(1)));
+ }
  // Check that the left hand side is a JS object and load its map as a side
  // effect.
Register map = x12;
__ JumpIfSmi(object, ¬_js_object);
- __ IsObjectJSObjectType(object, map, x7, ¬_js_object);
+ __ IsObjectJSObjectType(object, map, scratch2, ¬_js_object);
  // If there is a call site cache, don't look in the global cache, but do the
  // real lookup and update the call site cache.
@@ -3035,23 +3049,27 @@
// Get the prototype of the function.
Register prototype = x13;
- __ TryGetFunctionPrototype(function, prototype, x7, &slow,
+ __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
MacroAssembler::kMissOnBoundFunction);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, x6, x7, &slow);
+ __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
+ if (HasCallSiteInlineCheck()) {
+ // Patch the (relocated) inlined map check.
+ __ GetRelocatedValueLocation(map_check_site, scratch1);
+ // We have a cell, so need another level of dereferencing.
+ __ Ldr(scratch1, MemOperand(scratch1));
+ __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
+ } else {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- ASM_UNIMPLEMENTED("InstanceofStub inline patching");
}
- Label return_result;
+ Label return_true, return_result;
{
// Loop through the prototype chain looking for the function prototype.
Register chain_map = x1;
@@ -3061,18 +3079,16 @@
__ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
// Speculatively set a result.
- __ Mov(result, Operand(Smi::FromInt(1)));
+ __ Mov(result, res_false);
__ Bind(&loop);
- // If the chain prototype is the object prototype, return smi(0).
+ // If the chain prototype is the object prototype, return true.
__ Cmp(chain_prototype, prototype);
- ASSERT(Smi::FromInt(0) == 0UL);
- __ CzeroX(result, eq);
- __ B(eq, &return_result);
+ __ B(eq, &return_true);
    // If the chain prototype is null, we've reached the end of the chain, so
- // return smi(1).
+ // return false.
__ Cmp(chain_prototype, null_value);
__ B(eq, &return_result);
@@ -3083,11 +3099,17 @@
}
// Return sequence when no arguments are on the stack.
+ // We cannot fall through to here.
+ __ Bind(&return_true);
+ __ Mov(result, res_true);
__ Bind(&return_result);
- if (!HasCallSiteInlineCheck()) {
+ if (HasCallSiteInlineCheck()) {
+ ASSERT(ReturnTrueFalseObject());
+ __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
+ __ GetRelocatedValueLocation(map_check_site, scratch2);
+ __ Str(result, MemOperand(scratch2));
+ } else {
__ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- ASM_UNIMPLEMENTED("InstanceofStub call site patcher");
}
__ Ret();
@@ -3103,36 +3125,38 @@
// Before null, smi and string checks, check that the rhs is a function.
// For a non-function rhs, an exception must be thrown.
__ JumpIfSmi(function, &slow);
- __ JumpIfNotObjectType(function, x6, object_type, JS_FUNCTION_TYPE,
&slow);
+ __ JumpIfNotObjectType(
+ function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
+
+ __ Mov(result, res_false);
// Null is not instance of anything.
__ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
__ B(ne, &object_not_null);
- __ Mov(result, Operand(Smi::FromInt(1)));
__ Ret();
__ Bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ Mov(result, Operand(Smi::FromInt(1)));
__ Ret();
__ Bind(&object_not_null_or_smi);
// String values are not instances of anything.
- __ IsObjectJSStringType(object, x7, &slow);
- __ Mov(result, Operand(Smi::FromInt(1)));
+ __ IsObjectJSStringType(object, scratch2, &slow);
__ Ret();
// Slow-case. Tail call builtin.
__ Bind(&slow);
- if (!ReturnTrueFalseObject()) {
+ {
FrameScope scope(masm, StackFrame::INTERNAL);
    // Arguments have either been passed into registers or have been previously
    // popped. We need to push them before calling builtin.
__ Push(object, function);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- } else {
- ASM_UNIMPLEMENTED("InstanceofStub call builtin and return object");
+ }
+ if (ReturnTrueFalseObject()) {
+ __ Cmp(result, 0);
+ __ Csel(result, res_true, res_false, eq);
}
__ Ret();
}
=======================================
--- /branches/experimental/a64/src/a64/lithium-codegen-a64.cc Thu Jan 30
14:42:24 2014 UTC
+++ /branches/experimental/a64/src/a64/lithium-codegen-a64.cc Thu Jan 30
16:31:19 2014 UTC
@@ -2724,21 +2724,22 @@
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
}
virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
- Label map_check_;
};
DeferredInstanceOfKnownGlobal* deferred =
new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
- Label return_false, cache_miss;
+ Label map_check, return_false, cache_miss, done;
Register object = ToRegister(instr->value());
Register result = ToRegister(instr->result());
+ // x4 is expected in the associated deferred code and stub.
+ Register map_check_site = x4;
+ Register map = x5;
// This instruction is marked as call. We can clobber any register.
ASSERT(instr->IsMarkedAsCall());
@@ -2750,11 +2751,36 @@
// A Smi is not instance of anything.
__ JumpIfSmi(object, &return_false);
- TODO_UNIMPLEMENTED("patchable inline check");
+  // This is the inlined call site instanceof cache. The two occurrences of
+  // the hole value will be patched to the last map/result pair generated by
+  // the instanceof stub.
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ {
+    // Below we use Factory::the_hole_value() on purpose instead of loading
+    // from the root array to force relocation and later be able to patch with
+    // a custom value.
+ InstructionAccurateScope scope(masm(), 5);
+ __ bind(&map_check);
+ // Will be patched with the cached map.
+ Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+ __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ cmp(map, Operand(scratch));
+ __ b(&cache_miss, ne);
+    // The address of this instruction is computed relative to the map check
+    // above, so check the size of the code generated.
+ ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+ // Will be patched with the cached result.
+ __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+ }
+ __ B(&done);
// The inlined call site cache did not match.
// Check null and string before calling the deferred code.
__ Bind(&cache_miss);
+  // Compute the address of the map check. It must not be clobbered until the
+  // InstanceOfStub has used it.
+ __ Adr(map_check_site, &map_check);
// Null is not instance of anything.
__ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
@@ -2772,6 +2798,7 @@
// Here result is either true or false.
__ Bind(deferred->exit());
+ __ Bind(&done);
}
@@ -2783,11 +2810,16 @@
}
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal*
instr,
- Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal*
instr) {
Register result = ToRegister(instr->result());
ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
- InstanceofStub::Flags flags = InstanceofStub::kArgsInRegisters;
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -2803,12 +2835,6 @@
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // TODO(all): This could be integrated into InstanceofStub.
- __ LoadTrueFalseRoots(x1, x2);
- ASSERT(Smi::FromInt(0) == 0);
- __ Cmp(result, 0);
- __ Csel(result, x1, x2, eq);
-
// Put the result value into the result register slot.
__ StoreToSafepointRegisterSlot(result, result);
}
=======================================
--- /branches/experimental/a64/src/a64/lithium-codegen-a64.h Tue Jan 28
15:50:06 2014 UTC
+++ /branches/experimental/a64/src/a64/lithium-codegen-a64.h Thu Jan 30
16:31:19 2014 UTC
@@ -176,8 +176,7 @@
LOperand* temp2);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
=======================================
--- /branches/experimental/a64/src/a64/macro-assembler-a64.cc Thu Jan 30
12:09:30 2014 UTC
+++ /branches/experimental/a64/src/a64/macro-assembler-a64.cc Thu Jan 30
16:31:19 2014 UTC
@@ -4087,6 +4087,24 @@
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
}
+
+
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ ASSERT(!result.Is(ldr_location));
+ const uint32_t kLdrLitOffset_lsb = 5;
+ const uint32_t kLdrLitOffset_width = 19;
+ Ldr(result, MemOperand(ldr_location));
+ if (emit_debug_code()) {
+ And(result, result, LoadLiteralFMask);
+ Cmp(result, LoadLiteralFixed);
+ Check(eq, "The instruction to patch should be a load literal.");
+ // The instruction was clobbered. Reload it.
+ Ldr(result, MemOperand(ldr_location));
+ }
+ Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
+ Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
+}
void MacroAssembler::EnsureNotWhite(
=======================================
--- /branches/experimental/a64/src/a64/macro-assembler-a64.h Thu Jan 30
12:09:30 2014 UTC
+++ /branches/experimental/a64/src/a64/macro-assembler-a64.h Thu Jan 30
16:31:19 2014 UTC
@@ -1593,6 +1593,12 @@
void StoreToSafepointRegisterSlot(Register src, Register dst) {
Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
}
+
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+ }
void CheckPageFlagSet(const Register& object,
const Register& scratch,
@@ -1715,6 +1721,12 @@
Label* on_black);
+  // Get the location of a relocated constant (its address in the constant
+  // pool) from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
//
---------------------------------------------------------------------------
// Debugging.
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.