Reviewers: Benedikt Meurer, ulan
Description:
ARM64: optimize call immediate

When loading a call-target immediate without relocation, skip movk
instructions for 16-bit halfwords that are zero. Since ARM64 addresses
use at most 48 bits, the sequence is a movz plus zero to two movk
instructions followed by blr. When predictable code size is required,
the full three-instruction load is always emitted so the call size
stays fixed.
BUG=
[email protected], [email protected]
Please review this at https://codereview.chromium.org/209923002/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files (+45, -41 lines):
M src/arm64/assembler-arm64-inl.h
M src/arm64/assembler-arm64.h
M src/arm64/macro-assembler-arm64.h
M src/arm64/macro-assembler-arm64.cc
Index: src/arm64/assembler-arm64-inl.h
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index c509e05a5b7f7a3d4b43cda189b91980b6681e6f..0b58b08a7adecba84ef3c811350dd3f75d470558 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -589,30 +589,34 @@ Address Assembler::return_address_from_call_start(Address pc) {
// sequences:
//
// Without relocation:
- // movz ip0, #(target & 0x000000000000ffff)
- // movk ip0, #(target & 0x00000000ffff0000)
- // movk ip0, #(target & 0x0000ffff00000000)
- // movk ip0, #(target & 0xffff000000000000)
- // blr ip0
+ // movz temp, #(target & 0xffff)
+ // [movk temp, ...] (zero, one or two movk instructions).
+ // blr temp
//
// With relocation:
- // ldr ip0, =target
- // blr ip0
+ // ldr temp, =target
+ // blr temp
//
// The return address is immediately after the blr instruction in both cases,
// so it can be found by adding the call size to the address at the start of
// the call sequence.
- STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsMovz()) {
+ int movk_count;
+ if (instr->following(1)->IsMovk()) {
+ if (instr->following(2)->IsMovk()) {
+ movk_count = 2;
+ } else {
+ movk_count = 1;
+ }
+ } else {
+ movk_count = 0;
+ }
// Verify the instruction sequence.
- ASSERT(instr->following(1)->IsMovk());
- ASSERT(instr->following(2)->IsMovk());
- ASSERT(instr->following(3)->IsMovk());
- ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
- return pc + Assembler::kCallSizeWithoutRelocation;
+ ASSERT(instr->following(movk_count + 1)->IsBranchAndLinkToRegister());
+ return pc + (movk_count + 2) * kInstructionSize;
} else {
// Verify the instruction sequence.
ASSERT(instr->IsLdrLiteralX());
Index: src/arm64/assembler-arm64.h
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 79f957b91cb744f8ea4822f3cc87dd8d7086c66c..54a9f2bd734b646a518bd7fa6d13d17f0375957c 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -818,16 +818,15 @@ class Assembler : public AssemblerBase {
// as it will choose the correct value for a given relocation mode.
//
// Without relocation:
- // movz ip0, #(target & 0x000000000000ffff)
- // movk ip0, #(target & 0x00000000ffff0000)
- // movk ip0, #(target & 0x0000ffff00000000)
- // movk ip0, #(target & 0xffff000000000000)
- // blr ip0
+ // movz temp, #(target & 0x000000000000ffff)
+ // movk temp, #(target & 0x00000000ffff0000)
+ // movk temp, #(target & 0x0000ffff00000000)
+ // blr temp
//
// With relocation:
- // ldr ip0, =target
- // blr ip0
- static const int kCallSizeWithoutRelocation = 5 * kInstructionSize;
+ // ldr temp, =target
+ // blr temp
+ static const int kMaxCallSizeWithoutRelocation = 4 * kInstructionSize;
static const int kCallSizeWithRelocation = 2 * kInstructionSize;
// Size of the generated code in bytes
Index: src/arm64/macro-assembler-arm64.cc
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index d7d0ab7502308f2687beb126076c70e4e3ca1905..011f5b8e86ecf296d0e6613a863f9cd656fcca4a 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -2024,11 +2024,16 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
Register temp = temps.AcquireX();
if (rmode == RelocInfo::NONE64) {
+ // Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
movz(temp, (imm >> 0) & 0xffff, 0);
- movk(temp, (imm >> 16) & 0xffff, 16);
- movk(temp, (imm >> 32) & 0xffff, 32);
- movk(temp, (imm >> 48) & 0xffff, 48);
+ if (predictable_code_size()) {
+ movk(temp, (imm >> 16) & 0xffff, 16);
+ movk(temp, (imm >> 32) & 0xffff, 32);
+ } else {
+ if (((imm >> 16) & 0xffff) != 0) movk(temp, (imm >> 16) & 0xffff, 16);
+ if (((imm >> 32) & 0xffff) != 0) movk(temp, (imm >> 32) & 0xffff, 32);
+ }
} else {
LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
}
@@ -2075,13 +2080,17 @@ int MacroAssembler::CallSize(Label* target) {
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
- USE(target);
-
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
ASSERT(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
+ int result = kMaxCallSizeWithoutRelocation;
+ if (!predictable_code_size()) {
+ uint64_t imm = reinterpret_cast<uint64_t>(target);
+ if (((imm >> 16) & 0xffff) == 0) result -= kInstructionSize;
+ if (((imm >> 32) & 0xffff) == 0) result -= kInstructionSize;
+ }
+ return result;
} else {
return kCallSizeWithRelocation;
}
@@ -2091,17 +2100,9 @@ int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- USE(code);
USE(ast_id);
-
- // Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
-
- if (rmode == RelocInfo::NONE64) {
- return kCallSizeWithoutRelocation;
- } else {
- return kCallSizeWithRelocation;
- }
+ AllowDeferredHandleDereference embedding_raw_address;
+ return CallSize(reinterpret_cast<Address>(code.location()), rmode);
}
Index: src/arm64/macro-assembler-arm64.h
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index a54ed30a2b07180f7a151ab1d96975559f190353..a218a2b359d3cfd947086ca1b1db95dbb7b1aad8 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -1159,10 +1159,10 @@ class MacroAssembler : public Assembler {
// the size (in bytes) of the call sequence.
static int CallSize(Register target);
static int CallSize(Label* target);
- static int CallSize(Address target, RelocInfo::Mode rmode);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
+ int CallSize(Address target, RelocInfo::Mode rmode);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None());
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.