Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (227642 => 227643)
--- trunk/Source/JavaScriptCore/ChangeLog 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/ChangeLog 2018-01-26 00:05:57 UTC (rev 227643)
@@ -1,3 +1,47 @@
+2018-01-24 Filip Pizlo <fpi...@apple.com>
+
+ DirectArguments should protect itself using dynamic poisoning and precise index masking
+ https://bugs.webkit.org/show_bug.cgi?id=182086
+
+ Reviewed by Saam Barati.
+
+ This implements dynamic poisoning and precise index masking in DirectArguments, using the
+ helpers from <wtf/MathExtras.h> and helpers in AssemblyHelpers and FTL::LowerDFGToB3.
+
+ We use dynamic poisoning for DirectArguments since this object did not have any additional
+ indirection inside it that could have been poisoned. So, we use the xor of the expected type
+ and the actual type as an additional input into the pointer.
+
+ We use precise index masking for bounds checks, because it's not worth doing index masking
+ unless we know that precise index masking is too slow.
+
+ * assembler/MacroAssembler.h:
+ (JSC::MacroAssembler::lshiftPtr):
+ (JSC::MacroAssembler::rshiftPtr):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileGetByValOnDirectArguments):
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::compileGetByVal):
+ (JSC::FTL::DFG::LowerDFGToB3::compileGetMyArgumentByVal):
+ (JSC::FTL::DFG::LowerDFGToB3::preciseIndexMask64):
+ (JSC::FTL::DFG::LowerDFGToB3::preciseIndexMask32):
+ (JSC::FTL::DFG::LowerDFGToB3::dynamicPoison):
+ (JSC::FTL::DFG::LowerDFGToB3::dynamicPoisonOnLoadedType):
+ (JSC::FTL::DFG::LowerDFGToB3::dynamicPoisonOnType):
+ * jit/AssemblyHelpers.cpp:
+ (JSC::AssemblyHelpers::emitPreciseIndexMask32):
+ (JSC::AssemblyHelpers::emitDynamicPoison):
+ (JSC::AssemblyHelpers::emitDynamicPoisonOnLoadedType):
+ (JSC::AssemblyHelpers::emitDynamicPoisonOnType):
+ * jit/AssemblyHelpers.h:
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emitDirectArgumentsGetByVal):
+ * runtime/DirectArguments.h:
+ (JSC::DirectArguments::getIndexQuickly const):
+ (JSC::DirectArguments::setIndexQuickly):
+ (JSC::DirectArguments::argument):
+ * runtime/GenericArgumentsInlines.h:
+
2018-01-25 Mark Lam <mark....@apple.com>
Rename some local vars from type to typedArrayType for greater clarity.
Modified: trunk/Source/JavaScriptCore/assembler/MacroAssembler.h (227642 => 227643)
--- trunk/Source/JavaScriptCore/assembler/MacroAssembler.h 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/assembler/MacroAssembler.h 2018-01-26 00:05:57 UTC (rev 227643)
@@ -582,11 +582,21 @@
lshift32(trustedImm32ForShift(imm), srcDest);
}
+ void lshiftPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ lshift32(imm, srcDest);
+ }
+
void rshiftPtr(Imm32 imm, RegisterID srcDest)
{
rshift32(trustedImm32ForShift(imm), srcDest);
}
+ void rshiftPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ rshift32(imm, srcDest);
+ }
+
void urshiftPtr(Imm32 imm, RegisterID srcDest)
{
urshift32(trustedImm32ForShift(imm), srcDest);
@@ -906,11 +916,21 @@
lshift64(trustedImm32ForShift(imm), srcDest);
}
+ void lshiftPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ lshift64(imm, srcDest);
+ }
+
void rshiftPtr(Imm32 imm, RegisterID srcDest)
{
rshift64(trustedImm32ForShift(imm), srcDest);
}
+ void rshiftPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ rshift64(imm, srcDest);
+ }
+
void urshiftPtr(Imm32 imm, RegisterID srcDest)
{
urshift64(trustedImm32ForShift(imm), srcDest);
Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (227642 => 227643)
--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp 2018-01-26 00:05:57 UTC (rev 227643)
@@ -6410,6 +6410,7 @@
#if USE(JSVALUE32_64)
GPRTemporary resultTag(this);
#endif
+ GPRTemporary scratch(this);
GPRReg baseReg = base.gpr();
GPRReg propertyReg = property.gpr();
@@ -6420,6 +6421,7 @@
#else
JSValueRegs resultRegs = JSValueRegs(resultReg);
#endif
+ GPRReg scratchReg = scratch.gpr();
if (!m_compileOkay)
return;
@@ -6432,14 +6434,20 @@
MacroAssembler::NonZero,
MacroAssembler::Address(baseReg, DirectArguments::offsetOfMappedArguments())));
- auto isOutOfBounds = m_jit.branch32(CCallHelpers::AboveOrEqual, propertyReg, CCallHelpers::Address(baseReg, DirectArguments::offsetOfLength()));
+ m_jit.load32(CCallHelpers::Address(baseReg, DirectArguments::offsetOfLength()), scratchReg);
+ auto isOutOfBounds = m_jit.branch32(CCallHelpers::AboveOrEqual, propertyReg, scratchReg);
if (node->arrayMode().isInBounds())
speculationCheck(OutOfBounds, JSValueSource(), 0, isOutOfBounds);
+ m_jit.emitDynamicPoisonOnType(baseReg, resultReg, DirectArgumentsType);
+ m_jit.emitPreparePreciseIndexMask32(propertyReg, scratchReg, scratchReg);
+
m_jit.loadValue(
MacroAssembler::BaseIndex(
baseReg, propertyReg, MacroAssembler::TimesEight, DirectArguments::storageOffset()),
resultRegs);
+
+ m_jit.andPtr(scratchReg, resultReg);
if (!node->arrayMode().isInBounds()) {
addSlowPathGenerator(
Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (227642 => 227643)
--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp 2018-01-26 00:05:57 UTC (rev 227643)
@@ -3779,7 +3779,8 @@
ExoticObjectMode, noValue(), nullptr,
m_out.notNull(m_out.loadPtr(base, m_heaps.DirectArguments_mappedArguments)));
- auto isOutOfBounds = m_out.aboveOrEqual(index, m_out.load32NonNegative(base, m_heaps.DirectArguments_length));
+ LValue length = m_out.load32NonNegative(base, m_heaps.DirectArguments_length);
+ auto isOutOfBounds = m_out.aboveOrEqual(index, length);
if (m_node->arrayMode().isInBounds()) {
speculate(OutOfBounds, noValue(), nullptr, isOutOfBounds);
TypedPointer address = m_out.baseIndex(
@@ -3796,8 +3797,11 @@
LBasicBlock lastNext = m_out.appendTo(inBounds, slowCase);
TypedPointer address = m_out.baseIndex(
- m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index));
- ValueFromBlock fastResult = m_out.anchor(m_out.load64(address));
+ m_heaps.DirectArguments_storage,
+ dynamicPoisonOnType(base, DirectArgumentsType),
+ m_out.zeroExt(index, pointerType()));
+ ValueFromBlock fastResult = m_out.anchor(
+ preciseIndexMask32(m_out.load64(address), index, length));
m_out.jump(continuation);
m_out.appendTo(slowCase, continuation);
@@ -3974,7 +3978,7 @@
{
InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
- LValue index = lowInt32(m_node->child2());
+ LValue originalIndex = lowInt32(m_node->child2());
LValue originalLimit;
if (inlineCallFrame && !inlineCallFrame->isVarargs())
@@ -3989,7 +3993,7 @@
if (m_node->numberOfArgumentsToSkip())
limit = m_out.sub(limit, m_out.constInt32(m_node->numberOfArgumentsToSkip()));
- LValue isOutOfBounds = m_out.aboveOrEqual(index, limit);
+ LValue isOutOfBounds = m_out.aboveOrEqual(originalIndex, limit);
LBasicBlock continuation = nullptr;
LBasicBlock lastNext = nullptr;
ValueFromBlock slowResult;
@@ -4004,20 +4008,12 @@
} else
speculate(OutOfBounds, noValue(), 0, isOutOfBounds);
+ LValue index = originalIndex;
if (m_node->numberOfArgumentsToSkip())
index = m_out.add(index, m_out.constInt32(m_node->numberOfArgumentsToSkip()));
+
index = m_out.add(index, m_out.int32One);
- index = m_out.zeroExt(index, Int64);
-
- index = m_out.bitAnd(
- index,
- m_out.aShr(
- m_out.sub(
- index,
- m_out.opaque(m_out.zeroExt(originalLimit, Int64))),
- m_out.constInt32(63)));
-
TypedPointer base;
if (inlineCallFrame) {
if (inlineCallFrame->argumentCountIncludingThis > 1)
@@ -4027,8 +4023,10 @@
LValue result;
if (base) {
- LValue pointer = m_out.baseIndex(base.value(), index, ScaleEight);
+ LValue pointer = m_out.baseIndex(
+ base.value(), m_out.zeroExt(index, pointerType()), ScaleEight);
result = m_out.load64(TypedPointer(m_heaps.variables.atAnyIndex(), pointer));
+ result = preciseIndexMask32(result, originalIndex, limit);
} else
result = m_out.constInt64(JSValue::encode(jsUndefined()));
@@ -15111,6 +15109,48 @@
m_out.appendTo(continuation, lastNext);
}
+
+ LValue preciseIndexMask64(LValue value, LValue index, LValue limit)
+ {
+ return m_out.bitAnd(
+ value,
+ m_out.aShr(
+ m_out.sub(
+ index,
+ m_out.opaque(limit)),
+ m_out.constInt32(63)));
+ }
+
+ LValue preciseIndexMask32(LValue value, LValue index, LValue limit)
+ {
+ return preciseIndexMask64(value, m_out.zeroExt(index, Int64), m_out.zeroExt(limit, Int64));
+ }
+
+ LValue dynamicPoison(LValue value, LValue poison)
+ {
+ return m_out.add(
+ value,
+ m_out.shl(
+ m_out.zeroExt(poison, pointerType()),
+ m_out.constInt32(40)));
+ }
+
+ LValue dynamicPoisonOnLoadedType(LValue value, LValue actualType, JSType expectedType)
+ {
+ return dynamicPoison(
+ value,
+ m_out.bitXor(
+ m_out.opaque(actualType),
+ m_out.constInt32(expectedType)));
+ }
+
+ LValue dynamicPoisonOnType(LValue value, JSType expectedType)
+ {
+ return dynamicPoisonOnLoadedType(
+ value,
+ m_out.load8ZeroExt32(value, m_heaps.JSCell_typeInfoType),
+ expectedType);
+ }
template<typename... Args>
LValue vmCall(LType type, LValue function, Args&&... args)
Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp (227642 => 227643)
--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp 2018-01-26 00:05:57 UTC (rev 227643)
@@ -1016,6 +1016,53 @@
storePtr(scratch, vm.addressOfLastStackTop());
}
+void AssemblyHelpers::emitPreparePreciseIndexMask32(GPRReg index, GPRReg length, GPRReg result)
+{
+ if (length == result) {
+ negPtr(length);
+ addPtr(index, length);
+ } else {
+ move(index, result);
+ subPtr(length, result);
+ }
+ rshiftPtr(TrustedImm32(preciseIndexMaskShift<void*>()), result);
+}
+
+void AssemblyHelpers::emitDynamicPoison(GPRReg base, GPRReg poisonValue)
+{
+#if CPU(X86_64) || CPU(ARM64)
+ lshiftPtr(TrustedImm32(40), poisonValue);
+ addPtr(poisonValue, base);
+#else
+ UNUSED_PARAM(base);
+ UNUSED_PARAM(poisonValue);
+#endif
+}
+
+void AssemblyHelpers::emitDynamicPoisonOnLoadedType(GPRReg base, GPRReg actualType, JSType expectedType)
+{
+#if CPU(X86_64) || CPU(ARM64)
+    xor32(TrustedImm32(expectedType), actualType);
+    emitDynamicPoison(base, actualType);
+#else
+    UNUSED_PARAM(base);
+    UNUSED_PARAM(actualType);
+    UNUSED_PARAM(expectedType);
+#endif
+}
+
+void AssemblyHelpers::emitDynamicPoisonOnType(GPRReg base, GPRReg scratch, JSType expectedType)
+{
+#if CPU(X86_64) || CPU(ARM64)
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ emitDynamicPoisonOnLoadedType(base, scratch, expectedType);
+#else
+ UNUSED_PARAM(base);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(expectedType);
+#endif
+}
+
} // namespace JSC
#endif // ENABLE(JIT)
Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h (227642 => 227643)
--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h 2018-01-26 00:05:57 UTC (rev 227643)
@@ -1585,6 +1585,15 @@
#if USE(JSVALUE64)
void wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch);
#endif
+
+ // This assumes that index and length are 32-bit. This also assumes that they are already
+ // zero-extended. Also this does not clobber index, which is useful in the baseline JIT. This
+ // permits length and result to be in the same register.
+ void emitPreparePreciseIndexMask32(GPRReg index, GPRReg length, GPRReg result);
+
+ void emitDynamicPoison(GPRReg base, GPRReg poisonValue);
+ void emitDynamicPoisonOnLoadedType(GPRReg base, GPRReg actualType, JSType expectedType);
+ void emitDynamicPoisonOnType(GPRReg base, GPRReg scratch, JSType expectedType);
#if ENABLE(WEBASSEMBLY)
void loadWasmContextInstance(GPRReg dst);
Modified: trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp (227642 => 227643)
--- trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp 2018-01-26 00:05:57 UTC (rev 227643)
@@ -1392,21 +1392,26 @@
RegisterID property = regT1;
JSValueRegs result = JSValueRegs(regT0);
RegisterID scratch = regT3;
+ RegisterID scratch2 = regT4;
#else
RegisterID base = regT0;
RegisterID property = regT2;
JSValueRegs result = JSValueRegs(regT1, regT0);
RegisterID scratch = regT3;
+ RegisterID scratch2 = regT4;
#endif
load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));
+ emitDynamicPoisonOnLoadedType(base, scratch, DirectArgumentsType);
- slowCases.append(branch32(AboveOrEqual, property, Address(base, DirectArguments::offsetOfLength())));
+ load32(Address(base, DirectArguments::offsetOfLength()), scratch2);
+ slowCases.append(branch32(AboveOrEqual, property, scratch2));
slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfMappedArguments())));
- zeroExtend32ToPtr(property, scratch);
- loadValue(BaseIndex(base, scratch, TimesEight, DirectArguments::storageOffset()), result);
+ emitPreparePreciseIndexMask32(property, scratch2, scratch2);
+ loadValue(BaseIndex(base, property, TimesEight, DirectArguments::storageOffset()), result);
+ andPtr(scratch2, result.payloadGPR());
return slowCases;
}
Modified: trunk/Source/JavaScriptCore/runtime/DirectArguments.h (227642 => 227643)
--- trunk/Source/JavaScriptCore/runtime/DirectArguments.h 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/JavaScriptCore/runtime/DirectArguments.h 2018-01-26 00:05:57 UTC (rev 227643)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -98,13 +98,15 @@
JSValue getIndexQuickly(uint32_t i) const
{
ASSERT_WITH_SECURITY_IMPLICATION(isMappedArgument(i));
- return const_cast<DirectArguments*>(this)->storage()[i].get();
+ auto* ptr = &const_cast<DirectArguments*>(this)->storage()[i];
+ return preciseIndexMaskPtr(i, m_length, dynamicPoison(type(), DirectArgumentsType, ptr))->get();
}
void setIndexQuickly(VM& vm, uint32_t i, JSValue value)
{
ASSERT_WITH_SECURITY_IMPLICATION(isMappedArgument(i));
- storage()[i].set(vm, this, value);
+ auto* ptr = &storage()[i];
+ preciseIndexMaskPtr(i, m_length, dynamicPoison(type(), DirectArgumentsType, ptr))->set(vm, this, value);
}
WriteBarrier<JSFunction>& callee()
@@ -116,7 +118,8 @@
{
ASSERT(offset);
ASSERT_WITH_SECURITY_IMPLICATION(offset.offset() < std::max(m_length, m_minCapacity));
- return storage()[offset.offset()];
+ auto* ptr = &storage()[offset.offset()];
+ return *preciseIndexMaskPtr(offset.offset(), std::max(m_length, m_minCapacity), dynamicPoison(type(), DirectArgumentsType, ptr));
}
// Methods intended for use by the GenericArguments mixin.
Modified: trunk/Source/WTF/ChangeLog (227642 => 227643)
--- trunk/Source/WTF/ChangeLog 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/WTF/ChangeLog 2018-01-26 00:05:57 UTC (rev 227643)
@@ -1,3 +1,25 @@
+2018-01-24 Filip Pizlo <fpi...@apple.com>
+
+ DirectArguments should protect itself using dynamic poisoning and precise index masking
+ https://bugs.webkit.org/show_bug.cgi?id=182086
+
+ Reviewed by Saam Barati.
+
+ Add helpers for:
+
+ Dynamic poisoning: this means arranging to have the pointer you will dereference become an
+ invalid pointer if the type check you were relying on would have failed.
+
+ Precise index masking: a variant of index masking that does not depend on distancing. I figured
+ I'd just try this first for DirectArguments, since I didn't think that arguments[i] was ever
+ hot enough to warrant anything better. Turns out that in all of the benchmarks that care about
+ arguments performance, we optimize things to the point that the index masking isn't on a hot
+ path anymore. Turns out, it's neutral!
+
+ * wtf/MathExtras.h:
+ (WTF::preciseIndexMask):
+ (WTF::dynamicPoison):
+
2018-01-25 Jer Noble <jer.no...@apple.com>
Unreviewed build fix after r227631; make USE_VIDEOTOOLBOX universally enabled on iOS.
Modified: trunk/Source/WTF/wtf/MathExtras.h (227642 => 227643)
--- trunk/Source/WTF/wtf/MathExtras.h 2018-01-25 23:45:11 UTC (rev 227642)
+++ trunk/Source/WTF/wtf/MathExtras.h 2018-01-26 00:05:57 UTC (rev 227643)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006, 2007, 2008, 2009, 2010, 2013, 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2006-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -480,6 +480,56 @@
return static_cast<uint64_t>(static_cast<uint32_t>(-1)) >> std::clz(size);
}
+constexpr unsigned preciseIndexMaskShiftForSize(unsigned size)
+{
+ return size * 8 - 1;
+}
+
+template<typename T>
+constexpr unsigned preciseIndexMaskShift()
+{
+ return preciseIndexMaskShiftForSize(sizeof(T));
+}
+
+template<typename T>
+T opaque(T pointer)
+{
+ asm("" : "+r"(pointer));
+ return pointer;
+}
+
+template<typename T>
+inline T* preciseIndexMaskPtr(uintptr_t index, uintptr_t length, T* value)
+{
+ uintptr_t result = bitwise_cast<uintptr_t>(value) & static_cast<uintptr_t>(
+ static_cast<intptr_t>(index - opaque(length)) >>
+ static_cast<intptr_t>(preciseIndexMaskShift<T*>()));
+ return bitwise_cast<T*>(result);
+}
+
+constexpr unsigned bytePoisonShift = 40;
+
+template<typename T, typename U>
+inline T* dynamicPoison(U actual, U expected, T* pointer)
+{
+ static_assert(sizeof(U) == 1, "Poisoning only works for bytes at the moment");
+#if CPU(X86_64) || CPU(ARM64)
+ return bitwise_cast<T*>(
+ bitwise_cast<char*>(pointer) +
+ (static_cast<uintptr_t>(opaque(actual) ^ expected) << bytePoisonShift));
+#else
+ UNUSED_PARAM(actual);
+ UNUSED_PARAM(expected);
+ return pointer;
+#endif
+}
+
} // namespace WTF
+using WTF::dynamicPoison;
+using WTF::opaque;
+using WTF::preciseIndexMaskPtr;
+using WTF::preciseIndexMaskShift;
+using WTF::preciseIndexMaskShiftForSize;
+
#endif // #ifndef WTF_MathExtras_h