Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (191220 => 191221)
--- trunk/Source/JavaScriptCore/ChangeLog 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/ChangeLog 2015-10-16 22:53:11 UTC (rev 191221)
@@ -1,3 +1,69 @@
+2015-10-15 Filip Pizlo <fpizlo@apple.com>
+
+ CopyBarrier must be avoided for slow TypedArrays
+ https://bugs.webkit.org/show_bug.cgi?id=150217
+ rdar://problem/23128791
+
+ Reviewed by Michael Saboff.
+
+ Change how we access array buffer views so that we don't fire the barrier slow path, and
+ don't mask off the spaceBits, if the view is not FastTypedArray. That's because in that case
+ m_vector could be misaligned and so have meaningful non-space data in the spaceBits. Also in
+ that case, m_vector does not point into copied space.
+
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileGetIndexedPropertyStorage):
+ (JSC::DFG::SpeculativeJIT::compileGetTypedArrayByteOffset):
+ * ftl/FTLLowerDFGToLLVM.cpp:
+ (JSC::FTL::DFG::LowerDFGToLLVM::loadVectorWithBarrier):
+ (JSC::FTL::DFG::LowerDFGToLLVM::copyBarrier):
+ (JSC::FTL::DFG::LowerDFGToLLVM::isInToSpace):
+ (JSC::FTL::DFG::LowerDFGToLLVM::loadButterflyReadOnly):
+ (JSC::FTL::DFG::LowerDFGToLLVM::loadVectorReadOnly):
+ (JSC::FTL::DFG::LowerDFGToLLVM::removeSpaceBits):
+ (JSC::FTL::DFG::LowerDFGToLLVM::isFastTypedArray):
+ (JSC::FTL::DFG::LowerDFGToLLVM::baseIndex):
+ * heap/CopyBarrier.h:
+ (JSC::CopyBarrierBase::getWithoutBarrier):
+ (JSC::CopyBarrierBase::getPredicated):
+ (JSC::CopyBarrierBase::get):
+ (JSC::CopyBarrierBase::copyState):
+ (JSC::CopyBarrier::get):
+ (JSC::CopyBarrier::getPredicated):
+ (JSC::CopyBarrier::set):
+ * heap/Heap.cpp:
+ (JSC::Heap::copyBarrier):
+ * jit/AssemblyHelpers.cpp:
+ (JSC::AssemblyHelpers::branchIfNotType):
+ (JSC::AssemblyHelpers::branchIfFastTypedArray):
+ (JSC::AssemblyHelpers::branchIfNotFastTypedArray):
+ (JSC::AssemblyHelpers::loadTypedArrayVector):
+ (JSC::AssemblyHelpers::purifyNaN):
+ * jit/AssemblyHelpers.h:
+ (JSC::AssemblyHelpers::branchStructure):
+ (JSC::AssemblyHelpers::branchIfToSpace):
+ (JSC::AssemblyHelpers::branchIfNotToSpace):
+ (JSC::AssemblyHelpers::removeSpaceBits):
+ (JSC::AssemblyHelpers::addressForByteOffset):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emitIntTypedArrayGetByVal):
+ (JSC::JIT::emitFloatTypedArrayGetByVal):
+ (JSC::JIT::emitIntTypedArrayPutByVal):
+ (JSC::JIT::emitFloatTypedArrayPutByVal):
+ * runtime/JSArrayBufferView.h:
+ (JSC::JSArrayBufferView::vector):
+ (JSC::JSArrayBufferView::length):
+ * runtime/JSArrayBufferViewInlines.h:
+ (JSC::JSArrayBufferView::byteOffset):
+ * runtime/JSGenericTypedArrayView.h:
+ (JSC::JSGenericTypedArrayView::typedVector):
+ * runtime/JSGenericTypedArrayViewInlines.h:
+ (JSC::JSGenericTypedArrayView<Adaptor>::copyBackingStore):
+ (JSC::JSGenericTypedArrayView<Adaptor>::slowDownAndWasteMemory):
+ * tests/stress/misaligned-int8-view-byte-offset.js: Added.
+ * tests/stress/misaligned-int8-view-read.js: Added.
+ * tests/stress/misaligned-int8-view-write.js: Added.
+
2015-10-16 Keith Miller <keith_miller@apple.com>
Unreviewed. Build fix for 191215.
Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (191220 => 191221)
--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp 2015-10-16 22:53:11 UTC (rev 191221)
@@ -4386,14 +4386,11 @@
default:
ASSERT(isTypedView(node->arrayMode().typedArrayType()));
- m_jit.loadPtr(
- MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfVector()),
- storageReg);
+ JITCompiler::Jump fail = m_jit.loadTypedArrayVector(baseReg, storageReg);
+
addSlowPathGenerator(
- slowPathCall(
- m_jit.branchIfNotToSpace(storageReg),
- this, operationGetArrayBufferVector, storageReg, baseReg));
+ slowPathCall(fail, this, operationGetArrayBufferVector, storageReg, baseReg));
break;
}
@@ -4418,7 +4415,11 @@
m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), dataGPR);
m_jit.removeSpaceBits(dataGPR);
m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), vectorGPR);
+ JITCompiler::JumpList vectorReady;
+ vectorReady.append(m_jit.branchIfToSpace(vectorGPR));
+ vectorReady.append(m_jit.branchIfNotFastTypedArray(baseGPR));
m_jit.removeSpaceBits(vectorGPR);
+ vectorReady.link(&m_jit);
m_jit.loadPtr(MacroAssembler::Address(dataGPR, Butterfly::offsetOfArrayBuffer()), dataGPR);
m_jit.loadPtr(MacroAssembler::Address(dataGPR, ArrayBuffer::offsetOfData()), dataGPR);
m_jit.subPtr(dataGPR, vectorGPR);
Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp (191220 => 191221)
--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp 2015-10-16 22:53:11 UTC (rev 191221)
@@ -6139,35 +6139,68 @@
LValue loadVectorWithBarrier(LValue object)
{
+ LValue fastResultValue = m_out.loadPtr(object, m_heaps.JSArrayBufferView_vector);
return copyBarrier(
- object, m_out.loadPtr(object, m_heaps.JSArrayBufferView_vector),
- operationGetArrayBufferVector);
+ fastResultValue,
+ [&] () -> LValue {
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("loadVectorWithBarrier slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("loadVectorWithBarrier continuation"));
+
+ ValueFromBlock fastResult = m_out.anchor(fastResultValue);
+ m_out.branch(isFastTypedArray(object), rarely(slowPath), usually(continuation));
+
+ LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
+
+ LValue slowResultValue = lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ operationGetArrayBufferVector, locations[0].directGPR(),
+ locations[1].directGPR());
+ }, object);
+ ValueFromBlock slowResult = m_out.anchor(slowResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ return m_out.phi(m_out.intPtr, fastResult, slowResult);
+ });
}
-
+
LValue copyBarrier(LValue object, LValue pointer, P_JITOperation_EC slowPathFunction)
{
- LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("loadButterflyWithBarrier slow path"));
- LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("loadButterflyWithBarrier continuation"));
+ return copyBarrier(
+ pointer,
+ [&] () -> LValue {
+ return lazySlowPath(
+ [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
+ return createLazyCallGenerator(
+ slowPathFunction, locations[0].directGPR(), locations[1].directGPR());
+ }, object);
+ });
+ }
+ template<typename Functor>
+ LValue copyBarrier(LValue pointer, const Functor& functor)
+ {
+ LBasicBlock slowPath = FTL_NEW_BLOCK(m_out, ("copyBarrier slow path"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("copyBarrier continuation"));
+
ValueFromBlock fastResult = m_out.anchor(pointer);
- m_out.branch(
- m_out.testIsZeroPtr(pointer, m_out.constIntPtr(CopyBarrierBase::spaceBits)),
- usually(continuation), rarely(slowPath));
+ m_out.branch(isInToSpace(pointer), usually(continuation), rarely(slowPath));
LBasicBlock lastNext = m_out.appendTo(slowPath, continuation);
- LValue call = lazySlowPath(
- [=] (const Vector<Location>& locations) -> RefPtr<LazySlowPath::Generator> {
- return createLazyCallGenerator(
- slowPathFunction, locations[0].directGPR(), locations[1].directGPR());
- }, object);
- ValueFromBlock slowResult = m_out.anchor(call);
+ ValueFromBlock slowResult = m_out.anchor(functor());
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
return m_out.phi(m_out.intPtr, fastResult, slowResult);
}
+ LValue isInToSpace(LValue pointer)
+ {
+ return m_out.testIsZeroPtr(pointer, m_out.constIntPtr(CopyBarrierBase::spaceBits));
+ }
+
LValue loadButterflyReadOnly(LValue object)
{
return removeSpaceBits(m_out.loadPtr(object, m_heaps.JSObject_butterfly));
@@ -6175,7 +6208,25 @@
LValue loadVectorReadOnly(LValue object)
{
- return removeSpaceBits(m_out.loadPtr(object, m_heaps.JSArrayBufferView_vector));
+ LValue fastResultValue = m_out.loadPtr(object, m_heaps.JSArrayBufferView_vector);
+
+ LBasicBlock possiblyFromSpace = FTL_NEW_BLOCK(m_out, ("loadVectorReadOnly possibly from space"));
+ LBasicBlock continuation = FTL_NEW_BLOCK(m_out, ("loadVectorReadOnly continuation"));
+
+ ValueFromBlock fastResult = m_out.anchor(fastResultValue);
+
+ m_out.branch(isInToSpace(fastResultValue), usually(continuation), rarely(possiblyFromSpace));
+
+ LBasicBlock lastNext = m_out.appendTo(possiblyFromSpace, continuation);
+
+ LValue slowResultValue = m_out.select(
+ isFastTypedArray(object), removeSpaceBits(fastResultValue), fastResultValue);
+ ValueFromBlock slowResult = m_out.anchor(slowResultValue);
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+
+ return m_out.phi(m_out.intPtr, fastResult, slowResult);
}
LValue removeSpaceBits(LValue storage)
@@ -6183,6 +6234,13 @@
return m_out.bitAnd(
storage, m_out.constIntPtr(~static_cast<intptr_t>(CopyBarrierBase::spaceBits)));
}
+
+ LValue isFastTypedArray(LValue object)
+ {
+ return m_out.equal(
+ m_out.load32(object, m_heaps.JSArrayBufferView_mode),
+ m_out.constInt32(FastTypedArray));
+ }
TypedPointer baseIndex(IndexedAbstractHeap& heap, LValue storage, LValue index, Edge edge, ptrdiff_t offset = 0)
{
Modified: trunk/Source/JavaScriptCore/heap/CopyBarrier.h (191220 => 191221)
--- trunk/Source/JavaScriptCore/heap/CopyBarrier.h 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/heap/CopyBarrier.h 2015-10-16 22:53:11 UTC (rev 191221)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -74,14 +74,25 @@
return m_value;
}
+ // Use this version of get() if you only want to execute the barrier slow path if some condition
+ // holds, and you only want to evaluate that condition after first checking the barrier's
+ // condition. Usually, you just want to use get().
+ template<typename Functor>
+ void* getPredicated(const JSCell* owner, const Functor& functor) const
+ {
+ void* result = m_value;
+ if (UNLIKELY(bitwise_cast<uintptr_t>(result) & spaceBits)) {
+ if (functor())
+ return Heap::copyBarrier(owner, m_value);
+ }
+ return result;
+ }
+
// When we are in the concurrent copying phase, this method may lock the barrier object (i.e. the field
// pointing to copied space) and call directly into the owning object's copyBackingStore() method.
void* get(const JSCell* owner) const
{
- void* result = m_value;
- if (UNLIKELY(bitwise_cast<uintptr_t>(result) & spaceBits))
- return Heap::copyBarrier(owner, m_value);
- return result;
+ return getPredicated(owner, [] () -> bool { return true; });
}
CopyState copyState() const
@@ -159,6 +170,12 @@
{
return bitwise_cast<T*>(CopyBarrierBase::get(owner));
}
+
+ template<typename Functor>
+ T* getPredicated(const JSCell* owner, const Functor& functor) const
+ {
+ return bitwise_cast<T*>(CopyBarrierBase::getPredicated(owner, functor));
+ }
void set(VM& vm, const JSCell* owner, T* value)
{
Modified: trunk/Source/JavaScriptCore/heap/Heap.cpp (191220 => 191221)
--- trunk/Source/JavaScriptCore/heap/Heap.cpp 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/heap/Heap.cpp 2015-10-16 22:53:11 UTC (rev 191221)
@@ -1001,7 +1001,11 @@
void* Heap::copyBarrier(const JSCell*, void*& pointer)
{
- // Do nothing for now.
+ // Do nothing for now, except making sure that the low bits are masked off. This helps to
+ // simulate enough of this barrier that at least we can test the low bits assumptions.
+ pointer = bitwise_cast<void*>(
+ bitwise_cast<uintptr_t>(pointer) & ~static_cast<uintptr_t>(CopyBarrierBase::spaceBits));
+
return pointer;
}
Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp (191220 => 191221)
--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp 2015-10-16 22:53:11 UTC (rev 191221)
@@ -132,6 +132,33 @@
return result;
}
+AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR)
+{
+ return branch32(
+ Equal,
+ Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(FastTypedArray));
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR)
+{
+ return branch32(
+ NotEqual,
+ Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(FastTypedArray));
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR)
+{
+ RELEASE_ASSERT(baseGPR != resultGPR);
+
+ loadPtr(Address(baseGPR, JSArrayBufferView::offsetOfVector()), resultGPR);
+ Jump ok = branchIfToSpace(resultGPR);
+ Jump result = branchIfFastTypedArray(baseGPR);
+ ok.link(this);
+ return result;
+}
+
void AssemblyHelpers::purifyNaN(FPRReg fpr)
{
MacroAssembler::Jump notNaN = branchDouble(DoubleEqual, fpr, fpr);
Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h (191220 => 191221)
--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h 2015-10-16 22:53:11 UTC (rev 191221)
@@ -756,6 +756,11 @@
#endif
}
+ Jump branchIfToSpace(GPRReg storageGPR)
+ {
+ return branchTest32(Zero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits));
+ }
+
Jump branchIfNotToSpace(GPRReg storageGPR)
{
return branchTest32(NonZero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits));
@@ -765,6 +770,13 @@
{
andPtr(TrustedImmPtr(~static_cast<uintptr_t>(CopyBarrierBase::spaceBits)), storageGPR);
}
+
+ Jump branchIfFastTypedArray(GPRReg baseGPR);
+ Jump branchIfNotFastTypedArray(GPRReg baseGPR);
+
+ // Returns a jump to slow path for when we need to execute the barrier. Note that baseGPR and
+ // resultGPR must be different.
+ Jump loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR);
static Address addressForByteOffset(ptrdiff_t byteOffset)
{
Modified: trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp (191220 => 191221)
--- trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp 2015-10-16 22:53:11 UTC (rev 191221)
@@ -1455,8 +1455,7 @@
load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
- loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch);
- slowCases.append(branchIfNotToSpace(scratch));
+ slowCases.append(loadTypedArrayVector(base, scratch));
switch (elementSize(type)) {
case 1:
@@ -1527,8 +1526,7 @@
load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
- loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch);
- slowCases.append(branchIfNotToSpace(scratch));
+ slowCases.append(loadTypedArrayVector(base, scratch));
switch (elementSize(type)) {
case 4:
@@ -1595,8 +1593,7 @@
// We would be loading this into base as in get_by_val, except that the slow
// path expects the base to be unclobbered.
- loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
- slowCases.append(branchIfNotToSpace(lateScratch));
+ slowCases.append(loadTypedArrayVector(base, lateScratch));
if (isClamped(type)) {
ASSERT(elementSize(type) == 1);
@@ -1681,8 +1678,7 @@
// We would be loading this into base as in get_by_val, except that the slow
// path expects the base to be unclobbered.
- loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
- slowCases.append(branchIfNotToSpace(lateScratch));
+ slowCases.append(loadTypedArrayVector(base, lateScratch));
switch (elementSize(type)) {
case 4:
Modified: trunk/Source/JavaScriptCore/runtime/JSArrayBufferView.h (191220 => 191221)
--- trunk/Source/JavaScriptCore/runtime/JSArrayBufferView.h 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/runtime/JSArrayBufferView.h 2015-10-16 22:53:11 UTC (rev 191221)
@@ -161,7 +161,15 @@
PassRefPtr<ArrayBufferView> impl();
void neuter();
- void* vector() { return m_vector.get(this); }
+ void* vector()
+ {
+ return m_vector.getPredicated(
+ this,
+ [this] () -> bool {
+ return mode() == FastTypedArray;
+ });
+ }
+
unsigned byteOffset();
unsigned length() const { return m_length; }
Modified: trunk/Source/JavaScriptCore/runtime/JSArrayBufferViewInlines.h (191220 => 191221)
--- trunk/Source/JavaScriptCore/runtime/JSArrayBufferViewInlines.h 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/runtime/JSArrayBufferViewInlines.h 2015-10-16 22:53:11 UTC (rev 191221)
@@ -68,7 +68,7 @@
return 0;
ptrdiff_t delta =
- bitwise_cast<uint8_t*>(m_vector.get(this)) - static_cast<uint8_t*>(buffer()->data());
+ bitwise_cast<uint8_t*>(vector()) - static_cast<uint8_t*>(buffer()->data());
unsigned result = static_cast<unsigned>(delta);
ASSERT(static_cast<ptrdiff_t>(result) == delta);
Modified: trunk/Source/JavaScriptCore/runtime/JSGenericTypedArrayView.h (191220 => 191221)
--- trunk/Source/JavaScriptCore/runtime/JSGenericTypedArrayView.h 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/runtime/JSGenericTypedArrayView.h 2015-10-16 22:53:11 UTC (rev 191221)
@@ -108,11 +108,11 @@
const typename Adaptor::Type* typedVector() const
{
- return bitwise_cast<const typename Adaptor::Type*>(m_vector.get(this));
+ return bitwise_cast<const typename Adaptor::Type*>(vector());
}
typename Adaptor::Type* typedVector()
{
- return bitwise_cast<typename Adaptor::Type*>(m_vector.get(this));
+ return bitwise_cast<typename Adaptor::Type*>(vector());
}
// These methods are meant to match indexed access methods that JSObject
Modified: trunk/Source/JavaScriptCore/runtime/JSGenericTypedArrayViewInlines.h (191220 => 191221)
--- trunk/Source/JavaScriptCore/runtime/JSGenericTypedArrayViewInlines.h 2015-10-16 22:51:31 UTC (rev 191220)
+++ trunk/Source/JavaScriptCore/runtime/JSGenericTypedArrayViewInlines.h 2015-10-16 22:53:11 UTC (rev 191221)
@@ -443,7 +443,7 @@
if (token == TypedArrayVectorCopyToken
&& visitor.checkIfShouldCopy(thisObject->m_vector.getWithoutBarrier())) {
ASSERT(thisObject->m_vector);
- void* oldVector = thisObject->m_vector.get(thisObject);
+ void* oldVector = thisObject->vector();
void* newVector = visitor.allocateNewSpace(thisObject->byteSize());
memcpy(newVector, oldVector, thisObject->byteSize());
thisObject->m_vector.setWithoutBarrier(static_cast<char*>(newVector));
@@ -482,7 +482,7 @@
ASSERT(thisObject->m_vector);
// Reuse already allocated memory if at all possible.
thisObject->m_butterfly.setWithoutBarrier(
- bitwise_cast<IndexingHeader*>(thisObject->m_vector.get(thisObject))->butterfly());
+ bitwise_cast<IndexingHeader*>(thisObject->vector())->butterfly());
} else {
VM& vm = *heap->vm();
thisObject->m_butterfly.set(vm, thisObject, Butterfly::createOrGrowArrayRight(
@@ -494,14 +494,14 @@
switch (thisObject->m_mode) {
case FastTypedArray:
- buffer = ArrayBuffer::create(thisObject->m_vector.get(thisObject), thisObject->byteLength());
+ buffer = ArrayBuffer::create(thisObject->vector(), thisObject->byteLength());
break;
case OversizeTypedArray:
// FIXME: consider doing something like "subtracting" from extra memory
// cost, since right now this case will cause the GC to think that we reallocated
// the whole buffer.
- buffer = ArrayBuffer::createAdopted(thisObject->m_vector.get(thisObject), thisObject->byteLength());
+ buffer = ArrayBuffer::createAdopted(thisObject->vector(), thisObject->byteLength());
break;
default:
Added: trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-byte-offset.js (0 => 191221)
--- trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-byte-offset.js (rev 0)
+++ trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-byte-offset.js 2015-10-16 22:53:11 UTC (rev 191221)
@@ -0,0 +1,25 @@
+function make(offset) {
+ // Default offset is 1.
+ if (offset === void 0)
+ offset = 1;
+
+ var int8Array = new Int8Array(100);
+ for (var i = 0; i < int8Array.length; ++i)
+ int8Array[i] = i;
+
+ return new Int8Array(int8Array.buffer, offset, int8Array.length - offset);
+}
+noInline(make);
+
+function foo(o) {
+ return o.byteOffset;
+}
+
+noInline(foo);
+
+var o = make();
+for (var i = 0; i < 10000; ++i) {
+ var result = foo(o);
+ if (result != 1)
+ throw "Error: bad result: " + result;
+}
Added: trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-read.js (0 => 191221)
--- trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-read.js (rev 0)
+++ trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-read.js 2015-10-16 22:53:11 UTC (rev 191221)
@@ -0,0 +1,26 @@
+function make(offset) {
+ // Default offset is 1.
+ if (offset === void 0)
+ offset = 1;
+
+ var int8Array = new Int8Array(100);
+ for (var i = 0; i < int8Array.length; ++i)
+ int8Array[i] = i;
+
+ return new Int8Array(int8Array.buffer, offset, int8Array.length - offset);
+}
+noInline(make);
+
+function foo(o, i) {
+ return o[i];
+}
+
+noInline(foo);
+
+var o = make();
+for (var i = 0; i < 10000; ++i) {
+ var index = i % o.length;
+ var result = foo(o, index);
+ if (result != index + 1)
+ throw "Read test error: bad result for index = " + index + ": " + result + "; expected " + (index + 1);
+}
Added: trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-write.js (0 => 191221)
--- trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-write.js (rev 0)
+++ trunk/Source/JavaScriptCore/tests/stress/misaligned-int8-view-write.js 2015-10-16 22:53:11 UTC (rev 191221)
@@ -0,0 +1,30 @@
+function make(offset) {
+ // Default offset is 1.
+ if (offset === void 0)
+ offset = 1;
+
+ var int8Array = new Int8Array(100);
+ for (var i = 0; i < int8Array.length; ++i)
+ int8Array[i] = i;
+
+ return new Int8Array(int8Array.buffer, offset, int8Array.length - offset);
+}
+noInline(make);
+
+function foo(o, i, v) {
+ o[i] = v;
+}
+
+noInline(foo);
+
+var o = make();
+var real = new Int8Array(o.buffer);
+for (var i = 0; i < 10000; ++i) {
+ var index = i % o.length;
+ var value = i % 7;
+ foo(o, index, value);
+ var result = real[index + 1];
+ if (result != value)
+ throw "Write test error: bad result for index = " + index + ": " + result + "; expected " + value;
+}
+