[SCM] WebKit Debian packaging branch, webkit-1.1, updated. upstream/1.1.15.1-1414-gc69ee75

ggaren at apple.com
Thu Oct 29 20:38:38 UTC 2009


The following commit has been merged in the webkit-1.1 branch:
commit 570483e730a9a5a9f8ed4be4fd2cc690582e70d2
Author: ggaren at apple.com <ggaren at apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date:   Sat Oct 3 17:01:14 2009 +0000

    Removed the concept of a "fast access cutoff" in arrays, because it
    punished some patterns of array access too much, and made things too
    complex for inlining in some cases.
    
    Patch by Geoffrey Garen <ggaren at apple.com> on 2009-10-02
    Reviewed by Sam Weinig.
    
    1.3% speedup on SunSpider.
    
    * jit/JITOpcodes.cpp:
    (JSC::JIT::emitSlow_op_get_by_val):
    (JSC::JIT::emitSlow_op_put_by_val):
    * jit/JITPropertyAccess.cpp:
    (JSC::JIT::emit_op_get_by_val):
    (JSC::JIT::emitSlow_op_get_by_val):
    (JSC::JIT::emit_op_put_by_val):
    (JSC::JIT::emitSlow_op_put_by_val):
    * jit/JITStubs.cpp:
    * jit/JITStubs.h:
    (JSC::): Check m_vectorLength instead of m_fastAccessCutoff when
    getting / putting from / to an array. Inline putting past the end of
    the array.
    
    * runtime/JSArray.cpp:
    (JSC::JSArray::JSArray):
    (JSC::JSArray::getOwnPropertySlot):
    (JSC::JSArray::getOwnPropertyDescriptor):
    (JSC::JSArray::put):
    (JSC::JSArray::putSlowCase):
    (JSC::JSArray::deleteProperty):
    (JSC::JSArray::getOwnPropertyNames):
    (JSC::JSArray::increaseVectorLength):
    (JSC::JSArray::setLength):
    (JSC::JSArray::pop):
    (JSC::JSArray::push):
    (JSC::JSArray::sort):
    (JSC::JSArray::fillArgList):
    (JSC::JSArray::copyToRegisters):
    (JSC::JSArray::compactForSorting):
    (JSC::JSArray::checkConsistency):
    * runtime/JSArray.h:
    (JSC::JSArray::canGetIndex):
    (JSC::JSArray::canSetIndex):
    (JSC::JSArray::setIndex):
    (JSC::JSArray::markChildrenDirect): Removed m_fastAccessCutoff and
    replaced it with checks for JSValue() to detect reads from and writes
    to uninitialized parts of the array.
    
    
    
    git-svn-id: http://svn.webkit.org/repository/webkit/trunk@49065 268f45cc-cd09-0410-ab3c-d52691b4dbfc
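
For readers skimming the patch, the heart of the change is visible in the
JSArray.h accessors at the end of the diff; a condensed before/after sketch:

    // Before: the fast path required i < m_fastAccessCutoff, a watermark
    // below which every vector slot was known to be initialized.
    bool canGetIndex(unsigned i) { return i < m_fastAccessCutoff; }

    // After: the fast path bounds-checks against the real vector length and
    // treats an empty JSValue() as a hole that falls back to the slow path.
    bool canGetIndex(unsigned i) { return i < m_vectorLength && m_storage->m_vector[i]; }
    bool canSetIndex(unsigned i) { return i < m_vectorLength; }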

diff --git a/JavaScriptCore/ChangeLog b/JavaScriptCore/ChangeLog
index c0761cb..0b7f5a1 100644
--- a/JavaScriptCore/ChangeLog
+++ b/JavaScriptCore/ChangeLog
@@ -1,3 +1,52 @@
+2009-10-02  Geoffrey Garen  <ggaren at apple.com>
+
+        Reviewed by Sam Weinig.
+
+        Removed the concept of a "fast access cutoff" in arrays, because it
+        punished some patterns of array access too much, and made things too
+        complex for inlining in some cases.
+        
+        1.3% speedup on SunSpider.
+
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::emitSlow_op_put_by_val):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emit_op_get_by_val):
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::emit_op_put_by_val):
+        (JSC::JIT::emitSlow_op_put_by_val):
+        * jit/JITStubs.cpp:
+        * jit/JITStubs.h:
+        (JSC::): Check m_vectorLength instead of m_fastAccessCutoff when
+        getting / putting from / to an array. Inline putting past the end of
+        the array.
+
+        * runtime/JSArray.cpp:
+        (JSC::JSArray::JSArray):
+        (JSC::JSArray::getOwnPropertySlot):
+        (JSC::JSArray::getOwnPropertyDescriptor):
+        (JSC::JSArray::put):
+        (JSC::JSArray::putSlowCase):
+        (JSC::JSArray::deleteProperty):
+        (JSC::JSArray::getOwnPropertyNames):
+        (JSC::JSArray::increaseVectorLength):
+        (JSC::JSArray::setLength):
+        (JSC::JSArray::pop):
+        (JSC::JSArray::push):
+        (JSC::JSArray::sort):
+        (JSC::JSArray::fillArgList):
+        (JSC::JSArray::copyToRegisters):
+        (JSC::JSArray::compactForSorting):
+        (JSC::JSArray::checkConsistency):
+        * runtime/JSArray.h:
+        (JSC::JSArray::canGetIndex):
+        (JSC::JSArray::canSetIndex):
+        (JSC::JSArray::setIndex):
+        (JSC::JSArray::markChildrenDirect): Removed m_fastAccessCutoff and
+        replaced it with checks for JSValue() to detect reads from and writes
+        to uninitialized parts of the array.
+
 2009-10-02  Jonni Rainisto  <jonni.rainisto at nokia.com>
 
         Reviewed by Darin Adler.
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index e965a0d..f362d75 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -2691,32 +2691,20 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
 
 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    // The slow void JIT::emitSlow_that handles accesses to arrays (below) may jump back up to here. 
-    Label beginGetByValSlow(this);
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
 
-    Jump notImm = getSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    emitFastArithIntToImmNoCheck(regT1, regT1);
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base array check
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value
 
-    notImm.link(this);
     JITStubCall stubCall(this, cti_op_get_by_val);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(regT1);
-    stubCall.call(currentInstruction[1].u.operand);
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    // This is slow void JIT::emitSlow_that handles accesses to arrays above the fast cut-off.
-    // First, check if this is an access to the vector
-    linkSlowCase(iter);
-    branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), beginGetByValSlow);
-
-    // okay, missed the fast region, but it is still in the vector.  Get the value.
-    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT2);
-    // Check whether the value loaded is zero; if so we need to return undefined.
-    branchTestPtr(Zero, regT2, beginGetByValSlow);
-    move(regT2, regT0);
-    emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+    stubCall.addArgument(base, regT2);
+    stubCall.addArgument(property, regT2);
+    stubCall.call(dst);
 }
 
 void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -2773,30 +2761,20 @@ void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<Slo
 
 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    // Normal slow cases - either is not an immediate imm, or is an array.
-    Jump notImm = getSlowCase(iter);
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    emitFastArithIntToImmNoCheck(regT1, regT1);
+    unsigned base = currentInstruction[1].u.operand;
+    unsigned property = currentInstruction[2].u.operand;
+    unsigned value = currentInstruction[3].u.operand;
 
-    notImm.link(this); {
-        JITStubCall stubCall(this, cti_op_put_by_val);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-        stubCall.call();
-        emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
-    }
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base not array check
+    linkSlowCase(iter); // in vector check
 
-    // slow cases for immediate int accesses to arrays
-    linkSlowCase(iter);
-    linkSlowCase(iter); {
-        JITStubCall stubCall(this, cti_op_put_by_val_array);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.addArgument(currentInstruction[3].u.operand, regT2);
-        stubCall.call();
-    }
+    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+    stubPutByValCall.addArgument(regT0);
+    stubPutByValCall.addArgument(property, regT2);
+    stubPutByValCall.addArgument(value, regT2);
+    stubPutByValCall.call();
 }
 
 void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 86d9651..9edfd01 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -273,11 +273,14 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
     emitJumpSlowCaseIfNotJSCell(base, regT1);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
 
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+
     emitStore(dst, regT1, regT0);
     map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
 }
@@ -288,31 +291,16 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
     unsigned base = currentInstruction[2].u.operand;
     unsigned property = currentInstruction[3].u.operand;
 
-    // The slow void JIT::emitSlow_that handles accesses to arrays (below) may jump back up to here. 
-    Label callGetByValJITStub(this);
-
     linkSlowCase(iter); // property int32 check
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
     linkSlowCase(iter); // base array check
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value
 
     JITStubCall stubCall(this, cti_op_get_by_val);
     stubCall.addArgument(base);
     stubCall.addArgument(property);
     stubCall.call(dst);
-
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    linkSlowCase(iter); // array fast cut-off check
-
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
-    branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength)), callGetByValJITStub);
-
-    // Missed the fast region, but it is still in the vector.
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
-    branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)).linkTo(callGetByValJITStub, this);
-
-    emitStore(dst, regT1, regT0);
 }
 
 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
@@ -326,22 +314,27 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
     addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
     emitJumpSlowCaseIfNotJSCell(base, regT1);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
-
-    Jump inFastVector = branch32(Below, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
-    
-    // Check if the access is within the vector.
-    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
+    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
 
-    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
-    // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff. 
-    addSlowCase(branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag)));
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
 
-    inFastVector.link(this);
+    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
 
+    Label storeResult(this);
     emitLoad(value, regT1, regT0);
     store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
     store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+    Jump end = jump();
+
+    empty.link(this);
+    add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+    add32(Imm32(1), regT2, regT0);
+    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+    jump().linkTo(storeResult, this);
+
+    end.link(this);
 }
 
 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -353,24 +346,13 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
     linkSlowCase(iter); // property int32 check
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
     linkSlowCase(iter); // base not array check
+    linkSlowCase(iter); // in vector check
 
     JITStubCall stubPutByValCall(this, cti_op_put_by_val);
     stubPutByValCall.addArgument(base);
     stubPutByValCall.addArgument(property);
     stubPutByValCall.addArgument(value);
     stubPutByValCall.call();
-
-    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
-    // Slow cases for immediate int accesses to arrays.
-    linkSlowCase(iter); // in vector check
-    linkSlowCase(iter); // written to slot check
-
-    JITStubCall stubCall(this, cti_op_put_by_val_array);
-    stubCall.addArgument(regT1, regT0);
-    stubCall.addArgument(regT2);
-    stubCall.addArgument(value);
-    stubCall.call();
 }
 
 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
@@ -952,12 +934,16 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
 
 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
 {
-    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegisters(base, regT0, property, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
 #if USE(JSVALUE64)
     // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
-    // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
-    // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
+    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
+    // number was signed since m_vectorLength is always less than intmax (since the total allocation
     // size is always less than 4Gb).  As such zero extending wil have been correct (and extending the value
     // to 64-bits is necessary since it's used in the address calculation.  We zero extend rather than sign
     // extending since it makes it easier to re-tag the value in the slow case.
@@ -965,21 +951,25 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
 #else
     emitFastArithImmToInt(regT1);
 #endif
-    emitJumpSlowCaseIfNotJSCell(regT0);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
 
-    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff))));
+    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
 
-    // Get the value from the vector
     loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
-    emitPutVirtualRegister(currentInstruction[1].u.operand);
+    addSlowCase(branchTestPtr(Zero, regT0));
+
+    emitPutVirtualRegister(dst);
 }
 
 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
 {
-    emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
+    unsigned base = currentInstruction[1].u.operand;
+    unsigned property = currentInstruction[2].u.operand;
+    unsigned value = currentInstruction[3].u.operand;
+
+    emitGetVirtualRegisters(base, regT0, property, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
 #if USE(JSVALUE64)
     // See comment in op_get_by_val.
@@ -987,23 +977,29 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
 #else
     emitFastArithImmToInt(regT1);
 #endif
-    emitJumpSlowCaseIfNotJSCell(regT0);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
     addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
 
-    // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
     loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-    Jump inFastVector = branch32(Below, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_fastAccessCutoff)));
-    // No; oh well, check if the access if within the vector - if so, we may still be okay.
-    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_vectorLength))));
 
-    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
-    // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff. 
-    addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
+    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
 
-    // All good - put the value into the array.
-    inFastVector.link(this);
-    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+    Label storeResult(this);
+    emitGetVirtualRegister(value, regT0);
     storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+    Jump end = jump();
+    
+    empty.link(this);
+    add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+    move(regT1, regT0);
+    add32(Imm32(1), regT0);
+    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+    jump().linkTo(storeResult, this);
+
+    end.link(this);
 }
 
 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index 065b7ea..1796f63 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -1942,28 +1942,6 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
     CHECK_FOR_EXCEPTION_AT_END();
 }
 
-DEFINE_STUB_FUNCTION(void, op_put_by_val_array)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    CallFrame* callFrame = stackFrame.callFrame;
-    JSValue baseValue = stackFrame.args[0].jsValue();
-    int i = stackFrame.args[1].int32();
-    JSValue value = stackFrame.args[2].jsValue();
-
-    ASSERT(isJSArray(stackFrame.globalData, baseValue));
-
-    if (LIKELY(i >= 0))
-        asArray(baseValue)->JSArray::put(callFrame, i, value);
-    else {
-        Identifier property(callFrame, UString::from(i));
-        PutPropertySlot slot;
-        baseValue.put(callFrame, property, value, slot);
-    }
-
-    CHECK_FOR_EXCEPTION_AT_END();
-}
-
 DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index 46973ee..daae043 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -345,7 +345,6 @@ extern "C" {
     void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
-    void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
     void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
diff --git a/JavaScriptCore/runtime/JSArray.cpp b/JavaScriptCore/runtime/JSArray.cpp
index c471dac..fd9e7b2 100644
--- a/JavaScriptCore/runtime/JSArray.cpp
+++ b/JavaScriptCore/runtime/JSArray.cpp
@@ -136,9 +136,7 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure)
     unsigned initialCapacity = 0;
 
     m_storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
-    m_storage->m_vectorLength = initialCapacity;
-
-    m_fastAccessCutoff = 0;
+    m_vectorLength = initialCapacity;
 
     checkConsistency();
 }
@@ -150,7 +148,7 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, unsigned initialLength)
 
     m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
     m_storage->m_length = initialLength;
-    m_storage->m_vectorLength = initialCapacity;
+    m_vectorLength = initialCapacity;
     m_storage->m_numValuesInVector = 0;
     m_storage->m_sparseValueMap = 0;
     m_storage->lazyCreationData = 0;
@@ -159,8 +157,6 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, unsigned initialLength)
     for (size_t i = 0; i < initialCapacity; ++i)
         vector[i] = JSValue();
 
-    m_fastAccessCutoff = 0;
-
     checkConsistency();
 
     Heap::heap(this)->reportExtraMemoryCost(initialCapacity * sizeof(JSValue));
@@ -173,7 +169,7 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, const ArgList& list)
 
     m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
     m_storage->m_length = initialCapacity;
-    m_storage->m_vectorLength = initialCapacity;
+    m_vectorLength = initialCapacity;
     m_storage->m_numValuesInVector = initialCapacity;
     m_storage->m_sparseValueMap = 0;
 
@@ -182,8 +178,6 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, const ArgList& list)
     for (ArgList::const_iterator it = list.begin(); it != end; ++it, ++i)
         m_storage->m_vector[i] = *it;
 
-    m_fastAccessCutoff = initialCapacity;
-
     checkConsistency();
 
     Heap::heap(this)->reportExtraMemoryCost(storageSize(initialCapacity));
@@ -207,7 +201,7 @@ bool JSArray::getOwnPropertySlot(ExecState* exec, unsigned i, PropertySlot& slot
         return false;
     }
 
-    if (i < storage->m_vectorLength) {
+    if (i < m_vectorLength) {
         JSValue& valueSlot = storage->m_vector[i];
         if (valueSlot) {
             slot.setValueSlot(&valueSlot);
@@ -253,8 +247,8 @@ bool JSArray::getOwnPropertyDescriptor(ExecState* exec, const Identifier& proper
     if (isArrayIndex) {
         if (i >= m_storage->m_length)
             return false;
-        if (i < m_storage->m_vectorLength) {
-            JSValue value = m_storage->m_vector[i];
+        if (i < m_vectorLength) {
+            JSValue& value = m_storage->m_vector[i];
             if (value) {
                 descriptor.setDescriptor(value, 0);
                 return true;
@@ -305,7 +299,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
         m_storage->m_length = length;
     }
 
-    if (i < m_storage->m_vectorLength) {
+    if (i < m_vectorLength) {
         JSValue& valueSlot = m_storage->m_vector[i];
         if (valueSlot) {
             valueSlot = value;
@@ -313,8 +307,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
             return;
         }
         valueSlot = value;
-        if (++m_storage->m_numValuesInVector == m_storage->m_length)
-            m_fastAccessCutoff = m_storage->m_length;
+        ++m_storage->m_numValuesInVector;
         checkConsistency();
         return;
     }
@@ -352,8 +345,7 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
         if (increaseVectorLength(i + 1)) {
             storage = m_storage;
             storage->m_vector[i] = value;
-            if (++storage->m_numValuesInVector == storage->m_length)
-                m_fastAccessCutoff = storage->m_length;
+            ++storage->m_numValuesInVector;
             checkConsistency();
         } else
             throwOutOfMemoryError(exec);
@@ -363,7 +355,7 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
     // Decide how many values it would be best to move from the map.
     unsigned newNumValuesInVector = storage->m_numValuesInVector + 1;
     unsigned newVectorLength = increasedVectorLength(i + 1);
-    for (unsigned j = max(storage->m_vectorLength, MIN_SPARSE_ARRAY_INDEX); j < newVectorLength; ++j)
+    for (unsigned j = max(m_vectorLength, MIN_SPARSE_ARRAY_INDEX); j < newVectorLength; ++j)
         newNumValuesInVector += map->contains(j);
     if (i >= MIN_SPARSE_ARRAY_INDEX)
         newNumValuesInVector -= map->contains(i);
@@ -386,7 +378,7 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
         return;
     }
 
-    unsigned vectorLength = storage->m_vectorLength;
+    unsigned vectorLength = m_vectorLength;
 
     Heap::heap(this)->reportExtraMemoryCost(storageSize(newVectorLength) - storageSize(vectorLength));
 
@@ -404,7 +396,7 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
 
     storage->m_vector[i] = value;
 
-    storage->m_vectorLength = newVectorLength;
+    m_vectorLength = newVectorLength;
     storage->m_numValuesInVector = newNumValuesInVector;
 
     m_storage = storage;
@@ -431,7 +423,7 @@ bool JSArray::deleteProperty(ExecState* exec, unsigned i)
 
     ArrayStorage* storage = m_storage;
 
-    if (i < storage->m_vectorLength) {
+    if (i < m_vectorLength) {
         JSValue& valueSlot = storage->m_vector[i];
         if (!valueSlot) {
             checkConsistency();
@@ -439,8 +431,6 @@ bool JSArray::deleteProperty(ExecState* exec, unsigned i)
         }
         valueSlot = JSValue();
         --storage->m_numValuesInVector;
-        if (m_fastAccessCutoff > i)
-            m_fastAccessCutoff = i;
         checkConsistency();
         return true;
     }
@@ -472,7 +462,7 @@ void JSArray::getOwnPropertyNames(ExecState* exec, PropertyNameArray& propertyNa
 
     ArrayStorage* storage = m_storage;
 
-    unsigned usedVectorLength = min(storage->m_length, storage->m_vectorLength);
+    unsigned usedVectorLength = min(storage->m_length, m_vectorLength);
     for (unsigned i = 0; i < usedVectorLength; ++i) {
         if (storage->m_vector[i])
             propertyNames.add(Identifier::from(exec, i));
@@ -494,7 +484,7 @@ bool JSArray::increaseVectorLength(unsigned newLength)
 
     ArrayStorage* storage = m_storage;
 
-    unsigned vectorLength = storage->m_vectorLength;
+    unsigned vectorLength = m_vectorLength;
     ASSERT(newLength > vectorLength);
     ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
     unsigned newVectorLength = increasedVectorLength(newLength);
@@ -503,7 +493,7 @@ bool JSArray::increaseVectorLength(unsigned newLength)
         return false;
 
     Heap::heap(this)->reportExtraMemoryCost(storageSize(newVectorLength) - storageSize(vectorLength));
-    storage->m_vectorLength = newVectorLength;
+    m_vectorLength = newVectorLength;
 
     for (unsigned i = vectorLength; i < newVectorLength; ++i)
         storage->m_vector[i] = JSValue();
@@ -521,10 +511,7 @@ void JSArray::setLength(unsigned newLength)
     unsigned length = m_storage->m_length;
 
     if (newLength < length) {
-        if (m_fastAccessCutoff > newLength)
-            m_fastAccessCutoff = newLength;
-
-        unsigned usedVectorLength = min(length, storage->m_vectorLength);
+        unsigned usedVectorLength = min(length, m_vectorLength);
         for (unsigned i = newLength; i < usedVectorLength; ++i) {
             JSValue& valueSlot = storage->m_vector[i];
             bool hadValue = valueSlot;
@@ -563,20 +550,13 @@ JSValue JSArray::pop()
 
     JSValue result;
 
-    if (m_fastAccessCutoff > length) {
-        JSValue& valueSlot = m_storage->m_vector[length];
-        result = valueSlot;
-        ASSERT(result);
-        valueSlot = JSValue();
-        --m_storage->m_numValuesInVector;
-        m_fastAccessCutoff = length;
-    } else if (length < m_storage->m_vectorLength) {
+    if (length < m_vectorLength) {
         JSValue& valueSlot = m_storage->m_vector[length];
-        result = valueSlot;
-        valueSlot = JSValue();
-        if (result)
+        if (valueSlot) {
             --m_storage->m_numValuesInVector;
-        else
+            result = valueSlot;
+            valueSlot = JSValue();
+        } else
             result = jsUndefined();
     } else {
         result = jsUndefined();
@@ -604,11 +584,10 @@ void JSArray::push(ExecState* exec, JSValue value)
 {
     checkConsistency();
 
-    if (m_storage->m_length < m_storage->m_vectorLength) {
-        ASSERT(!m_storage->m_vector[m_storage->m_length]);
+    if (m_storage->m_length < m_vectorLength) {
         m_storage->m_vector[m_storage->m_length] = value;
-        if (++m_storage->m_numValuesInVector == ++m_storage->m_length)
-            m_fastAccessCutoff = m_storage->m_length;
+        ++m_storage->m_numValuesInVector;
+        ++m_storage->m_length;
         checkConsistency();
         return;
     }
@@ -618,8 +597,8 @@ void JSArray::push(ExecState* exec, JSValue value)
         if (!map || map->isEmpty()) {
             if (increaseVectorLength(m_storage->m_length + 1)) {
                 m_storage->m_vector[m_storage->m_length] = value;
-                if (++m_storage->m_numValuesInVector == ++m_storage->m_length)
-                    m_fastAccessCutoff = m_storage->m_length;
+                ++m_storage->m_numValuesInVector;
+                ++m_storage->m_length;
                 checkConsistency();
                 return;
             }
@@ -837,7 +816,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
     if (!m_storage->m_length)
         return;
 
-    unsigned usedVectorLength = min(m_storage->m_length, m_storage->m_vectorLength);
+    unsigned usedVectorLength = min(m_storage->m_length, m_vectorLength);
 
     AVLTree<AVLTreeAbstractorForArrayCompare, 44> tree; // Depth 44 is enough for 2^31 items
     tree.abstractor().m_exec = exec;
@@ -886,7 +865,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
 
     if (SparseArrayValueMap* map = m_storage->m_sparseValueMap) {
         newUsedVectorLength += map->size();
-        if (newUsedVectorLength > m_storage->m_vectorLength) {
+        if (newUsedVectorLength > m_vectorLength) {
             // Check that it is possible to allocate an array large enough to hold all the entries.
             if ((newUsedVectorLength > MAX_STORAGE_VECTOR_LENGTH) || !increaseVectorLength(newUsedVectorLength)) {
                 throwOutOfMemoryError(exec);
@@ -926,7 +905,6 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
     for (unsigned i = newUsedVectorLength; i < usedVectorLength; ++i)
         m_storage->m_vector[i] = JSValue();
 
-    m_fastAccessCutoff = newUsedVectorLength;
     m_storage->m_numValuesInVector = newUsedVectorLength;
 
     checkConsistency(SortConsistencyCheck);
@@ -934,10 +912,16 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
 
 void JSArray::fillArgList(ExecState* exec, MarkedArgumentBuffer& args)
 {
-    unsigned fastAccessLength = min(m_storage->m_length, m_fastAccessCutoff);
+    JSValue* vector = m_storage->m_vector;
+    unsigned vectorEnd = min(m_storage->m_length, m_vectorLength);
     unsigned i = 0;
-    for (; i < fastAccessLength; ++i)
-        args.append(getIndex(i));
+    for (; i < vectorEnd; ++i) {
+        JSValue& v = vector[i];
+        if (!v)
+            break;
+        args.append(v);
+    }
+
     for (; i < m_storage->m_length; ++i)
         args.append(get(exec, i));
 }
@@ -946,12 +930,17 @@ void JSArray::copyToRegisters(ExecState* exec, Register* buffer, uint32_t maxSiz
 {
     ASSERT(m_storage->m_length == maxSize);
     UNUSED_PARAM(maxSize);
-    unsigned fastAccessLength = min(m_storage->m_length, m_fastAccessCutoff);
+    JSValue* vector = m_storage->m_vector;
+    unsigned vectorEnd = min(m_storage->m_length, m_vectorLength);
     unsigned i = 0;
-    for (; i < fastAccessLength; ++i)
-        buffer[i] = getIndex(i);
-    uint32_t size = m_storage->m_length;
-    for (; i < size; ++i)
+    for (; i < vectorEnd; ++i) {
+        JSValue& v = vector[i];
+        if (!v)
+            break;
+        buffer[i] = v;
+    }
+
+    for (; i < m_storage->m_length; ++i)
         buffer[i] = get(exec, i);
 }
 
@@ -961,7 +950,7 @@ unsigned JSArray::compactForSorting()
 
     ArrayStorage* storage = m_storage;
 
-    unsigned usedVectorLength = min(m_storage->m_length, storage->m_vectorLength);
+    unsigned usedVectorLength = min(m_storage->m_length, m_vectorLength);
 
     unsigned numDefined = 0;
     unsigned numUndefined = 0;
@@ -985,7 +974,7 @@ unsigned JSArray::compactForSorting()
 
     if (SparseArrayValueMap* map = storage->m_sparseValueMap) {
         newUsedVectorLength += map->size();
-        if (newUsedVectorLength > storage->m_vectorLength) {
+        if (newUsedVectorLength > m_vectorLength) {
             // Check that it is possible to allocate an array large enough to hold all the entries - if not,
             // exception is thrown by caller.
             if ((newUsedVectorLength > MAX_STORAGE_VECTOR_LENGTH) || !increaseVectorLength(newUsedVectorLength))
@@ -1006,7 +995,6 @@ unsigned JSArray::compactForSorting()
     for (unsigned i = newUsedVectorLength; i < usedVectorLength; ++i)
         storage->m_vector[i] = JSValue();
 
-    m_fastAccessCutoff = newUsedVectorLength;
     storage->m_numValuesInVector = newUsedVectorLength;
 
     checkConsistency(SortConsistencyCheck);
@@ -1032,30 +1020,27 @@ void JSArray::checkConsistency(ConsistencyCheckType type)
     if (type == SortConsistencyCheck)
         ASSERT(!m_storage->m_sparseValueMap);
 
-    ASSERT(m_fastAccessCutoff <= m_storage->m_length);
-    ASSERT(m_fastAccessCutoff <= m_storage->m_numValuesInVector);
-
     unsigned numValuesInVector = 0;
-    for (unsigned i = 0; i < m_storage->m_vectorLength; ++i) {
+    for (unsigned i = 0; i < m_vectorLength; ++i) {
         if (JSValue value = m_storage->m_vector[i]) {
             ASSERT(i < m_storage->m_length);
             if (type != DestructorConsistencyCheck)
                 value->type(); // Likely to crash if the object was deallocated.
             ++numValuesInVector;
         } else {
-            ASSERT(i >= m_fastAccessCutoff);
             if (type == SortConsistencyCheck)
                 ASSERT(i >= m_storage->m_numValuesInVector);
         }
     }
     ASSERT(numValuesInVector == m_storage->m_numValuesInVector);
+    ASSERT(numValuesInVector <= m_storage->m_length);
 
     if (m_storage->m_sparseValueMap) {
         SparseArrayValueMap::iterator end = m_storage->m_sparseValueMap->end();
         for (SparseArrayValueMap::iterator it = m_storage->m_sparseValueMap->begin(); it != end; ++it) {
             unsigned index = it->first;
             ASSERT(index < m_storage->m_length);
-            ASSERT(index >= m_storage->m_vectorLength);
+            ASSERT(index >= m_vectorLength);
             ASSERT(index <= MAX_ARRAY_INDEX);
             ASSERT(it->second);
             if (type != DestructorConsistencyCheck)
diff --git a/JavaScriptCore/runtime/JSArray.h b/JavaScriptCore/runtime/JSArray.h
index 12768a4..66b5a1d 100644
--- a/JavaScriptCore/runtime/JSArray.h
+++ b/JavaScriptCore/runtime/JSArray.h
@@ -29,7 +29,6 @@ namespace JSC {
 
     struct ArrayStorage {
         unsigned m_length;
-        unsigned m_vectorLength;
         unsigned m_numValuesInVector;
         SparseArrayValueMap* m_sparseValueMap;
         void* lazyCreationData; // A JSArray subclass can use this to fill the vector lazily.
@@ -63,18 +62,24 @@ namespace JSC {
         void push(ExecState*, JSValue);
         JSValue pop();
 
-        bool canGetIndex(unsigned i) { return i < m_fastAccessCutoff; }
+        bool canGetIndex(unsigned i) { return i < m_vectorLength && m_storage->m_vector[i]; }
         JSValue getIndex(unsigned i)
         {
             ASSERT(canGetIndex(i));
             return m_storage->m_vector[i];
         }
 
-        bool canSetIndex(unsigned i) { return i < m_fastAccessCutoff; }
-        JSValue setIndex(unsigned i, JSValue v)
+        bool canSetIndex(unsigned i) { return i < m_vectorLength; }
+        void setIndex(unsigned i, JSValue v)
         {
             ASSERT(canSetIndex(i));
-            return m_storage->m_vector[i] = v;
+            JSValue& x = m_storage->m_vector[i];
+            if (!x) {
+                ++m_storage->m_numValuesInVector;
+                if (i >= m_storage->m_length)
+                    m_storage->m_length = i + 1;
+            }
+            x = v;
         }
 
         void fillArgList(ExecState*, MarkedArgumentBuffer&);
@@ -110,7 +115,7 @@ namespace JSC {
         enum ConsistencyCheckType { NormalConsistencyCheck, DestructorConsistencyCheck, SortConsistencyCheck };
         void checkConsistency(ConsistencyCheckType = NormalConsistencyCheck);
 
-        unsigned m_fastAccessCutoff;
+        unsigned m_vectorLength;
         ArrayStorage* m_storage;
     };
 
@@ -139,7 +144,7 @@ namespace JSC {
         
         ArrayStorage* storage = m_storage;
 
-        unsigned usedVectorLength = std::min(storage->m_length, storage->m_vectorLength);
+        unsigned usedVectorLength = std::min(storage->m_length, m_vectorLength);
         markStack.appendValues(storage->m_vector, usedVectorLength, MayContainNullValues);
 
         if (SparseArrayValueMap* map = storage->m_sparseValueMap) {
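
As a reading aid for the JITPropertyAccess.cpp hunks above, the new inline
fast paths behave roughly like the following C++ sketch (hypothetical helper
methods written against the ArrayStorage layout in this patch; they do not
exist in the tree):

    // Sketch of the emitted get_by_val fast path.
    bool JSArray::tryGetIndexFast(unsigned i, JSValue& result)
    {
        if (i >= m_vectorLength)
            return false;                 // vector length check -> slow case
        JSValue value = m_storage->m_vector[i];
        if (!value)
            return false;                 // empty value (hole) -> slow case
        result = value;
        return true;
    }

    // Sketch of the emitted put_by_val fast path, once i < m_vectorLength
    // has already been checked.
    void JSArray::putIndexFast(unsigned i, JSValue value)
    {
        JSValue& slot = m_storage->m_vector[i];
        if (!slot) {
            // First write to this hole: bump the value count and, when
            // writing past the current length, grow it to i + 1.
            ++m_storage->m_numValuesInVector;
            if (i >= m_storage->m_length)
                m_storage->m_length = i + 1;
        }
        slot = value;
    }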

-- 
WebKit Debian packaging


