[SCM] WebKit Debian packaging branch, debian/unstable, updated. debian/1.1.22-1-992-gfd6f826

Gustavo Noronha Silva kov at debian.org
Wed Mar 17 19:52:23 UTC 2010


The following commit has been merged in the debian/unstable branch:
commit aa84620979c017396a7d941031d2c2923fd5ac18
Merge: 8b6c041475f930335899b120e4542cde6fab607f 3c00f005323a09fd3c6d1b9e5b43c1aad5dc5f71
Author: Gustavo Noronha Silva <kov at debian.org>
Date:   Wed Mar 17 15:01:28 2010 -0300

    Merge branch 'webkit-1.1' into debian/unstable
    
    Conflicts:
    	WebKit/gtk/tests/testwebview.c
    	WebKit/gtk/webkit/webkitwebview.cpp

diff --combined JavaScriptCore/wtf/FastMalloc.cpp
index 159fc5e,e7d9efa..aa7d192
--- a/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/JavaScriptCore/wtf/FastMalloc.cpp
@@@ -208,6 -208,12 +208,12 @@@ TryMallocReturnValue tryFastZeroedMallo
  #include "brew/SystemMallocBrew.h"
  #endif
  
+ #if OS(DARWIN)
+ #include <malloc/malloc.h>
+ #elif COMPILER(MSVC)
+ #include <malloc.h>
+ #endif
+ 
  namespace WTF {
  
  TryMallocReturnValue tryFastMalloc(size_t n) 
@@@ -369,10 -375,21 +375,21 @@@ void releaseFastMallocFreeMemory() { 
      
  FastMallocStatistics fastMallocStatistics()
  {
-     FastMallocStatistics statistics = { 0, 0, 0, 0 };
+     FastMallocStatistics statistics = { 0, 0, 0 };
      return statistics;
  }
  
+ size_t fastMallocSize(const void* p)
+ {
+ #if OS(DARWIN)
+     return malloc_size(p);
+ #elif COMPILER(MSVC)
+     return _msize(const_cast<void*>(p));
+ #else
+     return 1;
+ #endif
+ }
+ 
  } // namespace WTF
  
  #if OS(DARWIN)
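For context, the fastMallocSize() in the hunk above is the FORCE_SYSTEM_MALLOC variant, where WTF defers to the platform allocator: malloc_size() reports a block's usable size on Darwin, _msize() does the same under MSVC, and every other platform falls back to the constant 1, a nonzero placeholder for callers that only need to distinguish "no information" from a real size. A minimal usage sketch (not part of the patch; fastMalloc/fastFree are the existing WTF entry points, and the include path may vary per build):

    // Sketch: query the usable size of a fastMalloc block.
    #include "FastMalloc.h"

    void example()
    {
        void* p = WTF::fastMalloc(100);
        size_t usable = WTF::fastMallocSize(p); // >= 100 on Darwin/MSVC; 1 in the generic fallback
        (void)usable;
        WTF::fastFree(p);
    }
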
@@@ -400,7 -417,6 +417,6 @@@ extern "C" const int jscore_fastmalloc_
  #include <algorithm>
  #include <errno.h>
  #include <limits>
- #include <new>
  #include <pthread.h>
  #include <stdarg.h>
  #include <stddef.h>
@@@ -415,7 -431,7 +431,7 @@@
  #include <windows.h>
  #endif
  
- #if WTF_CHANGES
+ #ifdef WTF_CHANGES
  
  #if OS(DARWIN)
  #include "MallocZoneSupport.h"
@@@ -464,7 -480,7 +480,7 @@@ namespace WTF 
  #define CHECK_CONDITION ASSERT
  
  #if OS(DARWIN)
- class Span;
+ struct Span;
  class TCMalloc_Central_FreeListPadded;
  class TCMalloc_PageHeap;
  class TCMalloc_ThreadCache;
@@@ -1236,18 -1252,26 +1252,26 @@@ template <> class MapSelector<32> 
  // -------------------------------------------------------------------------
  
  #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
- // The central page heap collects spans of memory that have been deleted but are still committed until they are released
- // back to the system.  We use a background thread to periodically scan the list of free spans and release some back to the
- // system.  Every 5 seconds, the background thread wakes up and does the following:
- // - Check if we needed to commit memory in the last 5 seconds.  If so, skip this scavenge because it's a sign that we are short
- // of free committed pages and so we should not release them back to the system yet.
- // - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
- // back to the system.
- // - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the
- // scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount.
- 
- // Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
- static const int kScavengeTimerDelayInSeconds = 5;
+ // The page heap maintains a free list for spans that are no longer in use by
+ // the central cache or any thread caches. We use a background thread to
+ // periodically scan the free list and release a percentage of it back to the OS.
+ 
+ // If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
+ // background thread:
+ //     - wakes up
+ //     - pauses for kScavengeDelayInSeconds
+ //     - returns to the OS a percentage of the memory that remained unused during
+ //       that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
+ // The goal of this strategy is to reduce memory pressure in a timely fashion
+ // while avoiding thrashing the OS allocator.
+ 
+ // Time delay before the page heap scavenger will consider returning pages to
+ // the OS.
+ static const int kScavengeDelayInSeconds = 2;
+ 
+ // Approximate percentage of free committed pages to return to the OS in one
+ // scavenge.
+ static const float kScavengePercentage = .5f;
  
  // Number of free committed pages that we want to keep around.
  static const size_t kMinimumFreeCommittedPageCount = 512;
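To put the new constants in concrete terms (illustrative numbers, not from the patch): if min_free_committed_pages_since_last_scavenge_ ends a 2-second interval at 1000, then 1000 committed pages stayed free for the whole interval, and the next scavenge returns kScavengePercentage * 1000 = 500 of them to the OS, while never letting free_committed_pages_ drop below kMinimumFreeCommittedPageCount (512). Pages that were reused during the interval never enter the calculation, which is what keeps the scavenger from releasing memory that a growing heap is about to want back.
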
@@@ -1357,8 -1381,9 +1381,9 @@@ class TCMalloc_PageHeap 
    // Number of pages kept in free lists that are still committed.
    Length free_committed_pages_;
  
-   // Number of pages that we committed in the last scavenge wait interval.
-   Length pages_committed_since_last_scavenge_;
+   // Minimum number of free committed pages since last scavenge. (Can be 0 if
+   // we've committed new pages since the last scavenge.)
+   Length min_free_committed_pages_since_last_scavenge_;
  #endif
  
    bool GrowHeap(Length n);
@@@ -1403,13 -1428,13 +1428,13 @@@
    void initializeScavenger();
    ALWAYS_INLINE void signalScavenger();
    void scavenge();
-   ALWAYS_INLINE bool shouldContinueScavenging() const;
+   ALWAYS_INLINE bool shouldScavenge() const;
  
  #if !HAVE(DISPATCH_H)
    static NO_RETURN void* runScavengerThread(void*);
    NO_RETURN void scavengerThread();
  
-   // Keeps track of whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds, or
+   // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds, or
    // it's blocked waiting for more pages to be deleted.
    bool m_scavengeThreadActive;
  
@@@ -1435,7 -1460,7 +1460,7 @@@ void TCMalloc_PageHeap::init(
  
  #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    free_committed_pages_ = 0;
-   pages_committed_since_last_scavenge_ = 0;
+   min_free_committed_pages_since_last_scavenge_ = 0;
  #endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
  
    scavenge_counter_ = 0;
@@@ -1478,7 -1503,7 +1503,7 @@@ void* TCMalloc_PageHeap::runScavengerTh
  
  ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
  {
-   if (!m_scavengeThreadActive && shouldContinueScavenging())
+   if (!m_scavengeThreadActive && shouldScavenge())
      pthread_cond_signal(&m_scavengeCondition);
  }
  
@@@ -1488,15 -1513,15 +1513,15 @@@ void TCMalloc_PageHeap::initializeScave
  {
    m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
    m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
-   dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeTimerDelayInSeconds * NSEC_PER_SEC);
-   dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeTimerDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
+   dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeDelayInSeconds * NSEC_PER_SEC);
+   dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
    dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
    m_scavengingScheduled = false;
  }
  
  ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
  {
-   if (!m_scavengingScheduled && shouldContinueScavenging()) {
+   if (!m_scavengingScheduled && shouldScavenge()) {
      m_scavengingScheduled = true;
      dispatch_resume(m_scavengeTimer);
    }
@@@ -1506,17 -1531,12 +1531,12 @@@
  
  void TCMalloc_PageHeap::scavenge()
  {
-     // If we've recently commited pages, our working set is growing, so now is
-     // not a good time to free pages.
-     if (pages_committed_since_last_scavenge_ > 0) {
-         pages_committed_since_last_scavenge_ = 0;
-         return;
-     }
+     size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
+     size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
  
-     for (int i = kMaxPages; i >= 0 && shouldContinueScavenging(); i--) {
+     for (int i = kMaxPages; i >= 0 && free_committed_pages_ > targetPageCount; i--) {
          SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
-         if (!DLL_IsEmpty(&slist->normal)) {
-             // Release the last span on the normal portion of this list
+         while (!DLL_IsEmpty(&slist->normal) && free_committed_pages_ > targetPageCount) {
              Span* s = slist->normal.prev; 
              DLL_Remove(s);
              ASSERT(!s->decommitted);
@@@ -1531,11 -1551,10 +1551,10 @@@
          }
      }
  
-     ASSERT(!shouldContinueScavenging());
-     pages_committed_since_last_scavenge_ = 0;
+     min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
  }
  
- ALWAYS_INLINE bool TCMalloc_PageHeap::shouldContinueScavenging() const 
+ ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const 
  {
      return free_committed_pages_ > kMinimumFreeCommittedPageCount; 
  }
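The arithmetic in the new scavenge() distills to a small pure function. A standalone sketch (names follow the patch; the span-list traversal and TCMalloc_SystemRelease() calls are elided, and minFreeSinceLastScavenge <= freeCommittedPages is a precondition, which holds in the heap because it is a low-water mark of free_committed_pages_):

    #include <algorithm>
    #include <cstddef>

    const std::size_t kMinimumFreeCommittedPageCount = 512;
    const float kScavengePercentage = .5f;

    // How many committed free pages the heap should still hold after one scavenge.
    std::size_t releaseTarget(std::size_t freeCommittedPages,
                              std::size_t minFreeSinceLastScavenge)
    {
        std::size_t pagesToRelease = minFreeSinceLastScavenge * kScavengePercentage;
        return std::max<std::size_t>(kMinimumFreeCommittedPageCount,
                                     freeCommittedPages - pagesToRelease);
    }
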
@@@ -1567,9 -1586,6 +1586,6 @@@ inline Span* TCMalloc_PageHeap::New(Len
      if (result->decommitted) {
          TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
          result->decommitted = false;
- #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-         pages_committed_since_last_scavenge_ += n;
- #endif
      }
  #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
      else {
@@@ -1577,6 -1593,8 +1593,8 @@@
          // free committed pages count.
          ASSERT(free_committed_pages_ >= n);
          free_committed_pages_ -= n;
+         if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+             min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
      }
  #endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
      ASSERT(Check());
@@@ -1638,9 -1656,6 +1656,6 @@@ Span* TCMalloc_PageHeap::AllocLarge(Len
      if (best->decommitted) {
          TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
          best->decommitted = false;
- #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-         pages_committed_since_last_scavenge_ += n;
- #endif
      }
  #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
      else {
@@@ -1648,6 -1663,8 +1663,8 @@@
          // free committed pages count.
          ASSERT(free_committed_pages_ >= n);
          free_committed_pages_ -= n;
+         if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+             min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
      }
  #endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
      ASSERT(Check());
@@@ -1791,6 -1808,8 +1808,8 @@@ inline void TCMalloc_PageHeap::Delete(S
        // If the merged span is decommitted, that means we decommitted any neighboring spans that were
        // committed.  Update the free committed pages count.
        free_committed_pages_ -= neighboringCommittedSpansLength;
+       if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+             min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
    } else {
        // If the merged span remains committed, add the deleted span's size to the free committed pages count.
        free_committed_pages_ += n;
@@@ -1955,10 -1974,6 +1974,6 @@@ bool TCMalloc_PageHeap::GrowHeap(Lengt
    }
    ask = actual_size >> kPageShift;
  
- #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-   pages_committed_since_last_scavenge_ += ask;
- #endif
- 
    uint64_t old_system_bytes = system_bytes_;
    system_bytes_ += (ask << kPageShift);
    const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
@@@ -2321,13 -2336,13 +2336,13 @@@ static TCMalloc_Central_FreeListPadded 
  
  // Page-level allocator
  static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
 -static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
 +static uint64_t pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(uint64_t) - 1) / sizeof(uint64_t)];
  static bool phinited = false;
  
  // Avoid extra level of indirection by making "pageheap" be just an alias
  // of pageheap_memory.
  typedef union {
 -    void* m_memory;
 +    uint64_t* m_memory;
      TCMalloc_PageHeap* m_pageHeap;
  } PageHeapUnion;
  
@@@ -2356,15 -2371,15 +2371,15 @@@ void TCMalloc_PageHeap::scavengerThread
  #endif
  
    while (1) {
-       if (!shouldContinueScavenging()) {
+       if (!shouldScavenge()) {
            pthread_mutex_lock(&m_scavengeMutex);
            m_scavengeThreadActive = false;
-           // Block until there are enough freed pages to release back to the system.
+           // Block until there are enough free committed pages to release back to the system.
            pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
            m_scavengeThreadActive = true;
            pthread_mutex_unlock(&m_scavengeMutex);
        }
-       sleep(kScavengeTimerDelayInSeconds);
+       sleep(kScavengeDelayInSeconds);
        {
            SpinLockHolder h(&pageheap_lock);
            pageheap->scavenge();
@@@ -2381,7 -2396,7 +2396,7 @@@ void TCMalloc_PageHeap::periodicScaveng
      pageheap->scavenge();
    }
  
-   if (!shouldContinueScavenging()) {
+   if (!shouldScavenge()) {
      m_scavengingScheduled = false;
      dispatch_suspend(m_scavengeTimer);
    }
@@@ -3926,6 -3941,8 +3941,8 @@@ static inline void* cpp_alloc(size_t si
    }
  }
  
+ #if ENABLE(GLOBAL_FASTMALLOC_NEW)
+ 
  void* operator new(size_t size) {
    void* p = cpp_alloc(size, false);
    // We keep this next instruction out of cpp_alloc for a reason: when
@@@ -3980,6 -3997,8 +3997,8 @@@ void operator delete[](void* p, const s
    do_free(p);
  }
  
+ #endif
+ 
  extern "C" void* memalign(size_t align, size_t size) __THROW {
    void* result = do_memalign(align, size);
    MallocHook::InvokeNewHook(result, size);
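The new ENABLE(GLOBAL_FASTMALLOC_NEW) guard around the operator new/delete family lets a port keep FastMalloc while leaving the global operators untouched. The Platform.h half of this change (second file in this diff) defaults the flag to 1 and switches it off for the Qt and wx ports; an opting-out port simply defines, in its own Platform.h block:

    /* Keep FastMalloc, but do not replace global operator new/delete. */
    #define ENABLE_GLOBAL_FASTMALLOC_NEW 0
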
@@@ -4095,7 -4114,62 +4114,62 @@@ void *(*__memalign_hook)(size_t, size_t
  
  #endif
  
- #if defined(WTF_CHANGES) && OS(DARWIN)
+ #ifdef WTF_CHANGES
+ void releaseFastMallocFreeMemory()
+ {
+     // Flush free pages in the current thread cache back to the page heap.
+     // Low watermark mechanism in Scavenge() prevents full return on the first pass.
+     // The second pass flushes everything.
+     if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
+         threadCache->Scavenge();
+         threadCache->Scavenge();
+     }
+ 
+     SpinLockHolder h(&pageheap_lock);
+     pageheap->ReleaseFreePages();
+ }
+     
+ FastMallocStatistics fastMallocStatistics()
+ {
+     FastMallocStatistics statistics;
+ 
+     SpinLockHolder lockHolder(&pageheap_lock);
+     statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
+     statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
+ 
+     statistics.freeListBytes = 0;
+     for (unsigned cl = 0; cl < kNumClasses; ++cl) {
+         const int length = central_cache[cl].length();
+         const int tc_length = central_cache[cl].tc_length();
+ 
+         statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
+     }
+     for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
+         statistics.freeListBytes += threadCache->Size();
+ 
+     return statistics;
+ }
+ 
+ size_t fastMallocSize(const void* ptr)
+ {
+     const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+     Span* span = pageheap->GetDescriptorEnsureSafe(p);
+ 
+     if (!span || span->free)
+         return 0;
+ 
+     for (void* free = span->objects; free != NULL; free = *((void**) free)) {
+         if (ptr == free)
+             return 0;
+     }
+ 
+     if (size_t cl = span->sizeclass)
+         return ByteSizeForClass(cl);
+ 
+     return span->length << kPageShift;
+ }
+ 
+ #if OS(DARWIN)
  
  class FreeObjectFinder {
      const RemoteMemoryReader& m_reader;
@@@ -4412,44 -4486,9 +4486,9 @@@ void FastMallocZone::init(
      static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
  }
  
- #endif
- 
- #if WTF_CHANGES
- void releaseFastMallocFreeMemory()
- {
-     // Flush free pages in the current thread cache back to the page heap.
-     // Low watermark mechanism in Scavenge() prevents full return on the first pass.
-     // The second pass flushes everything.
-     if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
-         threadCache->Scavenge();
-         threadCache->Scavenge();
-     }
- 
-     SpinLockHolder h(&pageheap_lock);
-     pageheap->ReleaseFreePages();
- }
-     
- FastMallocStatistics fastMallocStatistics()
- {
-     FastMallocStatistics statistics;
-     {
-         SpinLockHolder lockHolder(&pageheap_lock);
-         statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
-         statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
-         statistics.returnedSize = pageheap->ReturnedBytes();
-         statistics.freeSizeInCaches = 0;
-         for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
-             statistics.freeSizeInCaches += threadCache->Size();
-     }
-     for (unsigned cl = 0; cl < kNumClasses; ++cl) {
-         const int length = central_cache[cl].length();
-         const int tc_length = central_cache[cl].tc_length();
-         statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
-     }
-     return statistics;
- }
+ #endif // OS(DARWIN)
  
  } // namespace WTF
- #endif
+ #endif // WTF_CHANGES
  
  #endif // FORCE_SYSTEM_MALLOC
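Summing up the FastMalloc.cpp side: the statistics fields are renamed (heapSize, freeSizeInHeap, returnedSize and freeSizeInCaches give way to reservedVMBytes, committedVMBytes and freeListBytes, hence the three-element initializer in the system-malloc stub near the top), and fastMallocSize() is added, walking from pointer to page to Span and reporting 0 for anything not live in the heap. A hedged sketch of a diagnostic caller (field names are taken from the assignments above; the struct itself is declared in FastMalloc.h):

    #include "FastMalloc.h"
    #include <cstdio>

    void dumpFastMallocStatistics()
    {
        WTF::FastMallocStatistics s = WTF::fastMallocStatistics();
        std::printf("reserved: %zu B, committed: %zu B, free lists: %zu B\n",
                    s.reservedVMBytes, s.committedVMBytes, s.freeListBytes);
    }
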
diff --combined JavaScriptCore/wtf/Platform.h
index 9e6946b,40bb4e6..3cd4290
--- a/JavaScriptCore/wtf/Platform.h
+++ b/JavaScriptCore/wtf/Platform.h
@@@ -104,6 -104,23 +104,23 @@@
  #define WTF_CPU_IA64 1
  #endif
  
+ /* CPU(MIPS) - MIPS 32-bit */
+ /* Note: Only O32 ABI is tested, so we enable it for O32 ABI for now.  */
+ #if (defined(mips) || defined(__mips__)) \
+     && defined(_ABIO32)
+ #define WTF_CPU_MIPS 1
+ #if defined(__MIPSEB__)
+ #define WTF_CPU_BIG_ENDIAN 1
+ #endif
+ #define WTF_MIPS_PIC (defined __PIC__)
+ #define WTF_MIPS_ARCH __mips
+ #define WTF_MIPS_ISA(v) (defined WTF_MIPS_ARCH && WTF_MIPS_ARCH == v)
+ #define WTF_MIPS_ISA_AT_LEAST(v) (defined WTF_MIPS_ARCH && WTF_MIPS_ARCH >= v)
+ #define WTF_MIPS_ARCH_REV __mips_isa_rev
+ #define WTF_MIPS_ISA_REV(v) (defined WTF_MIPS_ARCH_REV && WTF_MIPS_ARCH_REV == v)
+ #define WTF_MIPS_DOUBLE_FLOAT (defined __mips_hard_float && !defined __mips_single_float)
+ #endif /* MIPS */
+ 
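Note that the MIPS block defines predicates rather than bare flags, so dependent code can test the ISA level directly in preprocessor conditionals. A hypothetical guard (illustrative only, not from the patch):

    /* Only on MIPS32 release 2, which adds e.g. the SEB/SEH sign-extension forms. */
    #if WTF_MIPS_ISA_REV(2)
    /* ... r2-specific code path ... */
    #endif
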
  /* CPU(PPC) - PowerPC 32-bit */
  #if   defined(__ppc__)     \
      || defined(__PPC__)     \
@@@ -142,7 -159,7 +159,7 @@@
  
  /* CPU(SPARC) - any SPARC, true for CPU(SPARC32) and CPU(SPARC64) */
  #if CPU(SPARC32) || CPU(SPARC64)
 -#define WTF_CPU_SPARC
 +#define WTF_CPU_SPARC 1
  #endif
  
  /* CPU(X86) - i386 / x86 32-bit */
@@@ -562,10 -579,15 +579,15 @@@
  #define HAVE_RUNLOOP_TIMER 1
  #endif /* PLATFORM(MAC) && !PLATFORM(IPHONE) */
  
+ #if PLATFORM(MAC)
+ #define WTF_USE_CARBON_SECURE_INPUT_MODE 1
+ #endif
+ 
  #if PLATFORM(CHROMIUM) && OS(DARWIN)
  #define WTF_PLATFORM_CF 1
  #define WTF_USE_PTHREADS 1
  #define HAVE_PTHREAD_RWLOCK 1
+ #define WTF_USE_CARBON_SECURE_INPUT_MODE 1
  #endif
  
  #if PLATFORM(QT) && OS(DARWIN)
@@@ -607,6 -629,7 +629,7 @@@
  
  #if PLATFORM(WX)
  #define ENABLE_ASSEMBLER 1
+ #define ENABLE_GLOBAL_FASTMALLOC_NEW 0
  #if OS(DARWIN)
  #define WTF_PLATFORM_CF 1
  #endif
@@@ -742,6 -765,11 +765,11 @@@
  
  /* ENABLE macro defaults */
  
+ #if PLATFORM(QT)
+ // We mustn't customize the global operator new and delete for the Qt port.
+ #define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+ #endif
+ 
  /* fastMalloc match validation allows for runtime verification that
     new is matched by delete, fastMalloc is matched by fastFree, etc. */
  #if !defined(ENABLE_FAST_MALLOC_MATCH_VALIDATION)
@@@ -800,6 -828,10 +828,10 @@@
  #define ENABLE_OPCODE_STATS 0
  #endif
  
+ #if !defined(ENABLE_GLOBAL_FASTMALLOC_NEW)
+ #define ENABLE_GLOBAL_FASTMALLOC_NEW 1
+ #endif
+ 
  #define ENABLE_SAMPLING_COUNTERS 0
  #define ENABLE_SAMPLING_FLAGS 0
  #define ENABLE_OPCODE_SAMPLING 0
@@@ -830,7 -862,7 +862,7 @@@
  #if !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32) && !defined(WTF_USE_JSVALUE32_64)
  #if (CPU(X86_64) && (OS(UNIX) || OS(WINDOWS))) || CPU(IA64) || CPU(ALPHA)
  #define WTF_USE_JSVALUE64 1
- #elif CPU(ARM) || CPU(PPC64)
+ #elif CPU(ARM) || CPU(PPC64) || CPU(MIPS)
  #define WTF_USE_JSVALUE32 1
  #elif OS(WINDOWS) && COMPILER(MINGW)
  /* Using JSVALUE32_64 causes padding/alignment issues for JITStubArg
@@@ -856,6 -888,9 +888,9 @@@ on MinGW. See https://bugs.webkit.org/s
      #define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
  #elif CPU(ARM_THUMB2) && PLATFORM(IPHONE)
      #define ENABLE_JIT 1
+ /* The JIT is tested & working on Android */
+ #elif CPU(ARM_THUMB2) && PLATFORM(ANDROID) && ENABLE(ANDROID_JSC_JIT)
+     #define ENABLE_JIT 1
  /* The JIT is tested & working on x86 Windows */
  #elif CPU(X86) && PLATFORM(WIN)
      #define ENABLE_JIT 1
@@@ -935,6 -970,7 +970,7 @@@
  #if (CPU(X86) && PLATFORM(MAC)) \
      || (CPU(X86_64) && PLATFORM(MAC)) \
      || (CPU(ARM_THUMB2) && PLATFORM(IPHONE)) \
+     || (CPU(ARM_THUMB2) && PLATFORM(ANDROID) && ENABLE(ANDROID_JSC_JIT)) \
      || (CPU(X86) && PLATFORM(WIN)) \
      || (CPU(X86) && PLATFORM(WX))
  #define ENABLE_YARR 1
@@@ -946,7 -982,8 +982,8 @@@
      || (CPU(X86) && OS(WINDOWS) && COMPILER(MSVC)) \
      || (CPU(X86) && OS(LINUX) && GCC_VERSION >= 40100) \
      || (CPU(X86_64) && OS(LINUX) && GCC_VERSION >= 40100) \
-     || (CPU(ARM_TRADITIONAL) && OS(LINUX))
+     || (CPU(ARM_TRADITIONAL) && OS(LINUX)) \
+     || (CPU(MIPS) && OS(LINUX))
  #define ENABLE_YARR 1
  #define ENABLE_YARR_JIT 1
  #endif
diff --combined WebCore/platform/text/AtomicString.cpp
index c3b662d,c313f7f..75a3aff
--- a/WebCore/platform/text/AtomicString.cpp
+++ b/WebCore/platform/text/AtomicString.cpp
@@@ -103,9 -103,9 +103,9 @@@ static inline bool equal(StringImpl* st
      if (string->length() != length)
          return false;
  
 +#if CPU(ARM) || CPU(SPARC) || CPU(SH4)
      // FIXME: perhaps we should have a more abstract macro that indicates when
      // going 4 bytes at a time is unsafe
 -#if CPU(ARM) || CPU(SH4)
      const UChar* stringCharacters = string->characters();
      for (unsigned i = 0; i != length; ++i) {
          if (*stringCharacters++ != *characters++)
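Besides hoisting the guard above the FIXME, this hunk adds CPU(SPARC) to the architectures that must compare character by character: StringImpl buffers are only guaranteed UChar (2-byte) alignment, and the fast path used elsewhere in this function reads two characters per step through 32-bit loads, which trap on SPARC and are unreliable on ARM and SH4. For contrast, a sketch of the word-wise idiom those CPUs must avoid (the real fast path also handles an odd trailing character):

    // Unsafe where unaligned 32-bit loads trap: compares two UChars at a time.
    const uint32_t* stringWords = reinterpret_cast<const uint32_t*>(string->characters());
    const uint32_t* bufferWords = reinterpret_cast<const uint32_t*>(characters);
    for (unsigned i = 0; i != length / 2; ++i) {
        if (*stringWords++ != *bufferWords++)
            return false;
    }
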
@@@ -252,7 -252,7 +252,7 @@@ PassRefPtr<StringImpl> AtomicString::ad
      if (!length)
          return StringImpl::empty();
  
-     HashAndCharacters buffer = { string->existingHash(), string->data(), length }; 
+     HashAndCharacters buffer = { string->existingHash(), string->characters(), length }; 
      pair<HashSet<StringImpl*>::iterator, bool> addResult = stringTable().add<HashAndCharacters, HashAndCharactersTranslator>(buffer);
      if (!addResult.second)
          return *addResult.first;
@@@ -269,7 -269,7 +269,7 @@@ PassRefPtr<StringImpl> AtomicString::ad
      if (!length)
          return StringImpl::empty();
  
-     HashAndCharacters buffer = { string->hash(), string->data(), length }; 
+     HashAndCharacters buffer = { string->hash(), string->characters(), length }; 
      pair<HashSet<StringImpl*>::iterator, bool> addResult = stringTable().add<HashAndCharacters, HashAndCharactersTranslator>(buffer);
      if (!addResult.second)
          return *addResult.first;
@@@ -286,7 -286,7 +286,7 @@@ AtomicStringImpl* AtomicString::find(co
      if (!length)
          return static_cast<AtomicStringImpl*>(StringImpl::empty());
  
-     HashAndCharacters buffer = { string->existingHash(), string->data(), length }; 
+     HashAndCharacters buffer = { string->existingHash(), string->characters(), length }; 
      HashSet<StringImpl*>::iterator iterator = stringTable().find<HashAndCharacters, HashAndCharactersTranslator>(buffer);
      if (iterator == stringTable().end())
          return 0;

-- 
WebKit Debian packaging


