Changeset 205921 in webkit
- Timestamp: Sep 14, 2016, 11:51:26 AM
- Location: trunk/Source/WTF
- Files: 2 edited
trunk/Source/WTF/ChangeLog
2016-09-14  JF Bastien  <jfbastien@apple.com>

        Atomics on ARM don't require full-system fencing, and other minutiae
        https://bugs.webkit.org/show_bug.cgi?id=161928

        Reviewed by Geoffrey Garen.

        Add cmpxchg versions with both success and failure memory
        orderings. In some interesting cases we can craft code which only
        needs weaker barriers on the failure path.

        weakCompareAndSwap is super dubious, and its 3 uses seem
        questionable... but for now I'm just adding debug asserts.

        Rename the armV7_dmb* functions to arm_dmb* because they apply to
        v7 and v8 (or more precisely: to ARMv7's ARM and Thumb2, as well
        as ARMv8's aarch32 A32/T32 and aarch64).

        Use the inner-shareability domain for ARM barriers instead of
        full-system. This is what C++ uses.

        The default case for barriers used a mere compiler barrier, which
        is generally wrong, e.g. for MIPS.

        * wtf/Atomics.h:
        (WTF::Atomic::compareExchangeWeak): offer two-order version
        (WTF::Atomic::compareExchangeStrong): offer two-order version
        (WTF::weakCompareAndSwap): a few assertions
        (WTF::arm_dmb): rename since it applies to ARMv7 and v8; make it inner-shareable
        (WTF::arm_dmb_st): rename since it applies to ARMv7 and v8; make it inner-shareable
        (WTF::loadLoadFence): generally incorrect
        (WTF::loadStoreFence): generally incorrect
        (WTF::storeLoadFence): generally incorrect
        (WTF::storeStoreFence): generally incorrect
        (WTF::memoryBarrierAfterLock): generally incorrect
        (WTF::memoryBarrierBeforeUnlock): generally incorrect
        (WTF::armV7_dmb): Deleted.
        (WTF::armV7_dmb_st): Deleted.
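The point of the two-order overloads is that a failed compare-exchange does not need the (possibly expensive) success ordering. Below is a minimal standalone sketch of such a call site; it uses plain std::atomic rather than WTF::Atomic, and incrementIfEven is an invented name, not WebKit code. The CAS publishes with release when it succeeds; a failed attempt merely re-reads the value, so relaxed suffices on the failure path.

    // Not WebKit code: a sketch of a call site that benefits from split
    // success/failure orderings. Build: clang++ -std=c++14 -pthread cas.cpp
    #include <atomic>
    #include <cstdio>

    std::atomic<int> counter{0};

    // Adds 2 as long as the value is even; gives up once it sees an odd value.
    void incrementIfEven()
    {
        int expected = counter.load(std::memory_order_relaxed);
        while (!(expected & 1)) {
            // Success: release, so writes made before the CAS become visible
            // to a thread that acquires 'counter'. Failure: relaxed, because
            // we just retry with the refreshed 'expected' -- no barrier needed.
            if (counter.compare_exchange_weak(expected, expected + 2,
                    std::memory_order_release, std::memory_order_relaxed))
                return;
        }
    }

    int main()
    {
        incrementIfEven();
        std::printf("counter = %d\n", counter.load()); // counter = 2
        return 0;
    }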
trunk/Source/WTF/wtf/Atomics.h
r205914 → r205921

     }
 
+    ALWAYS_INLINE bool compareExchangeWeak(T expected, T desired, std::memory_order order_success, std::memory_order order_failure)
+    {
+#if OS(WINDOWS)
+        // Windows makes strange assertions about the argument to compare_exchange_weak, and anyway,
+        // Windows is X86 so seq_cst is cheap.
+        order_success = std::memory_order_seq_cst;
+        order_failure = std::memory_order_seq_cst;
+#endif
+        T expectedOrActual = expected;
+        return value.compare_exchange_weak(expectedOrActual, desired, order_success, order_failure);
+    }
+
     ALWAYS_INLINE bool compareExchangeStrong(T expected, T desired, std::memory_order order = std::memory_order_seq_cst)
     {
…
         return value.compare_exchange_strong(expectedOrActual, desired, order);
     }
 
+    ALWAYS_INLINE bool compareExchangeStrong(T expected, T desired, std::memory_order order_success, std::memory_order order_failure)
+    {
+#if OS(WINDOWS)
+        // See above.
+        order_success = std::memory_order_seq_cst;
+        order_failure = std::memory_order_seq_cst;
+#endif
+        T expectedOrActual = expected;
+        return value.compare_exchange_strong(expectedOrActual, desired, order_success, order_failure);
+    }
+
     template<typename U>
     ALWAYS_INLINE T exchangeAndAdd(U addend, std::memory_order order = std::memory_order_seq_cst)
…
 inline bool weakCompareAndSwap(volatile T* location, T expected, T newValue)
 {
+    ASSERT(isPointerTypeAlignmentOkay(location) && "natural alignment required");
+    ASSERT(bitwise_cast<std::atomic<T>*>(location)->is_lock_free() && "expected lock-free type");
     return bitwise_cast<Atomic<T>*>(location)->compareExchangeWeak(expected, newValue, std::memory_order_relaxed);
 }
…
 // Full memory fence. No accesses will float above this, and no accesses will sink
 // below it.
-inline void armV7_dmb()
+inline void arm_dmb()
 {
-    asm volatile("dmb sy" ::: "memory");
+    asm volatile("dmb ish" ::: "memory");
 }
 
 // Like the above, but only affects stores.
-inline void armV7_dmb_st()
+inline void arm_dmb_st()
 {
-    asm volatile("dmb st" ::: "memory");
+    asm volatile("dmb ishst" ::: "memory");
 }
 
-inline void loadLoadFence() { armV7_dmb(); }
-inline void loadStoreFence() { armV7_dmb(); }
-inline void storeLoadFence() { armV7_dmb(); }
-inline void storeStoreFence() { armV7_dmb_st(); }
-inline void memoryBarrierAfterLock() { armV7_dmb(); }
-inline void memoryBarrierBeforeUnlock() { armV7_dmb(); }
+inline void loadLoadFence() { arm_dmb(); }
+inline void loadStoreFence() { arm_dmb(); }
+inline void storeLoadFence() { arm_dmb(); }
+inline void storeStoreFence() { arm_dmb_st(); }
+inline void memoryBarrierAfterLock() { arm_dmb(); }
+inline void memoryBarrierBeforeUnlock() { arm_dmb(); }
 
 #elif CPU(X86) || CPU(X86_64)
…
 #else
 
-inline void loadLoadFence() { compilerFence(); }
-inline void loadStoreFence() { compilerFence(); }
-inline void storeLoadFence() { compilerFence(); }
-inline void storeStoreFence() { compilerFence(); }
-inline void memoryBarrierAfterLock() { compilerFence(); }
-inline void memoryBarrierBeforeUnlock() { compilerFence(); }
+inline void loadLoadFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void loadStoreFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void storeLoadFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void storeStoreFence() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void memoryBarrierAfterLock() { std::atomic_thread_fence(std::memory_order_seq_cst); }
+inline void memoryBarrierBeforeUnlock() { std::atomic_thread_fence(std::memory_order_seq_cst); }
 
 #endif
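Why the old #else case was wrong: compilerFence() constrains only the compiler and emits no instruction, so a weakly-ordered CPU such as MIPS can still reorder the surrounding accesses at run time. std::atomic_thread_fence(std::memory_order_seq_cst) emits a real fence where one is needed (for instance dmb ish on ARM, sync on MIPS). The following standalone message-passing sketch shows the pattern these WTF fences guard, written against the portable fallback; producer and consumer are invented names, not WebKit code.

    // Not WebKit code: publication of plain data through an atomic flag.
    // Build: clang++ -std=c++14 -pthread fence.cpp
    #include <atomic>
    #include <cstdio>
    #include <thread>

    int payload = 0;                // plain, non-atomic data
    std::atomic<bool> ready{false}; // publication flag

    void producer()
    {
        payload = 42;
        // Plays the role of storeStoreFence(): the payload store must not
        // sink below the flag store. A compiler-only fence would not stop
        // the CPU from reordering the two stores.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        ready.store(true, std::memory_order_relaxed);
    }

    void consumer()
    {
        while (!ready.load(std::memory_order_relaxed)) { /* spin */ }
        // Plays the role of loadLoadFence(): the payload load must not
        // float above the flag load.
        std::atomic_thread_fence(std::memory_order_seq_cst);
        std::printf("payload = %d\n", payload); // always 42
    }

    int main()
    {
        std::thread t(consumer);
        producer();
        t.join();
        return 0;
    }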