[Commits] Rev 4367: MDEV-7026: Race in InnoDB/XtraDB mutex implementation can stall or hang the server. in http://bazaar.launchpad.net/~maria-captains/maria/5.5

knielsen at knielsen-hq.org knielsen at knielsen-hq.org
Wed Nov 19 14:53:36 EET 2014


At http://bazaar.launchpad.net/~maria-captains/maria/5.5

------------------------------------------------------------
revno: 4367
revision-id: knielsen at knielsen-hq.org-20141119125335-q1us5hk39am2k9pk
parent: sergii at pisem.net-20141118231952-fk3o69075dn3ravv
committer: Kristian Nielsen <knielsen at knielsen-hq.org>
branch nick: mariadb-5.5
timestamp: Wed 2014-11-19 13:53:35 +0100
message:
  MDEV-7026: Race in InnoDB/XtraDB mutex implementation can stall or hang the server.
  
  The bug was that full memory barrier was missing in the code that ensures that
  a waiter on an InnoDB mutex will not go to sleep unless it is guaranteed to be
  woken up again by another thread currently holding the mutex. This made
  possible a race where a thread could get stuck waiting for a mutex that is in
  fact no longer locked. If that thread was also holding other critical locks,
this could stall the entire server. There is an error monitor thread that can
  break the stall, it runs about once per second. But if the error monitor
  thread itself got stuck or was not running, then the entire server could hang
  infinitely.
  
  This was introduced on i386/amd64 platforms in 5.5.40 and 10.0.13 by an
  incorrect patch that tried to fix the similar problem for PowerPC.
  
  This commit reverts the incorrect PowerPC patch, and instead implements a fix
  for PowerPC that does not change i386/amd64 behaviour, making PowerPC work
  similarly to i386/amd64.
=== modified file 'storage/innobase/include/os0sync.h'
--- a/storage/innobase/include/os0sync.h	2014-09-08 15:10:48 +0000
+++ b/storage/innobase/include/os0sync.h	2014-11-19 12:53:35 +0000
@@ -232,12 +232,29 @@ os_fast_mutex_trylock(
 /*==================*/
         os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to acquire */
 /**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+        os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to acquire */
+/**********************************************************//**
 Releases ownership of a fast mutex. */
 UNIV_INTERN
 void
 os_fast_mutex_unlock(
 /*=================*/
         os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to release */
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+        os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to release */
 /*********************************************************//**
 Initializes an operating system fast mutex semaphore. */
 UNIV_INTERN
@@ -307,11 +324,28 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+#ifdef __powerpc__
+/*
+  os_atomic_test_and_set_byte_release() should imply a release barrier before
+  setting, and a full barrier after. But __sync_lock_test_and_set() is only
+  documented as an aquire barrier. So on PowerPC we need to add the full
+  barrier explicitly.  */
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+        do { __sync_lock_release(ptr); \
+             __sync_synchronize(); } while (0)
+#else
+/*
+  On x86, __sync_lock_test_and_set() happens to be full barrier, due to
+  LOCK prefix.
+*/
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+        __sync_lock_test_and_set(ptr, (byte) new_val)
+#endif
+/*
+  os_atomic_test_and_set_byte_acquire() is a full memory barrier on x86. But
+  in general, just an acquire barrier should be sufficient. */
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
         __sync_lock_test_and_set(ptr, (byte) new_val)
-
-# define os_atomic_lock_release_byte(ptr) \
-        __sync_lock_release(ptr)
 
 #elif defined(HAVE_IB_SOLARIS_ATOMICS)
 
@@ -363,11 +397,10 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+        atomic_swap_uchar(ptr, new_val)
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
         atomic_swap_uchar(ptr, new_val)
-
-# define os_atomic_lock_release_byte(ptr) \
-        (void) atomic_swap_uchar(ptr, 0)
 
 #elif defined(HAVE_WINDOWS_ATOMICS)
 
@@ -414,7 +447,9 @@ Returns the old value of *ptr, atomicall
 InterlockedExchange() operates on LONG, and the LONG will be
 clobbered */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+        ((byte) InterlockedExchange(ptr, new_val))
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
         ((byte) InterlockedExchange(ptr, new_val))
 
 #else
@@ -427,11 +462,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
 # define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
-#ifdef __powerpc__
-# define os_isync  __asm __volatile ("isync":::"memory")
-#else
-#define os_isync do { } while(0)
-#endif
+# define os_mb __atomic_thread_fence(__ATOMIC_SEQ_CST)
 
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "GCC builtin __atomic_thread_fence() is used for memory barrier"
@@ -440,7 +471,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb __sync_synchronize()
 # define os_wmb __sync_synchronize()
-# define os_isync __sync_synchronize()
+# define os_mb  __sync_synchronize()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "GCC builtin __sync_synchronize() is used for memory barrier"
 
@@ -449,7 +480,7 @@ clobbered */
 # include <mbarrier.h>
 # define os_rmb __machine_r_barrier()
 # define os_wmb __machine_w_barrier()
-# define os_isync os_rmb; os_wmb
+# define os_mb __machine_rw_barrier()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "Soralis memory ordering functions are used for memory barrier"
 
@@ -458,17 +489,14 @@ clobbered */
 # include <intrin.h>
 # define os_rmb _mm_lfence()
 # define os_wmb _mm_sfence()
-# define os_isync os_rmb; os_wmb
+# define os_mb  _mm_mfence()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "_mm_lfence() and _mm_sfence() are used for memory barrier"
 
-# define os_atomic_lock_release_byte(ptr) \
-        (void) InterlockedExchange(ptr, 0)
-
 #else
 # define os_rmb do { } while(0)
 # define os_wmb do { } while(0)
-# define os_isync do { } while(0)
+# define os_mb do { } while(0)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "Memory barrier is not used"
 #endif

=== modified file 'storage/innobase/include/os0sync.ic'
--- a/storage/innobase/include/os0sync.ic	2014-03-17 12:04:28 +0000
+++ b/storage/innobase/include/os0sync.ic	2014-11-19 12:53:35 +0000
@@ -54,3 +54,35 @@ os_fast_mutex_trylock(
         return((ulint) pthread_mutex_trylock(fast_mutex));
 #endif
 }
+
+/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+        os_fast_mutex_t*        fast_mutex)     /*!< in: mutex to acquire */
+{
+#ifdef __WIN__
+        if (TryEnterCriticalSection(fast_mutex)) {
+
+                return(0);
+        } else {
+
+                return(1);
+        }
+#else
+        /* NOTE that the MySQL my_pthread.h redefines pthread_mutex_trylock
+        so that it returns 0 on success. In the operating system
+        libraries, HP-UX-10.20 follows the old Posix 1003.4a Draft 4 and
+        returns 1 on success (but MySQL remaps that to 0), while Linux,
+        FreeBSD, Solaris, AIX, Tru64 Unix, HP-UX-11.0 return 0 on success. */
+
+#ifdef __powerpc__
+        os_mb;
+#endif
+        return((ulint) pthread_mutex_trylock(fast_mutex));
+#endif
+}

=== modified file 'storage/innobase/include/sync0sync.ic'
--- a/storage/innobase/include/sync0sync.ic	2014-08-29 12:02:46 +0000
+++ b/storage/innobase/include/sync0sync.ic	2014-11-19 12:53:35 +0000
@@ -80,11 +80,11 @@ mutex_test_and_set(
         mutex_t*        mutex)  /*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-        return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
+        return(os_atomic_test_and_set_byte_acquire(&mutex->lock_word, 1));
 #else
         ibool   ret;
 
-        ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));
+        ret = os_fast_mutex_trylock_full_barrier(&(mutex->os_fast_mutex));
 
         if (ret == 0) {
                 /* We check that os_fast_mutex_trylock does not leak
@@ -92,7 +92,6 @@ mutex_test_and_set(
                 ut_a(mutex->lock_word == 0);
 
                 mutex->lock_word = 1;
-                os_wmb;
         }
 
         return((byte)ret);
@@ -109,11 +108,14 @@ mutex_reset_lock_word(
         mutex_t*        mutex)  /*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-        os_atomic_lock_release_byte(&mutex->lock_word);
+        /* In theory __sync_lock_release should be used to release the lock.
+        Unfortunately, it does not work properly alone. The workaround is
+        that more conservative __sync_lock_test_and_set is used instead. */
+        os_atomic_test_and_set_byte_release(&mutex->lock_word, 0);
 #else
         mutex->lock_word = 0;
 
-        os_fast_mutex_unlock(&(mutex->os_fast_mutex));
+        os_fast_mutex_unlock_full_barrier(&(mutex->os_fast_mutex));
 #endif
 }
 
@@ -145,7 +147,6 @@ mutex_get_waiters(
 
         ptr = &(mutex->waiters);
 
-        os_rmb;
         return(*ptr);           /* Here we assume that the read of a single
                                 word from memory is atomic */
 }
@@ -180,7 +181,6 @@ mutex_exit_func(
         to wake up possible hanging threads if
         they are missed in mutex_signal_object. */
 
-        os_isync;
         if (mutex_get_waiters(mutex) != 0) {
 
                 mutex_signal_object(mutex);

=== modified file 'storage/innobase/os/os0sync.c'
--- a/storage/innobase/os/os0sync.c	2014-03-17 12:04:28 +0000
+++ b/storage/innobase/os/os0sync.c	2014-11-19 12:53:35 +0000
@@ -888,6 +888,25 @@ os_fast_mutex_unlock(
 }
 
 /**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+        os_fast_mutex_t*        fast_mutex)     /*!< in: mutex to release */
+{
+#ifdef __WIN__
+        LeaveCriticalSection(fast_mutex);
+#else
+        pthread_mutex_unlock(fast_mutex);
+#ifdef __powerpc__
+        os_mb;
+#endif
+#endif
+}
+
+/**********************************************************//**
 Frees a mutex object. */
 UNIV_INTERN
 void

=== modified file 'storage/innobase/sync/sync0sync.c'
--- a/storage/innobase/sync/sync0sync.c	2014-11-03 13:43:44 +0000
+++ b/storage/innobase/sync/sync0sync.c	2014-11-19 12:53:35 +0000
@@ -474,8 +474,6 @@ mutex_set_waiters(
 
         ptr = &(mutex->waiters);
 
-        os_wmb;
-
         *ptr = n;               /* Here we assume that the write of a single
                                 word in memory is atomic */
 }

=== modified file 'storage/xtradb/include/os0sync.h'
--- a/storage/xtradb/include/os0sync.h	2014-09-08 15:10:48 +0000
+++ b/storage/xtradb/include/os0sync.h	2014-11-19 12:53:35 +0000
@@ -232,12 +232,29 @@ os_fast_mutex_trylock(
 /*==================*/
         os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to acquire */
 /**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+        os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to acquire */
+/**********************************************************//**
 Releases ownership of a fast mutex. */
 UNIV_INTERN
 void
 os_fast_mutex_unlock(
 /*=================*/
         os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to release */
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+        os_fast_mutex_t*        fast_mutex);    /*!< in: mutex to release */
 /*********************************************************//**
 Initializes an operating system fast mutex semaphore. */
 UNIV_INTERN
@@ -314,11 +331,28 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+#ifdef __powerpc__
+/*
+  os_atomic_test_and_set_byte_release() should imply a release barrier before
+  setting, and a full barrier after. But __sync_lock_test_and_set() is only
+  documented as an acquire barrier. So on PowerPC we need to add the full
+  barrier explicitly.  */
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+        do { __sync_lock_release(ptr); \
+             __sync_synchronize(); } while (0)
+#else
+/*
+  On x86, __sync_lock_test_and_set() happens to be full barrier, due to
+  LOCK prefix.
+*/
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+        __sync_lock_test_and_set(ptr, (byte) new_val)
+#endif
+/*
+  os_atomic_test_and_set_byte_acquire() is a full memory barrier on x86. But
+  in general, just an acquire barrier should be sufficient. */
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
         __sync_lock_test_and_set(ptr, (byte) new_val)
-
-# define os_atomic_lock_release_byte(ptr) \
-        __sync_lock_release(ptr)
 
 #elif defined(HAVE_IB_SOLARIS_ATOMICS)
 
@@ -374,11 +408,10 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+        atomic_swap_uchar(ptr, new_val)
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
         atomic_swap_uchar(ptr, new_val)
-
-# define os_atomic_lock_release_byte(ptr) \
-        (void) atomic_swap_uchar(ptr, 0)
 
 #elif defined(HAVE_WINDOWS_ATOMICS)
 
@@ -434,7 +467,9 @@ Returns the old value of *ptr, atomicall
 InterlockedExchange() operates on LONG, and the LONG will be
 clobbered */
 
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+        ((byte) InterlockedExchange(ptr, new_val))
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
         ((byte) InterlockedExchange(ptr, new_val))
 
 #else
@@ -447,11 +482,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
 # define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
-#ifdef __powerpc__
-# define os_isync  __asm __volatile ("isync":::"memory")
-#else
-#define os_isync do { } while(0)
-#endif
+# define os_mb __atomic_thread_fence(__ATOMIC_SEQ_CST)
 
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "GCC builtin __atomic_thread_fence() is used for memory barrier"
@@ -460,7 +491,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb __sync_synchronize()
 # define os_wmb __sync_synchronize()
-# define os_isync __sync_synchronize()
+# define os_mb  __sync_synchronize()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "GCC builtin __sync_synchronize() is used for memory barrier"
 
@@ -469,7 +500,7 @@ clobbered */
 # include <mbarrier.h>
 # define os_rmb __machine_r_barrier()
 # define os_wmb __machine_w_barrier()
-# define os_isync os_rmb; os_wmb
+# define os_mb __machine_rw_barrier()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "Soralis memory ordering functions are used for memory barrier"
 
@@ -478,17 +509,14 @@ clobbered */
 # include <intrin.h>
 # define os_rmb _mm_lfence()
 # define os_wmb _mm_sfence()
-# define os_isync os_rmb; os_wmb
+# define os_mb  _mm_mfence()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "_mm_lfence() and _mm_sfence() are used for memory barrier"
 
-# define os_atomic_lock_release_byte(ptr) \
-        (void) InterlockedExchange(ptr, 0)
-
 #else
 # define os_rmb do { } while(0)
 # define os_wmb do { } while(0)
-# define os_isync do { } while(0)
+# define os_mb do { } while(0)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
         "Memory barrier is not used"
 #endif

=== modified file 'storage/xtradb/include/os0sync.ic'
--- a/storage/xtradb/include/os0sync.ic	2013-09-07 11:49:15 +0000
+++ b/storage/xtradb/include/os0sync.ic	2014-11-19 12:53:35 +0000
@@ -54,3 +54,35 @@ os_fast_mutex_trylock(
         return((ulint) pthread_mutex_trylock(fast_mutex));
 #endif
 }
+
+/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return 0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+        os_fast_mutex_t*        fast_mutex)     /*!< in: mutex to acquire */
+{
+#ifdef __WIN__
+        if (TryEnterCriticalSection(fast_mutex)) {
+
+                return(0);
+        } else {
+
+                return(1);
+        }
+#else
+        /* NOTE that the MySQL my_pthread.h redefines pthread_mutex_trylock
+        so that it returns 0 on success. In the operating system
+        libraries, HP-UX-10.20 follows the old Posix 1003.4a Draft 4 and
+        returns 1 on success (but MySQL remaps that to 0), while Linux,
+        FreeBSD, Solaris, AIX, Tru64 Unix, HP-UX-11.0 return 0 on success. */
+
+#ifdef __powerpc__
+        os_mb;
+#endif
+        return((ulint) pthread_mutex_trylock(fast_mutex));
+#endif
+}

=== modified file 'storage/xtradb/include/sync0sync.ic'
--- a/storage/xtradb/include/sync0sync.ic	2014-08-29 12:02:46 +0000
+++ b/storage/xtradb/include/sync0sync.ic	2014-11-19 12:53:35 +0000
@@ -80,11 +80,11 @@ mutex_test_and_set(
         mutex_t*        mutex)  /*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-        return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
+        return(os_atomic_test_and_set_byte_acquire(&mutex->lock_word, 1));
 #else
         ibool   ret;
 
-        ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));
+        ret = os_fast_mutex_trylock_full_barrier(&(mutex->os_fast_mutex));
 
         if (ret == 0) {
                 /* We check that os_fast_mutex_trylock does not leak
@@ -92,7 +92,6 @@ mutex_test_and_set(
                 ut_a(mutex->lock_word == 0);
 
                 mutex->lock_word = 1;
-                os_wmb;
         }
 
         return((byte)ret);
@@ -109,11 +108,14 @@ mutex_reset_lock_word(
         mutex_t*        mutex)  /*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-        os_atomic_lock_release_byte(&mutex->lock_word);
+        /* In theory __sync_lock_release should be used to release the lock.
+        Unfortunately, it does not work properly alone. The workaround is
+        that more conservative __sync_lock_test_and_set is used instead. */
+        os_atomic_test_and_set_byte_release(&mutex->lock_word, 0);
 #else
         mutex->lock_word = 0;
 
-        os_fast_mutex_unlock(&(mutex->os_fast_mutex));
+        os_fast_mutex_unlock_full_barrier(&(mutex->os_fast_mutex));
 #endif
 }
 
@@ -145,7 +147,6 @@ mutex_get_waiters(
 
         ptr = &(mutex->waiters);
 
-        os_rmb;
         return(*ptr);           /* Here we assume that the read of a single
                                 word from memory is atomic */
 }
@@ -180,7 +181,6 @@ mutex_exit_func(
         to wake up possible hanging threads if
         they are missed in mutex_signal_object. */
 
-        os_isync;
         if (mutex_get_waiters(mutex) != 0) {
 
                 mutex_signal_object(mutex);

=== modified file 'storage/xtradb/os/os0sync.c'
--- a/storage/xtradb/os/os0sync.c	2013-09-07 11:49:15 +0000
+++ b/storage/xtradb/os/os0sync.c	2014-11-19 12:53:35 +0000
@@ -888,6 +888,25 @@ os_fast_mutex_unlock(
 }
 
 /**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+        os_fast_mutex_t*        fast_mutex)     /*!< in: mutex to release */
+{
+#ifdef __WIN__
+        LeaveCriticalSection(fast_mutex);
+#else
+        pthread_mutex_unlock(fast_mutex);
+#ifdef __powerpc__
+        os_mb;
+#endif
+#endif
+}
+
+/**********************************************************//**
 Frees a mutex object. */
 UNIV_INTERN
 void

=== modified file 'storage/xtradb/sync/sync0sync.c'
--- a/storage/xtradb/sync/sync0sync.c	2014-11-03 13:43:44 +0000
+++ b/storage/xtradb/sync/sync0sync.c	2014-11-19 12:53:35 +0000
@@ -482,8 +482,6 @@ mutex_set_waiters(
 
         ptr = &(mutex->waiters);
 
-        os_wmb;
-
         *ptr = n;               /* Here we assume that the write of a single
                                 word in memory is atomic */
 #endif



More information about the commits mailing list