[Commits] 4d26359: MDEV-7754: innodb assert "array->n_elems < array->max_elems" on a huge blob update

Jan Lindström jan.lindstrom at mariadb.com
Tue Mar 17 20:30:07 EET 2015


revision-id: 4d2635959bcad835fc0375ffd21f105c715a1523
parent(s): 3d4850158fdd62a57cf76515db99f07457f344d5
committer: Jan Lindström
branch nick: 10.0-git
timestamp: 2015-03-17 20:28:28 +0200
message:

MDEV-7754: innodb assert "array->n_elems < array->max_elems" on a huge blob update

The problem was that a fixed-size static array was used for storing per-thread mutex sync levels.
Fixed by using std::vector instead, which grows as needed.

Does not contain a test case, to avoid excessive memory/disk space usage
on the buildbot VMs.

---
 storage/innobase/sync/sync0sync.cc | 78 ++++++++------------------------------
 storage/xtradb/sync/sync0sync.cc   | 78 ++++++++------------------------------
 2 files changed, 30 insertions(+), 126 deletions(-)

diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index fb559f2..d00aabd 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -47,6 +47,8 @@ Created 9/5/1995 Heikki Tuuri
 #include "ha_prototypes.h"
 #include "my_cpu.h"
 
+#include <vector>
+
 /*
 	REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX
 	============================================
@@ -225,12 +227,9 @@ static const ulint SYNC_THREAD_N_LEVELS = 10000;
 
 /** Array for tracking sync levels per thread. */
 struct sync_arr_t {
-	ulint		in_use;		/*!< Number of active cells */
 	ulint		n_elems;	/*!< Number of elements in the array */
-	ulint		max_elems;	/*!< Maximum elements */
-	ulint		next_free;	/*!< ULINT_UNDEFINED or index of next
-					free slot */
-	sync_level_t*	elems;		/*!< Array elements */
+
+	std::vector<sync_level_t>	elems;		/*!< Vector of elements */
 };
 
 /** Mutexes or rw-locks held by a thread */
@@ -1069,10 +1068,9 @@ sync_thread_add_level(
 			SYNC_LEVEL_VARYING, nothing is done */
 	ibool	relock)	/*!< in: TRUE if re-entering an x-lock */
 {
-	ulint		i;
-	sync_level_t*	slot;
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
+	sync_level_t	sync_level;
 
 	if (!sync_order_checks_on) {
 
@@ -1097,21 +1095,11 @@ sync_thread_add_level(
 	thread_slot = sync_thread_level_arrays_find_slot();
 
 	if (thread_slot == NULL) {
-		ulint	sz;
-
-		sz = sizeof(*array)
-		   + (sizeof(*array->elems) * SYNC_THREAD_N_LEVELS);
 
 		/* We have to allocate the level array for a new thread */
-		array = static_cast<sync_arr_t*>(calloc(sz, sizeof(char)));
+		array = static_cast<sync_arr_t*>(calloc(1, sizeof(sync_arr_t)));
 		ut_a(array != NULL);
-
-		array->next_free = ULINT_UNDEFINED;
-		array->max_elems = SYNC_THREAD_N_LEVELS;
-		array->elems = (sync_level_t*) &array[1];
-
 		thread_slot = sync_thread_level_arrays_find_free();
-
 		thread_slot->levels = array;
 		thread_slot->id = os_thread_get_curr_id();
 	}
@@ -1321,26 +1309,11 @@ sync_thread_add_level(
 	}
 
 levels_ok:
-	if (array->next_free == ULINT_UNDEFINED) {
-		ut_a(array->n_elems < array->max_elems);
-
-		i = array->n_elems++;
-	} else {
-		i = array->next_free;
-		array->next_free = array->elems[i].level;
-	}
-
-	ut_a(i < array->n_elems);
-	ut_a(i != ULINT_UNDEFINED);
-
-	++array->in_use;
 
-	slot = &array->elems[i];
-
-	ut_a(slot->latch == NULL);
-
-	slot->latch = latch;
-	slot->level = level;
+	array->n_elems++;
+	sync_level.latch = latch;
+	sync_level.level = level;
+	array->elems.push_back(sync_level);
 
 	mutex_exit(&sync_thread_mutex);
 }
@@ -1358,7 +1331,6 @@ sync_thread_reset_level(
 {
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
-	ulint		i;
 
 	if (!sync_order_checks_on) {
 
@@ -1387,36 +1359,16 @@ sync_thread_reset_level(
 
 	array = thread_slot->levels;
 
-	for (i = 0; i < array->n_elems; i++) {
-		sync_level_t*	slot;
-
-		slot = &array->elems[i];
+	for (std::vector<sync_level_t>::iterator it = array->elems.begin(); it != array->elems.end(); ++it) {
+		sync_level_t level = *it;
 
-		if (slot->latch != latch) {
+		if (level.latch != latch) {
 			continue;
 		}
 
-		slot->latch = NULL;
-
-		/* Update the free slot list. See comment in sync_level_t
-		for the level field. */
-		slot->level = array->next_free;
-		array->next_free = i;
-
-		ut_a(array->in_use >= 1);
-		--array->in_use;
-
-		/* If all cells are idle then reset the free
-		list. The assumption is that this will save
-		time when we need to scan up to n_elems. */
-
-		if (array->in_use == 0) {
-			array->n_elems = 0;
-			array->next_free = ULINT_UNDEFINED;
-		}
-
+		array->elems.erase(it);
+		array->n_elems--;
 		mutex_exit(&sync_thread_mutex);
-
 		return(TRUE);
 	}
 
diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc
index d02a0df7..7e10420 100644
--- a/storage/xtradb/sync/sync0sync.cc
+++ b/storage/xtradb/sync/sync0sync.cc
@@ -48,6 +48,8 @@ Created 9/5/1995 Heikki Tuuri
 #include "ha_prototypes.h"
 #include "my_cpu.h"
 
+#include <vector>
+
 /*
 	REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX
 	============================================
@@ -229,12 +231,9 @@ static const ulint SYNC_THREAD_N_LEVELS = 10000;
 
 /** Array for tracking sync levels per thread. */
 struct sync_arr_t {
-	ulint		in_use;		/*!< Number of active cells */
 	ulint		n_elems;	/*!< Number of elements in the array */
-	ulint		max_elems;	/*!< Maximum elements */
-	ulint		next_free;	/*!< ULINT_UNDEFINED or index of next
-					free slot */
-	sync_level_t*	elems;		/*!< Array elements */
+
+	std::vector<sync_level_t>	elems;		/*!< Vector of elements */
 };
 
 /** Mutexes or rw-locks held by a thread */
@@ -1177,10 +1176,9 @@ sync_thread_add_level(
 			SYNC_LEVEL_VARYING, nothing is done */
 	ibool	relock)	/*!< in: TRUE if re-entering an x-lock */
 {
-	ulint		i;
-	sync_level_t*	slot;
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
+	sync_level_t	sync_level;
 
 	if (!sync_order_checks_on) {
 
@@ -1205,21 +1203,11 @@ sync_thread_add_level(
 	thread_slot = sync_thread_level_arrays_find_slot();
 
 	if (thread_slot == NULL) {
-		ulint	sz;
-
-		sz = sizeof(*array)
-		   + (sizeof(*array->elems) * SYNC_THREAD_N_LEVELS);
 
 		/* We have to allocate the level array for a new thread */
-		array = static_cast<sync_arr_t*>(calloc(sz, sizeof(char)));
+		array = static_cast<sync_arr_t*>(calloc(1, sizeof(sync_arr_t)));
 		ut_a(array != NULL);
-
-		array->next_free = ULINT_UNDEFINED;
-		array->max_elems = SYNC_THREAD_N_LEVELS;
-		array->elems = (sync_level_t*) &array[1];
-
 		thread_slot = sync_thread_level_arrays_find_free();
-
 		thread_slot->levels = array;
 		thread_slot->id = os_thread_get_curr_id();
 	}
@@ -1446,26 +1434,11 @@ sync_thread_add_level(
 	}
 
 levels_ok:
-	if (array->next_free == ULINT_UNDEFINED) {
-		ut_a(array->n_elems < array->max_elems);
-
-		i = array->n_elems++;
-	} else {
-		i = array->next_free;
-		array->next_free = array->elems[i].level;
-	}
-
-	ut_a(i < array->n_elems);
-	ut_a(i != ULINT_UNDEFINED);
-
-	++array->in_use;
 
-	slot = &array->elems[i];
-
-	ut_a(slot->latch == NULL);
-
-	slot->latch = latch;
-	slot->level = level;
+	array->n_elems++;
+	sync_level.latch = latch;
+	sync_level.level = level;
+	array->elems.push_back(sync_level);
 
 	mutex_exit(&sync_thread_mutex);
 }
@@ -1483,7 +1456,6 @@ sync_thread_reset_level(
 {
 	sync_arr_t*	array;
 	sync_thread_t*	thread_slot;
-	ulint		i;
 
 	if (!sync_order_checks_on) {
 
@@ -1512,36 +1484,16 @@ sync_thread_reset_level(
 
 	array = thread_slot->levels;
 
-	for (i = 0; i < array->n_elems; i++) {
-		sync_level_t*	slot;
-
-		slot = &array->elems[i];
+	for (std::vector<sync_level_t>::iterator it = array->elems.begin(); it != array->elems.end(); ++it) {
+		sync_level_t level = *it;
 
-		if (slot->latch != latch) {
+		if (level.latch != latch) {
 			continue;
 		}
 
-		slot->latch = NULL;
-
-		/* Update the free slot list. See comment in sync_level_t
-		for the level field. */
-		slot->level = array->next_free;
-		array->next_free = i;
-
-		ut_a(array->in_use >= 1);
-		--array->in_use;
-
-		/* If all cells are idle then reset the free
-		list. The assumption is that this will save
-		time when we need to scan up to n_elems. */
-
-		if (array->in_use == 0) {
-			array->n_elems = 0;
-			array->next_free = ULINT_UNDEFINED;
-		}
-
+		array->elems.erase(it);
+		array->n_elems--;
 		mutex_exit(&sync_thread_mutex);
-
 		return(TRUE);
 	}
 


More information about the commits mailing list