[Commits] 2c91464: Issue #37: Index Condition Pushdown for RocksDB-SE

Sergei Petrunia psergey at askmonty.org
Fri Mar 20 13:57:06 EET 2015


revision-id: 2c914648d01de85787db22ad9f35d39616797892
parent(s): 3e30859863d553286df49aaaff52f3233e56d29f
committer: Sergei Petrunia
branch nick: mysql-5.6-rocksdb-icp
timestamp: 2015-03-20 14:57:06 +0300
message:

Issue #37: Index Condition Pushdown for RocksDB-SE

Summary: A straightforward implementation of Index Condition Pushdown

Test Plan: mtr t/rocksdb_icp.test

Reviewers: maykov, hermanlee4, jonahcohen, yoshinorim, jtolmer

Differential Revision: https://reviews.facebook.net/D35181

---
 mysql-test/r/rocksdb_icp.result   |  112 ++++++++++++++++
 mysql-test/r/rocksdb_range.result |    2 +-
 mysql-test/t/rocksdb_icp.test     |   72 ++++++++++
 storage/rocksdb/ha_rocksdb.cc     |  263 +++++++++++++++++++++++++++++++++----
 storage/rocksdb/ha_rocksdb.h      |    8 ++
 storage/rocksdb/rdb_datadic.cc    |    8 +-
 storage/rocksdb/rdb_datadic.h     |    2 +
 7 files changed, 437 insertions(+), 30 deletions(-)

diff --git a/mysql-test/r/rocksdb_icp.result b/mysql-test/r/rocksdb_icp.result
new file mode 100644
index 0000000..f4399a6
--- /dev/null
+++ b/mysql-test/r/rocksdb_icp.result
@@ -0,0 +1,112 @@
+select * from information_schema.engines where engine = 'rocksdb';
+ENGINE	SUPPORT	COMMENT	TRANSACTIONS	XA	SAVEPOINTS
+ROCKSDB	YES	RocksDB storage engine	YES	YES	NO
+drop table if exists t0,t1,t2,t3;
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int);
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+create table t2 (
+pk int primary key,
+kp1 int,
+kp2 int,
+col1 int,
+key (kp1,kp2)
+) engine=rocksdb;
+insert into t2 select a,a,a,a from t1;
+# Try a basic case:
+explain 
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t2	range	kp1	kp1	5	NULL	10	Using index condition
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+pk	kp1	kp2	col1
+2	2	2	2
+4	4	4	4
+6	6	6	6
+8	8	8	8
+10	10	10	10
+# Check that ICP doesnt work for columns where column value 
+# cant be restored from mem-comparable form:
+create table t3 (
+pk int primary key,
+kp1 int,
+kp2 varchar(10) collate utf8_general_ci,
+col1 int,
+key (kp1,kp2)
+) engine=rocksdb;
+insert into t3 select a,a/10,a,a from t1;
+# This must not use ICP:
+explain
+select * from t3 where kp1=3 and kp2 like '%foo%';
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t3	ref	kp1	kp1	5	const	10	Using where
+explain format=json
+select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%';
+EXPLAIN
+{
+  "query_block": {
+    "select_id": 1,
+    "table": {
+      "table_name": "t3",
+      "access_type": "range",
+      "possible_keys": [
+        "kp1"
+      ],
+      "key": "kp1",
+      "used_key_parts": [
+        "kp1"
+      ],
+      "key_length": "5",
+      "rows": 10,
+      "filtered": 100,
+      "index_condition": "((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0))",
+      "attached_condition": "(`test`.`t3`.`kp2` like '%foo%')"
+    }
+  }
+}
+Warnings:
+Note	1003	/* select#1 */ select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`kp1` AS `kp1`,`test`.`t3`.`kp2` AS `kp2`,`test`.`t3`.`col1` AS `col1` from `test`.`t3` where ((`test`.`t3`.`kp1` between 2 and 4) and ((`test`.`t3`.`kp1` % 3) = 0) and (`test`.`t3`.`kp2` like '%foo%'))
+# Check that we handle the case where out-of-range is encountered sooner
+# than matched index condition
+explain 
+select * from t2 where kp1< 3 and kp2+1>50000;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t2	range	kp1	kp1	5	NULL	10	Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000;
+pk	kp1	kp2	col1
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t2	range	kp1	kp1	5	NULL	10	Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000;
+pk	kp1	kp2	col1
+# Try doing backwards scans
+explain 
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t2	range	kp1	kp1	5	NULL	10	Using index condition
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+pk	kp1	kp2	col1
+10	10	10	10
+8	8	8	8
+6	6	6	6
+4	4	4	4
+2	2	2	2
+explain 
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t2	range	kp1	kp1	5	NULL	10	Using index condition
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+pk	kp1	kp2	col1
+998	998	998	998
+996	996	996	996
+994	994	994	994
+992	992	992	992
+explain 
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t2	range	kp1	kp1	5	NULL	10	Using index condition
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+pk	kp1	kp2	col1
+drop table t0,t1,t2,t3;
diff --git a/mysql-test/r/rocksdb_range.result b/mysql-test/r/rocksdb_range.result
index 69dae45..63f052c 100644
--- a/mysql-test/r/rocksdb_range.result
+++ b/mysql-test/r/rocksdb_range.result
@@ -89,7 +89,7 @@ count(*)
 explain
 select * from t2 force index (a) where a=0 and pk>=3;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t2	range	a	a	8	NULL	10	Using where
+1	SIMPLE	t2	range	a	a	8	NULL	10	Using index condition
 select * from t2 force index (a) where a=0 and pk>=3;
 pk	a	b
 3	0	3
diff --git a/mysql-test/t/rocksdb_icp.test b/mysql-test/t/rocksdb_icp.test
new file mode 100644
index 0000000..37162b2
--- /dev/null
+++ b/mysql-test/t/rocksdb_icp.test
@@ -0,0 +1,72 @@
+#
+# Testing Index Condition Pushdown for MyRocks
+#
+select * from information_schema.engines where engine = 'rocksdb';
+
+--disable_warnings
+drop table if exists t0,t1,t2,t3;
+--enable_warnings
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1(a int);
+insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C;
+
+create table t2 (
+  pk int primary key,
+  kp1 int,
+  kp2 int,
+  col1 int,
+  key (kp1,kp2)
+) engine=rocksdb;
+
+insert into t2 select a,a,a,a from t1;
+
+--echo # Try a basic case:
+explain 
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0;
+
+--echo # Check that ICP doesnt work for columns where column value 
+--echo # cant be restored from mem-comparable form:
+
+create table t3 (
+  pk int primary key,
+  kp1 int,
+  kp2 varchar(10) collate utf8_general_ci,
+  col1 int,
+  key (kp1,kp2)
+) engine=rocksdb;
+
+insert into t3 select a,a/10,a,a from t1;
+--echo # This must not use ICP:
+explain
+select * from t3 where kp1=3 and kp2 like '%foo%';
+
+explain format=json
+select * from t3 where kp1 between 2 and 4 and mod(kp1,3)=0 and kp2 like '%foo%';
+
+--echo # Check that we handle the case where out-of-range is encountered sooner
+--echo # than matched index condition
+explain 
+select * from t2 where kp1< 3 and kp2+1>50000;
+select * from t2 where kp1< 3 and kp2+1>50000;
+
+explain
+select * from t2 where kp1< 3 and kp2+1>50000;
+select * from t2 where kp1< 3 and kp2+1>50000;
+
+--echo # Try doing backwards scans
+explain 
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+select * from t2 where kp1 between 1 and 10 and mod(kp2,2)=0 order by kp1 desc;
+
+explain 
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+select * from t2 where kp1 >990 and mod(kp2,2)=0 order by kp1 desc;
+
+explain 
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+select * from t2 where kp1< 3 and kp2+1>50000 order by kp1 desc;
+
+drop table t0,t1,t2,t3;
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index 01ca393..7cf3b76 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -2560,13 +2560,24 @@ ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const
   if (check_keyread_allowed(inx, part, all_parts))
     base_flags |= HA_KEYREAD_ONLY;
 
-  /* 
-    Index-only reads on primary key are the same as table scan for us. Still,
-    we need to explicitly "allow" them, otherwise SQL layer will miss some
-    plans.
-  */
   if (inx == table_share->primary_key)
+  {
+    /*
+      Index-only reads on primary key are the same as table scan for us. Still,
+      we need to explicitly "allow" them, otherwise SQL layer will miss some
+      plans.
+    */
     base_flags |= HA_KEYREAD_ONLY;
+  }
+  else
+  {
+    /*
+      We can Index Condition Pushdown any key except the primary. With primary
+      key, we get (pk, record) pair immediately, there is no place to put the
+      ICP check.
+    */
+    base_flags |= HA_DO_INDEX_COND_PUSHDOWN;
+  }
 
   return base_flags;
 }
@@ -2574,6 +2585,10 @@ ulong ha_rocksdb::index_flags(uint inx, uint part, bool all_parts) const
 
 /*
   Read next index tuple through the secondary index.
+
+  @detail
+    scan_it points at the index key-value pair that we should read the (pk,row)
+    pair for.
 */
 
 int ha_rocksdb::secondary_index_read(int keyno, uchar *buf)
@@ -2701,7 +2716,6 @@ int ha_rocksdb::index_read_map(uchar *buf, const uchar *key,
   setup_index_scan(kd, &slice, use_all_keys, is_ascending(kd, find_flag));
   bool move_forward= true;
 
-
   switch (find_flag) {
   case HA_READ_KEY_EXACT:
   {
@@ -2835,6 +2849,7 @@ int ha_rocksdb::index_read_map(uchar *buf, const uchar *key,
   }
   case HA_READ_KEY_OR_PREV:
   {
+    move_forward= false;
     if (!scan_it->Valid())
     {
       /*
@@ -2863,6 +2878,7 @@ int ha_rocksdb::index_read_map(uchar *buf, const uchar *key,
   case HA_READ_PREFIX_LAST:
   case HA_READ_PREFIX_LAST_OR_PREV:
   {
+    move_forward= false;
     /* 
       Given a prefix of (VAL1,VAL2), get the last record that has
       (kp1,kp2)=(VAL1,VAL2).  This cannot be translated directly to RocksDB
@@ -2923,11 +2939,10 @@ int ha_rocksdb::index_read_map(uchar *buf, const uchar *key,
   }
 
   skip_scan_it_next_call= FALSE;
-  uint pk_size;
   
   if (active_index == table->s->primary_key)
   {
-    pk_size= rkey.size();
+    uint pk_size= rkey.size();
     memcpy(pk_packed_tuple, rkey.data(), pk_size);
     last_rowkey.copy(rkey.data(), pk_size, &my_charset_bin);
     if (lock_rows)
@@ -2945,26 +2960,38 @@ int ha_rocksdb::index_read_map(uchar *buf, const uchar *key,
   }
   else
   {
-    pk_size= kd->get_primary_key_tuple(pk_descr, &rkey, 
-                                       (char*) pk_packed_tuple);
-    if (pk_size != (uint)-1)
+    uint pk_size;
+    if (keyread_only && !lock_rows)
     {
-      last_rowkey.copy((const char*)pk_packed_tuple, pk_size, &my_charset_bin);
-
-      if (keyread_only && !lock_rows)
+      /* Get the key columns and primary key value */
+      pk_size= kd->get_primary_key_tuple(pk_descr, &rkey,
+                                         (char*)pk_packed_tuple);
+      rocksdb::Slice value= scan_it->value();
+      if (pk_size == INVALID_LEN || 
+          kd->unpack_record(table, buf, &rkey, &value))
       {
-        /* Get the key columns and primary key value */
-        rocksdb::Slice value= scan_it->value();
-        if (kd->unpack_record(table, buf, &rkey, &value))
-          rc= HA_ERR_INTERNAL_ERROR;
-        else
-          rc= 0;
+        rc= HA_ERR_INTERNAL_ERROR;
       }
       else
-        rc= get_row_by_rowid(buf, (const char*)pk_packed_tuple, pk_size);
+        rc= 0;
     }
     else
-      rc= HA_ERR_INTERNAL_ERROR;
+    {
+      rc= find_icp_matching_index_rec(move_forward, buf);
+      if (!rc)
+      {
+        rocksdb::Slice cur_key= scan_it->key();
+        pk_size= kd->get_primary_key_tuple(pk_descr, &cur_key,
+                                           (char*) pk_packed_tuple);
+        if (pk_size == INVALID_LEN)
+          rc= HA_ERR_INTERNAL_ERROR;
+        else
+          rc= get_row_by_rowid(buf, (const char*)pk_packed_tuple, pk_size);
+      }
+    }
+
+    if (!rc)
+      last_rowkey.copy((const char*)pk_packed_tuple, pk_size, &my_charset_bin);
   }
 
   if (rc)
@@ -2983,6 +3010,86 @@ int ha_rocksdb::index_read_map(uchar *buf, const uchar *key,
 }
 
 
+/*
+  @brief
+  Scan the secondary index until we find an index record that satisfies ICP
+  
+  @param move_forward   TRUE  <=> move scan_it forward
+                        FALSE <=> move scan_it backward
+  @param buf            Record buffer (must be the same buffer that
+                        pushed index condition points to, in practice 
+                        it is table->record[0])
+
+  @detail
+  Move the current iterator scan_it until we get an index tuple that 
+  satisfies the pushed Index Condition.
+  (if there is no pushed index condition, return right away)
+
+  @return
+    0     - Index tuple satisfies ICP, can do index read.
+    other - error code
+*/
+
+int ha_rocksdb::find_icp_matching_index_rec(bool move_forward, uchar *buf)
+{
+  if (pushed_idx_cond && pushed_idx_cond_keyno == active_index)
+  {
+    RDBSE_KEYDEF *kd= key_descr[active_index];
+    while (1)
+    {
+      if (!scan_it->Valid())
+      {
+        table->status= STATUS_NOT_FOUND;
+        return HA_ERR_END_OF_FILE;
+      }
+      rocksdb::Slice rkey= scan_it->key();
+
+      if (!kd->covers_key(rkey.data(), rkey.size()))
+      {
+        table->status= STATUS_NOT_FOUND;
+        return HA_ERR_END_OF_FILE;
+      }
+
+      rocksdb::Slice value= scan_it->value();
+      if (kd->unpack_record(table, buf, &rkey, &value))
+      {
+        return HA_ERR_INTERNAL_ERROR;
+      }
+
+      enum icp_result icp_status= check_index_cond();
+      if (icp_status == ICP_NO_MATCH)
+      {
+        if (move_forward)
+          scan_it->Next();
+        else
+          scan_it->Prev();
+        continue; /* Get the next (or prev) index tuple */
+      }
+      else if (icp_status == ICP_OUT_OF_RANGE)
+      {
+        /* We have walked out of range we are scanning */
+        table->status= STATUS_NOT_FOUND;
+        return HA_ERR_END_OF_FILE;
+      }
+      else /* icp_status == ICP_MATCH */
+      {
+        /* Index Condition is satisfied. We have rc==0, proceed to fetch the row. */
+        break;
+      }
+      /*
+        TODO: should we have this here, or does RocksDB handle this internally?
+        if (thd_killed(current_thd))
+        {
+          rc= HA_ERR_INTERNAL_ERROR; // doesn't matter
+          break;
+        }
+      */
+    }
+  }
+  return 0;
+}
+
+
 int ha_rocksdb::index_read_last_map(uchar * buf, const uchar * key, 
                                     key_part_map keypart_map)
 {
@@ -3167,6 +3274,14 @@ void dbug_dump_database(rocksdb::DB *db)
 }
 
 
+/*
+  Given a rowid (i.e. packed PK) as a parameter, get the record.
+
+  @return
+    0      - Ok
+    other  - HA_ERR error code.
+*/
+
 int ha_rocksdb::get_row_by_rowid(uchar *buf, const char *rowid, uint rowid_size)
 {
   int rc;
@@ -3253,8 +3368,9 @@ int ha_rocksdb::index_next_with_direction(uchar *buf, bool move_forward)
       else
         scan_it->Prev();
     }
-
-    rc= secondary_index_read(active_index, buf);
+    rc= find_icp_matching_index_rec(move_forward, buf);
+    if (!rc)
+      rc= secondary_index_read(active_index, buf);
   }
 
   DBUG_RETURN(rc);
@@ -3420,7 +3536,11 @@ int ha_rocksdb::index_last_intern(uchar *buf)
     rc= rnd_next_with_direction(buf, false);
   }
   else
-    rc= secondary_index_read(active_index, buf);
+  {
+    rc= find_icp_matching_index_rec(false /*move_forward*/, buf);
+    if (!rc)
+      rc= secondary_index_read(active_index, buf);
+  }
 
   if (!rc)
   {
@@ -3759,6 +3879,7 @@ int ha_rocksdb::index_end()
   }
 
   active_index= MAX_KEY; 
+  in_range_check_pushed_down= FALSE;
   return 0;
 }
 
@@ -4545,6 +4666,98 @@ void ha_rocksdb::get_auto_increment(ulonglong offset, ulonglong increment,
   mysql_mutex_unlock(&tbl_def->mutex);
 }
 
+
+#ifndef DBUG_OFF
+
+/* Debugger help function */
+static char dbug_item_print_buf[512];
+
+const char *dbug_print_item(Item *item)
+{
+  char *buf= dbug_item_print_buf;
+  String str(buf, sizeof(dbug_item_print_buf), &my_charset_bin);
+  str.length(0);
+  if (!item)
+    return "(Item*)NULL";
+  item->print(&str ,QT_ORDINARY);
+  if (str.c_ptr() == buf)
+    return buf;
+  else
+    return "Couldn't fit into buffer";
+}
+
+#endif /*DBUG_OFF*/
+
+/*
+  SQL layer calls this function to push an index condition.
+
+  @detail
+    The condition is for index keyno (only one condition can be pushed at a
+    time).
+    The caller guarantees that condition refers only to index fields; besides
+    that, fields must have 
+
+      $field->part_of_key.set_bit(keyno)
+
+    which means that
+
+       (handler->index_flags(keyno, $keypart, 0) & HA_KEYREAD_ONLY) == 1
+
+    which means that field value can be restored from the index tuple.
+
+  @return
+    Part of condition we couldn't check (always NULL).
+*/
+
+class Item* ha_rocksdb::idx_cond_push(uint keyno, class Item* idx_cond)
+{
+  DBUG_ASSERT(keyno != MAX_KEY);
+  DBUG_ASSERT(idx_cond != NULL);
+  
+  pushed_idx_cond = idx_cond;
+  pushed_idx_cond_keyno = keyno;
+  in_range_check_pushed_down = TRUE;
+
+  /* We will check the whole condition */
+  return NULL;
+}
+
+
+/*
+  @brief
+  Check the index condition.
+  
+  @detail
+  Check the index condition. (The caller has unpacked all needed index 
+  columns into table->record[0])
+
+  @return 
+    ICP_NO_MATCH - Condition not satisfied (caller should continue
+                   scanning)
+    ICP_OUT_OF_RANGE - We've left the range we're scanning (caller should
+                   stop scanning and return HA_ERR_END_OF_FILE)
+
+    ICP_MATCH    - Condition is satisfied (caller should fetch the record
+                   and return it)
+*/
+
+enum icp_result ha_rocksdb::check_index_cond()
+{
+  DBUG_ASSERT(pushed_idx_cond);
+  DBUG_ASSERT(pushed_idx_cond_keyno != MAX_KEY);
+
+  if (end_range && compare_key_icp(end_range) > 0)
+  {
+    /* caller should return HA_ERR_END_OF_FILE already */
+    return ICP_OUT_OF_RANGE;
+  }
+
+  return pushed_idx_cond->val_int() ? ICP_MATCH : ICP_NO_MATCH;
+}
+
+
+/////////////////////////////////////////////////////////////////////////
+
 /**
   Checking if an index is used for ascending scan or not
 
diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h
index 9544efb..5e6599c 100644
--- a/storage/rocksdb/ha_rocksdb.h
+++ b/storage/rocksdb/ha_rocksdb.h
@@ -388,10 +388,18 @@ public:
 
   int index_first(uchar *buf);
   int index_last(uchar *buf);
+
+  class Item* idx_cond_push(uint keyno, class Item* idx_cond);
+  /*
+    Default implementation from cancel_pushed_idx_cond() suits us
+  */
 private:
   int index_first_intern(uchar *buf);
   int index_last_intern(uchar *buf);
 
+  enum icp_result check_index_cond();
+  int find_icp_matching_index_rec(bool move_forward, uchar *buf);
+
   void calc_updated_indexes();
 public:
   int index_end();
diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc
index a2fba19..5ddaaf8 100644
--- a/storage/rocksdb/rdb_datadic.cc
+++ b/storage/rocksdb/rdb_datadic.cc
@@ -197,7 +197,7 @@ uint RDBSE_KEYDEF::get_primary_key_tuple(RDBSE_KEYDEF *pk_descr,
   
   // Skip the index number
   if ((!reader.read(INDEX_NUMBER_SIZE)))
-    return (uint)-1;
+    return INVALID_LEN;
 
   for (i= 0; i < m_key_parts; i++)
   {
@@ -212,7 +212,7 @@ uint RDBSE_KEYDEF::get_primary_key_tuple(RDBSE_KEYDEF *pk_descr,
     {
       const char* nullp;
       if (!(nullp= reader.read(1)))
-        return (uint)-1;
+        return INVALID_LEN;
       if (*nullp == 0)
       {
         /* This is a NULL value */
@@ -222,14 +222,14 @@ uint RDBSE_KEYDEF::get_primary_key_tuple(RDBSE_KEYDEF *pk_descr,
       {
         /* If NULL marker is not '0', it can be only '1'  */
         if (*nullp != 1)
-          return (uint)-1;
+          return INVALID_LEN;
       }
     }
     
     if (have_value)
     {
       if (pack_info[i].skip_func(&pack_info[i], &reader))
-        return (uint)-1;
+        return INVALID_LEN;
     }
 
     if (pk_key_part != -1)
diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h
index c5c4016..3a4f7ce 100644
--- a/storage/rocksdb/rdb_datadic.h
+++ b/storage/rocksdb/rdb_datadic.h
@@ -115,6 +115,8 @@ public:
 };
 
 
+const uint INVALID_LEN= uint(-1);
+
 /*
   An object of this class represents information about an index in an SQL 
   table. It provides services to encode and decode index tuples.


More information about the commits mailing list