[Commits] 89489f3e8fe: Adding support for the Vertical Partition Engine

jacob.mathew at mariadb.com
Tue Aug 8 17:00:22 EEST 2017


revision-id: 89489f3e8fe0f6ab39248471f799c1a4fe3c7e52 (mariadb-10.2.3-86-g89489f3e8fe)
parent(s): 5811bbd841736a2e5dae57de17c97ceb4ac646da
author: Jacob Mathew
committer: Jacob Mathew
timestamp: 2017-08-03 16:58:02 -0700
message:

Adding support for the Vertical Partition Engine

Contains Spiral patches:
- Spiral Patch 021: 021_mariadb-10.2.0.merge_table.diff MDEV-7719
  - Changes for identifying MyISAM Merge child tables that can be merged.
  - This patch has the following differences compared to the original patch:
    - Changed bit positions for handlerton flags to eliminate conflicts
      with flags merged from MySQL.
- Spiral Patch 048: 048_mariadb-10.2.0.vp_partition.diff MDEV-7744
  - Check and set the partition bitmap.
- Spiral Patch 054: 054_mariadb-10.2.0.for_vp_pruning.diff MDEV-7750
  - Support for vertical partition pruning (see the pruning sketch after this list).
- Spiral Patch 055: 055_mariadb-10.2.0.for_vp_same_columns.diff MDEV-13000
  - Support for MERGE tables in the vertical partition engine.
- Spiral Patch 056: 056_mariadb-10.2.0.partition_top_table_fields.diff
                    MDEV-12970
  - Push down the top table and fields to each partition.
- Spiral Patch 060: 060_mariadb-10.2.0.partition_reset_top_table_fields.diff
                    MDEV-12971
  - Completion of functionality to push down the top table and fields
    to each partition (a push-down sketch also follows this list).
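
The pruning hooks referenced above (patches 048 and 054) surface as two new
handler virtuals, get_full_part_fields() and
choose_partition_from_column_value().  The sketch below is illustrative only
and is not part of this commit: prune_to_row() is a hypothetical caller that
already holds an open partitioned child handler and a row buffer laid out
like table->record[0], and it narrows the read_partitions bitmap to the one
partition that owns that row.

  /*
    Illustrative sketch, not part of the patch; it would live in engine code
    that already includes sql/handler.h.
  */
  static int prune_to_row(handler *part_handler, uchar *row_buf)
  {
    /* Fields used by the partition and subpartition expressions. */
    Field **part_fields= part_handler->get_full_part_fields();
    if (!part_fields)
      return 0;               /* not a partitioned child, nothing to prune */

    /*
      Compute the partition id for the row and shrink read_partitions to
      that single partition (ha_partition::choose_partition_from_column_value).
    */
    return part_handler->choose_partition_from_column_value(row_buf);
  }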
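
Patches 056 and 060 add the push-down path for the parent ("top") table and
its field array.  Again purely as an illustration (the pushing engine's real
code is not in this commit), push_top_table() below is a hypothetical helper
showing how a parent handler could hand its TABLE and fields to an underlying
partitioned child and undo the push afterwards; handler::ha_reset() now also
clears the pushed data as a safety net.

  /* Illustrative sketch, not part of the patch. */
  static int push_top_table(handler *child, TABLE *top_table,
                            Field **top_fields, uint n_top_fields)
  {
    /* ha_partition propagates this to every underlying partition. */
    int error= child->set_top_table_and_fields(top_table, top_fields,
                                               n_top_fields);
    if (error)
      return error;           /* nothing is left pushed on failure */

    /* ... bulk-access work that needs the top table goes here ... */

    /* Explicit cleanup; handler::ha_reset() would also do this. */
    child->clear_top_table_fields();
    return 0;
  }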

---
 include/my_base.h                 |   1 +
 sql/ha_partition.cc               | 226 +++++++++++++++++++++++++++++++++++++-
 sql/ha_partition.h                |   6 +
 sql/handler.cc                    |   4 +-
 sql/handler.h                     |  10 +-
 sql/sql_admin.cc                  |   2 +-
 sql/sql_base.cc                   |  14 ++-
 storage/myisammrg/ha_myisammrg.cc |   2 +-
 8 files changed, 253 insertions(+), 12 deletions(-)

diff --git a/include/my_base.h b/include/my_base.h
index 78d54b4e3f1..30b7a25936f 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -196,6 +196,7 @@ enum ha_extra_function {
   */
   HA_EXTRA_ADD_CHILDREN_LIST,
   HA_EXTRA_ATTACH_CHILDREN,
+  HA_EXTRA_INIT_AFTER_ATTACH_CHILDREN,
   HA_EXTRA_IS_ATTACHED_CHILDREN,
   HA_EXTRA_DETACH_CHILDREN,
   HA_EXTRA_DETACH_CHILD,
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index eef625a5de0..80cc2e2b236 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4259,6 +4259,7 @@ int ha_partition::pre_write_row(uchar * buf)
   sql_mode_t saved_sql_mode= thd->variables.sql_mode;
   bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
   DBUG_ENTER("ha_partition::pre_write_row");
+  DBUG_PRINT("info", ("partition this=%p", this));
   DBUG_ASSERT(buf == m_rec0);
 
   /*
@@ -4313,6 +4314,7 @@ int ha_partition::pre_write_row(uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
+  DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
   start_part_bulk_insert(thd, part_id);
 
   tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
@@ -4377,6 +4379,7 @@ int ha_partition::write_row(uchar * buf)
   sql_mode_t saved_sql_mode= thd->variables.sql_mode;
   bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
   DBUG_ENTER("ha_partition::write_row");
+  DBUG_PRINT("info", ("partition this=%p", this));
   DBUG_ASSERT(buf == m_rec0);
 
   /*
@@ -4445,6 +4448,7 @@ int ha_partition::write_row(uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
+  DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
   start_part_bulk_insert(thd, part_id);
 
   tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
@@ -4490,6 +4494,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
   int error= 0;
   longlong func_value;
   DBUG_ENTER("ha_partition::update_row");
+  DBUG_PRINT("info", ("partition this=%p", this));
   m_err_rec= NULL;
 
   // Need to read partition-related columns, to locate the row's partition:
@@ -4528,11 +4533,14 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
   */
   if (old_part_id != m_last_part)
   {
+    DBUG_PRINT("info", ("partition old_part_id=%d", old_part_id));
+    DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
     m_err_rec= old_data;
     DBUG_RETURN(HA_ERR_ROW_IN_WRONG_PARTITION);
   }
 
   m_last_part= new_part_id;
+  DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
   start_part_bulk_insert(thd, new_part_id);
   if (new_part_id == old_part_id)
   {
@@ -4635,6 +4643,7 @@ int ha_partition::delete_row(const uchar *buf)
   int error;
   THD *thd= ha_thd();
   DBUG_ENTER("ha_partition::delete_row");
+  DBUG_PRINT("info", ("partition this=%p", this));
   m_err_rec= NULL;
 
   DBUG_ASSERT(bitmap_is_subset(&m_part_info->full_part_field_set,
@@ -4676,6 +4685,7 @@ int ha_partition::delete_row(const uchar *buf)
   }
 
   m_last_part= part_id;
+  DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
   tmp_disable_binlog(thd);
   error= m_file[part_id]->ha_delete_row(buf);
   reenable_binlog(thd);
@@ -5015,6 +5025,7 @@ int ha_partition::pre_rnd_init(bool scan)
   uint i= 0;
   uint32 part_id;
   DBUG_ENTER("ha_partition::pre_rnd_init");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   /*
     For operations that may need to change data, we may need to extend
@@ -5031,7 +5042,10 @@ int ha_partition::pre_rnd_init(bool scan)
     */
     if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
                               table->write_set))
+    {
+      DBUG_PRINT("info", ("partition set full bitmap"));
       bitmap_set_all(table->read_set);
+    }
     else
     {
       /*
@@ -5040,6 +5054,7 @@ int ha_partition::pre_rnd_init(bool scan)
         fields of the partition functions are read such that we can
         calculate the partition id to place updated and deleted records.
       */
+      DBUG_PRINT("info", ("partition set part_field bitmap"));
       bitmap_union(table->read_set, &m_part_info->full_part_field_set);
     }
   }
@@ -5163,7 +5178,10 @@ int ha_partition::rnd_init(bool scan)
     */
     if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
                               table->write_set))
+    {
+      DBUG_PRINT("info", ("partition set full bitmap"));
       bitmap_set_all(table->read_set);
+    }
     else
     {
       /*
@@ -5172,6 +5190,7 @@ int ha_partition::rnd_init(bool scan)
         fields of the partition functions are read such that we can
         calculate the partition id to place updated and deleted records.
       */
+      DBUG_PRINT("info", ("partition set part_field bitmap"));
       bitmap_union(table->read_set, &m_part_info->full_part_field_set);
     }
   }
@@ -5344,6 +5363,7 @@ int ha_partition::rnd_next(uchar *buf)
   int result= HA_ERR_END_OF_FILE, error;
   uint part_id= m_part_spec.start_part;
   DBUG_ENTER("ha_partition::rnd_next");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   /* upper level will increment this once again at end of call */
   decrement_statistics(&SSV::ha_read_rnd_next_count);
@@ -5378,6 +5398,7 @@ int ha_partition::rnd_next(uchar *buf)
     if (!result)
     {
       m_last_part= part_id;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
       m_part_spec.start_part= part_id;
       table->status= 0;
       DBUG_RETURN(0);
@@ -5402,6 +5423,7 @@ int ha_partition::rnd_next(uchar *buf)
       break;
     }
     m_last_part= part_id;
+    DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
     m_part_spec.start_part= part_id;
     file= m_file[part_id];
     late_extra_cache(part_id);
@@ -5483,6 +5505,7 @@ int ha_partition::rnd_pos(uchar * buf, uchar *pos)
   uint part_id;
   handler *file;
   DBUG_ENTER("ha_partition::rnd_pos");
+  DBUG_PRINT("info", ("partition this=%p", this));
   decrement_statistics(&SSV::ha_read_rnd_count);
 
   part_id= uint2korr((const uchar *) pos);
@@ -5490,6 +5513,7 @@ int ha_partition::rnd_pos(uchar * buf, uchar *pos)
   file= m_file[part_id];
   DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id));
   m_last_part= part_id;
+  DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
   DBUG_RETURN(file->ha_rnd_pos(buf, (pos + PARTITION_BYTES_IN_POS)));
 }
 
@@ -5745,6 +5769,7 @@ int ha_partition::index_init(uint inx, bool sorted)
   int error= 0;
   uint i;
   DBUG_ENTER("ha_partition::index_init");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   DBUG_PRINT("info", ("inx %u sorted %u", inx, sorted));
   active_index= inx;
@@ -5781,7 +5806,19 @@ int ha_partition::index_init(uint inx, bool sorted)
     But this is required for operations that may need to change data only.
   */
   if (get_lock_type() == F_WRLCK)
-    bitmap_union(table->read_set, &m_part_info->full_part_field_set);
+  {
+    if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
+                              table->write_set))
+    {
+      DBUG_PRINT("info", ("partition set full bitmap"));
+      bitmap_set_all(table->read_set);
+    }
+    else
+    {
+      DBUG_PRINT("info", ("partition set part_field bitmap"));
+      bitmap_union(table->read_set, &m_part_info->full_part_field_set);
+    }
+  }
   if (sorted)
   {
     /*
@@ -6116,6 +6153,7 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key,
                                  enum ha_rkey_function find_flag)
 {
   DBUG_ENTER("ha_partition::index_read_map");
+  DBUG_PRINT("info", ("partition this=%p", this));
   decrement_statistics(&SSV::ha_read_key_count);
   end_range= 0;
   m_index_scan_type= partition_index_read;
@@ -6383,6 +6421,7 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index,
 {
   int error= HA_ERR_KEY_NOT_FOUND;
   DBUG_ENTER("ha_partition::index_read_idx_map");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   if (find_flag == HA_READ_KEY_EXACT)
   {
@@ -6417,7 +6456,10 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index,
         break;
     }
     if (part <= m_part_spec.end_part)
+    {
       m_last_part= part;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
+    }
   }
   else
   {
@@ -7277,7 +7319,10 @@ int ha_partition::ft_init()
     */
     if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
                               table->write_set))
+    {
+      DBUG_PRINT("info", ("partition set full bitmap"));
       bitmap_set_all(table->read_set);
+    }
     else
     {
       /*
@@ -7286,6 +7331,7 @@ int ha_partition::ft_init()
         fields of the partition functions are read such that we can
         calculate the partition id to place updated and deleted records.
       */
+      DBUG_PRINT("info", ("partition set part_field bitmap"));
       bitmap_union(table->read_set, &m_part_info->full_part_field_set);
     }
   }
@@ -7366,7 +7412,10 @@ int ha_partition::pre_ft_init()
     */
     if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
                               table->write_set))
+    {
+      DBUG_PRINT("info", ("partition set full bitmap"));
       bitmap_set_all(table->read_set);
+    }
     else
     {
       /*
@@ -7375,6 +7424,7 @@ int ha_partition::pre_ft_init()
         fields of the partition functions are read such that we can
         calculate the partition id to place updated and deleted records.
       */
+      DBUG_PRINT("info", ("partition set part_field bitmap"));
       bitmap_union(table->read_set, &m_part_info->full_part_field_set);
     }
   }
@@ -7653,6 +7703,7 @@ int ha_partition::ft_read(uchar *buf)
     if (!result)
     {
       m_last_part= part_id;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
       m_part_spec.start_part= part_id;
       table->status= 0;
       DBUG_RETURN(0);
@@ -7681,6 +7732,7 @@ int ha_partition::ft_read(uchar *buf)
       break;
     }
     m_last_part= part_id;
+    DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
     m_part_spec.start_part= part_id;
     file= m_file[part_id];
     DBUG_PRINT("info", ("ft_init on partition %d", part_id));
@@ -8004,6 +8056,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
   handler *file;
   int error;
   DBUG_ENTER("ha_partition::handle_unordered_next");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   if (m_part_spec.start_part >= m_tot_parts)
   {
@@ -8024,6 +8077,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
           multi_range_read_next(&m_range_info[m_part_spec.start_part])))
     {
       m_last_part= m_part_spec.start_part;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
       DBUG_RETURN(0);
     }
   }
@@ -8032,6 +8086,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
     if (!(error= file->read_range_next()))
     {
       m_last_part= m_part_spec.start_part;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
       DBUG_RETURN(0);
     }
   }
@@ -8041,6 +8096,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
                                           m_start_key.length)))
     {
       m_last_part= m_part_spec.start_part;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
       DBUG_RETURN(0);
     }
   }
@@ -8049,6 +8105,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
     if (!(error= file->ha_index_next(buf)))
     {
       m_last_part= m_part_spec.start_part;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
       DBUG_RETURN(0);                           // Row was in range
     }
   }
@@ -8084,6 +8141,7 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
   uint i= m_part_spec.start_part;
   int saved_error= HA_ERR_END_OF_FILE;
   DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   /* Read next partition that includes start_part */
   if (i)
@@ -8128,6 +8186,7 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
     if (!error)
     {
       m_last_part= i;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
       DBUG_RETURN(0);
     }
     if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND))
@@ -8184,6 +8243,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
   uchar *part_rec_buf_ptr= m_ordered_rec_buffer;
   int saved_error= HA_ERR_END_OF_FILE;
   DBUG_ENTER("ha_partition::handle_ordered_index_scan");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   if (!m_using_extended_keys &&
       (error= loop_extra(HA_EXTRA_STARTING_ORDERED_INDEX_SCAN)))
@@ -8400,12 +8460,66 @@ void ha_partition::return_top_record(uchar *buf)
   uint part_id;
   uchar *key_buffer= queue_top(&m_queue);
   uchar *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+  DBUG_ENTER("ha_partition::return_top_record");
+  DBUG_PRINT("info", ("partition this=%p", this));
 
   part_id= uint2korr(key_buffer);
   memcpy(buf, rec_buffer, m_rec_length);
   m_last_part= part_id;
+  DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
   m_top_entry= part_id;
   table->status= 0;                             // Found an existing row
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+  Return the array of all fields used in partition
+  and subpartition expressions.
+
+  SYNOPSIS
+    get_full_part_fields()
+
+  RETURN VALUE
+    Array of all fields used in partition and subpartition expressions
+*/
+
+Field **ha_partition::get_full_part_fields()
+{
+  DBUG_ENTER("ha_partition::get_full_part_fields");
+  DBUG_RETURN(m_part_info->full_part_field_array);
+}
+
+
+
+/*
+  Calculate the partition id for a column value and set its bit in the
+  read partition bitmap.
+
+  SYNOPSIS
+    choose_partition_from_column_value()
+    in:buf                        Column value
+
+  RETURN VALUE
+    >0                            Error code
+    0                             Success
+*/
+
+int ha_partition::choose_partition_from_column_value(uchar *buf)
+{
+  int error;
+  uint32 part_id;
+  DBUG_ENTER("ha_partition::choose_partition_from_column_value");
+  DBUG_PRINT("info", ("partition buf=%p", buf));
+  DBUG_PRINT("info", ("partition m_rec0=%p", m_rec0));
+
+  if ((error = get_part_for_delete(buf, m_rec0, m_part_info, &part_id)))
+    DBUG_RETURN(error);
+
+  DBUG_PRINT("info", ("partition choose patition=%u", part_id));
+  bitmap_clear_all(&(m_part_info->read_partitions));
+  bitmap_set_bit(&(m_part_info->read_partitions), part_id);
+  DBUG_RETURN(0);
 }
 
 
@@ -8846,11 +8960,17 @@ int ha_partition::info(uint flag)
     bool auto_inc_is_first_in_idx= (table_share->next_number_keypart == 0);
     DBUG_PRINT("info", ("HA_STATUS_AUTO"));
     if (!table->found_next_number_field)
+    {
       stats.auto_increment_value= 0;
+      DBUG_PRINT("info", ("HA_STATUS_AUTO 1 stats.auto_increment_value=%llu",
+                 stats.auto_increment_value));
+    }
     else if (part_share->auto_inc_initialized)
     {
       lock_auto_increment();
       stats.auto_increment_value= part_share->next_auto_inc_val;
+      DBUG_PRINT("info", ("HA_STATUS_AUTO 2 stats.auto_increment_value=%llu",
+                 stats.auto_increment_value));
       unlock_auto_increment();
     }
     else
@@ -8858,7 +8978,11 @@ int ha_partition::info(uint flag)
       lock_auto_increment();
       /* to avoid two concurrent initializations, check again when locked */
       if (part_share->auto_inc_initialized)
+      {
         stats.auto_increment_value= part_share->next_auto_inc_val;
+        DBUG_PRINT("info", ("HA_STATUS_AUTO 3 stats.auto_increment_value=%llu",
+                   stats.auto_increment_value));
+      }
       else
       {
         /*
@@ -8882,6 +9006,8 @@ int ha_partition::info(uint flag)
 
         DBUG_ASSERT(auto_increment_value);
         stats.auto_increment_value= auto_increment_value;
+        DBUG_PRINT("info", ("HA_STATUS_AUTO 4 stats.auto_increment_value=%llu",
+                   stats.auto_increment_value));
         if (auto_inc_is_first_in_idx)
         {
           set_if_bigger(part_share->next_auto_inc_val,
@@ -9548,11 +9674,15 @@ int ha_partition::extra(enum ha_extra_function operation)
     cached_table_flags |= additional_table_flags;
     break;
   }
+  case HA_EXTRA_INIT_AFTER_ATTACH_CHILDREN:
+    m_rec0 = table->record[0];
+    DBUG_RETURN(loop_extra(operation));
   case HA_EXTRA_IS_ATTACHED_CHILDREN:
     DBUG_RETURN(loop_extra(operation));
   case HA_EXTRA_DETACH_CHILDREN:
     cached_table_flags &= ~((ulonglong)
                             (HA_HAS_RECORDS | HA_CAN_BULK_ACCESS));
+    m_rec0 = table->record[0];
     DBUG_RETURN(loop_extra(operation));
   case HA_EXTRA_MARK_AS_LOG_TABLE:
   /*
@@ -10388,6 +10518,8 @@ void ha_partition::print_error(int error, myf errflag)
     {
       DBUG_ASSERT(0);
       m_last_part= 0;
+      DBUG_PRINT("info", ("partition m_last_part=%d", m_last_part));
+      DBUG_PRINT("info", ("partition m_last_part=%p", &m_last_part));
     }
     m_file[m_last_part]->print_error(error, errflag);
   }
@@ -11496,6 +11628,42 @@ void ha_partition::cond_pop()
 
 
 /**
+  Check and set the partition bitmap for partitions involved
+  in an update operation.
+
+  SYNOPSIS
+    check_and_set_bitmap_for_update()
+    rnd                Is random access
+
+  RETURN VALUE
+    NONE
+*/
+
+void ha_partition::check_and_set_bitmap_for_update(bool rnd)
+{
+  handler **file;
+  DBUG_ENTER("ha_partition::check_and_set_bitmap_for_update");
+  DBUG_PRINT("info", ("partition this=%p", this));
+
+  for (file= m_file; *file; file++)
+    (*file)->check_and_set_bitmap_for_update(rnd);
+
+  if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
+                            table->write_set))
+  {
+    DBUG_PRINT("info", ("partition set full bitmap"));
+    bitmap_set_all(table->read_set);
+  }
+  else
+  {
+    DBUG_PRINT("info", ("partition set part_field bitmap"));
+    bitmap_union(table->read_set, &m_part_info->full_part_field_set);
+  }
+  DBUG_VOID_RETURN;
+}
+
+
+/**
   Execute a bulk access request
 
   SYNOPSIS
@@ -11523,12 +11691,68 @@ void ha_partition::bulk_req_exec()
 }
 
 
+/**
+  Push down the top table and fields to each partition
+
+  SYNOPSIS
+    set_top_table_and_fields()
+    top_table                 Top table
+    top_table_field           Array of top table fields
+    top_table_fields          Number of top table fields
+
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+*/
+
+int ha_partition::set_top_table_and_fields(TABLE *top_table,
+                                           Field **top_table_field,
+                                           uint top_table_fields)
+{
+  int error;
+  handler **file, **file_err;
+  DBUG_ENTER("ha_partition::set_top_table_and_fields");
+
+  if (!set_top_table_fields)
+  {
+    for (file= m_file; *file; file++)
+    {
+      if ((error = (*file)->set_top_table_and_fields(top_table, top_table_field, top_table_fields)))
+        goto err;
+    }
+    this->top_table = top_table;
+    this->top_table_field = top_table_field;
+    this->top_table_fields = top_table_fields;
+    set_top_table_fields = TRUE;
+  }
+  DBUG_RETURN(0);
+
+err:
+  for (file_err= m_file; file_err < file; file_err++)
+    (*file_err)->clear_top_table_fields();
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Push down the clearing of the top table and fields to each partition
+
+  SYNOPSIS
+    clear_top_table_fields()
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::clear_top_table_fields()
 {
   handler **file;
   if (set_top_table_fields)
   {
     set_top_table_fields = FALSE;
+    top_table = NULL;
+    top_table_field = NULL;
+    top_table_fields = 0;
     for (file= m_file; *file; file++)
       (*file)->clear_top_table_fields();
   }
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index c00b86b008a..f4c52985af0 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -353,6 +353,8 @@ class ha_partition :public handler
      m_part_info= part_info;
      m_is_sub_partitioned= part_info->is_sub_partitioned();
   }
+  virtual Field **get_full_part_fields();
+  virtual int choose_partition_from_column_value(uchar *buf);
   /*
     -------------------------------------------------------------------------
     MODULE create/delete handler object
@@ -1339,6 +1341,9 @@ class ha_partition :public handler
   */
     virtual const COND *cond_push(const COND *cond);
     virtual void cond_pop();
+    virtual int set_top_table_and_fields(TABLE *top_table,
+                                         Field **top_table_field,
+                                         uint top_table_fields);
     virtual void clear_top_table_fields();
 
     private:
@@ -1369,6 +1374,7 @@ class ha_partition :public handler
     virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
     virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
     virtual TABLE_LIST *get_next_global_for_child();
+    virtual void check_and_set_bitmap_for_update(bool rnd);
 
   /*
     -------------------------------------------------------------------------
diff --git a/sql/handler.cc b/sql/handler.cc
index 84ddc73ceed..ac6401b9876 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -5935,8 +5935,10 @@ int handler::ha_reset()
   mark_trx_read_write_done= check_table_binlog_row_based_done=
     check_table_binlog_row_based_result= 0;
   /* Reset information about pushed engine conditions */
-  cancel_pushed_idx_cond();
   /* Reset information about pushed index conditions */
+  cancel_pushed_idx_cond();
+  /* Reset information about pushed top table and fields */
+  clear_top_table_fields();
   DBUG_RETURN(reset());
 }
 
diff --git a/sql/handler.h b/sql/handler.h
index 77db208a9cc..248b72bfebf 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1399,9 +1399,6 @@ handlerton *ha_default_tmp_handlerton(THD *thd);
 #define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported
 #define HTON_SUPPORT_LOG_TABLES      (1 << 7) //Engine supports log tables
 #define HTON_NO_PARTITION            (1 << 8) //Not partition of these tables
-#define HTON_CAN_MULTISTEP_MERGE     (1 << 9) //You can merge mearged tables
-// Engine needs to access the main connect string in partitions
-#define HTON_CAN_READ_CONNECT_STRING_IN_PARTITION (1 << 10)
 
 /*
   This flag should be set when deciding that the engine does not allow
@@ -1422,6 +1419,10 @@ handlerton *ha_default_tmp_handlerton(THD *thd);
 // MySQL compatibility. Unused.
 #define HTON_SUPPORTS_FOREIGN_KEYS   (1 << 0) //Foreign key constraint supported.
 
+#define HTON_CAN_MERGE               (1 <<11) //Merge type table
+// Engine needs to access the main connect string in partitions
+#define HTON_CAN_READ_CONNECT_STRING_IN_PARTITION (1 <<12)
+
 class Ha_trx_info;
 
 struct THD_TRANS
@@ -3456,6 +3457,8 @@ class handler :public Sql_alloc
     return 0;
   }
   virtual void set_part_info(partition_info *part_info) {return;}
+  virtual Field **get_full_part_fields() { return NULL; }
+  virtual int choose_partition_from_column_value(uchar *buf) { return 0; }
 
   virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
 
@@ -3727,6 +3730,7 @@ class handler :public Sql_alloc
 
  /* Needed for partition / spider */
   virtual TABLE_LIST *get_next_global_for_child() { return NULL; }
+  virtual void check_and_set_bitmap_for_update(bool rnd) { return; }
 
  /**
    Part of old, deprecated in-place ALTER API.
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index bc5b9bde8e8..d8afa51da03 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -182,7 +182,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
     goto end;					// No data file
 
   /* A MERGE table must not come here. */
-  DBUG_ASSERT(table->file->ht->db_type != DB_TYPE_MRG_MYISAM);
+  DBUG_ASSERT(!(table->file->ht->flags & HTON_CAN_MERGE));
 
   // Name of data file
   strxmov(from, table->s->normalized_path.str, ext[1], NullS);
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 5eade2994ee..df9fbc25d91 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -974,7 +974,7 @@ TABLE_LIST* find_dup_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
   if (table->table)
   {
     /* All MyISAMMRG children are plain MyISAM tables. */
-    DBUG_ASSERT(table->table->file->ht->db_type != DB_TYPE_MRG_MYISAM);
+    DBUG_ASSERT(!(table->table->file->ht->flags & HTON_CAN_MERGE));
 
     table= table->find_underlying_table(table->table);
     /*
@@ -1080,7 +1080,8 @@ unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
   table= table->find_table_for_update();
 
   if (table->table &&
-      table->table->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE)
+      ((table->table->file->ht->flags & HTON_CAN_MERGE) ||
+       (table->table->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE)))
   {
     TABLE_LIST *child;
     dup= NULL;
@@ -1089,7 +1090,8 @@ unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
          child= child->next_global)
     {
       if (child->table &&
-          child->table->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE)
+          ((child->table->file->ht->flags & HTON_CAN_MERGE) ||
+           (child->table->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE)))
         continue;
 
       /*
@@ -4059,7 +4061,8 @@ bool open_tables(THD *thd, const DDL_options_st &options,
       continue;
 
     /* Schema tables may not have a TABLE object here. */
-    if (tbl->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE)
+    if ((tbl->file->ht->flags & HTON_CAN_MERGE) ||
+        (tbl->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE))
     {
       /* MERGE tables need to access parent and child TABLE_LISTs. */
       DBUG_ASSERT(tbl->pos_in_table_list == tables);
@@ -4604,7 +4607,8 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type,
     */
     DBUG_ASSERT(table_list->table);
     table= table_list->table;
-    if (table->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE)
+    if ((table->file->ht->flags & HTON_CAN_MERGE) ||
+        (table->file->ha_table_flags() & HA_CAN_MULTISTEP_MERGE))
     {
       /* A MERGE table must not come here. */
       /* purecov: begin tested */
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 3d91aa67793..8689205a21e 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -1755,7 +1755,7 @@ static int myisammrg_init(void *p)
   myisammrg_hton->db_type= DB_TYPE_MRG_MYISAM;
   myisammrg_hton->create= myisammrg_create_handler;
   myisammrg_hton->panic= myisammrg_panic;
-  myisammrg_hton->flags= HTON_NO_PARTITION;
+  myisammrg_hton->flags= HTON_NO_PARTITION | HTON_CAN_MERGE;
   myisammrg_hton->tablefile_extensions= ha_myisammrg_exts;
 
   return 0;
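
The intent of the handler.h, sql_base.cc and ha_myisammrg.cc hunks above
appears to be that a MERGE-style engine advertises itself with the new
HTON_CAN_MERGE handlerton flag instead of being special-cased on
DB_TYPE_MRG_MYISAM, and that it can ask an attached partitioned child to
re-initialise its record pointer once the children are in place
(HA_EXTRA_INIT_AFTER_ATTACH_CHILDREN).  A minimal sketch, assuming a
hypothetical engine init function and attach hook, neither of which is part
of this commit:

  /* Illustrative sketch only. */
  static int example_merge_engine_init(void *p)
  {
    handlerton *hton= (handlerton *) p;
    hton->flags= HTON_CAN_MERGE;    /* identifies a merge-type engine */
    return 0;
  }

  static int example_after_attach_children(handler *child)
  {
    /*
      ha_partition handles this extra() call by re-reading table->record[0]
      into m_rec0 and forwarding the call to each partition.
    */
    return child->extra(HA_EXTRA_INIT_AFTER_ATTACH_CHILDREN);
  }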

