[Commits] f40ab24b897: Spiral Patch 020: 020_mariadb-10.2.0.bulk_access.diff MDEV-7718

jacob.mathew at mariadb.com
Tue Aug 8 16:58:14 EEST 2017


revision-id: f40ab24b89715984e5c3c0d71f5412d121d0dfe4 (mariadb-10.2.3-84-gf40ab24b897)
parent(s): b685dfc68635fbe3b6319292580ad9d13351dc85
author: Jacob Mathew
committer: Jacob Mathew
timestamp: 2017-07-31 16:37:55 -0700
message:

Spiral Patch 020: 020_mariadb-10.2.0.bulk_access.diff MDEV-7718

- Support for bulk access in the partition engine (a usage sketch follows
  below).
- This patch differs from the original patch as follows:
  - Changed the bit position of the HA_CAN_BULK_ACCESS partition handler flag.
  - Omitted the parts of the patch that pertain to direct update and
    direct delete, which are a separate feature. The omitted changes will
    be committed as part of the direct update/delete feature.
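
The bulk access interface added here is two-phase: pre_* calls
(pre_write_row(), pre_rnd_init(), pre_index_init(), ...) queue work against
the underlying partitions, and bulk_req_exec() later executes whatever was
queued. ha_partition fans each call out to its child handlers and records, in
bulk_access_exec_bitmap, which partitions received queued work. Below is a
minimal, self-contained C++ sketch of that call pattern; MockPartition,
MockPartitionHandler and the row strings are illustrative stand-ins, not the
real MariaDB handler classes or API.

  // Standalone sketch of the pre-call / bulk_req_exec() pattern added by this
  // patch.  The types below are simplified mocks; only the call sequence and
  // the exec bitmap mirror ha_partition.
  #include <bitset>
  #include <cstddef>
  #include <cstdio>
  #include <vector>

  struct MockPartition {
    int id;
    int pre_write_row(const char *row) {   // queue an insert (cf. ha_pre_write_row)
      std::printf("partition %d: queued row '%s'\n", id, row);
      return 0;                             // 0 == success, like the handler API
    }
    void bulk_req_exec() {                  // execute whatever was queued
      std::printf("partition %d: executing queued requests\n", id);
    }
  };

  struct MockPartitionHandler {             // plays the role of ha_partition
    std::vector<MockPartition> parts;
    std::bitset<64> exec_bitmap;            // cf. bulk_access_exec_bitmap

    int pre_write_row(const char *row, int part_id) {
      int err = parts[part_id].pre_write_row(row);
      if (!err)
        exec_bitmap.set(part_id);           // remember partitions with queued work
      return err;
    }
    void bulk_req_exec() {                  // forward only to partitions with work
      for (std::size_t i = 0; i < parts.size(); i++)
        if (exec_bitmap.test(i))
          parts[i].bulk_req_exec();
      exec_bitmap.reset();                  // like bitmap_clear_all() in the patch
    }
  };

  int main() {
    MockPartitionHandler h;
    h.parts = {{0}, {1}, {2}};
    h.pre_write_row("row A", 1);            // pre phase: queue per-partition work
    h.pre_write_row("row B", 2);
    h.bulk_req_exec();                      // exec phase: flush queued requests
    return 0;
  }

In the real patch the per-partition state is richer (PARTITION_BULK_ACCESS_INFO
chains, auto_increment handling, binlog suppression), but the
bitmap-plus-forwarding shape is the same.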

---
 sql/ha_partition.cc | 913 +++++++++++++++++++++++++++++++++++++++++++++++++---
 sql/ha_partition.h  |  42 +++
 sql/handler.h       |  72 ++++-
 3 files changed, 967 insertions(+), 60 deletions(-)

diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 08c5920196e..f9fe27c9fd8 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -245,6 +245,14 @@ const uint32 ha_partition::NO_CURRENT_PART_ID= NOT_A_PARTITION_ID;
 
 ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
   :handler(hton, share)
+  , m_pre_calling(FALSE)
+  , m_pre_call_use_parallel(FALSE)
+  , bulk_access_started(FALSE)
+  , bulk_access_executing(FALSE)
+  , bulk_access_pre_called(FALSE)
+  , bulk_access_info_first(NULL)
+  , bulk_access_info_current(NULL)
+  , bulk_access_info_exec_tgt(NULL)
 {
   DBUG_ENTER("ha_partition::ha_partition(table)");
   init_alloc_root(&m_mem_root, 512, 512, MYF(0));
@@ -266,6 +274,14 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
 
 ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
   :handler(hton, NULL)
+  , m_pre_calling(FALSE)
+  , m_pre_call_use_parallel(FALSE)
+  , bulk_access_started(FALSE)
+  , bulk_access_executing(FALSE)
+  , bulk_access_pre_called(FALSE)
+  , bulk_access_info_first(NULL)
+  , bulk_access_info_current(NULL)
+  , bulk_access_info_exec_tgt(NULL)
 {
   DBUG_ENTER("ha_partition::ha_partition(part_info)");
   DBUG_ASSERT(part_info);
@@ -294,6 +310,14 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share,
                            ha_partition *clone_arg,
                            MEM_ROOT *clone_mem_root_arg)
   :handler(hton, share)
+  , m_pre_calling(FALSE)
+  , m_pre_call_use_parallel(FALSE)
+  , bulk_access_started(FALSE)
+  , bulk_access_executing(FALSE)
+  , bulk_access_pre_called(FALSE)
+  , bulk_access_info_first(NULL)
+  , bulk_access_info_current(NULL)
+  , bulk_access_info_exec_tgt(NULL)
 {
   DBUG_ENTER("ha_partition::ha_partition(clone)");
   init_alloc_root(&m_mem_root, 512, 512, MYF(0));
@@ -3343,6 +3367,7 @@ void ha_partition::free_partition_bitmaps()
 {
   /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
   my_bitmap_free(&m_bulk_insert_started);
+  my_bitmap_free(&bulk_access_exec_bitmap);
   my_bitmap_free(&m_locked_partitions);
   my_bitmap_free(&m_partitions_to_reset);
   my_bitmap_free(&m_key_not_found_partitions);
@@ -3366,6 +3391,13 @@ bool ha_partition::init_partition_bitmaps()
   if (my_bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
     DBUG_RETURN(true);
 
+  /*
+    Initialize the bitmap we use to keep track of partitions which have
+    executing bulk access requests
+  */
+  if (my_bitmap_init(&bulk_access_exec_bitmap, NULL, m_tot_parts, FALSE))
+    DBUG_RETURN(true);
+
   /* Initialize the bitmap we use to keep track of locked partitions */
   if (my_bitmap_init(&m_locked_partitions, NULL, m_tot_parts, FALSE))
     DBUG_RETURN(true);
@@ -3730,6 +3762,17 @@ int ha_partition::close(void)
   destroy_record_priority_queue();
   free_partition_bitmaps();
 
+  /* Free bulk access info for active bulk access requests */
+  if (bulk_access_info_first)
+  {
+    do
+    {
+      bulk_access_info_current = bulk_access_info_first->next;
+      delete_bulk_access_info(bulk_access_info_first);
+      bulk_access_info_first = bulk_access_info_current;
+    } while (bulk_access_info_first);
+  }
+
   /* Free active mrr_ranges */
   for (i= 0; i < m_tot_parts; i++)
   {
@@ -3829,7 +3872,7 @@ int ha_partition::close(void)
 
 int ha_partition::external_lock(THD *thd, int lock_type)
 {
-  uint error;
+  int error;
   uint i, first_used_partition;
   MY_BITMAP *used_partitions;
   DBUG_ENTER("ha_partition::external_lock");
@@ -3893,6 +3936,30 @@ int ha_partition::external_lock(THD *thd, int lock_type)
 }
 
 
+/**
+   Based on the table lock type, update the access info for each partition
+
+   @param thd                   Thread handle
+   @param lock_type             Table lock type
+*/
+
+int ha_partition::additional_lock(THD *thd, enum thr_lock_type lock_type)
+{
+  int error;
+  handler **file;
+  DBUG_ENTER("ha_partition::additional_lock");
+  file= m_file;
+  do
+  {
+    DBUG_PRINT("info", ("additional lock for partition %d",
+               (int)(file - m_file)));
+    if ((error= (*file)->additional_lock(thd, lock_type)))
+      DBUG_RETURN(error);
+  } while (*(++file));
+  DBUG_RETURN(0);
+}
+
+
 /*
   Get the lock(s) for the table and perform conversion of locks if needed
 
@@ -4127,11 +4194,11 @@ void ha_partition::try_semi_consistent_read(bool yes)
                 MODULE change record
 ****************************************************************************/
 
-/*
-  Insert a row to the table
+/**
+  Bulk-insert a row to the table
 
   SYNOPSIS
-    write_row()
+    pre_write_row()
     buf                        The row in MySQL Row Format
 
   RETURN VALUE
@@ -4139,8 +4206,8 @@ void ha_partition::try_semi_consistent_read(bool yes)
     0                          Success
 
   DESCRIPTION
-    write_row() inserts a row. buf() is a byte array of data, normally
-    record[0].
+    pre_write_row() bulk-inserts a row. buf() is a byte array of data,
+    normally record[0].
 
     You can use the field information to extract the data from the native byte
     array type.
@@ -4155,16 +4222,12 @@ void ha_partition::try_semi_consistent_read(bool yes)
     ha_berkeley.cc has a variant of how to store it intact by "packing" it
     for ha_berkeley's own native storage type.
 
-    Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
-    sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
-
-    ADDITIONAL INFO:
-
+  ADDITIONAL INFO:
     We have to set auto_increment fields, because those may be used in
     determining which partition the row should be written to.
 */
 
-int ha_partition::write_row(uchar * buf)
+int ha_partition::pre_write_row(uchar * buf)
 {
   uint32 part_id;
   int error;
@@ -4174,11 +4237,11 @@ int ha_partition::write_row(uchar * buf)
   THD *thd= ha_thd();
   sql_mode_t saved_sql_mode= thd->variables.sql_mode;
   bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
-  DBUG_ENTER("ha_partition::write_row");
+  DBUG_ENTER("ha_partition::pre_write_row");
   DBUG_ASSERT(buf == m_rec0);
 
   /*
-    If we have an auto_increment column and we are writing a changed row
+    If we have an auto_increment column and we are pre-writing a changed row
     or a new row, then update the auto_increment value in the record.
   */
   if (have_auto_increment)
@@ -4227,6 +4290,131 @@ int ha_partition::write_row(uchar * buf)
     m_part_info->err_value= func_value;
     goto exit;
   }
+  m_last_part= part_id;
+  DBUG_PRINT("info", ("Insert in partition %d", part_id));
+  start_part_bulk_insert(thd, part_id);
+
+  tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+  error= m_file[part_id]->ha_pre_write_row(buf);
+  if (!error)
+    bitmap_set_bit(&bulk_access_exec_bitmap, part_id);
+  if (have_auto_increment && !table->s->next_number_keypart)
+    set_auto_increment_if_higher(table->next_number_field);
+  reenable_binlog(thd);
+exit:
+  thd->variables.sql_mode= saved_sql_mode;
+  table->auto_increment_field_not_null= saved_auto_inc_field_not_null;
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Insert a row to the table
+
+  SYNOPSIS
+    write_row()
+    buf                        The row in MySQL Row Format
+
+  RETURN VALUE
+    >0                         Error code
+    0                          Success
+
+  DESCRIPTION
+    write_row() inserts a row. buf() is a byte array of data, normally
+    record[0].
+
+    You can use the field information to extract the data from the native byte
+    array type.
+
+    Example of this would be:
+    for (Field **field=table->field ; *field ; field++)
+    {
+      ...
+    }
+
+    See ha_tina.cc for a variant of extracting all of the data as strings.
+    ha_berkeley.cc has a variant of how to store it intact by "packing" it
+    for ha_berkeley's own native storage type.
+
+    Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+    sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+
+    ADDITIONAL INFO:
+
+    We have to set auto_increment fields, because those may be used in
+    determining which partition the row should be written to.
+*/
+
+int ha_partition::write_row(uchar * buf)
+{
+  uint32 part_id;
+  int error;
+  longlong func_value;
+  bool have_auto_increment= table->next_number_field && buf == table->record[0];
+  my_bitmap_map *old_map;
+  THD *thd= ha_thd();
+  sql_mode_t saved_sql_mode= thd->variables.sql_mode;
+  bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
+  DBUG_ENTER("ha_partition::write_row");
+  DBUG_ASSERT(buf == m_rec0);
+
+  /*
+    If we have an auto_increment column and we are writing a changed row
+    or a new row, then update the auto_increment value in the record.
+  */
+  if (have_auto_increment)
+  {
+    /*
+      For bulk access requests, we update auto_increment
+      in pre_write_row()
+    */
+    if (!bulk_access_executing || !bulk_access_info_exec_tgt->called)
+    {
+      if (!part_share->auto_inc_initialized &&
+          !table_share->next_number_keypart)
+      {
+        /*
+          If auto_increment in table_share is not initialized, start by
+          initializing it.
+        */
+        info(HA_STATUS_AUTO);
+      }
+      error= update_auto_increment();
+
+      /*
+        If we have failed to set the auto-increment value for this row,
+        it is highly likely that we will not be able to insert it into
+        the correct partition. We must check and fail if necessary.
+      */
+      if (error)
+        goto exit;
+    }
+
+    /*
+      Don't let the partitions handler generate the auto_increment value.
+      If a partitions handler would change the value, then it might not
+      match the partition any longer.
+      This can occur with 'SET INSERT_ID = 0; INSERT (NULL)',
+      so allow this by adding 'MODE_NO_AUTO_VALUE_ON_ZERO' to sql_mode.
+      The partitions handler::next_insert_id must always be 0. Otherwise
+      we need to forward release_auto_increment, or reset it for all
+      partitions.
+    */
+    if (table->next_number_field->val_int() == 0)
+    {
+      table->auto_increment_field_not_null= TRUE;
+      thd->variables.sql_mode|= MODE_NO_AUTO_VALUE_ON_ZERO;
+    }
+  }
+
+  old_map= dbug_tmp_use_all_columns(table, table->read_set);
+  error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
+  dbug_tmp_restore_column_map(table->read_set, old_map);
+  if (unlikely(error))
+  {
+    m_part_info->err_value= func_value;
+    goto exit;
+  }
   if (!bitmap_is_set(&(m_part_info->lock_partitions), part_id))
   {
     DBUG_PRINT("info", ("Write to non-locked partition %u (func_value: %ld)",
@@ -4775,6 +4963,137 @@ int ha_partition::end_bulk_insert()
 /****************************************************************************
                 MODULE full table scan
 ****************************************************************************/
+/**
+  Initialize engine for random bulk access reads
+
+  SYNOPSIS
+    ha_partition::pre_rnd_init()
+    scan	0  Initialize for random reads through rnd_pos()
+		      1  Initialize for random scan through rnd_next()
+
+  RETURN VALUE
+    >0          Error code
+    0           Success
+
+  DESCRIPTION
+    pre_rnd_init() is called during a bulk access request when the server
+    wants the storage engine to do a table scan or when the server
+    wants to access data through rnd_pos.
+
+    When scan is used we will scan one handler partition at a time.
+    When preparing for rnd_pos we will init all handler partitions.
+    No extra cache handling is needed when scanning is not performed.
+
+    Before initializing we will call pre_rnd_end to ensure that we
+    clean up from any previous incarnation of a table scan.
+*/
+
+int ha_partition::pre_rnd_init(bool scan)
+{
+  int error;
+  uint i= 0;
+  uint32 part_id;
+  DBUG_ENTER("ha_partition::pre_rnd_init");
+
+  /*
+    For operations that may need to change data, we may need to extend
+    read_set.
+  */
+  if (get_lock_type() == F_WRLCK)
+  {
+    /*
+      If write_set contains any of the fields used in partition and
+      subpartition expression, we need to set all bits in read_set because
+      the row may need to be inserted in a different [sub]partition. In
+      other words update_row() can be converted into write_row(), which
+      requires a complete record.
+    */
+    if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
+                              table->write_set))
+      bitmap_set_all(table->read_set);
+    else
+    {
+      /*
+        Some handlers only read fields as specified by the bitmap for the
+        read set. For partitioned handlers we always require that the
+        fields of the partition functions are read such that we can
+        calculate the partition id to place updated and deleted records.
+      */
+      bitmap_union(table->read_set, &m_part_info->full_part_field_set);
+    }
+  }
+
+  /* Now we see what the index of our first important partition is */
+  DBUG_PRINT("info", ("m_part_info->read_partitions: 0x%lx",
+             (long)m_part_info->read_partitions.bitmap));
+  part_id= bitmap_get_first_set(&(m_part_info->read_partitions));
+  DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));
+
+  if (MY_BIT_NONE == part_id)
+  {
+    error= 0;
+    goto err1;
+  }
+
+  /*
+    We have a partition and we are scanning with pre_rnd_next()
+    so we bump our cache
+  */
+  DBUG_PRINT("info", ("rnd_init on partition %d", part_id));
+  if (scan)
+  {
+    /*
+      pre_rnd_end() is needed for partitioning to reset internal data if scan
+      is already in use
+    */
+    pre_rnd_end();
+    late_extra_cache(part_id);
+    m_index_scan_type = partition_no_index_scan;
+    for (i= part_id; i < m_tot_parts; i++)
+    {
+      if (bitmap_is_set(&(m_part_info->read_partitions), i))
+      {
+        if ((error= m_file[i]->ha_pre_rnd_init(scan)))
+          goto err2;
+        bitmap_set_bit(&bulk_access_exec_bitmap, i);
+      }
+    }
+  }
+  else
+  {
+    for (i= part_id; i < m_tot_parts; i++)
+    {
+      if (bitmap_is_set(&(m_part_info->read_partitions), i))
+      {
+        if ((error= m_file[i]->ha_pre_rnd_init(scan)))
+          goto err;
+        bitmap_set_bit(&bulk_access_exec_bitmap, i);
+      }
+    }
+  }
+  m_scan_value= scan;
+  m_part_spec.start_part= part_id;
+  m_part_spec.end_part= m_tot_parts - 1;
+  m_rnd_init_and_first = TRUE;
+  DBUG_PRINT("info", ("m_scan_value=%d", m_scan_value));
+  DBUG_RETURN(0);
+
+err2:
+  late_extra_no_cache(part_id);
+err:
+  for (;
+       part_id < i;
+       part_id= bitmap_get_next_set(&m_part_info->read_partitions, part_id))
+  {
+    m_file[part_id]->ha_pre_rnd_end();
+  }
+err1:
+  m_scan_value= 2;
+  m_part_spec.start_part= NO_CURRENT_PART_ID;
+  DBUG_RETURN(error);
+}
+
+
 /*
   Initialize engine for random reads
 
@@ -4898,6 +5217,48 @@ int ha_partition::rnd_init(bool scan)
 }
 
 
+/**
+  End of a table scan during a bulk access request
+
+  SYNOPSIS
+    pre_rnd_end()
+
+  RETURN VALUE
+    >0          Error code
+    0           Success
+*/
+
+int ha_partition::pre_rnd_end()
+{
+  handler **file;
+  DBUG_ENTER("ha_partition::pre_rnd_end");
+  switch (m_scan_value) {
+  case 2:                                       // Error
+    break;
+  case 1:                                       // Table scan
+    if (NO_CURRENT_PART_ID != m_part_spec.start_part)
+      late_extra_no_cache(m_part_spec.start_part);
+    file= m_file;
+    do
+    {
+      if (bitmap_is_set(&(m_part_info->read_partitions), (file - m_file)))
+        (*file)->ha_pre_rnd_end();
+    } while (*(++file));
+    break;
+  case 0:
+    uint i;
+    for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+         i < m_tot_parts;
+         i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+      m_file[i]->ha_pre_rnd_end();
+    break;
+  }
+  m_scan_value= 2;
+  m_part_spec.start_part= NO_CURRENT_PART_ID;
+  DBUG_RETURN(0);
+}
+
+
 /*
   End of a table scan
 
@@ -4934,6 +5295,7 @@ int ha_partition::rnd_end()
   DBUG_RETURN(0);
 }
 
+
 /*
   read next row during full table scan (scan in random row order)
 
@@ -4978,10 +5340,13 @@ int ha_partition::rnd_next(uchar *buf)
 
   if (m_rnd_init_and_first)
   {
-    m_rnd_init_and_first= FALSE;
-    error= handle_pre_scan(FALSE, check_parallel_search());
-    if (m_pre_calling || error)
-      DBUG_RETURN(error);
+    if (!bulk_access_executing)
+    {
+      m_rnd_init_and_first= FALSE;
+      error= handle_pre_scan(FALSE, check_parallel_search());
+      if (m_pre_calling || error)
+        DBUG_RETURN(error);
+    }
   }
 
   file= m_file[part_id];
@@ -5232,16 +5597,108 @@ bool ha_partition::init_record_priority_queue()
   Destroy the ordered record buffer and the priority queue.
 */
 
-void ha_partition::destroy_record_priority_queue()
-{
-  DBUG_ENTER("ha_partition::destroy_record_priority_queue");
-  if (m_ordered_rec_buffer)
+void ha_partition::destroy_record_priority_queue()
+{
+  DBUG_ENTER("ha_partition::destroy_record_priority_queue");
+  if (m_ordered_rec_buffer)
+  {
+    delete_queue(&m_queue);
+    my_free(m_ordered_rec_buffer);
+    m_ordered_rec_buffer= NULL;
+  }
+  DBUG_VOID_RETURN;
+}
+
+
+/**
+  Initialize handler before start of index scan
+
+  SYNOPSIS
+    pre_index_init()
+    inx                Index number
+    sorted             Whether rows are to be returned in sorted order
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    pre_index_init is called during a bulk access request before starting
+    an index scan.
+*/
+
+int ha_partition::pre_index_init(uint inx, bool sorted)
+{
+  int error= 0;
+  uint i;
+  DBUG_ENTER("ha_partition::pre_index_init");
+
+  DBUG_PRINT("info", ("inx %u sorted %u", inx, sorted));
+  active_index= inx;
+  m_part_spec.start_part= NO_CURRENT_PART_ID;
+  m_start_key.length= 0;
+  m_ordered= sorted;
+  m_curr_key_info[0]= table->key_info+inx;
+  if (m_pkey_is_clustered && table->s->primary_key != MAX_KEY)
+  {
+    /*
+      if PK is clustered, then the key cmp must use the pk to
+      differentiate between equal key in given index.
+    */
+    DBUG_PRINT("info", ("Clustered pk, using pk as secondary cmp"));
+    m_curr_key_info[1]= table->key_info+table->s->primary_key;
+    m_curr_key_info[2]= NULL;
+  }
+  else
+    m_curr_key_info[1]= NULL;
+  /*
+    Some handlers only read fields as specified by the bitmap for the
+    read set. For partitioned handlers we always require that the
+    fields of the partition functions are read such that we can
+    calculate the partition id to place updated and deleted records.
+    But this is only required for operations that may need to change data.
+  */
+  if (get_lock_type() == F_WRLCK)
+    bitmap_union(table->read_set, &m_part_info->full_part_field_set);
+  if (sorted)
+  {
+    /*
+      An ordered scan is requested. We must make sure all fields of the
+      used index are in the read set, as partitioning requires them for
+      sorting (see ha_partition::handle_ordered_index_scan).
+
+      The SQL layer may request an ordered index scan without having index
+      fields in the read set when
+       - it needs to do an ordered scan over an index prefix.
+       - it evaluates ORDER BY with SELECT COUNT(*) FROM t1.
+
+      TODO: handle COUNT(*) queries via unordered scan.
+    */
+    KEY **key_info= m_curr_key_info;
+    do
+    {
+      for (i= 0; i < (*key_info)->user_defined_key_parts; i++)
+        bitmap_set_bit(table->read_set,
+                       (*key_info)->key_part[i].field->field_index);
+    } while (*(++key_info));
+  }
+  for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+       i < m_tot_parts;
+       i= bitmap_get_next_set(&m_part_info->read_partitions, i))
   {
-    delete_queue(&m_queue);
-    my_free(m_ordered_rec_buffer);
-    m_ordered_rec_buffer= NULL;
+    if ((error= m_file[i]->ha_pre_index_init(inx, sorted)))
+      goto error;
+    bitmap_set_bit(&bulk_access_exec_bitmap, i);
   }
-  DBUG_VOID_RETURN;
+  DBUG_RETURN(error);
+
+error:
+  uint j;
+  for (j= bitmap_get_first_set(&m_part_info->read_partitions);
+       j < i;
+       j= bitmap_get_next_set(&m_part_info->read_partitions, j))
+    (void)m_file[j]->ha_pre_index_end();
+  DBUG_RETURN(error);
 }
 
 
@@ -5392,6 +5849,247 @@ int ha_partition::index_end()
 }
 
 
+/**
+  End of index scan during a bulk access request
+
+  SYNOPSIS
+    pre_index_end()
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    pre_index_end is called to do cleanup at the end of an index scan
+    during a bulk access request.
+*/
+
+int ha_partition::pre_index_end()
+{
+  int error= 0;
+  uint i;
+  DBUG_ENTER("ha_partition::pre_index_end");
+
+  active_index= MAX_KEY;
+  m_part_spec.start_part= NO_CURRENT_PART_ID;
+  for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+       i < m_tot_parts;
+       i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+  {
+    int tmp;
+    if ((tmp= m_file[i]->ha_pre_index_end()))
+      error= tmp;
+  }
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Start an ordered index pre-scan using a start key.
+
+  SYNOPSIS
+    pre_index_read_map()
+    key                    Key parts in consecutive order
+    keypart_map            Which part of key is used
+    find_flag              What type of key condition is used
+    use_parallel           Is it a parallel search
+
+  RETURN VALUE
+    >0                     Error code
+    0                      Success
+
+  DESCRIPTION
+    pre_index_read_map starts an ordered index pre-scan using
+    a start key. The server will check the end key on its own.
+    Thus to function properly the partitioned handler needs to ensure
+    that it delivers records in the sort order of the server. This is
+    particularly used in conjunction with multi read ranges.
+*/
+
+int ha_partition::pre_index_read_map(const uchar *key,
+                                     key_part_map keypart_map,
+                                     enum ha_rkey_function find_flag,
+                                     bool use_parallel)
+{
+  int error;
+  DBUG_ENTER("ha_partition::pre_index_read_map");
+  m_pre_calling= TRUE;
+  m_pre_call_use_parallel= use_parallel;
+  error = index_read_map(table->record[0], key, keypart_map, find_flag);
+  m_pre_calling= FALSE;
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Start an ordered index pre-scan starting from the leftmost record
+  and return the first record
+
+  SYNOPSIS
+    pre_index_first()
+    use_parallel        Is it a parallel search
+
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+
+  DESCRIPTION
+    pre_index_first() asks for the first key in the index.
+    There is no start key since the scan starts from the leftmost entry.
+*/
+
+int ha_partition::pre_index_first(bool use_parallel)
+{
+  int error;
+  DBUG_ENTER("ha_partition::pre_index_first");
+  m_pre_calling= TRUE;
+  m_pre_call_use_parallel= use_parallel;
+  error = index_first(table->record[0]);
+  m_pre_calling= FALSE;
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Start an ordered index pre-scan starting from the rightmost record
+  and return the first record
+
+  SYNOPSIS
+    pre_index_last()
+    use_parallel        Is it a parallel search
+
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+
+  DESCRIPTION
+    pre_index_last() asks for the last key in the index.
+    There is no start key since the scan starts from the rightmost entry.
+*/
+
+int ha_partition::pre_index_last(bool use_parallel)
+{
+  int error;
+  DBUG_ENTER("ha_partition::pre_index_last");
+  m_pre_calling= TRUE;
+  m_pre_call_use_parallel= use_parallel;
+  error = index_last(table->record[0]);
+  m_pre_calling= FALSE;
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Return the next record during an ordered index pre-scan for
+  multi range read
+
+  SYNOPSIS
+    pre_multi_range_read_next()
+    use_parallel        Is it a parallel search
+
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+*/
+
+int ha_partition::pre_multi_range_read_next(bool use_parallel)
+{
+  int error;
+  range_id_t range_info;
+  DBUG_ENTER("ha_partition::pre_multi_range_read_next");
+  m_pre_calling= TRUE;
+  m_pre_call_use_parallel= use_parallel;
+  error = multi_range_read_next(&range_info);
+  m_pre_calling= FALSE;
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Start a read of one range with start and end key for the
+  pre-scan of an index
+
+  SYNOPSIS
+    pre_read_range_first()
+    start_key           Specification of start key
+    end_key             Specification of end key
+    eq_range_arg        Is it equal range
+    sorted              Should records be returned in sorted order
+    use_parallel        Is it a parallel search
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+*/
+
+int ha_partition::pre_read_range_first(const key_range *start_key,
+                                       const key_range *end_key,
+                                       bool eq_range, bool sorted,
+                                       bool use_parallel)
+{
+  int error;
+  DBUG_ENTER("ha_partition::pre_read_range_first");
+  m_pre_calling= TRUE;
+  m_pre_call_use_parallel= use_parallel;
+  error = read_range_first(start_key, end_key, eq_range, sorted);
+  m_pre_calling= FALSE;
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Return the next record from the FT result set during an ordered index
+  pre-scan
+
+  SYNOPSIS
+    pre_ft_read()
+    use_parallel        Is it a parallel search
+
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+*/
+
+int ha_partition::pre_ft_read(bool use_parallel)
+{
+  int error;
+  DBUG_ENTER("ha_partition::pre_ft_read");
+  m_pre_calling= TRUE;
+  m_pre_call_use_parallel= use_parallel;
+  error = ft_read(table->record[0]);
+  m_pre_calling= FALSE;
+  DBUG_RETURN(error);
+}
+
+
+/**
+  Read next row during the pre-scan of an entire table
+  (scan in random row order)
+
+  SYNOPSIS
+    pre_rnd_next()
+    use_parallel        Is it a parallel search
+
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+
+  DESCRIPTION
+    This is called for each row of the table scan.
+*/
+
+int ha_partition::pre_rnd_next(bool use_parallel)
+{
+  int error;
+  DBUG_ENTER("ha_partition::pre_rnd_next");
+  m_pre_calling= TRUE;
+  m_pre_call_use_parallel= use_parallel;
+  error = rnd_next(table->record[0]);
+  m_pre_calling= FALSE;
+  DBUG_RETURN(error);
+}
+
+
 /*
   Read one record in an index scan and start an index scan
 
@@ -5566,9 +6264,13 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key)
       The unordered index scan will use the partition set created.
     */
     DBUG_PRINT("info", ("doing unordered scan"));
-    error= handle_pre_scan(FALSE, FALSE);
-    if (!error)
-      error= handle_unordered_scan_next_partition(buf);
+    if (!bulk_access_executing)
+    {
+      error= handle_pre_scan(FALSE, FALSE);
+      if (m_pre_calling || error)
+        DBUG_RETURN(error);
+    }
+    error= handle_unordered_scan_next_partition(buf);
   }
   else
   {
@@ -5662,9 +6364,13 @@ int ha_partition::common_first_last(uchar *buf)
   if (!m_ordered_scan_ongoing &&
       m_index_scan_type != partition_index_last)
   {
-    if ((error= handle_pre_scan(FALSE, check_parallel_search())))
-      return error;
-   return handle_unordered_scan_next_partition(buf);
+    if (!bulk_access_executing)
+    {
+      error = handle_pre_scan(FALSE, FALSE);
+      if (m_pre_calling || error)
+        return error;
+    }
+    return handle_unordered_scan_next_partition(buf);
   }
   return handle_ordered_index_scan(buf, FALSE);
 }
@@ -6662,11 +7368,6 @@ int ha_partition::handle_pre_scan(bool reverse_order, bool use_parallel)
     case partition_index_last:
       error= file->pre_index_last(use_parallel);
       break;
-    case partition_index_read_last:
-      error= file->pre_index_read_last_map(m_start_key.key,
-                                       m_start_key.keypart_map,
-                                       use_parallel);
-      break;
     case partition_read_range:
       error= file->pre_read_range_first(m_start_key.key? &m_start_key: NULL,
                                     end_range, eq_range, TRUE, use_parallel);
@@ -6691,6 +7392,8 @@ int ha_partition::handle_pre_scan(bool reverse_order, bool use_parallel)
     if (error)
       DBUG_RETURN(error);
   }
+  if (bulk_access_started)
+    bulk_access_info_current->called = TRUE;
   table->status= 0;
   DBUG_RETURN(0);
 }
@@ -6912,12 +7615,14 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
       (error= loop_extra(HA_EXTRA_STARTING_ORDERED_INDEX_SCAN)))
     DBUG_RETURN(error);
 
-   if (m_pre_calling)
-     error= handle_pre_scan(reverse_order, m_pre_call_use_parallel);
-   else
-     error= handle_pre_scan(reverse_order, check_parallel_search());
-  if (error)
-    DBUG_RETURN(error);
+  if (!bulk_access_executing)
+  {
+    error = handle_pre_scan(reverse_order,
+                            (m_pre_calling ? m_pre_call_use_parallel
+                                           : check_parallel_search()));
+    if (m_pre_calling || error)
+      DBUG_RETURN(error);
+  }
 
   if (m_key_not_found)
   {
@@ -7206,10 +7911,14 @@ int ha_partition::handle_ordered_index_scan_key_not_found()
 int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
 {
   int error;
+  DBUG_ENTER("ha_partition::handle_ordered_next");
+
+  if (m_top_entry == NO_CURRENT_PART_ID)
+    DBUG_RETURN(HA_ERR_END_OF_FILE);
+
   uint part_id= m_top_entry;
   uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS;
   handler *file;
-  DBUG_ENTER("ha_partition::handle_ordered_next");
 
   if (m_key_not_found)
   {
@@ -7423,11 +8132,15 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
 int ha_partition::handle_ordered_prev(uchar *buf)
 {
   int error;
+  DBUG_ENTER("ha_partition::handle_ordered_prev");
+  DBUG_PRINT("enter", ("partition: %p", this));
+
+  if (m_top_entry == NO_CURRENT_PART_ID)
+    DBUG_RETURN(HA_ERR_END_OF_FILE);
+
   uint part_id= m_top_entry;
   uchar *rec_buf= queue_top(&m_queue) + PARTITION_BYTES_IN_POS;
   handler *file= m_file[part_id];
-  DBUG_ENTER("ha_partition::handle_ordered_prev");
-  DBUG_PRINT("enter", ("partition: %p", this));
 
   if ((error= file->ha_index_prev(rec_buf)))
   {
@@ -8239,25 +8952,33 @@ int ha_partition::extra(enum ha_extra_function operation)
   case HA_EXTRA_ATTACH_CHILDREN:
   {
     int result;
-    uint num_locks= 0;
+    uint num_locks;
+    ulonglong additional_table_flags;
     handler **file;
     if ((result = loop_extra(operation)))
       DBUG_RETURN(result);
 
     /* Recalculate lock count as each child may have different set of locks */
     num_locks = 0;
+    additional_table_flags = (HA_HAS_RECORDS | HA_CAN_BULK_ACCESS);
     file = m_file;
     do
     {
       num_locks+= (*file)->lock_count();
+      additional_table_flags &= ~((ulonglong)
+                                  ((*file)->ha_table_flags() ^
+                                   (HA_HAS_RECORDS | HA_CAN_BULK_ACCESS)));
     } while (*(++file));
 
     m_num_locks= num_locks;
+    cached_table_flags |= additional_table_flags;
     break;
   }
   case HA_EXTRA_IS_ATTACHED_CHILDREN:
     DBUG_RETURN(loop_extra(operation));
   case HA_EXTRA_DETACH_CHILDREN:
+    cached_table_flags &= ~((ulonglong)
+                            (HA_HAS_RECORDS | HA_CAN_BULK_ACCESS));
     DBUG_RETURN(loop_extra(operation));
   case HA_EXTRA_MARK_AS_LOG_TABLE:
   /*
@@ -8307,6 +9028,21 @@ int ha_partition::reset(void)
       result= tmp;
   }
   bitmap_clear_all(&m_partitions_to_reset);
+  if (bulk_access_info_first)
+  {
+    PARTITION_BULK_ACCESS_INFO *bulk_access_info = bulk_access_info_first;
+    while (bulk_access_info && bulk_access_info->used)
+    {
+      bulk_access_info->used = FALSE;
+      bulk_access_info = bulk_access_info->next;
+    }
+    bitmap_clear_all(&bulk_access_exec_bitmap);
+  }
+  bulk_access_started = FALSE;
+  bulk_access_executing = FALSE;
+  bulk_access_pre_called = FALSE;
+  bulk_access_info_current = NULL;
+  bulk_access_info_exec_tgt = NULL;
   DBUG_RETURN(result);
 }
 
@@ -10184,6 +10920,35 @@ void ha_partition::cond_pop()
   DBUG_VOID_RETURN;
 }
 
+
+/**
+  Execute a bulk access request
+
+  SYNOPSIS
+    bulk_req_exec()
+
+  RETURN VALUE
+    NONE
+*/
+
+void ha_partition::bulk_req_exec()
+{
+  uint i;
+  handler **file;
+  DBUG_ENTER("ha_partition::bulk_req_exec");
+  DBUG_PRINT("info", ("partition this=%p", this));
+  for (file= m_file, i= 0; *file; ++file, ++i)
+  {
+    if (bitmap_is_set(&bulk_access_exec_bitmap, i))
+    {
+      (*file)->bulk_req_exec();
+    }
+  }
+  bitmap_clear_all(&bulk_access_exec_bitmap);
+  DBUG_VOID_RETURN;
+}
+
+
 void ha_partition::clear_top_table_fields()
 {
   handler **file;
@@ -10196,6 +10961,60 @@ void ha_partition::clear_top_table_fields()
 }
 
 
+/**
+  Allocate and initialize a bulk access request info structure
+
+  SYNOPSIS
+    create_bulk_access_info()
+
+  RETURN VALUE
+    New bulk access request info structure
+*/
+
+PARTITION_BULK_ACCESS_INFO *ha_partition::create_bulk_access_info()
+{
+  PARTITION_BULK_ACCESS_INFO *bulk_access_info;
+  void **tmp_info;
+  DBUG_ENTER("ha_partition::create_bulk_access_info");
+  DBUG_PRINT("info", ("partition this=%p", this));
+  if (!(bulk_access_info = (PARTITION_BULK_ACCESS_INFO *)
+                           my_multi_malloc(MYF(MY_WME),
+                                           &bulk_access_info,
+                                           sizeof(PARTITION_BULK_ACCESS_INFO),
+                                           &tmp_info,
+                                           sizeof(void *) * m_tot_parts,
+                                           NullS)))
+    goto error_bulk_malloc;
+  bulk_access_info->info = tmp_info;
+  bulk_access_info->next = NULL;
+  bulk_access_info->used = FALSE;
+  DBUG_RETURN(bulk_access_info);
+
+error_bulk_malloc:
+  DBUG_RETURN(NULL);
+}
+
+
+/**
+  Free a bulk access request info structure
+
+  SYNOPSIS
+    delete_bulk_access_info()
+
+  RETURN VALUE
+    NONE
+*/
+
+void ha_partition::delete_bulk_access_info(
+  PARTITION_BULK_ACCESS_INFO *bulk_access_info)
+{
+  DBUG_ENTER("ha_partition::delete_bulk_access_info");
+  DBUG_PRINT("info", ("partition this=%p", this));
+  my_free(bulk_access_info);
+  DBUG_VOID_RETURN;
+}
+
+
 struct st_mysql_storage_engine partition_storage_engine=
 { MYSQL_HANDLERTON_INTERFACE_VERSION };
 
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index be98eaafb01..0159e78030a 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -31,6 +31,16 @@ enum partition_keywords
 #define PARTITION_BYTES_IN_POS 2
 
 
+typedef struct st_partition_bulk_access_info
+{
+  uint                          sequence_num;
+  bool                          used;
+  bool                          called;
+  void                          **info;
+  st_partition_bulk_access_info *next;
+} PARTITION_BULK_ACCESS_INFO;
+
+
 /** Struct used for partition_name_hash */
 typedef struct st_part_name_def
 {
@@ -294,6 +304,14 @@ class ha_partition :public handler
   enum_monotonicity_info m_part_func_monotonicity_info;
   bool                m_pre_calling;
   bool                m_pre_call_use_parallel;
+  /* Keep track of bulk access requests */
+  bool                bulk_access_started;
+  bool                bulk_access_executing;
+  bool                bulk_access_pre_called;
+  PARTITION_BULK_ACCESS_INFO *bulk_access_info_first;
+  PARTITION_BULK_ACCESS_INFO *bulk_access_info_current;
+  PARTITION_BULK_ACCESS_INFO *bulk_access_info_exec_tgt;
+  MY_BITMAP           bulk_access_exec_bitmap;
   /** keep track of locked partitions */
   MY_BITMAP m_locked_partitions;
   /** Stores shared auto_increment etc. */
@@ -460,6 +478,7 @@ class ha_partition :public handler
     and these go directly to the handlers supporting transactions
     -------------------------------------------------------------------------
   */
+  virtual int additional_lock(THD *thd, enum thr_lock_type lock_type);
   virtual THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to,
 				     enum thr_lock_type lock_type);
   virtual int external_lock(THD * thd, int lock_type);
@@ -517,6 +536,7 @@ class ha_partition :public handler
     start_bulk_insert and end_bulk_insert is called before and after a
     number of calls to write_row.
   */
+  virtual int pre_write_row(uchar *buf);
   virtual int write_row(uchar * buf);
   virtual int update_row(const uchar * old_data, uchar * new_data);
   virtual int delete_row(const uchar * buf);
@@ -574,7 +594,9 @@ class ha_partition :public handler
     position it to the start of the table, no need to deallocate
     and allocate it again
   */
+  virtual int pre_rnd_init(bool scan);
   virtual int rnd_init(bool scan);
+  virtual int pre_rnd_end();
   virtual int rnd_end();
   virtual int rnd_next(uchar * buf);
   virtual int rnd_pos(uchar * buf, uchar * pos);
@@ -613,10 +635,25 @@ class ha_partition :public handler
     index_init initializes an index before using it and index_end does
     any end processing needed.
   */
+  virtual int pre_index_read_map(const uchar *key,
+                                 key_part_map keypart_map,
+                                 enum ha_rkey_function find_flag,
+                                 bool use_parallel);
+  virtual int pre_index_first(bool use_parallel);
+  virtual int pre_index_last(bool use_parallel);
+  virtual int pre_multi_range_read_next(bool use_parallel);
+  virtual int pre_read_range_first(const key_range *start_key,
+                                   const key_range *end_key,
+                                   bool eq_range, bool sorted,
+                                   bool use_parallel);
+  virtual int pre_ft_read(bool use_parallel);
+  virtual int pre_rnd_next(bool use_parallel);
   virtual int index_read_map(uchar * buf, const uchar * key,
                              key_part_map keypart_map,
                              enum ha_rkey_function find_flag);
+  virtual int pre_index_init(uint idx, bool sorted);
   virtual int index_init(uint idx, bool sorted);
+  virtual int pre_index_end();
   virtual int index_end();
 
   /**
@@ -796,6 +833,7 @@ class ha_partition :public handler
     The next method will never be called if you do not implement indexes.
   */
   virtual double read_time(uint index, uint ranges, ha_rows rows);
+  virtual void bulk_req_exec();
   /*
     For the given range how many records are estimated to be in this range.
     Used by optimiser to calculate cost of using a particular index.
@@ -1359,6 +1397,10 @@ class ha_partition :public handler
     return h;
   }
 
+  virtual PARTITION_BULK_ACCESS_INFO *create_bulk_access_info();
+  virtual void delete_bulk_access_info(
+    PARTITION_BULK_ACCESS_INFO *bulk_access_info);
+
   friend int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2);
 };
 #endif /* HA_PARTITION_INCLUDED */
diff --git a/sql/handler.h b/sql/handler.h
index 1fed7fc4754..ffaef39f329 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -43,6 +43,10 @@
 #include <keycache.h>
 #include <mysql/psi/mysql_table.h>
 
+#define INFO_KIND_BULK_ACCESS_BEGIN   105
+#define INFO_KIND_BULK_ACCESS_CURRENT 106
+#define INFO_KIND_BULK_ACCESS_END     107
+
 class Alter_info;
 class Virtual_column_info;
 
@@ -269,6 +273,7 @@ enum enum_alter_inplace_result {
 
 /* The following is for partition handler */
 #define HA_CAN_MULTISTEP_MERGE (1LL << 47)
+#define HA_CAN_BULK_ACCESS     (1LL << 48)
 
 /* bits in index_flags(index_number) for what you can do with index */
 #define HA_READ_NEXT            1       /* TODO really use this flag */
@@ -2664,7 +2669,9 @@ class handler :public Sql_alloc
   /** Length of ref (1-8 or the clustered key length) */
   uint ref_length;
   FT_INFO *ft_handler;
-  enum {NONE=0, INDEX, RND} inited;
+  enum init_stat { NONE=0, INDEX, RND };
+  init_stat inited;
+  init_stat pre_inited;
 
   const COND *pushed_cond;
   /**
@@ -2763,7 +2770,7 @@ class handler :public Sql_alloc
     key_used_on_scan(MAX_KEY),
     active_index(MAX_KEY),
     ref_length(sizeof(my_off_t)),
-    ft_handler(0), inited(NONE),
+    ft_handler(0), inited(NONE), pre_inited(NONE),
     pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
     tracker(NULL),
     pushed_idx_cond(NULL),
@@ -2782,6 +2789,7 @@ class handler :public Sql_alloc
   {
     DBUG_ASSERT(m_lock_type == F_UNLCK);
     DBUG_ASSERT(inited == NONE);
+    DBUG_ASSERT(pre_inited == NONE);
   }
   virtual handler *clone(const char *name, MEM_ROOT *mem_root);
   /** This is called after create to allow us to set up cached variables */
@@ -2806,6 +2814,15 @@ class handler :public Sql_alloc
     }
     DBUG_RETURN(result);
   }
+  int ha_pre_index_init(uint idx, bool sorted)
+  {
+    int result;
+    DBUG_ENTER("ha_pre_index_init");
+    DBUG_ASSERT(pre_inited==NONE);
+    if (!(result= pre_index_init(idx, sorted)))
+      pre_inited=INDEX;
+    DBUG_RETURN(result);
+  }
   int ha_index_end()
   {
     DBUG_ENTER("ha_index_end");
@@ -2815,6 +2832,13 @@ class handler :public Sql_alloc
     end_range=    NULL;
     DBUG_RETURN(index_end());
   }
+  int ha_pre_index_end()
+  {
+    DBUG_ENTER("ha_pre_index_end");
+    DBUG_ASSERT(pre_inited==INDEX);
+    pre_inited=NONE;
+    DBUG_RETURN(pre_index_end());
+  }
   /* This is called after index_init() if we need to do a index scan */
   virtual int prepare_index_scan() { return 0; }
   virtual int prepare_index_key_scan_map(const uchar * key, key_part_map keypart_map)
@@ -2837,6 +2861,14 @@ class handler :public Sql_alloc
     end_range= NULL;
     DBUG_RETURN(result);
   }
+  int ha_pre_rnd_init(bool scan)
+  {
+    int result;
+    DBUG_ENTER("ha_pre_rnd_init");
+    DBUG_ASSERT(pre_inited==NONE || (pre_inited==RND && scan));
+    pre_inited= (result= pre_rnd_init(scan)) ? NONE: RND;
+    DBUG_RETURN(result);
+  }
   int ha_rnd_end()
   {
     DBUG_ENTER("ha_rnd_end");
@@ -2845,6 +2877,13 @@ class handler :public Sql_alloc
     end_range= NULL;
     DBUG_RETURN(rnd_end());
   }
+  int ha_pre_rnd_end()
+  {
+    DBUG_ENTER("ha_pre_rnd_end");
+    DBUG_ASSERT(pre_inited==RND);
+    pre_inited=NONE;
+    DBUG_RETURN(pre_rnd_end());
+  }
   int ha_rnd_init_with_error(bool scan) __attribute__ ((warn_unused_result));
   int ha_reset();
   /* this is necessary in many places, e.g. in HANDLER command */
@@ -2852,6 +2891,12 @@ class handler :public Sql_alloc
   {
     return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0;
   }
+  int ha_pre_index_or_rnd_end()
+  {
+    return pre_inited == INDEX ? ha_pre_index_end()
+                               : pre_inited == RND ?
+                                 ha_pre_rnd_end() : 0;
+  }
   /**
     The cached_table_flags is set at ha_open and ha_external_lock
   */
@@ -2864,8 +2909,10 @@ class handler :public Sql_alloc
   */
   int ha_external_lock(THD *thd, int lock_type);
   int ha_write_row(uchar * buf);
+  int ha_pre_write_row(uchar * buf) { return pre_write_row(buf); }
   int ha_update_row(const uchar * old_data, uchar * new_data);
   int ha_delete_row(const uchar * buf);
+  virtual void bulk_req_exec() {}
   void ha_release_auto_increment();
 
   int check_collation_compatibility();
@@ -3100,17 +3147,6 @@ class handler :public Sql_alloc
    { return 0; }
   virtual int pre_index_last(bool use_parallel)
    { return 0; }
-  virtual int pre_index_read_last_map(const uchar *key,
-                                      key_part_map keypart_map,
-                                      bool use_parallel)
-   { return 0; }
-/*
-  virtual int pre_read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
-                                         KEY_MULTI_RANGE *ranges,
-                                         uint range_count,
-                                         bool sorted, HANDLER_BUFFER *buffer,
-                                         bool use_parallel);
-*/
   virtual int pre_multi_range_read_next(bool use_parallel)
   { return 0; }
   virtual int pre_read_range_first(const key_range *start_key,
@@ -3470,6 +3506,8 @@ class handler :public Sql_alloc
   virtual THR_LOCK_DATA **store_lock(THD *thd,
 				     THR_LOCK_DATA **to,
 				     enum thr_lock_type lock_type)=0;
+  virtual int additional_lock(THD *thd, enum thr_lock_type lock_type)
+  { return 0; }
 
   /** Type of table for caching query */
   virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; }
@@ -4025,7 +4063,9 @@ class handler :public Sql_alloc
   virtual int open(const char *name, int mode, uint test_if_locked)=0;
   /* Note: ha_index_read_idx_map() may bypass index_init() */
   virtual int index_init(uint idx, bool sorted) { return 0; }
+  virtual int pre_index_init(uint idx, bool sorted) { return 0; }
   virtual int index_end() { return 0; }
+  virtual int pre_index_end() { return 0; }
   /**
     rnd_init() can be called two times without rnd_end() in between
     (it only makes sense if scan=1).
@@ -4034,11 +4074,17 @@ class handler :public Sql_alloc
     to the start of the table, no need to deallocate and allocate it again
   */
   virtual int rnd_init(bool scan)= 0;
+  virtual int pre_rnd_init(bool scan) { return 0; }
   virtual int rnd_end() { return 0; }
+  virtual int pre_rnd_end() { return 0; }
   virtual int write_row(uchar *buf __attribute__((unused)))
   {
     return HA_ERR_WRONG_COMMAND;
   }
+  virtual int pre_write_row(uchar *buf __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
 
   /**
     Update a single row.


