[Commits] Rev 2880: Added ha_write_tmp_row() for slightly faster write_row for internal temp tables. in http://bazaar.launchpad.net/~maria-captains/maria/5.3/

serg at askmonty.org serg at askmonty.org
Fri Jan 14 12:58:48 EET 2011


At http://bazaar.launchpad.net/~maria-captains/maria/5.3/

------------------------------------------------------------
revno: 2880
revision-id: sergii at pisem.net-20110114105845-ol0sfi01g52sw8y3
parent: sergii at pisem.net-20110114105439-ecmz8u2qrgwe10bm
committer: Sergei Golubchik <sergii at pisem.net>
branch nick: 5.3-monty
timestamp: Fri 2011-01-14 11:58:45 +0100
message:
  Added ha_write_tmp_row() for a slightly faster write_row for internal temp tables.
  This will also make it possible, in the future, to collect statistics on
  writes to internal temporary tables.
  
  sql/handler.h:
    Added ha_write_tmp_row()
  sql/opt_subselect.cc:
    ha_write_row -> ha_write_tmp_row
  sql/sql_class.h:
    Added ha_write_tmp_row()
  sql/sql_select.cc:
    ha_write_row -> ha_write_tmp_row
-------------- next part --------------
=== modified file 'sql/handler.h'
--- a/sql/handler.h	2010-12-13 17:01:32 +0000
+++ b/sql/handler.h	2011-01-14 10:58:45 +0000
@@ -2415,6 +2415,7 @@ class handler :public Sql_alloc
   /* XXX to be removed, see ha_partition::partition_ht() */
   virtual handlerton *partition_ht() const
   { return ht; }
+  inline int ha_write_tmp_row(uchar *buf);
 };
 
 #include "multi_range_read.h"

=== modified file 'sql/opt_subselect.cc'
--- a/sql/opt_subselect.cc	2011-01-14 10:31:09 +0000
+++ b/sql/opt_subselect.cc	2011-01-14 10:58:45 +0000
@@ -2907,7 +2907,7 @@ int do_sj_dups_weedout(THD *thd, SJ_TMP_
     }
   }
 
-  error= sjtbl->tmp_table->file->ha_write_row(sjtbl->tmp_table->record[0]);
+  error= sjtbl->tmp_table->file->ha_write_tmp_row(sjtbl->tmp_table->record[0]);
   if (error)
   {
     /* create_internal_tmp_table_from_heap will generate error if needed */

=== modified file 'sql/sql_class.h'
--- a/sql/sql_class.h	2010-12-27 22:22:05 +0000
+++ b/sql/sql_class.h	2011-01-14 10:58:45 +0000
@@ -3639,5 +3639,10 @@ inline int handler::ha_read_first_row(uc
   return error;
 }
 
+inline int handler::ha_write_tmp_row(uchar *buf)
+{
+  increment_statistics(&SSV::ha_write_count);
+  return write_row(buf);
+}
 
 #endif /* MYSQL_SERVER */

=== modified file 'sql/sql_select.cc'
--- a/sql/sql_select.cc	2011-01-14 10:54:39 +0000
+++ b/sql/sql_select.cc	2011-01-14 10:58:45 +0000
@@ -7485,7 +7485,7 @@ end_sj_materialize(JOIN *join, JOIN_TAB 
     fill_record(thd, table->field, sjm->sjm_table_cols, TRUE, FALSE);
     if (thd->is_error())
       DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
-    if ((error= table->file->ha_write_row(table->record[0])))
+    if ((error= table->file->ha_write_tmp_row(table->record[0])))
     {
       /* create_myisam_from_heap will generate error if needed */
       if (table->file->is_fatal_error(error, HA_CHECK_DUP) &&
@@ -12757,13 +12757,13 @@ create_internal_tmp_table_from_heap2(THD
   */
   while (!table->file->ha_rnd_next(new_table.record[1]))
   {
-    write_err= new_table.file->ha_write_row(new_table.record[1]);
+    write_err= new_table.file->ha_write_tmp_row(new_table.record[1]);
     DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
     if (write_err)
       goto err;
   }
   /* copy row that filled HEAP table */
-  if ((write_err=new_table.file->ha_write_row(table->record[0])))
+  if ((write_err=new_table.file->ha_write_tmp_row(table->record[0])))
   {
     if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
         !ignore_last_dupp_key_error)
@@ -14640,7 +14640,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab
     {
       int error;
       join->found_records++;
-      if ((error= table->file->ha_write_row(table->record[0])))
+      if ((error= table->file->ha_write_tmp_row(table->record[0])))
       {
         if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
           goto end;
@@ -14729,7 +14729,7 @@ end_update(JOIN *join, JOIN_TAB *join_ta
   init_tmptable_sum_functions(join->sum_funcs);
   if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
     DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
-  if ((error= table->file->ha_write_row(table->record[0])))
+  if ((error= table->file->ha_write_tmp_row(table->record[0])))
   {
     if (create_internal_tmp_table_from_heap(join->thd, table,
                                             join->tmp_table_param.start_recinfo,
@@ -14772,7 +14772,7 @@ end_unique_update(JOIN *join, JOIN_TAB *
   if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
     DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
 
-  if (!(error= table->file->ha_write_row(table->record[0])))
+  if (!(error= table->file->ha_write_tmp_row(table->record[0])))
     join->send_records++;                       // New group
   else
   {
@@ -14832,7 +14832,7 @@ end_write_group(JOIN *join, JOIN_TAB *jo
                        join->sum_funcs_end[send_group_parts]);
         if (!join->having || join->having->val_int())
         {
-          int error= table->file->ha_write_row(table->record[0]);
+          int error= table->file->ha_write_tmp_row(table->record[0]);
           if (error && 
               create_internal_tmp_table_from_heap(join->thd, table,
                                                   join->tmp_table_param.start_recinfo,
@@ -18507,7 +18507,7 @@ int JOIN::rollup_write_data(uint idx, TA
           item->save_in_result_field(1);
       }
       copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
-      if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
+      if ((write_error= table_arg->file->ha_write_tmp_row(table_arg->record[0])))
       {
         if (create_internal_tmp_table_from_heap(thd, table_arg, 
                                                 tmp_table_param.start_recinfo,



More information about the commits mailing list