Hi, Nikita,

This is a review of all three commits.

On Jul 20, Nikita Malyavin wrote:
> revision-id: ca64ddcc709 (mariadb-11.0.1-144-gca64ddcc709)
> parent(s): f34c7419cb8
> author: Nikita Malyavin
> committer: Nikita Malyavin
> timestamp: 2023-07-19 02:31:32 +0400
> message:
> 
> MDEV-31646 Online alter applies binlog cache limit to cache writes

> diff --git a/sql/log.cc b/sql/log.cc
> index afd0643fe82..7466d63d325 100644
> --- a/sql/log.cc
> +++ b/sql/log.cc
> @@ -2291,8 +2294,14 @@ int binlog_log_row_online_alter(TABLE* table, const 
> uchar *before_record,
>                           before_record, after_record);
>  
>    table->rpl_write_set= old_rpl_write_set;
> +  thd->pop_internal_handler();
> +
> +  if (unlikely(error))
> +    push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DISK_FULL,
> +                        "Broken online alter log. "
> +                        "ALTER TABLE will finish with error.");

I don't think so. Why would a DML statement get a warning about the
concurrently running ALTER TABLE? It didn't do anything wrong; I don't think
there should be a warning here.

Like, someone is running a series of DML statements, or the same statement
many times. At some point another connection runs ALTER TABLE
and suddenly those perfectly good DML statements start spewing out
warnings. ALTER ends and all is clear again - looks wrong to me

>  
> -  return unlikely(error) ? HA_ERR_RBR_LOGGING_FAILED : 0;
> +  return 0;
>  }
>  
>  static void
> @@ -2592,7 +2601,7 @@ void Event_log::set_write_error(THD *thd, bool 
> is_transactional)
>    DBUG_VOID_RETURN;
>  }
>  
> -bool Event_log::check_write_error(THD *thd)
> +bool MYSQL_BIN_LOG::check_write_error(THD *thd)

What's the difference? It's a static method anyway; FWIW, it
could be outside of any class. What's the point of moving it from one class
to another?

>  {
>    DBUG_ENTER("MYSQL_BIN_LOG::check_write_error");
>  
> @@ -3806,9 +3815,9 @@ bool MYSQL_BIN_LOG::open_index_file(const char 
> *index_file_name_arg,
>  }
>  
>  
> -bool Event_log::open(enum cache_type io_cache_type_arg)
> +bool Event_log::open(enum cache_type io_cache_type_arg, size_t buffer_size)
>  {
> -  bool error= init_io_cache(&log_file, -1, LOG_BIN_IO_SIZE, 
> io_cache_type_arg,
> +  bool error= init_io_cache(&log_file, -1, buffer_size, io_cache_type_arg,

Why? It's only used by online ALTER — where would you need
a different buffer size? Only in your DBUG_EXECUTE_IF?

>                              0, 0, MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL));
>  
>    log_state= LOG_OPENED;
> @@ -6544,7 +6562,8 @@ Event_log::flush_and_set_pending_rows_event(THD *thd, 
> Rows_log_event* event,
>      if (writer.write(pending))
>      {
>        set_write_error(thd, is_transactional);
> -      if (check_write_error(thd) && cache_data &&
> +      if (dynamic_cast<MYSQL_BIN_LOG*>(this) &&

why?

> +          MYSQL_BIN_LOG::check_write_error(thd) && cache_data &&
>            stmt_has_updated_non_trans_table(thd))
>          cache_data->set_incident();
>        delete pending;
> @@ -7721,8 +7749,11 @@ static int binlog_online_alter_end_trans(THD *thd, 
> bool all, bool commit)
>        {
>          DBUG_ASSERT(cache.cache_log.type != READ_CACHE);
>          mysql_mutex_lock(binlog->get_log_lock());
> -        error= binlog->write_cache(thd, &cache.cache_log);
> +        error= binlog->write_cache_raw(thd, &cache.cache_log);
>          mysql_mutex_unlock(binlog->get_log_lock());
> +
> +        if (unlikely(error))
> +          binlog->set_write_error(my_errno);

Shouldn't binlog->write_cache() call set_write_error() internally?
Why would the caller have to do it?

>        }
>      }
>      else if (!commit) // rollback
> @@ -7736,14 +7767,14 @@ static int binlog_online_alter_end_trans(THD *thd, 
> bool all, bool commit)
>        cache.store_prev_position();
>      }
>  
> +    thd->pop_internal_handler();
>  
>      if (error)
>      {
> -      my_error(ER_ERROR_ON_WRITE, MYF(ME_ERROR_LOG),
> -               binlog->get_name(), errno);
> -      binlog_online_alter_cleanup(thd->online_alter_cache_list,
> -                                  is_ending_transaction);
> -      DBUG_RETURN(error);
> +      push_warning_printf(thd, Sql_state_errno_level::WARN_LEVEL_WARN,
> +                          ER_ERROR_ON_WRITE, ER_THD(thd, ER_ERROR_ON_WRITE),
> +                          binlog->get_name(), errno);
> +      DBUG_ASSERT(binlog->get_write_error());

Same comment as above about not showing ALTER problems in concurrent threads.

>      }
>    }
>  
> diff --git a/sql/log.h b/sql/log.h
> index 83e323cb6fb..39512b01991 100644
> --- a/sql/log.h
> +++ b/sql/log.h
> @@ -444,17 +445,20 @@ class Event_log: public MYSQL_LOG
>   */
>  class Cache_flip_event_log: public Event_log {
>    IO_CACHE alt_buf;
> +IF_DBUG(public:,)

Yuck. I don't see you using online_write_error or ref_count
in DBUG macros.

>    IO_CACHE *current, *alt;
>    std::atomic<uint> ref_count;
> +  std::atomic<int> online_write_error;
>  public:
>  
>    Cache_flip_event_log() : Event_log(), alt_buf{},
> -                           current(&log_file), alt(&alt_buf), ref_count(1) {}
> -  bool open(enum cache_type io_cache_type_arg)
> +                           current(&log_file), alt(&alt_buf), ref_count(1),
> +                           online_write_error(0) {}
> +  bool open(size_t buffer_size)

looks like Cache_flip_event_log::open() doesn't need
any arguments at all

>    {
>      log_file.dir= mysql_tmpdir;
>      alt_buf.dir= log_file.dir;
> -    bool res= Event_log::open(io_cache_type_arg);
> +    bool res= Event_log::open(WRITE_CACHE);
>      if (res)
>        return res;
>  
> diff --git a/sql/log_event.cc b/sql/log_event.cc
> index 10180a95d99..0c4c284a40f 100644
> --- a/sql/log_event.cc
> +++ b/sql/log_event.cc
> @@ -761,16 +761,21 @@ Log_event::Log_event(const uchar *buf,
>  
>  int Log_event::read_log_event(IO_CACHE* file, String* packet,
>                                const Format_description_log_event *fdle,
> -                              enum enum_binlog_checksum_alg checksum_alg_arg)
> +                              enum enum_binlog_checksum_alg checksum_alg_arg,
> +                              size_t max_allowed_packet)
>  {
>    ulong data_len;
>    char buf[LOG_EVENT_MINIMAL_HEADER_LEN];
>    uchar ev_offset= packet->length();
>  #if !defined(MYSQL_CLIENT)
> -  THD *thd=current_thd;
> -  ulong max_allowed_packet= thd ? thd->slave_thread ? 
> slave_max_allowed_packet
> -                                                    : 
> thd->variables.max_allowed_packet
> -                                : ~(uint)0;
> +  if (max_allowed_packet == 0)

Maybe it would be better to pass THD as an argument?
Or even the correct value of max_allowed_packet, as in:

  ulong max_allowed_packet(THD *thd)
  { return thd ? ... : ~(uint)0; }

On second thought, do you need to ignore max_allowed_packet
here for online ALTER? Is there no event size limit when the
event is written into the binlog?

> +  {
> +    THD *thd=current_thd;
> +    max_allowed_packet= thd ? thd->slave_thread
> +                              ? slave_max_allowed_packet
> +                              : thd->variables.max_allowed_packet
> +                            : ~(uint)0;
> +  }
>  #endif
>    DBUG_ENTER("Log_event::read_log_event(IO_CACHE*,String*...)");
>  

Regards,
Sergei
VP of MariaDB Server Engineering
and secur...@mariadb.org
_______________________________________________
developers mailing list -- developers@lists.mariadb.org
To unsubscribe send an email to developers-le...@lists.mariadb.org

Reply via email to