# Options read only by the `mysql` command-line client.
[mysql]
# Enable TAB-completion of table and column names in the interactive client.
auto-rehash
# Options read by the mysqld server. Throughout this file a trailing
# "# ..." comment records the compiled-in server default for comparison.
[mysqld]
####: for global
user ={{ mysql_user }} # mysql
basedir ={{ mysql_base_dir }} # /usr/local/mysql/
datadir ={{ mysql_data_dir_base }}/data/{{mysql_port}} # /usr/local/mysql/data
tmpdir ={{ mysql_data_dir_base }}/tmp/{{mysql_port}} # /tmp
server_id ={{ mysql_server_id }} # 0
port ={{ mysql_port }} # 3306
character_set_server ={{ mysql_character_set_server }} # latin1
log_bin_trust_function_creators =on # 0
max_prepared_stmt_count =1048576
log_timestamps =system # utc
read_only =0 # off
skip_name_resolve =1 # 0
auto_increment_increment =1 # 1
auto_increment_offset =1 # 1
lower_case_table_names =1 # 0
# NOTE(review): an EMPTY value means "no directory restriction" (any path is
# allowed for LOAD DATA / SELECT ... INTO OUTFILE); the server default NULL
# shown below disables those operations entirely. Confirm this is intended.
secure_file_priv = # null
open_files_limit =65536 # 1024
max_connections ={{mysql_max_connections | default(1000)}}
# NOTE(review): `| max` yields the LARGER of (8*cpus, 256), i.e. a floor of
# 256 — whereas slave_parallel_workers below uses `| min` as a cap.
# Verify whether a floor or a cap was intended here.
thread_cache_size ={{ [(server_specs_processor_count|int) * 8,256] | max}} # 9
table_open_cache =4096 # 2000
table_definition_cache =2000 # 1400
table_open_cache_instances =32 # 16
local_infile =off # on
disabled_storage_engines=archive,blackhole,example,federated,memory,merge,ndb
# Load semi-sync replication (both roles) and the password-strength plugin.
plugin_load = "rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so;validate_password=validate_password.so"
pid_file=mysqld.pid
transaction_isolation ={{ mysql_transaction_isolation }} # RR (REPEATABLE-READ)
{# sock_type selects socket placement: 1 = relative to datadir, 2 = /tmp, 3 = under the run dir #}
{% if sock_type == 1 %}
socket = mysql.sock # /tmp/mysql.sock
{% elif sock_type == 2 %}
socket = /tmp/mysql{{ mysql_port }}.sock
{% elif sock_type == 3 %}
socket = {{ mysql_data_dir_base }}/run/mysql{{ mysql_port }}.sock
{% endif %}
####: for binlog
binlog_format ={{mysql_binlog_format}} # row
# Abort the server on binlog write errors instead of silently disabling binlogging.
binlog_error_action =abort_server # abort_server
log_bin ={{ mysql_data_dir_base }}/log/binlog/{{mysql_port}}/mysql-bin # off
binlog_rows_query_log_events =on # off
log_slave_updates =on # off
expire_logs_days =8 # 0
{# set binlog cache size: smaller per-session cache for statement-ish (mixed) format #}
{% if mysql_binlog_format == 'mixed' %}
binlog_cache_size =32768 # 32768(32k)
{% else %}
binlog_cache_size =65536 # 65536(64k)
{% endif %}
{# set binlog cache size #}
binlog_checksum =CRC32 # CRC32
# Durable binlog: fsync after every transaction.
sync_binlog =1 # 1
slave_preserve_commit_order =ON # OFF
####: for error-log
# Relative paths below resolve under datadir.
log_error =err.log # /usr/local/mysql/data/localhost.localdomain.err
{# set general log #}
general_log =off # off
general_log_file =general.log # hostname.log
{# set general log #}
####: for slow query log
slow_query_log =on # off
slow_query_log_file =slow.log # hostname-slow.log
log_queries_not_using_indexes =off # off
# Threshold in seconds; queries slower than this are logged.
long_query_time =2.000000 # 10.000000
####: for gtid
gtid_executed_compression_period =1000 # 1000
gtid_mode =on # off
enforce_gtid_consistency =on # off
####: for replication
skip_slave_start =0 #
relay_log ={{ mysql_data_dir_base }}/log/relaylog/{{mysql_port}}/relay-bin
relay_log_recovery=1
master_info_repository =table # file
relay_log_info_repository =table # file
slave_parallel_type =logical_clock # database | LOGICAL_CLOCK
# 4 workers per CPU, capped at 32 by the `min` filter.
slave_parallel_workers ={{[(server_specs_processor_count |int) * 4 ,32] | min}} # 0
rpl_semi_sync_master_enabled =1 # 0
rpl_semi_sync_slave_enabled =1 # 0
rpl_semi_sync_master_timeout =1000 # 1000(1 second)
# Delay is in microseconds: 500us = 0.5ms before group-committing the binlog.
binlog_group_commit_sync_delay =500 # default 0
binlog_group_commit_sync_no_delay_count = 13 # 0
{# WRITESET dependency tracking enables better parallel apply on replicas (5.7.22+) #}
{% if use_write_set == 1 %}
binlog_transaction_dependency_tracking = WRITESET # COMMIT_ORDER
transaction_write_set_extraction = XXHASH64
{% endif %}
####: for innodb
default_storage_engine =innodb # innodb
default_tmp_storage_engine =innodb # innodb
innodb_data_file_path =ibdata1:64M:autoextend # ibdata1:12M:autoextend
innodb_temp_data_file_path =ibtmp1:12M:autoextend # ibtmp1:12M:autoextend
innodb_buffer_pool_filename =ib_buffer_pool # ib_buffer_pool
innodb_log_group_home_dir ={{ mysql_data_dir_base }}/log/redolog/{{mysql_port}} # ./
innodb_log_files_in_group ={{ mysql57_innodb_log_files_in_group }} # 2
innodb_log_file_size ={{ mysql57_innodb_log_file_size }} # 50331648(48M)
innodb_file_per_table =on # on
innodb_online_alter_log_max_size =128M # 134217728(128M)
innodb_open_files ={{mysql_innodb_open_files}} # 2000
innodb_page_size =16k # 16384(16k)
innodb_thread_concurrency =0 # 0
innodb_read_io_threads =4 # 4
innodb_write_io_threads =4 # 4
innodb_purge_threads =4 # 4(garbage collection)
innodb_page_cleaners =4 # 4(flush lru list)
innodb_print_all_deadlocks =on # off
innodb_deadlock_detect =on # on
innodb_lock_wait_timeout =50 # 50
innodb_spin_wait_delay =6 # 6
# Mode 2 (interleaved): required for group replication / parallel apply.
innodb_autoinc_lock_mode =2 # 1
innodb_io_capacity =200 # 200
innodb_io_capacity_max =2000 # 2000
#--------Persistent Optimizer Statistics
innodb_stats_auto_recalc =on # on
innodb_stats_persistent =on # on
innodb_stats_persistent_sample_pages =20 # 20
{# -- set innodb_buffer_pool_instances: roughly one instance per GB of pool (60% of RAM), at least 1, at most 64 -- #}
{% if ((server_specs_memtotal_gb | int ) * 0.6 ) < 64 %}
innodb_buffer_pool_instances ={{ [ (((server_specs_memtotal_gb | int ) * 0.6) | round(0, 'ceil') | int ) , 1 ] | max }}
{% else %}
innodb_buffer_pool_instances =64
{% endif %}
{# -- set innodb_buffer_pool_instances -- #}
innodb_adaptive_hash_index =on # on
innodb_change_buffering =all # all
innodb_change_buffer_max_size =25 # 25
innodb_flush_neighbors =1 # 1
#innodb_flush_method = #
innodb_doublewrite =on # on
innodb_log_buffer_size ={{mysql_innodb_log_buffer_size}} # 16777216(16M)
innodb_flush_log_at_timeout =1 # 1
# Full durability: flush+fsync the redo log at every commit.
innodb_flush_log_at_trx_commit =1 # 1
# 60% of host RAM in MB, floored to a multiple of 128M by `// 128 * 128`.
innodb_buffer_pool_size ={{ ((server_specs_memtotal_gb | int ) * 1024 * 0.6 // 128 * 128 ) | round(0, 'ceil') | int }}M # 134217728(128M)
autocommit =1 # 1
#--------innodb scan resistant
innodb_old_blocks_pct =37 # 37
innodb_old_blocks_time =1000 # 1000
#--------innodb read ahead
innodb_read_ahead_threshold =56 # 56 (0..64)
innodb_random_read_ahead =OFF # OFF
#--------innodb buffer pool state
innodb_buffer_pool_dump_pct =25 # 25
innodb_buffer_pool_dump_at_shutdown =ON # ON
innodb_buffer_pool_load_at_startup =ON # ON
#--------undo log
innodb_undo_log_truncate = 1 # 0
innodb_max_undo_log_size = 2G # 1G
#innodb_undo_logs = 128 # 128
innodb_undo_tablespaces = 2 # 0
#--------disable QC (query cache is deprecated in 5.7, removed in 8.0)
query_cache_size = 0
query_cache_type = 0
#--------
slave_rows_search_algorithms = 'INDEX_SCAN,HASH_SCAN'
report_host = {{ ansible_default_ipv4.address }}
default_time_zone = {{ mysql_default_time_zone }}
{% if mtls_with_mysql_group_replication == 1 %}
####: for mysql group replication
{# loose- prefix: server starts even if the group_replication plugin is not yet installed #}
loose-group_replication_recovery_retry_count =10 # 10
loose-group_replication_recovery_reconnect_interval =60 # 60
# NOTE(review): allow_local_disjoint_gtids_join is deprecated in 5.7.21+ and
# removed in 8.0; harmless here only because of the loose- prefix.
loose-group_replication_allow_local_disjoint_gtids_join=off # off
loose-group_replication_allow_local_lower_version_join=off # off
loose-group_replication_ip_whitelist =AUTOMATIC # AUTOMATIC
loose-transaction_write_set_extraction =XXHASH64 # off
loose-group_replication_group_name ="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" #
loose-group_replication_start_on_boot =off # off
loose-group_replication_bootstrap_group =off # off
loose-group_replication_single_primary_mode =on # on
loose-group_replication_enforce_update_everywhere_checks=off
loose-group_replication_gtid_assignment_block_size =1000000 # 1000000
loose-group_replication_poll_spin_loops =0 # 0
loose-group_replication_compression_threshold =1024 # 1000000
loose-group_replication_flow_control_mode =QUOTA # QUOTA
{# pick this host's own address for local_address by intersecting its IPs with the MGR host list #}
{% for ip in ansible_all_ipv4_addresses %}
{%- for host_ip in mysql_mgr_hosts %}
{%- if ip == host_ip %}
loose-group_replication_local_address ="{{ip}}:{{mysql_mgr_port}}"
{%- endif %}
{%- endfor %}
{% endfor %}
{# join every MGR host with ":port," then append ":port" to the last one #}
{% set gs = ':' + (mysql_mgr_port | string)+',' %}
loose-group_replication_group_seeds ="{{ mysql_mgr_hosts | join(gs) + ':' + (mysql_mgr_port | string) }}"
{% endif %}
#### for performance_schema
performance_schema =on # on
# Consumers: current/history enabled for stages, statements and waits;
# the unbounded *_history_long consumers stay off to bound memory use.
performance_schema_consumer_global_instrumentation =on # on
performance_schema_consumer_thread_instrumentation =on # on
performance_schema_consumer_events_stages_current =on # off
performance_schema_consumer_events_stages_history =on # off
performance_schema_consumer_events_stages_history_long =off # off
performance_schema_consumer_statements_digest =on # on
performance_schema_consumer_events_statements_current =on # on
performance_schema_consumer_events_statements_history =on # on
performance_schema_consumer_events_statements_history_long =off # off
performance_schema_consumer_events_waits_current =on # off
performance_schema_consumer_events_waits_history =on # off
performance_schema_consumer_events_waits_history_long =off # off
# Count memory instrument events without capturing full details.
performance-schema-instrument ='memory/%=COUNTED'
# Source project: dbops — MySQL deployment tooling (openGauss support planned)
# https://gitee.com/fanderchan/dbops