################################################################################
# This test validates the behavior of the group replication recovery channel
# against common slave commands like START/STOP/RESET SLAVE.
#
# Test:
# 0. The test requires three servers: M1, M2 and M3.
# 1. Bootstrap start group replication on M2. Add some data for recovery.
#    Start M1.
# 2. Stop replication on M1. Add more data on M2. On M1, lock table t1 from
#    a second connection to block recovery. Start M1.
# 3. Execute a STOP SLAVE command and see the recovery channel still active.
# 4. Execute a RESET SLAVE ALL command and see the recovery channel is not
#    affected on M1 when the GR node is active.
# 5. Kill the server M1. The recovery channel should still be there, but not
#    started. Recreate the slave channel to test that it starts.
# 6. Clean the created channels and tables.
################################################################################

# This test crashes servers, hence we skip it on ASAN and Valgrind.
--source include/not_asan.inc
--source include/not_valgrind.inc

--source include/big_test.inc
--source include/force_restart.inc
--source ../inc/have_group_replication_plugin.inc
--let $rpl_skip_group_replication_start= 1
--let $rpl_server_count= 3
--source ../inc/group_replication.inc

#
# Phase 1: Start group replication on two servers.
#

--connection server2
--echo server2
--source ../inc/start_and_bootstrap_group_replication.inc

# Add some data for recovery.
CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);

--connection server1
--echo server1
--source include/start_group_replication.inc

--let $member1_uuid= query_get_value(SELECT @@SERVER_UUID, @@SERVER_UUID, 1)

--connection server2
--let $group_replication_number_of_members= 2
--source ../inc/gr_wait_for_number_of_members.inc

#
# Phase 2: Stop replication on server 1.
# On server 1, lock a table from a second connection to block recovery.
#

--connection server1
--source include/stop_group_replication.inc

--connection server2
INSERT INTO t1 VALUES (2);
INSERT INTO t1 VALUES (3);

# Lock the table in a different connection to server 1, so that the recovery
# applier blocks when it tries to apply the missing transactions.
--connection server_1
LOCK TABLE t1 READ;

--connection server1
--let $group_replication_start_member_state= RECOVERING
--source include/start_group_replication.inc

#
# Phase 3: Execute a STOP SLAVE command and see the channel still active.
#

let $wait_condition= SELECT COUNT(*)=1
  FROM performance_schema.replication_connection_status
  WHERE CHANNEL_NAME="group_replication_recovery" AND SERVICE_STATE="ON";
--source include/wait_condition.inc

STOP SLAVE;

--let $assert_text= 'The group replication recovery channel is ON'
--let $assert_cond= [SELECT COUNT(*) AS count FROM performance_schema.replication_connection_status WHERE CHANNEL_NAME="group_replication_recovery" AND SERVICE_STATE="ON", count, 1] = 1
--source include/assert.inc
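
# Note (a sketch, not executed here): STOP SLAVE without a FOR CHANNEL clause
# acts on the ordinary slave channels only; while the plugin is running the
# group replication channels are skipped, which is why the assert above still
# sees the recovery channel ON. A single regular channel can be stopped
# explicitly, e.g. ("channel_1" is only created in Phase 4 below):
#
#   STOP SLAVE FOR CHANNEL "channel_1";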
#
# Phase 4: Execute a RESET SLAVE ALL command and see the recovery channel is
# not affected as the GR node is active.
#

--connection server1
# Bug#22097534
SET GLOBAL SUPER_READ_ONLY=0;

--replace_result $SERVER_MYPORT_3 SERVER_3_PORT
--eval CHANGE MASTER TO MASTER_HOST="127.0.0.1", MASTER_USER="root", MASTER_PASSWORD="", MASTER_PORT=$SERVER_MYPORT_3, MASTER_AUTO_POSITION=1 FOR CHANNEL "channel_1"

--let $assert_text= 'The slave channel is present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="channel_1", count, 1] = 1
--source include/assert.inc

--let $datadir_1= `SELECT @@GLOBAL.datadir`

--let $recovery_relay_log_file= `SELECT CONCAT('$datadir_1', 'mgr-group_replication_recovery.000001')`
--file_exists $recovery_relay_log_file
--let $recovery_relay_log_index= `SELECT CONCAT('$datadir_1', 'mgr-group_replication_recovery.index')`
--file_exists $recovery_relay_log_index

# RESET SLAVE ALL must fail on the group replication recovery channel while
# the plugin is running.
--error ER_SLAVE_CHANNEL_OPERATION_NOT_ALLOWED
RESET SLAVE ALL;

--let $assert_text= 'The group replication recovery channel is present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="group_replication_recovery", count, 1] = 1
--source include/assert.inc

--let $assert_text= 'The group replication recovery channel is ON'
--let $assert_cond= [SELECT COUNT(*) AS count FROM performance_schema.replication_connection_status WHERE CHANNEL_NAME="group_replication_recovery" AND SERVICE_STATE="ON", count, 1] = 1
--source include/assert.inc

--file_exists $recovery_relay_log_file
--file_exists $recovery_relay_log_index

--let $assert_text= 'The slave channel is not present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="channel_1", count, 1] = 0
--source include/assert.inc

#
# Phase 5: Kill the server.
# The recovery channel should still be there, but not started.
# Recreate the slave channel to test that it starts.
#

--connection server1
--replace_result $SERVER_MYPORT_3 SERVER_3_PORT
--eval CHANGE MASTER TO MASTER_HOST="127.0.0.1", MASTER_USER="root", MASTER_PASSWORD="", MASTER_PORT=$SERVER_MYPORT_3, MASTER_AUTO_POSITION=1 FOR CHANNEL "channel_1"
--source include/start_slave.inc

--let $assert_text= 'The created slave channel is ON'
--let $assert_cond= [SELECT COUNT(*) AS count FROM performance_schema.replication_connection_status WHERE CHANNEL_NAME="channel_1" AND SERVICE_STATE="ON", count, 1] = 1
--source include/assert.inc

--let $_group_replication_local_address= `SELECT @@GLOBAL.group_replication_local_address`
--let $_group_replication_group_seeds= `SELECT @@GLOBAL.group_replication_group_seeds`
--let $_group_replication_group_whitelist= `SELECT @@GLOBAL.group_replication_ip_whitelist`

# Kill server 1.
--let $restart_parameters=restart:--group_replication_local_address=$_group_replication_local_address --group_replication_group_seeds=$_group_replication_group_seeds --group_replication_ip_whitelist=$_group_replication_group_whitelist
--replace_result $_group_replication_local_address GROUP_REPLICATION_LOCAL_ADDRESS $_group_replication_group_seeds GROUP_REPLICATION_GROUP_SEEDS $_group_replication_group_whitelist GROUP_REPLICATION_GROUP_WHITELIST
--source ../inc/kill_and_restart_mysqld.inc

# Needed as we are not using rpl_restart_server.inc.
--let $rpl_server_number= 1
--source include/rpl_reconnect.inc
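
# For reference, the group membership as seen by a member can be inspected
# directly; a sketch of the kind of query that the gr_wait_* helper scripts
# below poll:
#
#   SELECT MEMBER_ID, MEMBER_STATE
#     FROM performance_schema.replication_group_members;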
# Wait until the group has only 1 member, that is, until server1's death is
# detected by the group.
--let $rpl_connection_name= server2
--source include/rpl_connection.inc
--let $group_replication_member_state= UNREACHABLE
--let $group_replication_member_id= $member1_uuid
--source ../inc/gr_wait_for_member_state.inc

# Unblock the group: server1 has gone away and there were only two members
# in the group, so majority was lost.
--let $local_address_server2= `SELECT @@GLOBAL.group_replication_local_address`
--disable_query_log
# Resetting the group to server2 only.
--eval SET GLOBAL group_replication_force_members= "$local_address_server2"
--enable_query_log

--let $group_replication_number_of_members= 1
--source ../inc/gr_wait_for_number_of_members.inc

--let $rpl_connection_name= server1
--source include/rpl_connection.inc

# Wait for the slave channel to start.
--let $rpl_channel_name= channel_1
--source include/wait_for_slave_to_start.inc
--let $rpl_channel_name=

--let $assert_text= 'The group replication recovery channel is present'
--let $assert_cond= [SELECT COUNT(*) AS count FROM mysql.slave_relay_log_info WHERE channel_name="group_replication_recovery", count, 1] = 1
--source include/assert.inc

--let $assert_text= 'The group replication recovery channel is OFF'
--let $assert_cond= [SELECT COUNT(*) AS count FROM performance_schema.replication_connection_status WHERE CHANNEL_NAME="group_replication_recovery" AND SERVICE_STATE="OFF", count, 1] = 1
--source include/assert.inc

--let $assert_text= 'The created slave channel is ON'
--let $assert_cond= [SELECT COUNT(*) AS count FROM performance_schema.replication_connection_status WHERE CHANNEL_NAME="channel_1" AND SERVICE_STATE="ON", count, 1] = 1
--source include/assert.inc

#
# Phase 6: Clean the created channels and tables.
#

--connection server2
DROP TABLE t1;

--connection server1
--source include/stop_slave.inc
CHANGE MASTER TO MASTER_AUTO_POSITION=0 FOR CHANNEL "channel_1";

--source include/start_group_replication.inc

--let $skip_restore_connection= 0
--source ../inc/group_replication_end.inc
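
# A sketch of how this test is typically invoked, assuming a standard
# mysql-test working directory (--big-test is needed because big_test.inc is
# sourced above):
#
#   ./mysql-test-run.pl --big-test --suite=group_replication gr_recovery_channel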