# The include statement below is a temporary one for tests that are yet to
# be ported to run with InnoDB, but needs to be kept for tests that will
# need MyISAM in the future.
--source include/force_myisam_default.inc

# Use the version 2 result file format
--result_format 2
--source include/have_ndb.inc

SELECT PLUGIN_NAME,PLUGIN_VERSION,PLUGIN_STATUS,PLUGIN_TYPE,
       PLUGIN_LIBRARY,PLUGIN_LIBRARY_VERSION,PLUGIN_AUTHOR,PLUGIN_DESCRIPTION
FROM information_schema.plugins WHERE PLUGIN_NAME = 'ndbinfo';

## Creation of temporary tables is not supported by the NDBINFO engine
--error ER_ILLEGAL_HA_CREATE_OPTION
CREATE TEMPORARY TABLE `t1` (
  `dummy` INT UNSIGNED
) ENGINE=NDBINFO;

USE ndbinfo;

# Current usage may vary. Therefore the test only checks that it falls in the
# <0%,5%> range.
SELECT node_id, memory_type, used>0 AND used < total/20 AS used_ok, 
  used_pages>0 AND used_pages < total_pages/20 AS used_pages_ok, 
  total, total_pages 
  FROM ndbinfo.memoryusage WHERE memory_type="Long message buffer"
  ORDER BY node_id;

# Current usage may vary. Therefore the test only checks that it falls in the
# <0%,5%> range.
SELECT node_id, block_number, block_instance, pool_name, 
  used>0 AND used < total/20 AS used_ok, 
  total, high>=used AND high < total/20 AS high_ok, entry_size, config_param1, 
  config_param2, config_param3, config_param4  
  FROM ndbinfo.ndb$pools WHERE pool_name="Long message buffer"
  ORDER BY node_id;

SHOW CREATE TABLE ndb$tables;

SELECT * FROM ndb$tables;
SELECT COUNT(*) FROM ndb$tables;
SELECT * FROM ndb$tables WHERE table_id = 2;
SELECT * FROM ndb$tables WHERE table_id > 5;
SELECT * FROM ndb$tables WHERE table_name = 'LOGDESTINATION';
SELECT COUNT(*) FROM ndb$tables t1, ndb$tables t2 WHERE t1.table_id = t2.table_id;

SELECT table_id, table_name, comment from ndb$tables
  WHERE table_id > 2 AND table_id <= 5 ORDER BY table_id;
SELECT table_id FROM ndb$tables WHERE table_id = 2 ORDER BY table_name;
SELECT table_id, table_name FROM ndb$tables ORDER BY table_name;

SELECT table_id, column_id, column_name FROM ndb$columns LIMIT 7;

--error ER_OPEN_AS_READONLY
UPDATE ndb$tables SET table_id=2 WHERE table_id=3;

--error ER_OPEN_AS_READONLY
UPDATE ndb$tables SET table_id=9 WHERE 1=0;

--error ER_OPEN_AS_READONLY
UPDATE ndb$tables SET table_id=9 WHERE table_id > 1;

--error ER_OPEN_AS_READONLY
DELETE FROM ndb$tables WHERE table_id=3;

--error ER_OPEN_AS_READONLY
DELETE FROM ndb$tables WHERE 1=0;

--error ER_OPEN_AS_READONLY
DELETE FROM ndb$tables WHERE table_id > 1;

--error ER_ILLEGAL_HA
ALTER TABLE ndb$test ADD COLUMN another_col varchar(255);

FLUSH TABLES;
SELECT table_id FROM ndb$tables;

--error ER_OPEN_AS_READONLY
TRUNCATE ndb$tables;

## Variables and status
SHOW GLOBAL STATUS LIKE 'ndbinfo\_%';
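# Mask the actual ndbinfo_version value in the output below, since it
# changes with every build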
let $current_version = `select @@ndbinfo_version`;
--replace_result $current_version NDB_VERSION_D
SHOW GLOBAL VARIABLES LIKE 'ndbinfo\_%';

SELECT counter, HEX(counter2) FROM ndb$test LIMIT 10;

# All tables that contain data are hidden by default
# and become visible with ndbinfo_show_hidden
SHOW TABLES LIKE 'ndb$te%';
set @@ndbinfo_show_hidden=TRUE;
SHOW TABLES LIKE 'ndb$te%';
set @@ndbinfo_show_hidden=default;

# Check that ndbinfo_table_prefix is readonly
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
set @@ndbinfo_table_prefix="somethingelse";

# Check that ndbinfo_database is readonly
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
set @@ndbinfo_database="somethingelse";

# Check that the blocks table has been created and contains data
SELECT count(*) >= 20 FROM blocks;

# Test incompatible table definitions between NDB and the MySQL Server
# using the ndb$test table which originally looks like
# CREATE TABLE `ndb$test` (
#   `node_id` int unsigned DEFAULT NULL,
#   `block_number` int unsigned DEFAULT NULL,
#   `block_instance` int unsigned DEFAULT NULL,
#   `counter` int unsigned DEFAULT NULL,
#   `counter2` bigint unsigned DEFAULT NULL
# ) ENGINE=NDBINFO COMMENT='for testing'

## 1) More columns in NDB -> allowed, with warning
DROP TABLE ndb$test;
CREATE TABLE ndb$test (node_id int unsigned) ENGINE = ndbinfo;
SELECT node_id != 0 FROM ndb$test LIMIT 1;
DROP TABLE ndb$test;

## 2) Column does not exist in NDB -> allowed, with warning, non-existing
##    column(s) return NULL
## 2a) Extra column at end
CREATE TABLE ndb$test (node_id int, non_existing int) ENGINE = ndbinfo;
SELECT DISTINCT node_id, non_existing FROM ndb$test;
DROP TABLE ndb$test;

## 2b) Extra column(s) in middle
CREATE TABLE ndb$test (
  node_id int unsigned,
  non_existing int unsigned,
  block_number int unsigned,
  block_instance int unsigned,
  counter int unsigned,
  counter2 bigint unsigned
) ENGINE = ndbinfo;
SELECT DISTINCT node_id, non_existing, block_number FROM ndb$test;
DROP TABLE ndb$test;

## 2c) Extra column first
CREATE TABLE ndb$test (non_existing int, node_id int) ENGINE = ndbinfo;
SELECT DISTINCT node_id, non_existing FROM ndb$test;
SELECT DISTINCT non_existing, node_id FROM ndb$test;
DROP TABLE ndb$test;

## 3) Incompatible column type -> error, with warning
## 3a) int instead of bigint
CREATE TABLE ndb$test (counter2 int) ENGINE = ndbinfo;
--error ER_GET_ERRMSG
SELECT * FROM ndb$test;
SHOW WARNINGS;
## 3b) bigint instead of int
DROP TABLE ndb$test;
CREATE TABLE ndb$test (node_id bigint) ENGINE = ndbinfo;
--error ER_GET_ERRMSG
SELECT * FROM ndb$test;
SHOW WARNINGS;
## 3c) varchar instead of int
DROP TABLE ndb$test;
CREATE TABLE ndb$test (node_id varchar(255)) ENGINE = ndbinfo;
--error ER_GET_ERRMSG
SELECT * FROM ndb$test;
SHOW WARNINGS;
DROP TABLE ndb$test;
## 3d) column which is NOT NULL
CREATE TABLE ndb$test (node_id int unsigned NOT NULL) ENGINE = ndbinfo;
--error ER_GET_ERRMSG
SELECT * FROM ndb$test;
SHOW WARNINGS;
DROP TABLE ndb$test;
## 3e) non existing column which is NOT NULL
CREATE TABLE ndb$test (
  block_number int unsigned,
  non_existing int NOT NULL) ENGINE = ndbinfo;
--error ER_GET_ERRMSG
SELECT * FROM ndb$test;
SHOW WARNINGS;
DROP TABLE ndb$test;


## 4) Table with primary key/indexes not supported
--error ER_TOO_MANY_KEYS
CREATE TABLE ndb$test (node_id int, block_number int PRIMARY KEY) ENGINE = ndbinfo;

## 5) Table with blobs not supported
--error ER_TABLE_CANT_HANDLE_BLOB
CREATE TABLE ndb$test (node_id int, block_number blob) ENGINE = ndbinfo;

## 6) Table with autoincrement not supported
--error ER_TABLE_CANT_HANDLE_AUTO_INCREMENT
CREATE TABLE ndb$test (node_id int AUTO_INCREMENT) ENGINE = ndbinfo;

# wl#5567 - exercise the table...
#   only node_id is guaranteed to be the same...
#   (unless we change the cluster config for the test)
# but this will anyway pull results from the data nodes...
#
--sorted_result
select distinct node_id
from ndbinfo.diskpagebuffer;


#
# BUG#11885602
# - It was allowed to CREATE a TABLE which did not exist in NDB, but
#   creating a view on that table failed. Implement ndbinfo_offline
#   mode, which allows tables to be created and opened although they
#   don't exist or have a different table definition.
#   This is exactly the same behaviour as when NDBCLUSTER
#   is disabled.
#

# Check ndbinfo_offline is GLOBAL variable
--error ER_GLOBAL_VARIABLE
set @@ndbinfo_offline=1;

# Query used to check that open tables are closed
# when offline mode is turned on and off
let $q1 = SELECT DISTINCT(node_id) FROM ndbinfo.counters ORDER BY node_id;
eval $q1;

# new views
desc threadblocks;
select distinct block_name from threadblocks order by 1;
desc threadstat;
select count(*) > 0 block_name from threadstat;

desc disk_write_speed_aggregate;
select count(*) from disk_write_speed_aggregate;

desc disk_write_speed_base;
select node_id from disk_write_speed_base group by node_id, thr_no;

select count(*) from disk_write_speed_aggregate_node;

desc restart_info;
select * from restart_info;

desc cluster_transactions;
desc server_transactions;
desc cluster_operations;
desc server_operations;

create table t1 (a int primary key) engine = ndb;
begin;
insert into t1 values (1);
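# The node ids in the output vary between runs, so any id within the valid
# range is replaced by a fixed placeholder before printing.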
select state, count_operations, outstanding_operations,
IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
  client_node_id
from server_transactions;
select node_id, operation_type, state,
IF(tc_node_id <= 48, "<tc_node_id>", "<incorrect nodeid>") tc_node_id,
IF(client_node_id <= 255, "<client_node_id>", "<incorrect node id>") 
  client_node_id
from server_operations
order by 1;

--sorted_result
select st.state, st.count_operations, st.outstanding_operations,
       so.node_id, so.state, so.operation_type
from server_transactions st,
     server_operations so
where st.transid = so.transid
  and so.tc_node_id = st.node_id
  and so.tc_block_instance = st.block_instance
  and so.client_node_id = st.client_node_id
  and so.client_block_ref = st.client_block_ref; 
rollback;
drop table t1;

#
select distinct resource_name
from resources
order by 1;

select distinct counter_name
from counters
order by 1;

# Turn on ndbinfo_offline
set @@global.ndbinfo_offline=TRUE;
select @@ndbinfo_offline;

CREATE TABLE ndb$does_not_exist_in_ndb(
  node_id int,
  message varchar(255)
) ENGINE = ndbinfo;

CREATE VIEW view_on_table_which_does_not_exist_in_ndb AS
  SELECT node_id, message
  FROM ndbinfo.ndb$does_not_exist_in_ndb;

SHOW CREATE TABLE ndb$does_not_exist_in_ndb;

# SELECTs return no rows in offline mode
SELECT * FROM view_on_table_which_does_not_exist_in_ndb;
SELECT * FROM ndb$does_not_exist_in_ndb;
eval $q1;

DROP VIEW view_on_table_which_does_not_exist_in_ndb;
DROP TABLE ndb$does_not_exist_in_ndb;

# Restore original value
set @@global.ndbinfo_offline = FALSE;

eval $q1;

#
# Bug#17018500 BOGUS DEFINER VALUE FOR NDBINFO VIEWS REPORTED
#              IN INFORMATION_SCHEMA.VIEWS
#
--echo Verify there are no ndbinfo views without 'root@localhost' as definer
select table_schema, table_name, definer from information_schema.views
  where table_schema = 'ndbinfo' and
        definer != 'root@localhost';

# ################################
# Test memory_per_fragment table.
# ################################

# A helper table for populating t1.
create table numbers(
  i int
); 

insert into numbers values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);

create table t1(
  a int not null primary key,
  b int not null,
  c1 char(255),
  vc1 varchar(1024),
  tx1 text
)engine=ndbcluster;

# Populate t1 with 1000 rows, including some sizable varsize data.
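# The triple cross join over the ten-row numbers table yields 10*10*10 = 1000
# rows, with column 'a' taking each value in 0..999 exactly once.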
insert into t1 select
  x1.i+x2.i*10+x3.i*100,
  x1.i-x2.i*10-x3.i*100,
  repeat('x', 1+x1.i),
  repeat('y', 1000+x1.i),
  repeat('z', x1.i*100)
from numbers as x1, numbers as x2, numbers as x3;

drop table numbers;

# Create some extra indexes to test different schema object types.
create index ix1 on t1(c1);
create unique index ix2 on t1(b) using hash;

# The table id of 't1' may vary from run to run. We need to make sure that the
# output from the subsequent tests does not depend on the numerical value of
# that table id.
set @t1_id = (select cast(id as signed integer)
              from ndbinfo.ndb$dict_obj_info
              where fq_name REGEXP "def/t1$");


# Test the dict_obj_info table. Replace the table id of t1 in 'fq_name' values
# that contain it (index and blob table names do), since the table id may vary
# between runs.
select type, state, parent_obj_type,
  replace(fq_name, cast(@t1_id as char(10)), "<t1 id>") as fq_name,
  if(parent_obj_id=@t1_id, "child_of_t1",
     if(parent_obj_id=0, "root", "error!")) as nesting
from ndbinfo.ndb$dict_obj_info
where id >= @t1_id and type <= 6
order by type, id;

# Disable query log while defining the view below, since the view definition
# contains the table id of t1, which may vary between runs.
disable_query_log;
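# Read @t1_id into a mysqltest variable so that it can be substituted into
# the view definition below with eval.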
let $t1_id = query_get_value(select @t1_id as id, id, 1);

# Create some views that we reuse after modifying t1 below.
# This one sums usage counts for all fragments of each table.
# Some values, such as fixed_elem_free_count and fixed_elem_count for ordered
# indexes, may vary depending on the number of fragments and other factors.
# These are therefore not returned directly, but used in calculations that
# should provide stable results.
# Observe that memory_per_fragment.fq_name, parent_fq_name, type are all
# functionally dependent on 'table_id', but since this cannot be deduced from
# the schema, they are all included in the 'group by' clause.
eval create view sum_per_tab as 
select replace(fq_name, cast($t1_id as char(10)), "<t1 id>") as fq_name, 
parent_fq_name, type, 
if (type="Ordered index", 
-(round(sum(fixed_elem_count)/165)), 
    sum(fixed_elem_count)) as fixed_elem_count,
max(fixed_elem_size_bytes) as fixed_elem_size_bytes,
round(sum(fixed_elem_alloc_bytes/fixed_elem_size_bytes)
  /sum(fixed_elem_count+fixed_elem_free_count),1) as fixed_elem_alloc_ok,
sum(fixed_elem_free_bytes/fixed_elem_size_bytes) = 
   sum(fixed_elem_free_count) as fixed_elem_free_bytes_ok,
sum(var_elem_count) as var_elem_count,
sum(var_elem_alloc_bytes) >= sum(var_elem_free_bytes) as var_bytes_ok
from ndbinfo.memory_per_fragment
where table_id >= $t1_id
group by table_id, memory_per_fragment.fq_name, parent_fq_name, type
order by table_id;

enable_query_log;


# Sum usage counts for all fragments of all tables.
create view frag_sum as 
select 
sum(fixed_elem_alloc_bytes) as fixed_elem_alloc_bytes,
sum(var_elem_alloc_bytes) as var_elem_alloc_bytes,
sum(hash_index_alloc_bytes) as hash_index_alloc_bytes
from ndbinfo.memory_per_fragment;

# Sum pool values across all block instances.
# 'Data memory' appears twice in 'pools', therefore a 'where' clause is needed.
create view pool_sum as
select pool_name, block_number, max(entry_size) * sum(used) as used_bytes 
from  ndbinfo.ndb$pools where pool_name <> "Data memory" 
or block_number=249 group by pool_name, block_number;

# Check that the combined size of all L2PMap nodes equals the total size
# of the allocated L2PMap pages minus a small (<1%) page overhead.
create view check_l2pmap_pools as 
select min(floor(1000*nodes.total*nodes.entry_size/
           (pages.used*pages.entry_size))) as l2pmap_ratio 
from ndbinfo.ndb$pools as pages join ndbinfo.ndb$pools as nodes 
where pages.node_id=nodes.node_id 
and pages.block_instance=nodes.block_instance 
and pages.block_number=nodes.block_number
and pages.pool_name="L2PMap pages" 
and nodes.pool_name="L2PMap nodes";
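# The ratio is scaled by 1000, so a value in the 990-1000 range means that
# the node records fill at least 99% of the allocated page bytes.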

create view l2p_frag_sum as
select sum(tuple_l2pmap_alloc_bytes) as l2p_sum, 'TUP' as block 
from ndbinfo.ndb$frag_mem_use 
union 
select sum(hash_index_l2pmap_alloc_bytes) as l2p_sum, 'ACC' as block 
from ndbinfo.ndb$frag_mem_use;

# Check that we get the same result from the common pool as when we add up
# the values for each fragment.

select used_bytes - hash_index_alloc_bytes from pool_sum, frag_sum 
where pool_name='Index memory';

select used_bytes - fixed_elem_alloc_bytes - var_elem_alloc_bytes
  from pool_sum, frag_sum 
  where pool_name='Data memory';

select l2p_sum-used_bytes from l2p_frag_sum, pool_sum 
  where pool_name='L2PMap nodes' and ((block='ACC' and block_number=248) or 
                                      (block='TUP' and block_number=249));

select * from check_l2pmap_pools;

select * from sum_per_tab;

# Now delete some scattered rows and check memory usage again.
delete from t1 where a%10=0;

select used_bytes - hash_index_alloc_bytes from pool_sum, frag_sum 
where pool_name='Index memory';

select used_bytes - fixed_elem_alloc_bytes - var_elem_alloc_bytes
  from pool_sum, frag_sum 
  where pool_name='Data memory';

select l2p_sum-used_bytes from l2p_frag_sum, pool_sum 
  where pool_name='L2PMap nodes' and ((block='ACC' and block_number=248) or 
                                      (block='TUP' and block_number=249));

select * from check_l2pmap_pools;

select * from sum_per_tab;

# Now update some var-sized fields and check memory usage again.
update t1 set vc1=repeat('x', a%300), tx1=repeat('y', a*2);

select used_bytes - hash_index_alloc_bytes from pool_sum, frag_sum 
where pool_name='Index memory';

select used_bytes - fixed_elem_alloc_bytes - var_elem_alloc_bytes
  from pool_sum, frag_sum 
  where pool_name='Data memory';

select l2p_sum-used_bytes from l2p_frag_sum, pool_sum 
  where pool_name='L2PMap nodes' and ((block='ACC' and block_number=248) or 
                                      (block='TUP' and block_number=249));

select * from check_l2pmap_pools;

select * from sum_per_tab;

drop table t1;
drop view l2p_frag_sum;
drop view check_l2pmap_pools;
drop view sum_per_tab;
drop view pool_sum;
drop view frag_sum; 

# ################################
# Test operations_per_fragment table.
# ################################

# Create a view that sums counters across all fragments, so that results
# will not depend on endianness.
# Observe that operations_per_fragment.fq_name, parent_fq_name, type are all
# functionally dependent on 'table_id', but since this cannot be deduced from
# the schema, they are all included in the 'group by' clause.
create view frag_ops as select 
  if(type in ('Ordered index', 'Unique hash index'), 
    substring(fq_name from 1+locate('/', fq_name, 9)), 
    if(fq_name like '%BLOB%', left(fq_name, locate('_', fq_name)), fq_name)) 
    as fq_name,
  parent_fq_name,
  type,
  count(fragment_num) as frag_count,
  sum(tot_key_reads) as tot_key_reads, 
  sum(tot_key_inserts) as tot_key_inserts, 
  sum(tot_key_updates) as tot_key_updates, 
  sum(tot_key_writes) as tot_key_writes, 
  sum(tot_key_deletes) as tot_key_deletes, 
  sum(tot_key_refs) as tot_key_refs, 
  # Attrinfo for read operations is a bit longer when running with embedded 
  # mysqld, which is why we do the rounding below. 
  round(log2(1+sum(tot_key_attrinfo_bytes))) as log_tot_key_attrinfo_bytes,
  sum(tot_key_keyinfo_bytes) as tot_key_keyinfo_bytes,
  sum(tot_key_prog_bytes) as tot_key_prog_bytes,
  sum(tot_key_inst_exec) as tot_key_inst_exec,
  sum(tot_key_bytes_returned) as tot_key_bytes_returned,
  sum(tot_frag_scans) as tot_frag_scans, 
  sum(tot_scan_rows_examined) as tot_scan_rows_examined, 
  sum(tot_scan_rows_returned) as tot_scan_rows_returned, 
  sum(tot_scan_bytes_returned) as tot_scan_bytes_returned, 
  sum(tot_scan_prog_bytes) as tot_scan_prog_bytes,
  sum(tot_scan_bound_bytes) as tot_scan_bound_bytes,
  sum(tot_scan_inst_exec) as tot_scan_inst_exec, 
  sum(conc_frag_scans) as conc_frag_scans,
  sum(conc_qd_frag_scans) as conc_qd_frag_scans, 
  sum(tot_commits) as tot_commits
  from ndbinfo.operations_per_fragment
  where fq_name like 'ndbinfo%' or parent_fq_name like 'ndbinfo%'
  group by table_id, type, operations_per_fragment.fq_name, parent_fq_name
  order by table_id;


# Create and populate test tables.
create table t1 (
  a int not null,
  b int not null,
  c int not null,
  d int not null,
  t text not null,
  primary key (b,a)
) engine=ndbcluster
partition by key(b,a) partitions 8;

create unique index ix1 on t1(d) using hash;


# A helper table for populating t1.
create table numbers(
  i int
); 

insert into numbers values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);

insert into t1 select x1.i+x2.i*10, 1, 1+x1.i+x2.i*10, 1+x1.i+x2.i*10,
  repeat('x', 512)
  from numbers as x1, numbers as x2;

drop table numbers;

create table t2 (
  a int, 
  b int, 
  primary key(a) using hash)
engine = ndb 
partition by key(a) partitions 8;

insert into t2 values (1, 2);
insert into t2 values (2, 3);
insert into t2 values (3, 1);

# Check counters before the test queries.
--query_vertical select * from frag_ops;

# Do a table scan with a pushed condition.
explain select count(*) from t1 where d<10;
select count(*) from t1 where d<10;

# Do an index scan.
explain select count(*) from t1 where b=1 and a<10;
select count(*) from t1 where b=1 and a<10;

# Do a PK read.
explain select * from t1 where b=1 and a = -5;
select * from t1 where b=1 and a = -5;

# Try to insert a duplicate key.
--error ER_DUP_ENTRY
insert into t1 values (1,1,1,1,'');

# Use replace to generate PK write operations.
replace t1 values (-1,-1,-1,-1,'');
replace t1 values (1,1,5,500,'');

# Do update and delete.
update t1 set d = -d, t = repeat('a', 300) where b=1 and a > 90;

delete from t1 where a%30=0;

# This error insert causes the next index scan from SPJ to be queued on node 2.
# This command may fail if error inserts are not available in this build,
# therefore we permit return code 255.
--error 0,255
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "2 error 5084" >> $NDB_TOOLS_OUTPUT

# Do a pushed query that starts multiple index scans on each t1 fragment.
# That way the first scan (on node 2) will be queued until the next finishes.
explain select count(*) from t1 as x1 join t1 as x2 on x1.c=x2.b;
select count(*) from t1 as x1 join t1 as x2 on x1.c=x2.b;

# Check the number of queued scans. Test for a debug build in order to
# handle builds without error inserts (i.e. non-debug builds).
select 
  if ((select version() like '%debug'),
      sum(tot_qd_frag_scans),
      4) as tot_qd_frag_scans
  from ndbinfo.operations_per_fragment
  where fq_name like 'ndbinfo%' or parent_fq_name like 'ndbinfo%';

# Check how these operations changed the counters. 
--query_vertical select * from frag_ops;

# This query should give PK read operations with an interpreted program.
# (Multi-range read mapped to PK reads.)
explain select count(*)
from t2 as tx, t2 as ty
where tx.a in (1,3,5)
and ty.a = tx.b;

# The 'explain' above triggers an implicit 'analyze table'. That increments
# operations_per_fragment counters in a way that is endian-dependent.
# We therefore save the current values of the counters to isolate the 
# effect of the query below.
create table save_counters as select * from frag_ops;

select count(*)
from t2 as tx, t2 as ty
where tx.a in (1,3,5)
and ty.a = tx.b;

create view changes as select 
new.fq_name,
new.parent_fq_name,
new.type,
new.frag_count,
new.tot_key_reads - old.tot_key_reads,
new.tot_key_inserts - old.tot_key_inserts,
new.tot_key_updates - old.tot_key_updates,
new.tot_key_writes - old.tot_key_writes,
new.tot_key_deletes - old.tot_key_deletes,
new.tot_key_refs - old.tot_key_refs,
new.log_tot_key_attrinfo_bytes - old.log_tot_key_attrinfo_bytes,
new.tot_key_keyinfo_bytes - old.tot_key_keyinfo_bytes,
new.tot_key_prog_bytes - old.tot_key_prog_bytes,
new.tot_key_inst_exec - old.tot_key_inst_exec,
new.tot_key_bytes_returned - old.tot_key_bytes_returned,
new.tot_frag_scans - old.tot_frag_scans,
new.tot_scan_rows_examined - old.tot_scan_rows_examined,
new.tot_scan_rows_returned - old.tot_scan_rows_returned,
new.tot_scan_bytes_returned - old.tot_scan_bytes_returned,
new.tot_scan_prog_bytes - old.tot_scan_prog_bytes,
new.tot_scan_bound_bytes - old.tot_scan_bound_bytes,
new.tot_scan_inst_exec - old.tot_scan_inst_exec,
new.tot_commits  - old.tot_commits
from frag_ops as new, save_counters as old
where new.fq_name=old.fq_name and new.fq_name like '%t2';

--query_vertical select * from changes



# Drop test views and tables.
drop view changes;
drop table save_counters;
drop view frag_ops;
drop table t1;
drop table t2;


#
# WL#7575 Remove ndbinfo's usage of another engine
# - the five lookup tables which resided in another engine
#   were converted to virtual (aka hardcoded) tables inside ha_ndbinfo
# - three new views were added to preserve backwards compatibility
#   for those tables which were renamed with the ndb$ prefix

## Check that the new tables exist and return data.
select count(*) > 10 from ndb$blocks;
select param_number, param_name from ndb$config_params
  where param_name = "DataMemory";
select * from ndb$dict_obj_types where type_id < 3;
--disable_result_log
select * from ndb$dbtc_apiconnect_state;
select * from ndb$dblqh_tcconnect_state;
--enable_result_log

## Check that the new views exist and return data.
select block_number from blocks where block_name = "DBTC";
select param_name from config_params where param_number = 101;
select type_name from dict_obj_types where type_id = 3;

## All tables in ndbinfo/ should now be using engine=NDBINFO
select count(*) from information_schema.tables
  where table_schema = 'ndbinfo' and engine != 'ndbinfo';

#
# BUG#20075747 RND_INIT() ON AN OPEN SCAN IS USED TO REPOSITION THE CURSOR
# - rnd_init() on an open scan is used to reposition the "cursor"
#   back to the first row
#
set @@optimizer_switch='block_nested_loop=off';
create temporary table old_count
       select counter_name, sum(val) as val
       from ndbinfo.counters
       where block_name='DBSPJ'
       group by counter_name;
drop table old_count;
set @@optimizer_switch=default;

#
# Bug#16731538 MYSQLD CRITICAL FAILURE DURING ORDERED SELECT FROM NDBINFO.CLUSTER_OPERATIONS
#  - ha_ndbinfo::estimate_rows_upper_bound() returned 0 rows
#
--disable_result_log
select * from ndbinfo.config_params order by param_number;
--enable_result_log

#
# Bug#11762750 TABLE NDBINFO.CONFIG_PARAMS SHOULD BE READ-ONLY (FOR NOW)
#
# Checking the table
--error ER_OPEN_AS_READONLY
UPDATE ndb$config_params SET param_number=1 WHERE param_name = "NoOfReplicas";

# Checking the view
--error ER_OPEN_AS_READONLY
UPDATE config_params SET param_number=1 WHERE param_name = "NoOfReplicas";

