# The include statement below is a temporary one for tests that are yet to
# be ported to run with InnoDB, but needs to be kept for tests that would
# need MyISAM in the future.
--source include/force_myisam_default.inc

--source include/have_ndb.inc

--disable_warnings
drop table if exists t1;
drop database if exists test2;
--enable_warnings

#
# Minimal NDB blobs test.
#
# At the NDB API level there is an extensive test program "testBlobs".
# A prerequisite for this handler test is that "testBlobs" succeeds.
#

# -- general test starts --

# make test harder with autocommit off
set autocommit=0;
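# With autocommit off, the statements below run in explicit multi-statement
# transactions and only become durable at the commit points.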

create table t1 (
  a int not null primary key,
  b text not null,
  c int not null,
  d longblob,
  key (c)
) engine=ndbcluster;
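# Column roles: a is the primary key, b is a TEXT value, c carries the
# secondary index used by the index and range scan sections below, and d is
# a LONGBLOB used for the larger values.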

# -- values --

# x0 size 256 (current inline size)
set @x0 = '01234567012345670123456701234567';
set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
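# For reference: NDB stores the first bytes of a TEXT/BLOB value inline in
# the row and the remainder in a separate parts table. The 256 bytes built
# here are assumed to match that inline size, and the "2000+256" value below
# is assumed to fill the inline area plus exactly one blob part.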

# b1 length 2000+256 (blob part aligned)
set @b1 = 'b1';
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@x0);
# d1 length 3000
set @d1 = 'dd1';
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);

# b2 length 20000
set @b2 = 'b2';
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
# d2 length 30000
set @d2 = 'dd2';
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);

select length(@x0),length(@b1),length(@d1) from dual;
select length(@x0),length(@b2),length(@d2) from dual;

# -- pk ops --

insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
--replace_column 10 # 11 #
explain select * from t1 where a = 1;

# pk read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=1;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=2;

# pk update
update t1 set b=@b2,d=@d2 where a=1;
update t1 set b=@b1,d=@d1 where a=2;
commit;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=1;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=2;

# pk update
update t1 set b=concat(b,b),d=concat(d,d) where a=1;
update t1 set b=concat(b,b),d=concat(d,d) where a=2;
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 where a=1;
select a,length(b),substr(b,1+4*900,2),length(d),substr(d,1+6*900,3)
from t1 where a=2;

# pk update to null
update t1 set d=null where a=1;
commit;
select a from t1 where d is null;

# bug#24028 - does not occur at the MySQL level
# bug#17986 - not seen by us anymore, but could show up as a warning here
delete from t1 where a=45567;
commit;

# pk delete
delete from t1 where a=1;
delete from t1 where a=2;
commit;
select count(*) from t1;

# -- replace ( bug-6018 ) --

# insert
replace t1 set a=1,b=@b1,c=111,d=@d1;
replace t1 set a=2,b=@b2,c=222,d=@d2;
commit;
--replace_column 10 # 11 #
explain select * from t1 where a = 1;

# pk read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=1;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=2;

# update
replace t1 set a=1,b=@b2,c=111,d=@d2;
replace t1 set a=2,b=@b1,c=222,d=@d1;
commit;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=1;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=2;

# update
replace t1 set a=1,b=concat(@b2,@b2),c=111,d=concat(@d2,@d2);
replace t1 set a=2,b=concat(@b1,@b1),c=222,d=concat(@d1,@d1);
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 where a=1;
select a,length(b),substr(b,1+4*900,2),length(d),substr(d,1+6*900,3)
from t1 where a=2;

# update to null
replace t1 set a=1,b='xyz',c=111,d=null;
commit;
select a,b from t1 where d is null;

# pk delete
delete from t1 where a=1;
delete from t1 where a=2;
commit;
select count(*) from t1;

# -- hash index ops --

insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
--replace_column 10 # 11 #
explain select * from t1 where c = 111;

# hash key read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c=111;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where c=222;

# hash key update
update t1 set b=@b2,d=@d2 where c=111;
update t1 set b=@b1,d=@d1 where c=222;
commit;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where c=111;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c=222;

# hash key update to null
update t1 set d=null where c=111;
commit;
select a from t1 where d is null;

# hash key delete
delete from t1 where c=111;
delete from t1 where c=222;
commit;
select count(*) from t1;

# -- table scan ops, short values --

insert into t1 values(1,'b1',111,'dd1');
insert into t1 values(2,'b2',222,'dd2');
insert into t1 values(3,'b3',333,'dd3');
insert into t1 values(4,'b4',444,'dd4');
insert into t1 values(5,'b5',555,'dd5');
insert into t1 values(6,'b6',666,'dd6');
insert into t1 values(7,'b7',777,'dd7');
insert into t1 values(8,'b8',888,'dd8');
insert into t1 values(9,'b9',999,'dd9');
commit;
--replace_column 10 # 11 #
explain select * from t1;

# table scan read
select * from t1 order by a;

# table scan update
update t1 set b=concat(a,'x',b),d=concat(a,'x',d);
commit;
select * from t1 order by a;

# table scan delete
delete from t1;
commit;
select count(*) from t1;

# -- table scan ops, long values --

insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
--replace_column 10 # 11 #
explain select * from t1;

# table scan read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 order by a;

# table scan update
update t1 set b=concat(b,b),d=concat(d,d);
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 order by a;

# table scan delete
delete from t1;
commit;
select count(*) from t1;

# -- range scan ops, short values --

insert into t1 values(1,'b1',111,'dd1');
insert into t1 values(2,'b2',222,'dd2');
insert into t1 values(3,'b3',333,'dd3');
insert into t1 values(4,'b4',444,'dd4');
insert into t1 values(5,'b5',555,'dd5');
insert into t1 values(6,'b6',666,'dd6');
insert into t1 values(7,'b7',777,'dd7');
insert into t1 values(8,'b8',888,'dd8');
insert into t1 values(9,'b9',999,'dd9');
commit;
--replace_column 10 # 11 #
explain select * from t1 where c >= 100 order by a;

# range scan read
select * from t1 where c >= 100 order by a;

# range scan update
update t1 set b=concat(a,'x',b),d=concat(a,'x',d)
where c >= 100;
commit;
select * from t1 where c >= 100 order by a;

# range scan delete
delete from t1 where c >= 100;
commit;
select count(*) from t1;

# -- range scan ops, long values --

insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
--replace_column 10 # 11 #
explain select * from t1 where c >= 100 order by a;

# range scan read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c >= 100 order by a;

# range scan update
update t1 set b=concat(b,b),d=concat(d,d);
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 where c >= 100 order by a;

# range scan delete
delete from t1 where c >= 100;
commit;
select count(*) from t1;

# -- rollback --

insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
# 626 (NDB: tuple did not exist)
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 0;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 1;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 2;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 order by a;
rollback;
select count(*) from t1;

# -- alter table and multi db --

insert into t1 values(1,'b1',111,'dd1');
insert into t1 values(2,'b2',222,'dd2');
insert into t1 values(3,'b3',333,'dd3');
insert into t1 values(4,'b4',444,'dd4');
insert into t1 values(5,'b5',555,'dd5');
insert into t1 values(6,'b6',666,'dd6');
insert into t1 values(7,'b7',777,'dd7');
insert into t1 values(8,'b8',888,'dd8');
insert into t1 values(9,'b9',999,'dd9');
commit;

select * from t1 order by a;
--disable_warnings
alter table t1 add x int;
--enable_warnings
select * from t1 order by a;
alter table t1 drop x;
select * from t1 order by a;

create database test2;
use test2;

CREATE TABLE t2 (
  a bigint unsigned NOT NULL PRIMARY KEY,
  b int unsigned not null,
  c int unsigned
) engine=ndbcluster;

insert into t2 values (1,1,1),(2,2,2);
select * from test.t1,t2 where test.t1.a = t2.a order by test.t1.a;

drop table t2;
use test;

select * from t1 order by a;
--disable_warnings
alter table t1 add x int;
--enable_warnings
select * from t1 order by a;
alter table t1 drop x;
select * from t1 order by a;

# -- end general test --

drop table t1;
drop database test2;

# -- bug-5252 tinytext crashes + no-commit result + replace --

set autocommit=0;
create table t1 (
  a int not null primary key,
  b tinytext
) engine=ndbcluster;

insert into t1 values(1, 'x');
update t1 set b = 'y';
select * from t1;
delete from t1;
select * from t1;
commit;
replace t1 set a=2, b='y';
select * from t1;
delete from t1;
select * from t1;
drop table t1;

# -- bug-5013 insert empty string to text --

set autocommit=0;
create table t1 (
  a int not null primary key,
  b text not null
) engine=ndbcluster;

insert into t1 values(1, '');
select * from t1;
commit;

# -- bug #35593 Memory leak in failed NDB execute() with blobs.
connect (con1,localhost,root,,test);
connect (con2,localhost,root,,test);

# Force a deadlock.

connection con2;
begin;
insert into t1 values (3, repeat("w", 8000));
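# con2 now holds an uncommitted row with key 3; the batched insert from con1
# below also uses key 3, so it is expected to time out waiting for the row
# lock and fail with error 1205 (lock wait timeout).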

connection con1;
--error 1205
insert into t1 values (2, repeat("x", 10000)),
                      (3, repeat("y", 5000)),
                      (4, repeat("z", 15000));
rollback;

connection con2;
commit;

drop table t1;

# -- bug #5349 --
set autocommit=1;
use test;
CREATE TABLE t1 (
  a int,
  b text,
  PRIMARY KEY  (a)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
 
INSERT INTO t1 VALUES 
(1,'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA');
INSERT INTO t1 VALUES 
(2,'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB');

select * from t1 order by a;
alter table t1 engine=ndb;
select * from t1 order by a;

# -- bug #5872 --
set autocommit=1;
alter table t1 engine=myisam;
select * from t1 order by a;
drop table t1;

# -- bug #7340 --
create table t1 (
  id int(11) unsigned primary key NOT NULL auto_increment,
  msg text NOT NULL
) engine=ndbcluster default charset=utf8;
insert into t1 (msg) values(
'Tries to validate (8 byte length + inline bytes) as UTF8 :(
Fast fix: removed validation for Text.  It is not yet indexable
so bad data will not crash kernel.');
select * from t1;
drop table t1;

# -- bug #19201
create table t1 (
  a int primary key not null auto_increment,
  b text
) engine=ndbcluster;
--disable_query_log
set autocommit=1;
# more rows than batch size (64)
# for this bug no blob parts would be necessary
let $1 = 500;
while ($1)
{
  insert into t1 (b) values (repeat('x',4000));
  dec $1;
}
--enable_query_log
select count(*) from t1;
truncate t1;
select count(*) from t1;
drop table t1;

# -- bug#19956 - var* key, complex key

create table t1 (
  a varchar(40) not null,
  b mediumint not null,
  t text,
  c varchar(2) not null,
  d bigint not null,
  primary key (a,b,c),
  key (c,a),
  unique key (d)
) engine=ndb;

--disable_query_log
set @s1 = 'rggurloniukyehuxdbfkkyzlceixzrehqhvxvxbpwizzvjzpucqmzrhzxzfau';
set @s2 = 'ykyymbzqgqlcjhlhmyqelfoaaohvtbekvifukdtnvcrrjveevfakxarxexomz';
set @s3 = 'dbnfqyzgtqxalcrwtfsqabknvtfcbpoonxsjiqvmhnfikxxhcgoexlkoezvah';
set @v1 = repeat(@s1,123);
set @v2 = repeat(@s2,234);
set @v3 = repeat(@s3,345);
set @v4 = NULL;
--enable_query_log

insert into t1 (a,b,c,d,t) values ('a',1110,'a',1,@v1);
insert into t1 (a,b,c,d,t) values ('b',1110,'a',2,@v2);
insert into t1 (a,b,c,d,t) values ('a',1110,'b',3,@v3);
insert into t1 (a,b,c,d,t) values ('b',1110,'b',4,@v4);
select a,b,c,d,sha1(t) from t1 order by c,a;

select a,b,c,d,sha1(t) from t1 where a='a' and b=1110 and c='a';
select a,b,c,d,sha1(t) from t1 where a='a' and b=1110 and c='b';

update t1 set t=@v4 where a='b' and b=1110 and c='a';
update t1 set t=@v2 where a='b' and b=1110 and c='b';
select a,b,c,d,sha1(t) from t1 order by c,a;

update t1 set t=@v2 where d=2;
update t1 set t=@v4 where d=4;
select a,b,c,d,sha1(t) from t1 order by c,a;

update t1 set t=@v4 where a='b' and c='a';
update t1 set t=@v2 where a='b' and c='b';
select a,b,c,d,sha1(t) from t1 order by c,a;

update t1 set t=@v2 where b+d=1112;
update t1 set t=@v4 where b+d=1114;
select a,b,c,d,sha1(t) from t1 order by c,a;

delete from t1 where a='a' and b=1110 and c='a';
delete from t1 where a='b' and c='a';
delete from t1 where d=3;
delete from t1 where b+d=1114;
select count(*) from t1;

drop table t1;

# End of 4.1 tests


# bug # 30674 :
#   NOT NULL BLOBs should default to zero length.  NOT NULL TEXT
#   should default to zero characters.
create table t1(
  a int,
  blob_nn blob not null,
  text_nn text not null,
  blob_nl blob,
  text_nl text,
  primary key(a)
) engine=ndb;

# NOTE! using IGNORE to force the two inserts despite the warnings
insert ignore into t1(a) values (1);
insert ignore into t1(a, text_nl) values (2, 'MySQL Cluster NDB');

select a, length(blob_nn), length(text_nn), blob_nl, text_nl from t1 order by a;

drop table t1;

# bug # 36756
#  The behaviour of a delete of a non-existing row should not be affected
#  by the presence of blob operations in the same transaction.
#  Specifically, the transaction should not be aborted.
create table `t1` (
  `f1` int(11) not null default -1,
  `f11` text,
  unique key `i1` (`f1`)
) engine=ndbcluster default charset=utf8;

insert into `t1` values (123,null);

create table `t2` (
  `f1` int(11) not null default -1,
  unique key `i2` (`f1`)
) engine=ndbcluster default charset=utf8;

begin;
delete from t2 where f1=5;   # No such row, no problem
delete from t1 where f1=123; # Blob operation
delete from t2 where f1=5;   # No such row, no problem
commit;
show warnings;
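# A hedged follow-up check: the commit above should have succeeded, so the
# blob row is gone and the deletes of the non-existing t2 row must not have
# marked the transaction as aborted.
select count(*) from t1;
select count(*) from t2;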

drop table t1;
drop table t2;

# bug # 31284
#   Long Blob (8000 byte parts) with Blob v2 (+2 bytes length)
#   exposed a buffer overrun in the old API setValue() call used for
#   writing parts.
#   Check that data written into a long Blob can be retrieved
#   correctly.
create table t1 (
  a    int primary key,
  giga longblob
) engine=ndb;

# length 10
set @stuff = '1234567890';

# 20,000 bytes = 3 parts
insert into t1 values (0, repeat(@stuff, 2000));

# Check that we get the same data back that we put in
select sha1(repeat(@stuff, 2000));
select sha1(giga) from t1;
select (giga = repeat(@stuff, 2000)) from t1 where a=0;
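# A rough sanity check of the part-count arithmetic in the comment above
# (the 256-byte inline prefix and the 8000-byte part size are assumptions
# taken from the bug description, not verified here):
select ceiling((20000 - 256) / 8000) as assumed_part_count;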

drop table t1;

--echo Test that automatic lock upgrade on Blobs behaves as expected
--echo w.r.t other operations on the table.
connection con1;

create table t1 (a int primary key, b longblob, c int, unique(c)) engine=ndb;

insert into t1 values (1, repeat('1', 256), 1);
insert into t1 values (2, repeat('2', 2256), 2);
insert into t1 values (3, repeat('3', 4256), 3);
insert into t1 values (4, repeat('4', 8256), 4);
insert into t1 values (5, repeat('5', 20000), 5);
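# The value lengths (256, 2256, 4256, 8256, 20000) are assumed to be chosen
# around the inline and part-size boundaries so that the lock upgrade paths
# below touch different numbers of blob parts.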

--echo No Deadlock scenarios
--echo Transaction 1 reading PK with no lock.  Transaction 2 reading PK with EX lock
begin;
select a, length(b) from t1 where a=2;

connection con2;
select a, length(b) from t1 where a=2 FOR UPDATE;

--echo Transaction 2 scanning with EX lock - will not deadlock with transaction 1
select a, length(b) from t1 ORDER BY a FOR UPDATE;

connection con1;
commit;

--echo Transaction 1 reading PK with SH lock.  Transaction 2 reading PK with no lock
begin;
select a, length(b) from t1 where a=3 LOCK IN SHARE MODE;

connection con2;
select a, length(b) from t1 where a=3;

--echo Transaction 2 scanning with SH lock - will not deadlock with transaction 1
select a, length(b) from t1 ORDER BY a;

connection con1;
commit;

--echo Transaction 1 reading PK with SH lock.  Transaction 2 reading PK with SH lock
begin;
select a, length(b) from t1 where a=4 LOCK IN SHARE MODE;

connection con2;
select a, length(b) from t1 where a=4 LOCK IN SHARE MODE;

--echo Transaction 2 scanning with SH lock - will not deadlock with transaction 1
select a, length(b) from t1 ORDER BY a LOCK IN SHARE MODE;

connection con1;
commit;

--echo Deadlock scenarios on PK access:
connection con1;
--echo Transaction 1 reading PK with EX lock.  Transaction 2 reading PK with no lock
begin;
select a, length(b) from t1 where a=5 FOR UPDATE;

connection con2;
--error 1205
select a, length(b) from t1 where a=5;

connection con1;
commit;

connection con1;
--echo Transaction 1 reading PK with EX lock.  Transaction 2 reading PK with SH lock
begin;
select a, length(b) from t1 where a=1 FOR UPDATE;

connection con2;
--error 1205
select a, length(b) from t1 where a=1 LOCK IN SHARE MODE;

connection con1;
commit;
--echo Verify that Nolock Unique index access still deadlocks
--echo as unique index accesses always upgrade to SH

begin;
select a, length(b) from t1 where c=2;
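# The no-lock read via the unique index on c is upgraded to a shared lock on
# the base row (as described above), so the exclusive scan from con2 below
# must wait for it and is expected to time out with error 1205.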

connection con2;
--error 1205
select a, length(b) from t1 FOR UPDATE;

connection con1;
commit;

connection con1;
drop table t1;

--echo 5.6 tests

# Bug#16749788
#   ASSERTION: !USES_BLOB_VALUE(TABLE->READ_SET) IN MULTI_RANGE_START_RETRIEVALS
#   table->read_set was calculated too late to detect the Blob read.

create table `t1` (`a` int,`b` blob,primary key (`a`)) engine=ndbcluster;
insert into t1 values (2, 'Hei og hopp! ');

update t1 set b=concat(b,b,b,b,b);
update t1 set b=concat(b,b,b,b,b);
update t1 set b=concat(b,b,b,b,b);
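# After three five-fold concatenations the value is 13 * 5 * 5 * 5 = 1625
# bytes, large enough to spill out of the assumed 256-byte inline area into
# the blob parts.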

# Expect use of the (default) MRR implementation for the update

explain update `t1` set `a`=1 where `a`!= 1;
update `t1` set `a`=1 where `a`!= 1;
select * from t1;

drop table t1;

--echo End of 5.6 tests
