################################################################################
#
# INSERT
#
# INSERT and commit new rows, using the constant 40 for most values.
# For each new transaction the constant is increased by 1 (40, 41, 42, ...).
#
# This test runs a number of consecutive transactions to generate high
# concurrency:
#
# Tx 1:
# - multi-statement insert, inserting first a positive then a negative number (0-sum).
#
# Tx 2:
# - insert multiple rows using a single statement.
#
# Tx 3:
# - INSERT IGNORE using both known duplicate values and non-duplicates.
#
# Net effect: 6 more rows
#
# In this test we need valid, unique integer values for the columns with
# unique indexes.
#
# Alternatively:
# - Set the unique value to 0 and roll back on ER_DUP_ENTRY (see
#   check_for_error_rollback.inc and the sketch after this list), then make
#   sure to UPDATE rows where the unique value is 0 in other tests.
# - OR: insert NULL (requires special handling when calculating row sums in
# other tests).
# - OR: skip unique indexes entirely (except `pk`) (remove from t1 in init).
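#
# A minimal, commented-out sketch of the first alternative (illustrative only;
# it assumes the same column list as the statements below and that
# check_for_error_rollback.inc rolls back the transaction if the previous
# statement failed):
#
#   --error 0, ER_DUP_ENTRY, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
#   INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
#                   `int2`, `int2_key`, `int2_unique`,
#                   `for_update`, `connection_id`, `thread_id`,
#                   `is_uncommitted`, `is_consistent`)
#   VALUES (40, 40, 40, 0, -40, -40, 0, 0, CONNECTION_ID(), 0, 0, 1);
#   --source suite/engines/rr_trx/include/check_for_error_rollback.inc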
#
# We use CONNECTION_ID() (switching sign and adding small offsets, up to +3) as
# the unique value, meaning that some of the INSERTs will fail with a duplicate
# key error until the connection id is high enough (this should not take long
# with a relatively high number of threads and some duration, given that the
# number of initial rows is relatively low, ~1000). Let's just say this is a
# warm-up period.
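#
# Concretely, the unique values used by the transactions below are:
#   Tx 1: +/-CONNECTION_ID()
#   Tx 2: +/-(CONNECTION_ID()+1) and +/-(CONNECTION_ID()+2)
#   Tx 3: +/-(CONNECTION_ID()+3), +/-((CONNECTION_ID()+1000) MOD 5000), and
#         CONNECTION_ID() again for the row that is meant to be discarded as a
#         primary key duplicate.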
#
# Alternatively, add some random integer to the value or use UNIX_TIMESTAMP()
# (the latter requires that some care is taken in subsequent updates etc. For
# example, simply doubling the value will cause overflow/truncation).
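#
# E.g. (illustrative only, not used in this test; the exact offsets are
# arbitrary):
#   +/-(CONNECTION_ID() + FLOOR(RAND() * 10000))   -- random offset
#   +/-UNIX_TIMESTAMP()                            -- time based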
#
# No need to ROLLBACK if all statements in a transaction by themselves are
# consistent.
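#
# (If a single statement could leave the data inconsistent on failure, the
# pattern would be the one used at the end of transaction 3:
#   --source suite/engines/rr_trx/include/check_for_error_rollback.inc
# which, per the alternative described above, rolls back on error.)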
#
#
################################################################################
SET autocommit = 0;
START TRANSACTION;
--echo
--echo *** multi-statement insert, inserting first positive then negative number:
--echo
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (40, 40, 40, CONNECTION_ID(),
-40, -40, -CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 1);
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (-40, -40, -40, -CONNECTION_ID(),
40, 40, CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 1);
COMMIT;
START TRANSACTION;
--echo
--echo *** insert multiple rows using a single statement:
--echo
# First row is by itself consistent (sum = 0). Row 3 zero-sums row 2, so the
# statement itself is consistent.
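# (Row 1: 41 + (-41) = 0; rows 2 and 3 together: (41 + 41) + (-41 - 41) = 0.)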
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (41, 41, 41, CONNECTION_ID()+1,
-41, -41, -(CONNECTION_ID()+1),
0, CONNECTION_ID(), 0, 0, 1),
(41, 41, 41, CONNECTION_ID()+2,
41, 41, CONNECTION_ID()+2,
0, CONNECTION_ID(), 0, 0, 0),
(41, -41, -41, -(CONNECTION_ID()+2),
-41, -41, -(CONNECTION_ID()+2),
0, CONNECTION_ID(), 0, 0, 0);
COMMIT;
START TRANSACTION;
--echo
--echo *** INSERT IGNORE using both known duplicate values and non-duplicates:
--echo
# This MAY be discarded (duplicate entry in a UNIQUE index); it should succeed
# once CONNECTION_ID() is high enough (see the int*_unique columns).
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT, ER_DUP_ENTRY
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (42, 42, 42, CONNECTION_ID()+3,
-42, -42, -(CONNECTION_ID()+3),
0, CONNECTION_ID(), 0, 0, 1);
# This WILL (SHOULD) be discarded (duplicate entry for the primary key).
# pk values that are 1000 or less AND divisible by 5 should all be present
# (i.e. never deleted), so we pick pk 5.
# Note that we insert an inconsistent row, so it will show up as a sum anomaly
# if it succeeds.
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
INSERT IGNORE INTO t1 (`pk`, `id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (5, 43, 42, 42, CONNECTION_ID(),
-42, -42, CONNECTION_ID(),
0, CONNECTION_ID(), 0, 0, 0);
# This MAY be discarded (duplicate entry in a UNIQUE index).
--error 0, ER_LOCK_DEADLOCK, ER_LOCK_WAIT_TIMEOUT
INSERT IGNORE INTO t1 (`id`, `int1`, `int1_key`, `int1_unique`,
`int2`, `int2_key`, `int2_unique`,
`for_update`, `connection_id`, `thread_id`, `is_uncommitted`, `is_consistent`)
VALUES (44, 42, 42, (CONNECTION_ID() + 1000) MOD 5000,
-42, -42, -((CONNECTION_ID() + 1000) MOD 5000),
0, CONNECTION_ID(), 0, 0, 1);
--source suite/engines/rr_trx/include/check_for_error_rollback.inc
COMMIT;