should be it
16
external/duckdb/test/sql/parallelism/interquery/CMakeLists.txt
vendored
Normal file
@@ -0,0 +1,16 @@
add_library_unity(
  test_sql_interquery_parallelism
  OBJECT
  concurrent_attach_detach.cpp
  concurrent_checkpoint.cpp
  test_concurrentappend.cpp
  test_concurrentdelete.cpp
  test_concurrent_dependencies.cpp
  test_concurrent_index.cpp
  test_concurrentupdate.cpp
  test_concurrent_sequence.cpp
  test_concurrent_prepared.cpp
  test_default_catalog.cpp)
set(ALL_OBJECT_FILES
    ${ALL_OBJECT_FILES} $<TARGET_OBJECTS:test_sql_interquery_parallelism>
    PARENT_SCOPE)
92
external/duckdb/test/sql/parallelism/interquery/concurrent_append_metadata_queries.test_slow
vendored
Normal file
@@ -0,0 +1,92 @@
# name: test/sql/parallelism/interquery/concurrent_append_metadata_queries.test_slow
# description: Run metadata queries while appending/checkpointing
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

statement ok
CREATE INDEX i_index ON integers(i);

statement ok
CREATE VIEW v1 AS FROM integers;

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
CREATE SCHEMA s1;

onlyif threadid=0
statement ok
CREATE TABLE s1.tbl(i INTEGER);

onlyif threadid=0
statement ok
CREATE INDEX i_index ON s1.tbl(i);

onlyif threadid=0
statement ok
INSERT INTO s1.tbl FROM range(10000);

onlyif threadid=0
statement ok
INSERT INTO integers SELECT * FROM range(10000 + ${i} * 100, 10100 + ${i} * 100);

onlyif threadid=0
statement ok
DROP TABLE s1.tbl;

onlyif threadid=0
statement ok
DROP SCHEMA s1;

endloop

loop i 0 100

skipif threadid=0
statement ok
FROM duckdb_tables();

skipif threadid=0
statement ok
FROM duckdb_indexes();

skipif threadid=0
statement ok
FROM duckdb_schemas();

skipif threadid=0
statement ok
FROM pragma_metadata_info();

skipif threadid=0
statement ok
FROM pragma_storage_info('integers');

skipif threadid=0
statement ok
FROM pragma_table_info('integers');

skipif threadid=0
statement ok
FROM duckdb_dependencies();

skipif threadid=0
statement ok
FROM duckdb_temporary_files();

endloop

endloop

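# Expected: thread 0 appends 20 batches of 100 rows on top of the initial 10000,
# giving 12000 rows with values 0..11999, so SUM(i) = 11999 * 12000 / 2 = 71994000.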
query II
SELECT COUNT(*), SUM(i) FROM integers
----
12000 71994000
39
external/duckdb/test/sql/parallelism/interquery/concurrent_append_transactions.test_slow
vendored
Normal file
@@ -0,0 +1,39 @@
# name: test/sql/parallelism/interquery/concurrent_append_transactions.test_slow
# description: Test concurrent appends and transaction isolation
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

concurrentloop threadid 0 10

statement ok
BEGIN TRANSACTION

statement ok
CREATE TABLE count_table_${threadid} AS SELECT * FROM integers

loop i 0 100

statement ok
INSERT INTO integers VALUES (${threadid} * 10000 + ${i})

# verify that we inserted exactly one element in this transaction
query I
SELECT * FROM integers EXCEPT (SELECT * FROM count_table_${threadid} UNION SELECT ${threadid} * 10000 + ${i})
----

statement ok
CREATE OR REPLACE TABLE count_table_${threadid} AS (SELECT * FROM count_table_${threadid} UNION SELECT ${threadid} * 10000 + ${i})

endloop

statement ok
COMMIT

endloop

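# Expected: 10 threads x 100 inserts of distinct values threadid * 10000 + i,
# so COUNT(*) = 1000 and SUM(i) = 100 * 10000 * (0+...+9) + 10 * 4950 = 45049500.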
query II
SELECT COUNT(*), SUM(i) FROM integers
----
1000 45049500
36
external/duckdb/test/sql/parallelism/interquery/concurrent_append_transactions_indexes.test_slow
vendored
Normal file
@@ -0,0 +1,36 @@
# name: test/sql/parallelism/interquery/concurrent_append_transactions_indexes.test_slow
# description: Test concurrent appends and transaction isolation with a primary-key index
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER PRIMARY KEY)

concurrentloop threadid 0 10

statement ok
BEGIN TRANSACTION

statement ok
CREATE TABLE count_table_${threadid} AS SELECT COUNT(*) AS count FROM integers WHERE i >= 0

loop i 0 100

statement ok
INSERT INTO integers VALUES (${threadid} * 1000 + ${i})

# verify that we inserted exactly one element in this transaction
query II
SELECT COUNT(*), COUNT(DISTINCT i) FROM integers WHERE i >= 0 EXCEPT SELECT count+${i}+1, count+${i}+1 FROM count_table_${threadid}
----

endloop

statement ok
COMMIT

endloop

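# Expected: the 10 threads insert disjoint key ranges threadid * 1000 + 0..99,
# so all 1000 keys are distinct and COUNT(*) = COUNT(DISTINCT i) = 1000.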
query II
SELECT COUNT(*), COUNT(DISTINCT i) FROM integers
----
1000 1000
19
external/duckdb/test/sql/parallelism/interquery/concurrent_appends.test
vendored
Normal file
@@ -0,0 +1,19 @@
# name: test/sql/parallelism/interquery/concurrent_appends.test
# description: Test concurrent small appends to persistent storage
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

concurrentloop threadid 0 20

statement ok
INSERT INTO integers SELECT * FROM range(100);

endloop

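# Expected: 20 threads each append range(100): 2000 rows, SUM(i) = 20 * 4950 = 99000.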
query II
SELECT COUNT(*), SUM(i) FROM integers
----
2000 99000
498
external/duckdb/test/sql/parallelism/interquery/concurrent_attach_detach.cpp
vendored
Normal file
@@ -0,0 +1,498 @@
#include "catch.hpp"
#include "duckdb/common/atomic.hpp"
#include "duckdb/common/map.hpp"
#include "duckdb/common/mutex.hpp"
#include "duckdb/common/vector.hpp"
#include "duckdb/common/optional_idx.hpp"
#include "test_helpers.hpp"

#include <unordered_set>
#include <thread>

using namespace duckdb;

enum class AttachTaskType { CREATE_TABLE, LOOKUP, APPEND, APPLY_CHANGES, DESCRIBE_TABLE, CHECKPOINT };

namespace {

string test_dir_path;
const string prefix = "db_";
const string suffix = ".db";

string getDBPath(idx_t i) {
	return test_dir_path + "/" + prefix + to_string(i) + suffix;
}

string getDBName(idx_t i) {
	return prefix + to_string(i);
}

const idx_t db_count = 10;
const idx_t worker_count = 40;
const idx_t iteration_count = 100;
const idx_t nr_initial_rows = 2050;

vector<vector<string>> logging;
atomic<bool> success {true};

duckdb::unique_ptr<MaterializedQueryResult> execQuery(Connection &conn, const string &query) {
	auto result = conn.Query(query);
	if (result->HasError()) {
		Printer::PrintF("Failed to execute query %s:\n------\n%s\n-------", query, result->GetError());
		success = false;
	}
	return result;
}

struct TableInfo {
	idx_t size;
};

struct DBInfo {
	mutex mu;
	idx_t table_count = 0;
	vector<TableInfo> tables;
};

struct AttachTask {
	AttachTaskType type;
	duckdb::optional_idx db_id;
	duckdb::optional_idx tbl_id;
	duckdb::optional_idx tbl_size;
	std::vector<idx_t> ids;
	bool actual_describe = false;
};

struct AttachWorker;

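// DBPoolMgr reference-counts attached databases: the first worker that needs
// db_i attaches it, and the last worker to finish with it detaches it again.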
class DBPoolMgr {
public:
	mutex mu;
	map<idx_t, idx_t> m;

	void addWorker(AttachWorker &worker, const idx_t i);
	void removeWorker(AttachWorker &worker, const idx_t i);

	DBInfo db_infos[db_count];
};

struct AttachWorker {
public:
	AttachWorker(DuckDB &db, idx_t worker_id, vector<string> &logs, DBPoolMgr &db_pool)
	    : conn(db), worker_id(worker_id), logs(logs), db_pool(db_pool) {
	}

public:
	duckdb::unique_ptr<MaterializedQueryResult> execQuery(const string &query) {
		return ::execQuery(conn, query);
	}
	void Work();

private:
	AttachTask RandomTask();
	void createTbl(AttachTask &task);
	void lookup(AttachTask &task);
	void append_internal(AttachTask &task);
	void append(AttachTask &task);
	void delete_internal(AttachTask &task);
	void apply_changes(AttachTask &task);
	void describe_tbl(AttachTask &task);
	void checkpoint_db(AttachTask &task);
	void GetRandomTable(AttachTask &task);
	void addLog(const string &msg) {
		logs.push_back(msg);
	}

public:
	Connection conn;
	idx_t worker_id;
	vector<string> &logs;
	DBPoolMgr &db_pool;
};

void DBPoolMgr::addWorker(AttachWorker &worker, const idx_t i) {
	lock_guard<mutex> lock(mu);

	if (m.find(i) != m.end()) {
		m[i]++;
		return;
	}
	m[i] = 1;

	string query = "ATTACH '" + getDBPath(i) + "'";
	worker.execQuery(query);
}

void DBPoolMgr::removeWorker(AttachWorker &worker, const idx_t i) {
	lock_guard<mutex> lock(mu);

	m[i]--;
	if (m[i] != 0) {
		return;
	}

	m.erase(i);
	string query = "DETACH " + getDBName(i);
	worker.execQuery(query);
}

void AttachWorker::createTbl(AttachTask &task) {
	auto db_id = task.db_id.GetIndex();
	auto &db_infos = db_pool.db_infos;
	lock_guard<mutex> lock(db_infos[db_id].mu);
	auto tbl_id = db_infos[db_id].table_count;
	db_infos[db_id].tables.emplace_back(TableInfo {nr_initial_rows});
	db_infos[db_id].table_count++;

	string tbl_path = StringUtil::Format("%s.tbl_%d", getDBName(db_id), tbl_id);
	string create_sql = StringUtil::Format(
	    "CREATE TABLE %s(i BIGINT PRIMARY KEY, s VARCHAR, ts TIMESTAMP, obj STRUCT(key1 UBIGINT, key2 VARCHAR))",
	    tbl_path);
	addLog("; q: " + create_sql);
	execQuery(create_sql);
	string insert_sql = "INSERT INTO " + tbl_path +
	                    " SELECT "
	                    "range::UBIGINT AS i, "
	                    "range::VARCHAR AS s, "
	                    // Note: We increment timestamps by 1 millisecond (i.e., 1000 microseconds).
	                    "epoch_ms(range) AS ts, "
	                    "{'key1': range::UBIGINT, 'key2': range::VARCHAR} AS obj "
	                    "FROM range(" +
	                    to_string(nr_initial_rows) + ")";
	addLog("; q: " + insert_sql);
	execQuery(insert_sql);
}

void AttachWorker::lookup(AttachTask &task) {
	if (!task.tbl_id.IsValid()) {
		return;
	}
	auto db_id = task.db_id.GetIndex();
	auto tbl_id = task.tbl_id.GetIndex();
	auto expected_max_val = task.tbl_size.GetIndex() - 1;

	// Run the query.
	auto table_name = getDBName(db_id) + ".tbl_" + to_string(tbl_id);
	string query = "SELECT i, s, ts, obj FROM " + table_name + " WHERE i = " + to_string(expected_max_val);
	addLog("q: " + query);
	auto result = execQuery(query);
	if (result->RowCount() == 0) {
		addLog("FAILURE - No rows returned from query");
		success = false;
	}
	if (!CHECK_COLUMN(result, 0, {Value::UBIGINT(expected_max_val)})) {
		success = false;
		return;
	}
	if (!CHECK_COLUMN(result, 1, {to_string(expected_max_val)})) {
		success = false;
		return;
	}
	if (!CHECK_COLUMN(result, 2, {Value::TIMESTAMP(timestamp_t {static_cast<int64_t>(expected_max_val * 1000)})})) {
		success = false;
		return;
	}
	if (!CHECK_COLUMN(
	        result, 3,
	        {Value::STRUCT({{"key1", Value::UBIGINT(expected_max_val)}, {"key2", to_string(expected_max_val)}})})) {
		success = false;
		return;
	}
}

void AttachWorker::append_internal(AttachTask &task) {
	auto db_id = task.db_id.GetIndex();
	auto tbl_id = task.tbl_id.GetIndex();
	auto tbl_str = "tbl_" + to_string(tbl_id);
	// set appender
	addLog("db: " + getDBName(db_id) + "; table: " + tbl_str + "; append rows");

	try {
		Appender appender(conn, getDBName(db_id), DEFAULT_SCHEMA, tbl_str);
		DataChunk chunk;

		child_list_t<LogicalType> struct_children;
		struct_children.emplace_back(make_pair("key1", LogicalTypeId::UBIGINT));
		struct_children.emplace_back(make_pair("key2", LogicalTypeId::VARCHAR));

		const vector<LogicalType> types = {LogicalType::UBIGINT, LogicalType::VARCHAR, LogicalType::TIMESTAMP,
		                                   LogicalType::STRUCT(struct_children)};

		// fill up datachunk
		chunk.Initialize(*conn.context, types);
		// int
		auto &col_ubigint = chunk.data[0];
		auto data_ubigint = FlatVector::GetData<uint64_t>(col_ubigint);
		// varchar
		auto &col_varchar = chunk.data[1];
		auto data_varchar = FlatVector::GetData<string_t>(col_varchar);
		// timestamp
		auto &col_ts = chunk.data[2];
		auto data_ts = FlatVector::GetData<timestamp_t>(col_ts);
		// struct
		auto &col_struct = chunk.data[3];
		auto &data_struct_entries = StructVector::GetEntries(col_struct);
		auto &entry_ubigint = data_struct_entries[0];
		auto data_struct_ubigint = FlatVector::GetData<uint64_t>(*entry_ubigint);
		auto &entry_varchar = data_struct_entries[1];
		auto data_struct_varchar = FlatVector::GetData<string_t>(*entry_varchar);

		for (idx_t i = 0; i < task.ids.size(); i++) {
			auto row_idx = task.ids[i];
			data_ubigint[i] = row_idx;
			data_varchar[i] = StringVector::AddString(col_varchar, to_string(row_idx));
			data_ts[i] = timestamp_t {static_cast<int64_t>(1000 * (row_idx))};
			data_struct_ubigint[i] = row_idx;
			data_struct_varchar[i] = StringVector::AddString(*entry_varchar, to_string(row_idx));
		}

		chunk.SetCardinality(task.ids.size());
		appender.AppendDataChunk(chunk);
		appender.Close();

	} catch (const std::exception &e) {
		addLog("Caught exception when using Appender: " + string(e.what()));
		success = false;
		return;
	} catch (...) {
		addLog("Caught error when using Appender!");
		success = false;
		return;
	}
}

void AttachWorker::append(AttachTask &task) {
	if (!task.tbl_id.IsValid()) {
		return;
	}
	auto db_id = task.db_id.GetIndex();
	auto tbl_id = task.tbl_id.GetIndex();
	auto &db_infos = db_pool.db_infos;
	lock_guard<mutex> lock(db_infos[db_id].mu);
	auto current_num_rows = db_infos[db_id].tables[tbl_id].size;
	idx_t append_count = STANDARD_VECTOR_SIZE;

	for (idx_t i = 0; i < append_count; i++) {
		task.ids.push_back(current_num_rows + i);
	}

	append_internal(task);
	db_infos[db_id].tables[tbl_id].size += append_count;
}

void AttachWorker::delete_internal(AttachTask &task) {
	auto db_id = task.db_id.GetIndex();
	auto tbl_id = task.tbl_id.GetIndex();
	auto &ids = task.ids;
	auto tbl_str = "tbl_" + to_string(tbl_id);

	string delete_list;
	for (auto delete_idx : ids) {
		if (!delete_list.empty()) {
			delete_list += ", ";
		}
		delete_list += "(" + to_string(delete_idx) + ")";
	}
	string delete_sql =
	    StringUtil::Format("WITH ids (id) AS (VALUES %s) DELETE FROM %s.%s.%s AS t USING ids WHERE t.i = ids.id",
	                       delete_list, getDBName(db_id), DEFAULT_SCHEMA, tbl_str);
	addLog("q: " + delete_sql);
	execQuery(delete_sql);
}

void AttachWorker::apply_changes(AttachTask &task) {
	if (!task.tbl_id.IsValid()) {
		return;
	}
	auto db_id = task.db_id.GetIndex();
	auto &db_infos = db_pool.db_infos;
	lock_guard<mutex> lock(db_infos[db_id].mu);
	execQuery("BEGIN");
	delete_internal(task);
	append_internal(task);
	execQuery("COMMIT");
}

void AttachWorker::describe_tbl(AttachTask &task) {
	if (!task.tbl_id.IsValid()) {
		return;
	}
	auto db_id = task.db_id.GetIndex();
	auto tbl_id = task.tbl_id.GetIndex();
	auto tbl_str = "tbl_" + to_string(tbl_id);
	auto actual_describe = task.actual_describe;
	string describe_sql;
	if (actual_describe) {
		describe_sql = StringUtil::Format("DESCRIBE %s.%s.%s", getDBName(db_id), DEFAULT_SCHEMA, tbl_str);
	} else {
		describe_sql = StringUtil::Format("SELECT 1 FROM %s.%s.%s LIMIT 1", getDBName(db_id), DEFAULT_SCHEMA, tbl_str);
	}

	addLog("q: " + describe_sql);
	execQuery(describe_sql);
}

void AttachWorker::checkpoint_db(AttachTask &task) {
	auto db_id = task.db_id.GetIndex();
	auto &db_infos = db_pool.db_infos;
	unique_lock<mutex> lock(db_infos[db_id].mu);
	string checkpoint_sql = "CHECKPOINT " + getDBName(db_id);
	addLog("q: " + checkpoint_sql);
	// checkpoint can fail, we don't care
	conn.Query(checkpoint_sql);
}

void AttachWorker::GetRandomTable(AttachTask &task) {
	auto &db_infos = db_pool.db_infos;
	auto db_id = task.db_id.GetIndex();
	lock_guard<mutex> lock(db_infos[db_id].mu);
	auto max_tbl_id = db_infos[db_id].table_count;

	if (max_tbl_id == 0) {
		return;
	}

	task.tbl_id = std::rand() % max_tbl_id;
	task.tbl_size = db_infos[db_id].tables[task.tbl_id.GetIndex()].size;
}

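// Pick a random task for this iteration; with the scenario split below, half of
// all tasks are DESCRIBE_TABLE and the rest is spread over the other task types.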
AttachTask AttachWorker::RandomTask() {
	AttachTask result;
	idx_t scenario_id = std::rand() % 10;
	result.db_id = std::rand() % db_count;
	auto db_id = result.db_id.GetIndex();
	switch (scenario_id) {
	case 0:
		result.type = AttachTaskType::CREATE_TABLE;
		GetRandomTable(result);
		break;
	case 1:
		result.type = AttachTaskType::LOOKUP;
		GetRandomTable(result);
		break;
	case 2:
		result.type = AttachTaskType::APPEND;
		GetRandomTable(result);
		break;
	case 3:
		result.type = AttachTaskType::APPLY_CHANGES;
		GetRandomTable(result);
		if (result.tbl_id.IsValid()) {
			auto current_num_rows = result.tbl_size.GetIndex();
			idx_t delete_count = std::rand() % (STANDARD_VECTOR_SIZE / 3);
			if (delete_count == 0) {
				delete_count = 1;
			}

			unordered_set<idx_t> unique_ids;
			for (idx_t i = 0; i < delete_count; i++) {
				unique_ids.insert(std::rand() % current_num_rows);
			}
			for (auto &id : unique_ids) {
				result.ids.push_back(id);
			}
		}
		break;
	case 4:
	case 5:
	case 6:
	case 7:
	case 8:
		result.type = AttachTaskType::DESCRIBE_TABLE;
		GetRandomTable(result);
		result.actual_describe = std::rand() % 2 == 0;
		break;
	default:
		result.type = AttachTaskType::CHECKPOINT;
		break;
	}
	return result;
}

void AttachWorker::Work() {
	for (idx_t i = 0; i < iteration_count; i++) {
		if (!success) {
			return;
		}

		try {
			auto task = RandomTask();

			db_pool.addWorker(*this, task.db_id.GetIndex());

			switch (task.type) {
			case AttachTaskType::CREATE_TABLE:
				createTbl(task);
				break;
			case AttachTaskType::LOOKUP:
				lookup(task);
				break;
			case AttachTaskType::APPEND:
				append(task);
				break;
			case AttachTaskType::APPLY_CHANGES:
				apply_changes(task);
				break;
			case AttachTaskType::DESCRIBE_TABLE:
				describe_tbl(task);
				break;
			case AttachTaskType::CHECKPOINT:
				checkpoint_db(task);
				break;
			default:
				addLog("invalid task type");
				success = false;
				return;
			}
			db_pool.removeWorker(*this, task.db_id.GetIndex());

		} catch (const std::exception &e) {
			addLog("Caught exception when running iterations: " + string(e.what()));
			success = false;
			return;
		} catch (...) {
			addLog("Caught unknown exception when running iterations");
			success = false;
			return;
		}
	}
}

void workUnit(std::unique_ptr<AttachWorker> worker) {
	worker->Work();
}

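// 40 workers each run 100 random tasks against a pool of 10 database files,
// attaching and detaching them on demand via DBPoolMgr.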
TEST_CASE("Run a concurrent ATTACH/DETACH scenario", "[interquery][.]") {
|
||||
test_dir_path = TestDirectoryPath();
|
||||
DBPoolMgr db_pool;
|
||||
DuckDB db(nullptr);
|
||||
Connection init_conn(db);
|
||||
|
||||
execQuery(init_conn, "SET catalog_error_max_schemas = '0'");
|
||||
execQuery(init_conn, "SET threads = '1'");
|
||||
execQuery(init_conn, "SET storage_compatibility_version = 'latest'");
|
||||
execQuery(init_conn, "CALL enable_logging()");
|
||||
execQuery(init_conn, "PRAGMA enable_profiling='no_output'");
|
||||
|
||||
logging.resize(worker_count);
|
||||
vector<std::thread> workers;
|
||||
for (idx_t i = 0; i < worker_count; i++) {
|
||||
auto worker = make_uniq<AttachWorker>(db, i, logging[i], db_pool);
|
||||
workers.emplace_back(workUnit, std::move(worker));
|
||||
}
|
||||
|
||||
for (auto &worker : workers) {
|
||||
worker.join();
|
||||
}
|
||||
if (!success) {
|
||||
for (idx_t worker_id = 0; worker_id < logging.size(); worker_id++) {
|
||||
for (auto &log : logging[worker_id]) {
|
||||
Printer::PrintF("thread %d; %s", worker_id, log);
|
||||
}
|
||||
}
|
||||
FAIL();
|
||||
}
|
||||
ClearTestDirectory();
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
20
external/duckdb/test/sql/parallelism/interquery/concurrent_batch_append.test_slow
vendored
Normal file
@@ -0,0 +1,20 @@
# name: test/sql/parallelism/interquery/concurrent_batch_append.test_slow
# description: Test concurrent batch appends on persistent storage
# group: [interquery]

load __TEST_DIR__/concurrent_batch_append.db

statement ok
CREATE TABLE test(a INTEGER)

concurrentloop i 0 10

statement ok
INSERT INTO test SELECT * FROM range(1000000)

endloop

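# Expected: 10 threads x 1000000 rows; SUM(a) = 10 * (999999 * 1000000 / 2) = 4999995000000.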
query II
SELECT COUNT(*), SUM(a) FROM test
----
10000000 4999995000000
28
external/duckdb/test/sql/parallelism/interquery/concurrent_batch_append_pk.test_slow
vendored
Normal file
@@ -0,0 +1,28 @@
# name: test/sql/parallelism/interquery/concurrent_batch_append_pk.test_slow
# description: Test concurrent batch appends on persistent storage with primary key
# group: [interquery]

load __TEST_DIR__/concurrent_batch_append.db

statement ok
CREATE TABLE test(a INTEGER PRIMARY KEY)

concurrentloop threadid 0 10

statement ok
INSERT INTO test SELECT * FROM range(250000 * ${threadid}, 250000 * (${threadid} + 1))

endloop

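# Expected: the 10 disjoint ranges cover 0..2499999, so 2500000 rows and
# SUM(a) = 2499999 * 2500000 / 2 = 3124998750000.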
query II
SELECT COUNT(*), SUM(a) FROM test
----
2500000 3124998750000

concurrentloop threadid 0 10

statement error
INSERT INTO test VALUES (${threadid} * 17171)
----

endloop
313
external/duckdb/test/sql/parallelism/interquery/concurrent_checkpoint.cpp
vendored
Normal file
@@ -0,0 +1,313 @@
#include "catch.hpp"
#include "duckdb/common/value_operations/value_operations.hpp"
#include "test_helpers.hpp"
#include "duckdb/main/appender.hpp"

#include <atomic>
#include <random>
#include <thread>

using namespace duckdb;
using namespace std;

class ConcurrentCheckpoint {
public:
	static constexpr int CONCURRENT_UPDATE_TRANSACTION_UPDATE_COUNT = 200;
	static constexpr int CONCURRENT_UPDATE_TOTAL_ACCOUNTS = 10;
	static constexpr int CONCURRENT_UPDATE_MONEY_PER_ACCOUNT = 10;

	static atomic<bool> finished;
	static atomic<size_t> finished_threads;

	template <bool FORCE_CHECKPOINT>
	static void CheckpointThread(DuckDB *db, bool *read_correct) {
		Connection con(*db);
		while (!finished) {
			{
				// the total balance should remain constant regardless of updates and checkpoints
				auto result = con.Query("SELECT SUM(money) FROM accounts");
				if (!CHECK_COLUMN(result, 0,
				                  {CONCURRENT_UPDATE_TOTAL_ACCOUNTS * CONCURRENT_UPDATE_MONEY_PER_ACCOUNT})) {
					*read_correct = false;
				}
			}
			while (true) {
				auto result = con.Query(FORCE_CHECKPOINT ? "FORCE CHECKPOINT" : "CHECKPOINT");
				if (!result->HasError()) {
					break;
				}
			}
			{
				// the total balance should remain constant regardless of updates and checkpoints
				auto result = con.Query("SELECT SUM(money) FROM accounts");
				if (!CHECK_COLUMN(result, 0,
				                  {CONCURRENT_UPDATE_TOTAL_ACCOUNTS * CONCURRENT_UPDATE_MONEY_PER_ACCOUNT})) {
					*read_correct = false;
				}
			}
		}
	}

	static void WriteRandomNumbers(DuckDB *db, bool *correct, size_t nr) {
		correct[nr] = true;
		Connection con(*db);
		for (size_t i = 0; i < CONCURRENT_UPDATE_TRANSACTION_UPDATE_COUNT; i++) {
			// just make some changes to the total
			// the total amount of money after the commit is the same
			if (con.Query("BEGIN TRANSACTION")->HasError()) {
				correct[nr] = false;
			}
			if (con.Query("UPDATE accounts SET money = money + " + to_string(i * 2) + " WHERE id = " + to_string(nr))
			        ->HasError()) {
				correct[nr] = false;
			}
			if (con.Query("UPDATE accounts SET money = money - " + to_string(i) + " WHERE id = " + to_string(nr))
			        ->HasError()) {
				correct[nr] = false;
			}
			if (con.Query("UPDATE accounts SET money = money - " + to_string(i * 2) + " WHERE id = " + to_string(nr))
			        ->HasError()) {
				correct[nr] = false;
			}
			if (con.Query("UPDATE accounts SET money = money + " + to_string(i) + " WHERE id = " + to_string(nr))
			        ->HasError()) {
				correct[nr] = false;
			}
			// we test both commit and rollback
			// the result of both should be the same since the updates have a
			// net-zero effect
			if (con.Query(nr % 2 == 0 ? "COMMIT" : "ROLLBACK")->HasError()) {
				correct[nr] = false;
			}
		}
		finished_threads++;
		if (finished_threads == CONCURRENT_UPDATE_TOTAL_ACCOUNTS) {
			finished = true;
		}
	}

	static void NopUpdate(DuckDB *db) {
		Connection con(*db);
		for (size_t i = 0; i < 10; i++) {
			con.Query("BEGIN TRANSACTION");
			con.Query("UPDATE accounts SET money = money");
			con.Query("COMMIT");
		}
		finished_threads++;
		if (finished_threads == CONCURRENT_UPDATE_TOTAL_ACCOUNTS) {
			finished = true;
		}
	}
};

atomic<bool> ConcurrentCheckpoint::finished;
atomic<size_t> ConcurrentCheckpoint::finished_threads;

TEST_CASE("Concurrent checkpoint with single updater", "[interquery][.]") {
	auto config = GetTestConfig();
	auto storage_database = TestCreatePath("concurrent_checkpoint");
	DeleteDatabase(storage_database);
	duckdb::unique_ptr<MaterializedQueryResult> result;
	DuckDB db(storage_database, config.get());
	Connection con(db);

	// fixed seed random numbers
	mt19937 generator;
	generator.seed(42);
	uniform_int_distribution<int> account_distribution(0, ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS - 1);
	auto random_account = bind(account_distribution, generator);

	uniform_int_distribution<int> amount_distribution(0, ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT);
	auto random_amount = bind(amount_distribution, generator);

	ConcurrentCheckpoint::finished = false;

	// enable detailed profiling
	con.Query("PRAGMA enable_profiling");
	auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
	con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
	con.Query("PRAGMA profiling_mode = detailed");

	// initialize the database
	con.Query("BEGIN TRANSACTION");
	con.Query("CREATE TABLE accounts(id INTEGER, money INTEGER)");
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
		con.Query("INSERT INTO accounts VALUES (" + to_string(i) + ", " +
		          to_string(ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT) + ");");
	}
	con.Query("COMMIT");

	bool read_correct = true;
	// launch separate thread for reading aggregate
	thread read_thread(ConcurrentCheckpoint::CheckpointThread<false>, &db, &read_correct);

	// start vigorously updating balances in this thread
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TRANSACTION_UPDATE_COUNT; i++) {
		int from = random_account();
		int to = random_account();
		while (to == from) {
			to = random_account();
		}
		int amount = random_amount();

		REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
		result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(from));
		Value money_from = result->GetValue(0, 0);
		result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(to));
		Value money_to = result->GetValue(0, 0);

		REQUIRE_NO_FAIL(
		    con.Query("UPDATE accounts SET money = money - " + to_string(amount) + " WHERE id = " + to_string(from)));
		REQUIRE_NO_FAIL(
		    con.Query("UPDATE accounts SET money = money + " + to_string(amount) + " WHERE id = " + to_string(to)));

		result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(from));
		Value new_money_from = result->GetValue(0, 0);
		result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(to));
		Value new_money_to = result->GetValue(0, 0);

		Value expected_money_from, expected_money_to;

		expected_money_from = Value::INTEGER(IntegerValue::Get(money_from) - amount);
		expected_money_to = Value::INTEGER(IntegerValue::Get(money_to) + amount);

		REQUIRE(new_money_from == expected_money_from);
		REQUIRE(new_money_to == expected_money_to);

		REQUIRE_NO_FAIL(con.Query("COMMIT"));
	}
	ConcurrentCheckpoint::finished = true;
	read_thread.join();
	REQUIRE(read_correct);
}

TEST_CASE("Concurrent checkpoint with multiple updaters", "[interquery][.]") {
	auto config = GetTestConfig();
	auto storage_database = TestCreatePath("concurrent_checkpoint");
	DeleteDatabase(storage_database);
	duckdb::unique_ptr<MaterializedQueryResult> result;
	DuckDB db(storage_database, config.get());
	Connection con(db);

	// enable detailed profiling
	con.Query("PRAGMA enable_profiling");
	auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
	con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
	con.Query("PRAGMA profiling_mode = detailed");

	ConcurrentCheckpoint::finished = false;
	ConcurrentCheckpoint::finished_threads = 0;
	// initialize the database
	con.Query("BEGIN TRANSACTION");
	con.Query("CREATE TABLE accounts(id INTEGER, money INTEGER)");
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
		con.Query("INSERT INTO accounts VALUES (" + to_string(i) + ", " +
		          to_string(ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT) + ");");
	}
	con.Query("COMMIT");

	bool correct[ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS];
	bool read_correct = true;
	std::thread write_threads[ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS];
	// launch a thread for reading and checkpointing the table
	thread read_thread(ConcurrentCheckpoint::CheckpointThread<false>, &db, &read_correct);
	// launch several threads for updating the table
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
		write_threads[i] = thread(ConcurrentCheckpoint::WriteRandomNumbers, &db, correct, i);
	}
	read_thread.join();
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
		write_threads[i].join();
		REQUIRE(correct[i]);
	}
}

TEST_CASE("Force concurrent checkpoint with single updater", "[interquery][.]") {
	auto config = GetTestConfig();
	auto storage_database = TestCreatePath("concurrent_checkpoint");
	DeleteDatabase(storage_database);
	duckdb::unique_ptr<MaterializedQueryResult> result;
	DuckDB db(storage_database, config.get());
	Connection con(db);

	// enable detailed profiling
	con.Query("PRAGMA enable_profiling");
	auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
	con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
	con.Query("PRAGMA profiling_mode = detailed");

	ConcurrentCheckpoint::finished = false;
	// initialize the database
	con.Query("BEGIN TRANSACTION");
	con.Query("CREATE TABLE accounts(id INTEGER, money INTEGER)");
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
		con.Query("INSERT INTO accounts VALUES (" + to_string(i) + ", " +
		          to_string(ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT) + ");");
	}
	con.Query("COMMIT");

	bool read_correct = true;
	// launch separate thread for reading aggregate
	thread read_thread(ConcurrentCheckpoint::CheckpointThread<true>, &db, &read_correct);

	// start vigorously updating balances in this thread
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TRANSACTION_UPDATE_COUNT; i++) {
		con.Query("BEGIN TRANSACTION");
		con.Query("UPDATE accounts SET money = money");
		con.Query("UPDATE accounts SET money = money");
		con.Query("UPDATE accounts SET money = money");
		con.Query("ROLLBACK");
	}
	ConcurrentCheckpoint::finished = true;
	read_thread.join();
	REQUIRE(read_correct);
}

TEST_CASE("Concurrent commits on persistent database with automatic checkpoints", "[interquery][.]") {
	auto config = GetTestConfig();
	auto storage_database = TestCreatePath("concurrent_checkpoint");
	DeleteDatabase(storage_database);
	duckdb::unique_ptr<MaterializedQueryResult> result;
	config->options.checkpoint_wal_size = 1;
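	// a WAL size limit of a single byte makes virtually every commit exceed the
	// limit, which triggers an automatic checkpoint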
	DuckDB db(storage_database, config.get());
	Connection con(db);

	// enable detailed profiling
	con.Query("PRAGMA enable_profiling");
	auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
	con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
	con.Query("PRAGMA profiling_mode = detailed");

	const int ACCOUNTS = 20000;
	ConcurrentCheckpoint::finished = false;
	ConcurrentCheckpoint::finished_threads = 0;
	// initialize the database
	con.Query("BEGIN TRANSACTION");
	con.Query("CREATE TABLE accounts(id INTEGER, money INTEGER)");
	Appender appender(con, "accounts");
	for (size_t i = 0; i < ACCOUNTS; i++) {
		appender.AppendRow(int(i), int(ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT));
	}
	appender.Close();
	con.Query("COMMIT");

	REQUIRE_NO_FAIL(con.Query("UPDATE accounts SET money = money"));
	REQUIRE_NO_FAIL(con.Query("UPDATE accounts SET money = money"));
	REQUIRE_NO_FAIL(con.Query("UPDATE accounts SET money = money"));
	result = con.Query("SELECT SUM(money) FROM accounts");
	REQUIRE(CHECK_COLUMN(result, 0, {ACCOUNTS * ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT}));

	std::thread write_threads[ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS];
	// launch several threads for updating the table
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
		write_threads[i] = thread(ConcurrentCheckpoint::NopUpdate, &db);
	}
	for (size_t i = 0; i < ConcurrentCheckpoint::CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
		write_threads[i].join();
	}
	result = con.Query("SELECT SUM(money) FROM accounts");
	REQUIRE(CHECK_COLUMN(result, 0, {ACCOUNTS * ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT}));
	REQUIRE_NO_FAIL(con.Query("UPDATE accounts SET money = money"));
	result = con.Query("SELECT SUM(money) FROM accounts");
	REQUIRE(CHECK_COLUMN(result, 0, {ACCOUNTS * ConcurrentCheckpoint::CONCURRENT_UPDATE_MONEY_PER_ACCOUNT}));
}
72
external/duckdb/test/sql/parallelism/interquery/concurrent_force_checkpoint.test
vendored
Normal file
@@ -0,0 +1,72 @@
# name: test/sql/parallelism/interquery/concurrent_force_checkpoint.test
# description: Test concurrent force checkpoints with a mix of operations
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 21

# thread 0 is appending
loop i 0 20

onlyif threadid=0
statement ok
INSERT INTO integers SELECT * FROM range(10000 + ${i} * 100, 10100 + ${i} * 100);

endloop

# threads 1-10 are deleting rows between 0..1000
loop i 0 100

onlyif threadid>=1&&threadid<=10
statement ok
DELETE FROM integers WHERE i=(${threadid}-1)*100+${i}

endloop

# threads 11-15 are updating rows between 1000..1500
loop i 0 100

onlyif threadid>=11&&threadid<=15
statement ok
UPDATE integers SET i=100000+i WHERE i=(${threadid}-1)*100+${i}

endloop

# threads 16-19 are reading
loop i 0 100

onlyif threadid>=16&&threadid<20
statement ok
BEGIN TRANSACTION READ ONLY

onlyif threadid>=16&&threadid<20
statement ok
SELECT COUNT(*), SUM(i) FROM integers;

onlyif threadid>=16&&threadid<20
statement ok
COMMIT

endloop

# thread 20 is force checkpointing
loop i 0 20

onlyif threadid==20
statement ok
FORCE CHECKPOINT

endloop

endloop

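# Expected: 10000 + 2000 appended - 1000 deleted = 11000 rows. SUM(i) starts at
# 49995000; the appends add 21999000, the deletes remove 499500, and the 500
# updated rows each gain 100000, giving 121494500.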
query II
SELECT COUNT(*), SUM(i) FROM integers
----
11000 121494500
40
external/duckdb/test/sql/parallelism/interquery/concurrent_index_reads_while_updating.test_slow
vendored
Normal file
@@ -0,0 +1,40 @@
# name: test/sql/parallelism/interquery/concurrent_index_reads_while_updating.test_slow
# description: Test concurrent index reads while updating
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER PRIMARY KEY, value BIGINT)

statement ok
INSERT INTO integers SELECT i, i%10 FROM range(10) t(i);

# threads 0-10 update the rows, threads 11-19 read them
concurrentloop threadid 0 20

loop i 0 500

skipif threadid<=10
statement ok
SELECT * FROM integers WHERE i=(hash(${threadid} + ${i})%100)

endloop

loop i 0 100

skipif threadid>10
statement ok
UPDATE integers SET value = value + 1 WHERE i=${threadid}

skipif threadid>10
statement ok
UPDATE integers SET value = value - 1 WHERE i=${threadid}

endloop

endloop

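# Expected: the keys 0..9 never change; only value is updated (with net-zero
# effect), so COUNT(*) = 10 and SUM(i) = 45.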
query II
SELECT COUNT(*), SUM(i) FROM integers
----
10 45
34
external/duckdb/test/sql/parallelism/interquery/concurrent_index_scans_while_appending.test
vendored
Normal file
@@ -0,0 +1,34 @@
# name: test/sql/parallelism/interquery/concurrent_index_scans_while_appending.test
# description: Test concurrent index scans while appending
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER PRIMARY KEY)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
INSERT INTO integers SELECT * FROM range(10000 + ${i} * 100, 10100 + ${i} * 100);

endloop

loop i 0 100

skipif threadid=0
statement ok
SELECT * FROM integers WHERE i=${i} * (${threadid} * 300);

endloop

endloop

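# Expected: as in concurrent_append_metadata_queries, 10000 + 20 * 100 rows with
# values 0..11999, so SUM(i) = 71994000.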
query II
SELECT COUNT(*), SUM(i) FROM integers
----
12000 71994000
55
external/duckdb/test/sql/parallelism/interquery/concurrent_mix_operations.test_slow
vendored
Normal file
@@ -0,0 +1,55 @@
# name: test/sql/parallelism/interquery/concurrent_mix_operations.test_slow
# description: Test concurrent mix of operations
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

# thread 0 is appending
loop i 0 20

onlyif threadid=0
statement ok
INSERT INTO integers SELECT * FROM range(10000 + ${i} * 100, 10100 + ${i} * 100);

endloop

# threads 1-10 are deleting rows between 0..1000
loop i 0 100

onlyif threadid>=1&&threadid<=10
statement ok
DELETE FROM integers WHERE i=(${threadid}-1)*100+${i}

endloop

# threads 11-15 are updating rows between 1000..1500
loop i 0 100

onlyif threadid>=11&&threadid<=15
statement ok
UPDATE integers SET i=100000+i WHERE i=(${threadid}-1)*100+${i}

endloop

# threads 16-19 are reading
loop i 0 100

onlyif threadid>=16
statement ok
SELECT COUNT(*), SUM(i) FROM integers;

endloop

endloop

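# Expected: the same bookkeeping as concurrent_force_checkpoint, without the
# checkpoints: 11000 rows and SUM(i) = 121494500.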
query II
SELECT COUNT(*), SUM(i) FROM integers
----
11000 121494500
40
external/duckdb/test/sql/parallelism/interquery/concurrent_reads_append_list.test_slow
vendored
Normal file
@@ -0,0 +1,40 @@
# name: test/sql/parallelism/interquery/concurrent_reads_append_list.test_slow
# description: Test concurrent reads while appending
# group: [interquery]

statement ok
CREATE TABLE lists(l INTEGER[])

statement ok
INSERT INTO lists SELECT [i, i + 1, i + 2] FROM range(10000) t(i);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
INSERT INTO lists SELECT [i, i + 1, i + 2] FROM range(100) t(i);

endloop

loop i 0 200

skipif threadid=0
query II
SELECT COUNT(*) >= 30000 AND COUNT(*) <= 36000
, SUM(i) >= 150015000 AND SUM(i) <= 150318000
FROM
(SELECT UNNEST(l) AS i FROM lists);
----
true true

endloop

endloop

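# Expected: (10000 + 20 * 100) lists of 3 elements = 36000 values; SUM is
# 150015000 for the initial lists plus 20 * 15150 per appended batch = 150318000.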
query II
SELECT COUNT(*), SUM(i) FROM (SELECT UNNEST(l) AS i FROM lists)
----
36000 150318000
47
external/duckdb/test/sql/parallelism/interquery/concurrent_reads_while_altering.test
vendored
Normal file
@@ -0,0 +1,47 @@
# name: test/sql/parallelism/interquery/concurrent_reads_while_altering.test
# description: Test concurrent reads while altering
# group: [interquery]

statement ok
CREATE OR REPLACE TABLE integers(i INTEGER)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
BEGIN;

onlyif threadid=0
statement ok
ALTER TABLE integers ADD COLUMN newcol_${i} INTEGER

onlyif threadid=0
statement ok
INSERT INTO integers (i) SELECT * FROM range(10000 + ${i} * 100, 10100 + ${i} * 100);

onlyif threadid=0
statement ok
COMMIT

endloop

loop i 0 20

skipif threadid=0
statement ok
SELECT * FROM integers

endloop

endloop

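# Expected: the 20 added columns leave COUNT(*) and SUM(i) untouched:
# 10000 + 2000 rows with i = 0..11999, so SUM(i) = 71994000.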
query II
SELECT COUNT(*), SUM(i) FROM integers
----
12000 71994000
38
external/duckdb/test/sql/parallelism/interquery/concurrent_reads_while_appending.test_slow
vendored
Normal file
@@ -0,0 +1,38 @@
# name: test/sql/parallelism/interquery/concurrent_reads_while_appending.test_slow
# description: Test concurrent reads while appending
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
INSERT INTO integers SELECT * FROM range(100);

endloop

loop i 0 200

skipif threadid=0
query II
SELECT COUNT(*)>=10000 AND COUNT(*)<=12000,
SUM(i)>= 49995000 AND SUM(i) <= 50094000 FROM integers;
----
true true

endloop

endloop

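# Expected: 10000 + 20 * 100 rows; each appended batch adds 4950 to the sum, so
# SUM(i) = 49995000 + 20 * 4950 = 50094000.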
query II
SELECT COUNT(*), SUM(i) FROM integers
----
12000 50094000
39
external/duckdb/test/sql/parallelism/interquery/concurrent_reads_while_appending_nulls.test_slow
vendored
Normal file
@@ -0,0 +1,39 @@
# name: test/sql/parallelism/interquery/concurrent_reads_while_appending_nulls.test_slow
# description: Test concurrent reads while appending NULL values
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
INSERT INTO integers SELECT CASE WHEN i%2=0 THEN NULL ELSE i END FROM range(100) t(i);

endloop

loop i 0 200

skipif threadid=0
query III
SELECT COUNT(*)>=10000 AND COUNT(*)<=12000,
COUNT(i) >= 10000 AND COUNT(i) <= 11000,
SUM(i)>= 49995000 AND SUM(i) <= 50094000 FROM integers;
----
true true true

endloop

endloop

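# Expected: half of the 2000 appended values are NULL, so COUNT(i) = 11000;
# SUM(i) grows by 20 * (1 + 3 + ... + 99) = 20 * 2500 = 50000 to 50045000.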
query III
SELECT COUNT(*), COUNT(i), SUM(i) FROM integers
----
12000 11000 50045000
40
external/duckdb/test/sql/parallelism/interquery/concurrent_reads_while_renaming.test
vendored
Normal file
@@ -0,0 +1,40 @@
# name: test/sql/parallelism/interquery/concurrent_reads_while_renaming.test
# description: Test concurrent reads while renaming
# group: [interquery]

statement ok
CREATE OR REPLACE TABLE integers(i INTEGER)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
ALTER TABLE integers RENAME TO integers_${i}

onlyif threadid=0
statement ok
ALTER TABLE integers_${i} RENAME TO integers

endloop

loop i 0 20

skipif threadid=0
statement maybe
SELECT * FROM integers
----
does not exist

endloop

endloop

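# Expected: every rename is immediately renamed back, so the data is unchanged:
# 10000 rows, SUM(0..9999) = 49995000.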
query II
SELECT COUNT(*), SUM(i) FROM integers
----
10000 49995000
38
external/duckdb/test/sql/parallelism/interquery/concurrent_reads_while_updating.test_slow
vendored
Normal file
@@ -0,0 +1,38 @@
# name: test/sql/parallelism/interquery/concurrent_reads_while_updating.test_slow
# description: Test concurrent reads while updating
# group: [interquery]

statement ok
CREATE TABLE integers(i INTEGER)

statement ok
INSERT INTO integers SELECT * FROM range(10000);

concurrentloop threadid 0 20

loop i 0 20

onlyif threadid=0
statement ok
UPDATE integers SET i=i+1;

endloop

loop i 0 200

skipif threadid=0
query II
SELECT COUNT(*)==10000,
SUM(i)>= 49995000 AND SUM(i) <= 50195000 FROM integers;
----
true true

endloop

endloop

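# Expected: 20 updates each add 1 to all 10000 rows, so
# SUM(i) = 49995000 + 20 * 10000 = 50195000.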
query II
SELECT COUNT(*), SUM(i) FROM integers
----
10000 50195000
22
external/duckdb/test/sql/parallelism/interquery/concurrent_update_drop.test_slow
vendored
Normal file
@@ -0,0 +1,22 @@
# name: test/sql/parallelism/interquery/concurrent_update_drop.test_slow
# description: Test concurrent updates and drops
# group: [interquery]

statement ok
CREATE TABLE t1(i INTEGER)

statement ok
INSERT INTO t1 VALUES (1), (2), (3)

concurrentloop threadid 0 2

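# the UPDATE below races with the DROP: depending on which transaction commits
# first it may succeed or fail, which is what 'statement maybe' permits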
onlyif threadid=0
statement maybe
UPDATE t1 SET i = 4 WHERE i = 2
----

onlyif threadid=1
statement ok
DROP TABLE t1

endloop
152
external/duckdb/test/sql/parallelism/interquery/test_concurrent_dependencies.cpp
vendored
Normal file
@@ -0,0 +1,152 @@
#include "catch.hpp"
#include "test_helpers.hpp"

#include <algorithm>
#include <mutex>
#include <thread>
#include <atomic>

using namespace duckdb;
using namespace std;

#define CONCURRENT_DEPENDENCIES_REPETITIONS 100
#define CONCURRENT_DEPENDENCIES_THREAD_COUNT 10

atomic<bool> finished;

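// retry a query until it succeeds; used to ride out transient failures while
// another thread still holds a dependency on the object being created or dropped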
static void RunQueryUntilSuccess(Connection &con, string query) {
|
||||
while (true) {
|
||||
auto result = con.Query(query);
|
||||
if (!result->HasError()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void create_drop_table(DuckDB *db) {
|
||||
Connection con(*db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
while (!finished) {
|
||||
// printf("[TABLE] Create table\n");
|
||||
// create the table: this should never fail
|
||||
(con.Query("BEGIN TRANSACTION"));
|
||||
(con.Query("CREATE TABLE integers(i INTEGER)"));
|
||||
(con.Query("INSERT INTO integers VALUES (1), (2), (3), (4), (5)"));
|
||||
(con.Query("COMMIT"));
|
||||
// now wait a bit
|
||||
this_thread::sleep_for(chrono::milliseconds(20));
|
||||
// printf("[TABLE] Drop table\n");
|
||||
// perform a cascade drop of the table
|
||||
// this can fail if a thread is still busy preparing a statement
|
||||
RunQueryUntilSuccess(con, "DROP TABLE integers CASCADE");
|
||||
}
|
||||
}
|
||||
|
||||
static void create_use_prepared_statement(DuckDB *db) {
|
||||
Connection con(*db);
|
||||
duckdb::unique_ptr<QueryResult> result;
|
||||
|
||||
for (int i = 0; i < CONCURRENT_DEPENDENCIES_REPETITIONS; i++) {
|
||||
// printf("[PREPARE] Prepare statement\n");
|
||||
RunQueryUntilSuccess(con, "PREPARE s1 AS SELECT SUM(i) FROM integers");
|
||||
// printf("[PREPARE] Query prepare\n");
|
||||
while (true) {
|
||||
// execute the prepared statement until the prepared statement is dropped because of the CASCADE in another
|
||||
// thread
|
||||
result = con.Query("EXECUTE s1");
|
||||
if (result->HasError()) {
|
||||
break;
|
||||
} else {
|
||||
D_ASSERT(CHECK_COLUMN(result, 0, {15}));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Test parallel dependencies in multiple connections", "[interquery][.]") {
|
||||
DuckDB db(nullptr);
|
||||
// disabled for now
|
||||
return;
|
||||
|
||||
// in this test we create and drop a table in one thread (with CASCADE drop)
|
||||
// in the other thread, we create a prepared statement and execute it
|
||||
// the prepared statement depends on the table
|
||||
// hence when the CASCADE drop is executed the prepared statement also needs to be dropped
|
||||
|
||||
thread table_thread = thread(create_drop_table, &db);
|
||||
thread seq_threads[CONCURRENT_DEPENDENCIES_THREAD_COUNT];
|
||||
for (int i = 0; i < CONCURRENT_DEPENDENCIES_THREAD_COUNT; i++) {
|
||||
seq_threads[i] = thread(create_use_prepared_statement, &db);
|
||||
}
|
||||
for (int i = 0; i < CONCURRENT_DEPENDENCIES_THREAD_COUNT; i++) {
|
||||
seq_threads[i].join();
|
||||
}
|
||||
finished = true;
|
||||
table_thread.join();
|
||||
}
|
||||
|
||||
static void create_drop_schema(DuckDB *db) {
	Connection con(*db);

	while (!finished) {
		// create the schema: this should never fail
		REQUIRE_NO_FAIL(con.Query("CREATE SCHEMA s1"));
		// now wait a bit
		this_thread::sleep_for(chrono::milliseconds(20));
		// perform a cascade drop of the schema
		// this can fail if a thread is still busy creating something inside the schema
		RunQueryUntilSuccess(con, "DROP SCHEMA s1 CASCADE");
	}
}

static void create_use_table_view(DuckDB *db, int threadnr) {
	Connection con(*db);
	duckdb::unique_ptr<QueryResult> result;
	string tname = "integers" + to_string(threadnr);
	string vname = "v" + to_string(threadnr);

	for (int i = 0; i < CONCURRENT_DEPENDENCIES_REPETITIONS; i++) {
		RunQueryUntilSuccess(con, "CREATE TABLE s1." + tname + "(i INTEGER)");
		con.Query("INSERT INTO s1." + tname + " VALUES (1), (2), (3), (4), (5)");
		RunQueryUntilSuccess(con, "CREATE VIEW s1." + vname + " AS SELECT 42");
		while (true) {
			result = con.Query("SELECT SUM(i) FROM s1." + tname);
			if (result->HasError()) {
				break;
			} else {
				REQUIRE(CHECK_COLUMN(result, 0, {15}));
			}
			result = con.Query("SELECT * FROM s1." + vname);
			if (result->HasError()) {
				break;
			} else {
				REQUIRE(CHECK_COLUMN(result, 0, {42}));
			}
		}
	}
}

TEST_CASE("Test parallel dependencies with schemas and tables", "[interquery][.]") {
|
||||
DuckDB db(nullptr);
|
||||
// FIXME: this test crashes
|
||||
return;
|
||||
|
||||
// in this test we create and drop a schema in one thread (with CASCADE drop)
|
||||
// in other threads, we create tables and views and query those tables and views
|
||||
|
||||
thread table_thread = thread(create_drop_schema, &db);
|
||||
thread seq_threads[CONCURRENT_DEPENDENCIES_THREAD_COUNT];
|
||||
for (int i = 0; i < CONCURRENT_DEPENDENCIES_THREAD_COUNT; i++) {
|
||||
seq_threads[i] = thread(create_use_table_view, &db, i);
|
||||
}
|
||||
for (int i = 0; i < CONCURRENT_DEPENDENCIES_THREAD_COUNT; i++) {
|
||||
seq_threads[i].join();
|
||||
}
|
||||
finished = true;
|
||||
table_thread.join();
|
||||
}
|
||||
391
external/duckdb/test/sql/parallelism/interquery/test_concurrent_index.cpp
vendored
Normal file
@@ -0,0 +1,391 @@
#include "catch.hpp"
#include "duckdb/main/appender.hpp"
#include "test_helpers.hpp"

#include <atomic>
#include <thread>
#include <vector>
#include <random>

using namespace duckdb;
using namespace std;

//! Synchronize threads
atomic<bool> concurrent_index_finished;

#define CONCURRENT_INDEX_THREAD_COUNT 10
#define CONCURRENT_INDEX_INSERT_COUNT 2000

static void CreateIntegerTable(Connection *con, int64_t count) {
	REQUIRE_NO_FAIL(con->Query("CREATE TABLE integers AS SELECT range AS i FROM range ($1)", count));
}

static void CheckConstraintViolation(const string &result_str) {
	auto constraint_violation =
	    result_str.find("violat") != string::npos || result_str.find("Conflict on tuple deletion") != string::npos;
	if (!constraint_violation) {
		FAIL(result_str);
	}
}

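// Any error whose message contains "violat" (constraint violations) or reports a conflict on tuple
// deletion counts as an expected outcome of racing writers; any other error aborts the test.
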
static void ReadFromIntegers(DuckDB *db, idx_t thread_idx, atomic<bool> *success) {
	Connection con(*db);
	while (!concurrent_index_finished) {
		auto expected_value = to_string(thread_idx * 10000);
		auto result = con.Query("SELECT i FROM integers WHERE i = " + expected_value);
		if (result->HasError()) {
			*success = false;
		} else if (!CHECK_COLUMN(result, 0, {Value::INTEGER(thread_idx * 10000)})) {
			*success = false;
		}
	}
}

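// The index tests below enable immediate_transaction_mode, which starts transactions eagerly instead of
// at their first statement; this presumably maximizes the overlap between the reader/writer threads and
// the CREATE INDEX running on the main connection.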
TEST_CASE("Concurrent reads during index creation", "[index][.]") {
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
|
||||
CreateIntegerTable(&con, 1000000);
|
||||
concurrent_index_finished = false;
|
||||
|
||||
atomic<bool> success(true);
|
||||
|
||||
// launch many reading threads
|
||||
thread threads[CONCURRENT_INDEX_THREAD_COUNT];
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(ReadFromIntegers, &db, i, &success);
|
||||
}
|
||||
|
||||
// create the index
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE INDEX i_index ON integers(i)"));
|
||||
concurrent_index_finished = true;
|
||||
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
|
||||
REQUIRE(success);
|
||||
|
||||
// test that we can probe the index correctly
|
||||
auto result = con.Query("SELECT COUNT(*) FROM integers WHERE i=500000");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {1}));
|
||||
}
|
||||
|
||||
static void AppendToIntegers(DuckDB *db, atomic<bool> *success) {
	Connection con(*db);
	for (idx_t i = 0; i < CONCURRENT_INDEX_INSERT_COUNT; i++) {
		auto result = con.Query("INSERT INTO integers VALUES (1)");
		if (result->HasError()) {
			*success = false;
		}
	}
}

TEST_CASE("Concurrent writes during index creation", "[index][.]") {
|
||||
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
|
||||
CreateIntegerTable(&con, 1000000);
|
||||
|
||||
atomic<bool> success(true);
|
||||
|
||||
// launch many concurrently writing threads
|
||||
thread threads[CONCURRENT_INDEX_THREAD_COUNT];
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(AppendToIntegers, &db, &success);
|
||||
}
|
||||
|
||||
// create the index
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE INDEX i_index ON integers(i)"));
|
||||
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
|
||||
REQUIRE(success);
|
||||
|
||||
// first scan the base table to verify the count, we avoid using a filter here to prevent the
|
||||
// optimizer from using an index scan
|
||||
auto result = con.Query("SELECT i, COUNT(*) FROM integers GROUP BY i ORDER BY i LIMIT 1 OFFSET 1");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {1}));
|
||||
REQUIRE(CHECK_COLUMN(result, 1, {1 + CONCURRENT_INDEX_THREAD_COUNT * CONCURRENT_INDEX_INSERT_COUNT}));
|
||||
|
||||
// test that we can probe the index correctly
|
||||
result = con.Query("SELECT COUNT(*) FROM integers WHERE i = 1");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {1 + CONCURRENT_INDEX_THREAD_COUNT * CONCURRENT_INDEX_INSERT_COUNT}));
|
||||
}
|
||||
|
||||
static void AppendToPK(DuckDB *db) {
	Connection con(*db);
	for (idx_t i = 0; i < 1000; i++) {
		auto result = con.Query("INSERT INTO integers VALUES ($1)", i);
		if (result->HasError()) {
			CheckConstraintViolation(result->ToString());
		}
	}
}

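// All threads race to insert the same keys 0...999: for each key exactly one insert can succeed and the
// others must hit a constraint violation, so the table should end up with exactly 1000 distinct rows.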
TEST_CASE("Concurrent inserts to PRIMARY KEY", "[index][.]") {
|
||||
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
|
||||
// create a table to append to
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers (i INTEGER PRIMARY KEY)"));
|
||||
|
||||
// launch many concurrently writing threads
|
||||
// each thread writes the numbers 1...1000, possibly causing a constraint violation
|
||||
thread threads[CONCURRENT_INDEX_THREAD_COUNT];
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(AppendToPK, &db);
|
||||
}
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
|
||||
// test the result
|
||||
auto result = con.Query("SELECT COUNT(*), COUNT(DISTINCT i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {1000}));
|
||||
REQUIRE(CHECK_COLUMN(result, 1, {1000}));
|
||||
}
|
||||
|
||||
static void UpdatePK(DuckDB *db) {
	Connection con(*db);
	for (idx_t i = 0; i < 1000; i++) {
		auto result = con.Query("UPDATE integers SET i = 1000 + (i % 100) WHERE i = $1", i);
		if (result->HasError()) {
			CheckConstraintViolation(result->ToString());
		}
	}
}

TEST_CASE("Concurrent updates to PRIMARY KEY", "[index][.]") {
|
||||
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
|
||||
// create a table and insert values [1...1000]
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers (i INTEGER PRIMARY KEY)"));
|
||||
REQUIRE_NO_FAIL(con.Query("INSERT INTO integers SELECT range FROM range(1000)"));
|
||||
|
||||
// launch many concurrently updating threads
|
||||
// each thread updates numbers by incrementing them
|
||||
thread threads[CONCURRENT_INDEX_THREAD_COUNT];
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(UpdatePK, &db);
|
||||
}
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
|
||||
// test the result
|
||||
auto result = con.Query("SELECT COUNT(*), COUNT(DISTINCT i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {1000}));
|
||||
REQUIRE(CHECK_COLUMN(result, 1, {1000}));
|
||||
}
|
||||
|
||||
static void MixAppendToPK(DuckDB *db, atomic<idx_t> *count) {
	Connection con(*db);
	for (idx_t i = 0; i < 100; i++) {
		auto result = con.Query("INSERT INTO integers VALUES ($1)", i);
		if (!result->HasError()) {
			(*count)++;
			continue;
		}
		CheckConstraintViolation(result->ToString());
	}
}

static void MixUpdatePK(DuckDB *db, idx_t thread_idx) {
	std::uniform_int_distribution<> distribution(1, 100);
	std::mt19937 gen;
	gen.seed(thread_idx);

	Connection con(*db);
	for (idx_t i = 0; i < 100; i++) {
		idx_t old_value = distribution(gen);
		idx_t new_value = 100 + distribution(gen);

		auto result =
		    con.Query("UPDATE integers SET i =" + to_string(new_value) + " WHERE i = " + to_string(old_value));
		if (result->HasError()) {
			CheckConstraintViolation(result->ToString());
		}
	}
}

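// Appenders insert the keys 0...99 while updaters move existing keys into the 101...200 range; updates
// can only relocate keys, so both COUNT(*) and COUNT(DISTINCT i) must equal the number of successful
// inserts.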
TEST_CASE("Mix updates and inserts on PRIMARY KEY", "[index][.]") {
|
||||
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
|
||||
atomic<idx_t> atomic_count;
|
||||
atomic_count = 0;
|
||||
|
||||
// create a table
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER PRIMARY KEY)"));
|
||||
|
||||
// launch a mix of updating and appending threads
|
||||
thread threads[CONCURRENT_INDEX_THREAD_COUNT];
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
if (i % 2) {
|
||||
threads[i] = thread(MixUpdatePK, &db, i);
|
||||
continue;
|
||||
}
|
||||
threads[i] = thread(MixAppendToPK, &db, &atomic_count);
|
||||
}
|
||||
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
|
||||
// test the result
|
||||
auto result = con.Query("SELECT COUNT(*), COUNT(DISTINCT i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(atomic_count)}));
|
||||
REQUIRE(CHECK_COLUMN(result, 1, {Value::BIGINT(atomic_count)}));
|
||||
}
|
||||
|
||||
static void TransactionalAppendToPK(DuckDB *db, idx_t thread_idx) {
	duckdb::unique_ptr<QueryResult> result;

	Connection con(*db);
	result = con.Query("BEGIN TRANSACTION");
	if (result->HasError()) {
		FAIL(result->GetError());
	}

	// get the initial count
	result = con.Query("SELECT COUNT(*) FROM integers WHERE i >= 0");
	if (result->HasError()) {
		FAIL(result->GetError());
	}

	auto chunk = result->Fetch();
	auto initial_count = chunk->GetValue(0, 0).GetValue<int32_t>();

	for (idx_t i = 0; i < 50; i++) {
		result = con.Query("INSERT INTO integers VALUES ($1)", (int32_t)(thread_idx * 1000 + i));
		if (result->HasError()) {
			FAIL(result->GetError());
		}

		// check the count
		result = con.Query("SELECT COUNT(*), COUNT(DISTINCT i) FROM integers WHERE i >= 0");
		if (!CHECK_COLUMN(result, 0, {Value::INTEGER(initial_count + i + 1)})) {
			FAIL("Incorrect result in TransactionalAppendToPK");
		}
	}

	result = con.Query("COMMIT");
	if (result->HasError()) {
		FAIL(result->GetError());
	}
}

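// Each thread appends 50 keys from its own disjoint range inside a single transaction and verifies after
// every insert that it sees exactly its own uncommitted rows on top of the count observed at BEGIN; the
// "i >= 0" predicate presumably forces an actual scan instead of a metadata-only count.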
TEST_CASE("Parallel transactional appends to indexed table", "[index][.]") {
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
|
||||
// create a table
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER PRIMARY KEY)"));
|
||||
|
||||
// launch many concurrently inserting threads
|
||||
thread threads[CONCURRENT_INDEX_THREAD_COUNT];
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(TransactionalAppendToPK, &db, i);
|
||||
}
|
||||
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
|
||||
// test that the counts are correct
|
||||
auto result = con.Query("SELECT COUNT(*), COUNT(DISTINCT i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(idx_t(CONCURRENT_INDEX_THREAD_COUNT * 50))}));
|
||||
REQUIRE(CHECK_COLUMN(result, 1, {Value::BIGINT(idx_t(CONCURRENT_INDEX_THREAD_COUNT * 50))}));
|
||||
}
|
||||
|
||||
static void JoinIntegers(Connection *con) {
	for (idx_t i = 0; i < 10; i++) {
		auto result = con->Query("SELECT count(*) FROM integers INNER JOIN integers_2 ON (integers.i = integers_2.i)");
		if (result->HasError()) {
			FAIL();
		}
		if (!CHECK_COLUMN(result, 0, {Value::BIGINT(500000)})) {
			FAIL();
		}
	}

	auto result = con->Query("COMMIT");
	if (result->HasError()) {
		FAIL();
	}
}

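// JoinIntegers runs inside a transaction that is started before any rows are appended, so under snapshot
// isolation every repetition of the join must keep producing the original 500000 matches, regardless of
// what the appending threads commit in the meantime.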
TEST_CASE("Concurrent appends during joins", "[index][.]") {
|
||||
|
||||
duckdb::unique_ptr<QueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
|
||||
// create join tables to append to
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers AS SELECT range AS i FROM range(1000000)"));
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers_2 AS SELECT range AS i FROM range(500000)"));
|
||||
|
||||
// create the index
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE INDEX i_index ON integers(i)"));
|
||||
|
||||
// we need to guarantee that this thread starts before the other threads
|
||||
Connection join_con_1(db);
|
||||
REQUIRE_NO_FAIL(join_con_1.Query("BEGIN TRANSACTION"));
|
||||
|
||||
Connection join_con_2(db);
|
||||
REQUIRE_NO_FAIL(con.Query("SET immediate_transaction_mode=true"));
|
||||
REQUIRE_NO_FAIL(join_con_2.Query("BEGIN TRANSACTION"));
|
||||
|
||||
thread threads[CONCURRENT_INDEX_THREAD_COUNT];
|
||||
|
||||
// join the data in join_con_1, which is an uncommitted transaction started
|
||||
// before appending any data
|
||||
threads[0] = thread(JoinIntegers, &join_con_1);
|
||||
|
||||
atomic<bool> success(true);
|
||||
// launch many concurrently writing threads
|
||||
for (idx_t i = 2; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(AppendToIntegers, &db, &success);
|
||||
}
|
||||
|
||||
// join the data in join_con_2, which is an uncommitted transaction started
|
||||
// before appending any data
|
||||
threads[1] = thread(JoinIntegers, &join_con_2);
|
||||
|
||||
for (idx_t i = 0; i < CONCURRENT_INDEX_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
REQUIRE(success);
|
||||
}
|
||||
46
external/duckdb/test/sql/parallelism/interquery/test_concurrent_prepared.cpp
vendored
Normal file
@@ -0,0 +1,46 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include <iostream>
#include <thread>

using namespace duckdb;
using namespace std;

static void SelectTable(Connection con) {
	for (idx_t i = 0; i < 1000; i++) {
		auto prepare = con.Prepare("select * from foo");
		auto result = prepare->Execute();
		if (result->HasError()) {
			FAIL();
		}
	}
}

static void RecreateTable(Connection con) {
	for (idx_t i = 0; i < 1000; i++) {
		auto prepare = con.Prepare("create or replace table foo as select * from foo");
		auto result = prepare->Execute();
		if (result->HasError()) {
			FAIL();
		}
	}
}

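// This test races re-preparation of a SELECT against CREATE OR REPLACE of the table it reads: each
// Prepare binds against the current catalog state, and neither side is ever expected to error.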
TEST_CASE("Test concurrent prepared", "[api][.]") {
|
||||
duckdb::unique_ptr<QueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
con.EnableQueryVerification();
|
||||
|
||||
REQUIRE_NO_FAIL(con.Query("create table foo as select unnest(generate_series(1, 10));"));
|
||||
|
||||
Connection select_conn(db);
|
||||
Connection recreate_conn(db);
|
||||
select_conn.EnableQueryVerification();
|
||||
|
||||
std::thread select_function(SelectTable, std::move(select_conn));
|
||||
std::thread recreate_function(RecreateTable, std::move(recreate_conn));
|
||||
|
||||
select_function.join();
|
||||
recreate_function.join();
|
||||
}
|
||||
92
external/duckdb/test/sql/parallelism/interquery/test_concurrent_sequence.cpp
vendored
Normal file
@@ -0,0 +1,92 @@
#include "catch.hpp"
#include "test_helpers.hpp"

#include <algorithm>
#include <mutex>
#include <thread>

using namespace duckdb;
using namespace std;

struct ConcurrentData {
	DuckDB &db;
	mutex lock;
	duckdb::vector<int64_t> results;

	ConcurrentData(DuckDB &db) : db(db) {
	}
};

#define CONCURRENT_SEQUENCE_THREAD_COUNT 10
#define CONCURRENT_SEQUENCE_INSERT_COUNT 100

static void append_values_from_sequence(ConcurrentData *data) {
	Connection con(data->db);
	for (size_t i = 0; i < CONCURRENT_SEQUENCE_INSERT_COUNT; i++) {
		auto result = con.Query("SELECT nextval('seq')");
		int64_t res = result->GetValue(0, 0).GetValue<int64_t>();
		lock_guard<mutex> lock(data->lock);
		data->results.push_back(res);
	}
}

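// nextval hands out sequence values atomically, so the sorted multiset of values collected by N
// concurrent threads should match N sequential runs; the same holds for the cyclic sequence below, with
// values wrapping around at MAXVALUE 10.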
TEST_CASE("Test Concurrent Usage of Sequences", "[interquery][.]") {
|
||||
duckdb::unique_ptr<QueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
thread threads[CONCURRENT_SEQUENCE_THREAD_COUNT];
|
||||
ConcurrentData data(db);
|
||||
ConcurrentData seq_data(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// create a sequence
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE SEQUENCE seq;"));
|
||||
// fetch a number of values sequentially
|
||||
for (size_t i = 0; i < CONCURRENT_SEQUENCE_THREAD_COUNT; i++) {
|
||||
append_values_from_sequence(&seq_data);
|
||||
}
|
||||
|
||||
REQUIRE_NO_FAIL(con.Query("DROP SEQUENCE seq;"));
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE SEQUENCE seq;"));
|
||||
// now launch threads that all use the sequence in parallel
|
||||
// each appends the values to a duckdb::vector "results"
|
||||
for (size_t i = 0; i < CONCURRENT_SEQUENCE_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(append_values_from_sequence, &data);
|
||||
}
|
||||
for (size_t i = 0; i < CONCURRENT_SEQUENCE_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
// now we sort the output data
|
||||
std::sort(seq_data.results.begin(), seq_data.results.end());
|
||||
std::sort(data.results.begin(), data.results.end());
|
||||
// the sequential and threaded data should be the same
|
||||
REQUIRE(seq_data.results == data.results);
|
||||
|
||||
seq_data.results.clear();
|
||||
data.results.clear();
|
||||
// now do the same but for a cyclic sequence
|
||||
REQUIRE_NO_FAIL(con.Query("DROP SEQUENCE seq;"));
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE SEQUENCE seq MAXVALUE 10 CYCLE;"));
|
||||
for (size_t i = 0; i < CONCURRENT_SEQUENCE_THREAD_COUNT; i++) {
|
||||
append_values_from_sequence(&seq_data);
|
||||
}
|
||||
|
||||
REQUIRE_NO_FAIL(con.Query("DROP SEQUENCE seq;"));
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE SEQUENCE seq MAXVALUE 10 CYCLE;"));
|
||||
for (size_t i = 0; i < CONCURRENT_SEQUENCE_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(append_values_from_sequence, &data);
|
||||
}
|
||||
for (size_t i = 0; i < CONCURRENT_SEQUENCE_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
// now we sort the output data
|
||||
std::sort(seq_data.results.begin(), seq_data.results.end());
|
||||
std::sort(data.results.begin(), data.results.end());
|
||||
// the sequential and threaded data should be the same
|
||||
REQUIRE(seq_data.results == data.results);
|
||||
}
|
||||
114
external/duckdb/test/sql/parallelism/interquery/test_concurrentappend.cpp
vendored
Normal file
@@ -0,0 +1,114 @@
#include "catch.hpp"
#include "duckdb/common/value_operations/value_operations.hpp"
#include "test_helpers.hpp"

#include <atomic>
#include <thread>

using namespace duckdb;
using namespace std;

static constexpr int CONCURRENT_APPEND_THREAD_COUNT = 10;
static constexpr int CONCURRENT_APPEND_INSERT_ELEMENTS = 1000;

TEST_CASE("Sequential append", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
duckdb::vector<duckdb::unique_ptr<Connection>> connections;
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// initialize the database
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER);"));
|
||||
|
||||
for (size_t i = 0; i < CONCURRENT_APPEND_THREAD_COUNT; i++) {
|
||||
connections.push_back(make_uniq<Connection>(db));
|
||||
connections[i]->Query("BEGIN TRANSACTION;");
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < CONCURRENT_APPEND_THREAD_COUNT; i++) {
|
||||
result = connections[i]->Query("SELECT COUNT(*) FROM integers");
|
||||
D_ASSERT(result->RowCount() > 0);
|
||||
Value count = result->GetValue(0, 0);
|
||||
REQUIRE(count == 0);
|
||||
for (size_t j = 0; j < CONCURRENT_APPEND_INSERT_ELEMENTS; j++) {
|
||||
connections[i]->Query("INSERT INTO integers VALUES (3)");
|
||||
result = connections[i]->Query("SELECT COUNT(*) FROM integers");
|
||||
Value new_count = result->GetValue(0, 0);
|
||||
REQUIRE(new_count == j + 1);
|
||||
count = new_count;
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < CONCURRENT_APPEND_THREAD_COUNT; i++) {
|
||||
connections[i]->Query("COMMIT;");
|
||||
}
|
||||
result = con.Query("SELECT COUNT(*) FROM integers");
|
||||
Value count = result->GetValue(0, 0);
|
||||
REQUIRE(count == CONCURRENT_APPEND_THREAD_COUNT * CONCURRENT_APPEND_INSERT_ELEMENTS);
|
||||
}
|
||||
|
||||
static volatile std::atomic<int> append_finished_threads;

static void insert_random_elements(DuckDB *db, bool *correct, int threadnr) {
	correct[threadnr] = true;
	Connection con(*db);
	// initial count
	con.Query("BEGIN TRANSACTION;");
	auto result = con.Query("SELECT COUNT(*) FROM integers");
	Value count = result->GetValue(0, 0);
	auto start_count = count.GetValue<int64_t>();
	for (size_t i = 0; i < CONCURRENT_APPEND_INSERT_ELEMENTS; i++) {
		// count should increase by one for every append we do
		con.Query("INSERT INTO integers VALUES (3)");
		result = con.Query("SELECT COUNT(*) FROM integers");
		Value new_count = result->GetValue(0, 0);
		if (new_count != start_count + i + 1) {
			correct[threadnr] = false;
		}
		count = new_count;
	}
	append_finished_threads++;
	while (append_finished_threads != CONCURRENT_APPEND_THREAD_COUNT)
		;
	con.Query("COMMIT;");
}

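// The atomic counter acts as a barrier: every thread keeps its transaction open until all threads have
// finished appending, so each transaction only ever observes its own inserts while running, and all rows
// become visible together once the COMMITs go through.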
TEST_CASE("Concurrent append", "[interquery][.]") {
|
||||
duckdb::unique_ptr<QueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// initialize the database
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER);"));
|
||||
|
||||
append_finished_threads = 0;
|
||||
|
||||
bool correct[CONCURRENT_APPEND_THREAD_COUNT];
|
||||
thread threads[CONCURRENT_APPEND_THREAD_COUNT];
|
||||
for (size_t i = 0; i < CONCURRENT_APPEND_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(insert_random_elements, &db, correct, i);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < CONCURRENT_APPEND_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
REQUIRE(correct[i]);
|
||||
}
|
||||
|
||||
result = con.Query("SELECT COUNT(*), SUM(i) FROM integers");
|
||||
REQUIRE(
|
||||
CHECK_COLUMN(result, 0, {Value::BIGINT(CONCURRENT_APPEND_THREAD_COUNT * CONCURRENT_APPEND_INSERT_ELEMENTS)}));
|
||||
REQUIRE(CHECK_COLUMN(result, 1,
|
||||
{Value::BIGINT(3 * CONCURRENT_APPEND_THREAD_COUNT * CONCURRENT_APPEND_INSERT_ELEMENTS)}));
|
||||
}
|
||||
228
external/duckdb/test/sql/parallelism/interquery/test_concurrentdelete.cpp
vendored
Normal file
@@ -0,0 +1,228 @@
#include "catch.hpp"
#include "duckdb/common/value_operations/value_operations.hpp"
#include "test_helpers.hpp"

#include <atomic>
#include <random>
#include <thread>

using namespace duckdb;
using namespace std;

static constexpr int CONCURRENT_DELETE_THREAD_COUNT = 10;
static constexpr int CONCURRENT_DELETE_INSERT_ELEMENTS = 100;

TEST_CASE("Single thread delete", "[interquery][.]") {
|
||||
duckdb::unique_ptr<QueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
duckdb::vector<duckdb::unique_ptr<Connection>> connections;
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// initialize the database
|
||||
con.Query("CREATE TABLE integers(i INTEGER);");
|
||||
int sum = 0;
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_INSERT_ELEMENTS; i++) {
|
||||
for (size_t j = 0; j < 10; j++) {
|
||||
con.Query("INSERT INTO integers VALUES (" + to_string(j + 1) + ");");
|
||||
sum += j + 1;
|
||||
}
|
||||
}
|
||||
|
||||
// check the sum
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {sum}));
|
||||
|
||||
// simple delete, we should delete CONCURRENT_DELETE_INSERT_ELEMENTS elements
|
||||
result = con.Query("DELETE FROM integers WHERE i=2");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {CONCURRENT_DELETE_INSERT_ELEMENTS}));
|
||||
|
||||
// check sum again
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {sum - 2 * CONCURRENT_DELETE_INSERT_ELEMENTS}));
|
||||
}
|
||||
|
||||
TEST_CASE("Sequential delete", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
duckdb::vector<duckdb::unique_ptr<Connection>> connections;
|
||||
Value count;
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// initialize the database
|
||||
con.Query("CREATE TABLE integers(i INTEGER);");
|
||||
|
||||
int sum = 0;
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_INSERT_ELEMENTS; i++) {
|
||||
for (size_t j = 0; j < 10; j++) {
|
||||
con.Query("INSERT INTO integers VALUES (" + to_string(j + 1) + ");");
|
||||
sum += j + 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_THREAD_COUNT; i++) {
|
||||
connections.push_back(make_uniq<Connection>(db));
|
||||
connections[i]->Query("BEGIN TRANSACTION;");
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_THREAD_COUNT; i++) {
|
||||
// check the current count
|
||||
result = connections[i]->Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
count = result->GetValue(0, 0);
|
||||
REQUIRE(count == sum);
|
||||
// delete the elements for this thread
|
||||
REQUIRE_NO_FAIL(connections[i]->Query("DELETE FROM integers WHERE i=" + to_string(i + 1)));
|
||||
// check the updated count
|
||||
result = connections[i]->Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
count = result->GetValue(0, 0);
|
||||
REQUIRE(count == sum - (i + 1) * CONCURRENT_DELETE_INSERT_ELEMENTS);
|
||||
}
|
||||
// check the count on the original connection
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
count = result->GetValue(0, 0);
|
||||
REQUIRE(count == sum);
|
||||
|
||||
// commit everything
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_THREAD_COUNT; i++) {
|
||||
connections[i]->Query("COMMIT;");
|
||||
}
|
||||
|
||||
// check that the count is 0 now
|
||||
result = con.Query("SELECT COUNT(i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
count = result->GetValue(0, 0);
|
||||
REQUIRE(count == 0);
|
||||
}
|
||||
|
||||
TEST_CASE("Rollback delete", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
duckdb::vector<duckdb::unique_ptr<Connection>> connections;
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// initialize the database
|
||||
con.Query("CREATE TABLE integers(i INTEGER);");
|
||||
int sum = 0;
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_INSERT_ELEMENTS; i++) {
|
||||
for (size_t j = 0; j < 10; j++) {
|
||||
con.Query("INSERT INTO integers VALUES (" + to_string(j + 1) + ");");
|
||||
sum += j + 1;
|
||||
}
|
||||
}
|
||||
|
||||
// begin transaction
|
||||
REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
|
||||
|
||||
// check the sum
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {sum}));
|
||||
|
||||
// simple delete
|
||||
result = con.Query("DELETE FROM integers WHERE i=2");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {100}));
|
||||
|
||||
// check sum again
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {sum - 2 * CONCURRENT_DELETE_INSERT_ELEMENTS}));
|
||||
|
||||
// rollback transaction
|
||||
REQUIRE_NO_FAIL(con.Query("ROLLBACK"));
|
||||
|
||||
// check the sum again
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {sum}));
|
||||
}
|
||||
|
||||
static volatile std::atomic<int> delete_finished_threads;

static void delete_elements(DuckDB *db, bool *correct, size_t threadnr) {
	correct[threadnr] = true;
	Connection con(*db);
	// initial count
	con.Query("BEGIN TRANSACTION;");
	auto result = con.Query("SELECT COUNT(*) FROM integers");
	Value count = result->GetValue(0, 0);
	auto start_count = count.GetValue<int64_t>();

	for (size_t i = 0; i < CONCURRENT_DELETE_INSERT_ELEMENTS; i++) {
		// count should decrease by one for every delete we do
		auto element = CONCURRENT_DELETE_INSERT_ELEMENTS * threadnr + i;
		if (con.Query("DELETE FROM integers WHERE i=" + to_string(element))->HasError()) {
			correct[threadnr] = false;
		}
		result = con.Query("SELECT COUNT(*) FROM integers");
		if (result->HasError()) {
			correct[threadnr] = false;
		} else {
			Value new_count = result->GetValue(0, 0);
			if (new_count != start_count - (i + 1)) {
				correct[threadnr] = false;
			}
			count = new_count;
		}
	}
	delete_finished_threads++;
	while (delete_finished_threads != CONCURRENT_DELETE_THREAD_COUNT)
		;
	con.Query("COMMIT;");
}

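// Each thread deletes only the 100-element key range it was assigned, so the deletes never conflict;
// within its own snapshot a thread sees the table shrink by exactly one row per delete, and the barrier
// delays all COMMITs until every thread is done.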
TEST_CASE("Concurrent delete", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// initialize the database
|
||||
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER);"));
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_INSERT_ELEMENTS; i++) {
|
||||
for (size_t j = 0; j < CONCURRENT_DELETE_THREAD_COUNT; j++) {
|
||||
auto element = CONCURRENT_DELETE_INSERT_ELEMENTS * j + i;
|
||||
con.Query("INSERT INTO integers VALUES (" + to_string(element) + ");");
|
||||
}
|
||||
}
|
||||
|
||||
delete_finished_threads = 0;
|
||||
|
||||
bool correct[CONCURRENT_DELETE_THREAD_COUNT];
|
||||
thread threads[CONCURRENT_DELETE_THREAD_COUNT];
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(delete_elements, &db, correct, i);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < CONCURRENT_DELETE_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
REQUIRE(correct[i]);
|
||||
}
|
||||
|
||||
// check that the count is 0 now
|
||||
result = con.Query("SELECT COUNT(i) FROM integers");
|
||||
REQUIRE_NO_FAIL(*result);
|
||||
auto count = result->GetValue(0, 0);
|
||||
REQUIRE(count == 0);
|
||||
}
|
||||
213
external/duckdb/test/sql/parallelism/interquery/test_concurrentupdate.cpp
vendored
Normal file
@@ -0,0 +1,213 @@
#include "catch.hpp"
#include "duckdb/common/value_operations/value_operations.hpp"
#include "test_helpers.hpp"

#include <atomic>
#include <random>
#include <thread>

using namespace duckdb;
using namespace std;

namespace test_concurrent_update {

static constexpr int CONCURRENT_UPDATE_TRANSACTION_UPDATE_COUNT = 1000;
static constexpr int CONCURRENT_UPDATE_TOTAL_ACCOUNTS = 10;
static constexpr int CONCURRENT_UPDATE_MONEY_PER_ACCOUNT = 10;

TEST_CASE("Single thread update", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// initialize the database
|
||||
con.Query("CREATE TABLE integers(i INTEGER);");
|
||||
int sum = 0;
|
||||
for (size_t i = 0; i < CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
|
||||
for (size_t j = 0; j < 10; j++) {
|
||||
con.Query("INSERT INTO integers VALUES (" + to_string(j + 1) + ");");
|
||||
sum += j + 1;
|
||||
}
|
||||
}
|
||||
|
||||
// check the sum
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {sum}));
|
||||
|
||||
// simple update, we should update INSERT_ELEMENTS elements
|
||||
result = con.Query("UPDATE integers SET i=4 WHERE i=2");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {CONCURRENT_UPDATE_TOTAL_ACCOUNTS}));
|
||||
|
||||
// check updated sum
|
||||
result = con.Query("SELECT SUM(i) FROM integers");
|
||||
REQUIRE(CHECK_COLUMN(result, 0, {sum + 2 * CONCURRENT_UPDATE_TOTAL_ACCOUNTS}));
|
||||
}
|
||||
|
||||
atomic<bool> finished_updating;

static void read_total_balance(DuckDB *db, bool *read_correct) {
	*read_correct = true;
	Connection con(*db);
	while (!finished_updating) {
		// the total balance should remain constant regardless of updates
		auto result = con.Query("SELECT SUM(money) FROM accounts");
		if (!CHECK_COLUMN(result, 0, {CONCURRENT_UPDATE_TOTAL_ACCOUNTS * CONCURRENT_UPDATE_MONEY_PER_ACCOUNT})) {
			*read_correct = false;
		}
	}
}

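// Classic bank-transfer invariant: the writers only move money between accounts inside a transaction, so
// a concurrent reader must observe the constant total in every snapshot, however the updates interleave.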
TEST_CASE("Concurrent update", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
// fixed seed random numbers
|
||||
mt19937 generator;
|
||||
generator.seed(42);
|
||||
uniform_int_distribution<int> account_distribution(0, CONCURRENT_UPDATE_TOTAL_ACCOUNTS - 1);
|
||||
auto random_account = bind(account_distribution, generator);
|
||||
|
||||
uniform_int_distribution<int> amount_distribution(0, CONCURRENT_UPDATE_MONEY_PER_ACCOUNT);
|
||||
auto random_amount = bind(amount_distribution, generator);
|
||||
|
||||
finished_updating = false;
|
||||
// initialize the database
|
||||
con.Query("CREATE TABLE accounts(id INTEGER, money INTEGER)");
|
||||
for (size_t i = 0; i < CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
|
||||
con.Query("INSERT INTO accounts VALUES (" + to_string(i) + ", " +
|
||||
to_string(CONCURRENT_UPDATE_MONEY_PER_ACCOUNT) + ");");
|
||||
}
|
||||
|
||||
bool read_correct;
|
||||
// launch separate thread for reading aggregate
|
||||
thread read_thread(read_total_balance, &db, &read_correct);
|
||||
|
||||
// start vigorously updating balances in this thread
|
||||
for (size_t i = 0; i < CONCURRENT_UPDATE_TRANSACTION_UPDATE_COUNT; i++) {
|
||||
int from = random_account();
|
||||
int to = random_account();
|
||||
while (to == from) {
|
||||
to = random_account();
|
||||
}
|
||||
int amount = random_amount();
|
||||
|
||||
REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
|
||||
result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(from));
|
||||
Value money_from = result->GetValue(0, 0);
|
||||
result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(to));
|
||||
Value money_to = result->GetValue(0, 0);
|
||||
|
||||
REQUIRE_NO_FAIL(
|
||||
con.Query("UPDATE accounts SET money = money - " + to_string(amount) + " WHERE id = " + to_string(from)));
|
||||
REQUIRE_NO_FAIL(
|
||||
con.Query("UPDATE accounts SET money = money + " + to_string(amount) + " WHERE id = " + to_string(to)));
|
||||
|
||||
result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(from));
|
||||
Value new_money_from = result->GetValue(0, 0);
|
||||
result = con.Query("SELECT money FROM accounts WHERE id=" + to_string(to));
|
||||
Value new_money_to = result->GetValue(0, 0);
|
||||
|
||||
Value expected_money_from, expected_money_to;
|
||||
|
||||
expected_money_from = Value::INTEGER(IntegerValue::Get(money_from) - amount);
|
||||
expected_money_to = Value::INTEGER(IntegerValue::Get(money_to) + amount);
|
||||
|
||||
REQUIRE(new_money_from == expected_money_from);
|
||||
REQUIRE(new_money_to == expected_money_to);
|
||||
|
||||
REQUIRE_NO_FAIL(con.Query("COMMIT"));
|
||||
}
|
||||
finished_updating = true;
|
||||
read_thread.join();
|
||||
REQUIRE(read_correct);
|
||||
}
|
||||
|
||||
static std::atomic<size_t> finished_threads;

static void write_random_numbers_to_account(DuckDB *db, bool *correct, size_t nr) {
	correct[nr] = true;
	Connection con(*db);
	for (size_t i = 0; i < CONCURRENT_UPDATE_TRANSACTION_UPDATE_COUNT; i++) {
		// just make some changes to the total
		// the total amount of money after the commit is the same
		if (con.Query("BEGIN TRANSACTION")->HasError()) {
			correct[nr] = false;
		}
		if (con.Query("UPDATE accounts SET money = money + " + to_string(i * 2) + " WHERE id = " + to_string(nr))
		        ->HasError()) {
			correct[nr] = false;
		}
		if (con.Query("UPDATE accounts SET money = money - " + to_string(i) + " WHERE id = " + to_string(nr))
		        ->HasError()) {
			correct[nr] = false;
		}
		if (con.Query("UPDATE accounts SET money = money - " + to_string(i * 2) + " WHERE id = " + to_string(nr))
		        ->HasError()) {
			correct[nr] = false;
		}
		if (con.Query("UPDATE accounts SET money = money + " + to_string(i) + " WHERE id = " + to_string(nr))
		        ->HasError()) {
			correct[nr] = false;
		}
		// we test both commit and rollback
		// the result of both should be the same since the updates have a
		// net-zero effect
		if (con.Query(nr % 2 == 0 ? "COMMIT" : "ROLLBACK")->HasError()) {
			correct[nr] = false;
		}
	}
	finished_threads++;
	if (finished_threads == CONCURRENT_UPDATE_TOTAL_ACCOUNTS) {
		finished_updating = true;
	}
}

TEST_CASE("Multiple concurrent updaters", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
finished_updating = false;
|
||||
finished_threads = 0;
|
||||
// initialize the database
|
||||
con.Query("CREATE TABLE accounts(id INTEGER, money INTEGER)");
|
||||
for (size_t i = 0; i < CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
|
||||
con.Query("INSERT INTO accounts VALUES (" + to_string(i) + ", " +
|
||||
to_string(CONCURRENT_UPDATE_MONEY_PER_ACCOUNT) + ");");
|
||||
}
|
||||
|
||||
bool correct[CONCURRENT_UPDATE_TOTAL_ACCOUNTS];
|
||||
bool read_correct;
|
||||
std::thread write_threads[CONCURRENT_UPDATE_TOTAL_ACCOUNTS];
|
||||
// launch a thread for reading the table
|
||||
thread read_thread(read_total_balance, &db, &read_correct);
|
||||
// launch several threads for updating the table
|
||||
for (size_t i = 0; i < CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
|
||||
write_threads[i] = thread(write_random_numbers_to_account, &db, correct, i);
|
||||
}
|
||||
read_thread.join();
|
||||
for (size_t i = 0; i < CONCURRENT_UPDATE_TOTAL_ACCOUNTS; i++) {
|
||||
write_threads[i].join();
|
||||
REQUIRE(correct[i]);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace test_concurrent_update
|
||||
148
external/duckdb/test/sql/parallelism/interquery/test_default_catalog.cpp
vendored
Normal file
@@ -0,0 +1,148 @@
#include "catch.hpp"
#include "duckdb/common/value_operations/value_operations.hpp"
#include "test_helpers.hpp"

#include <atomic>
#include <random>
#include <thread>

using namespace duckdb;
using namespace std;

class ConcurrentDefaultCatalog {
public:
	static constexpr int CONCURRENT_DEFAULT_THREAD_COUNT = 10;
	static constexpr int CONCURRENT_DEFAULT_ITERATION_COUNT = 10;

	static void ScanDefaultCatalog(DuckDB *db, bool *read_correct) {
		Connection con(*db);
		*read_correct = true;
		for (idx_t i = 0; i < CONCURRENT_DEFAULT_ITERATION_COUNT; i++) {
			auto result = con.Query("SELECT * FROM pg_class");
			if (result->HasError()) {
				*read_correct = false;
			}
		}
	}

	static void QueryDefaultCatalog(DuckDB *db, bool *read_correct, int thread_id) {
		duckdb::vector<string> random_default_views {"pragma_database_list", "sqlite_master", "sqlite_schema",
		                                             "sqlite_temp_master", "sqlite_temp_schema", "duckdb_constraints",
		                                             "duckdb_columns", "duckdb_indexes", "duckdb_schemas",
		                                             "duckdb_tables", "duckdb_types", "duckdb_views"};

		Connection con(*db);
		*read_correct = true;
		for (idx_t i = 0; i < CONCURRENT_DEFAULT_ITERATION_COUNT; i++) {
			auto result = con.Query("SELECT * FROM " + random_default_views[rand() % random_default_views.size()]);
			if (result->HasError()) {
				*read_correct = false;
			}
		}
	}

	static void QueryDefaultCatalogFunctions(DuckDB *db, bool *read_correct, int thread_id) {
		duckdb::vector<string> random_queries {
		    "SELECT pg_collation_is_visible(0)",
		    "SELECT pg_conversion_is_visible(0)",
		    "SELECT pg_function_is_visible(0)",
		    "SELECT pg_opclass_is_visible(0)",
		    "SELECT pg_operator_is_visible(0)",
		    "SELECT pg_opfamily_is_visible(0)",
		    "SELECT pg_table_is_visible(0)",
		    "SELECT pg_ts_config_is_visible(0)",
		    "SELECT pg_ts_dict_is_visible(0)",
		    "SELECT pg_ts_parser_is_visible(0)",
		    "SELECT pg_ts_template_is_visible(0)",
		    "SELECT pg_type_is_visible(0)",
		    "SELECT current_user",
		    "SELECT current_catalog",
		    "SELECT current_database()",
		    "SELECT user",
		    "SELECT session_user",
		    "SELECT inet_client_addr()",
		    "SELECT inet_client_port()",
		    "SELECT inet_server_addr()",
		    "SELECT inet_server_port()",
		    "SELECT pg_my_temp_schema()",
		};

		Connection con(*db);
		*read_correct = true;
		for (idx_t i = 0; i < CONCURRENT_DEFAULT_ITERATION_COUNT; i++) {
			auto result = con.Query(random_queries[rand() % random_queries.size()]);
			if (result->HasError()) {
				*read_correct = false;
			}
		}
	}
};

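// These tests hammer the default catalog: the pg_* views and system functions are presumably
// instantiated lazily on first access, so many threads touching them for the first time concurrently
// stress that creation path.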
TEST_CASE("Concurrent default catalog using Scan", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
bool correct[ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT];
|
||||
thread threads[ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT];
|
||||
for (size_t i = 0; i < ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(ConcurrentDefaultCatalog::ScanDefaultCatalog, &db, correct + i);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
REQUIRE(correct[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Concurrent default catalog using Queries", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
bool correct[ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT];
|
||||
thread threads[ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT];
|
||||
for (size_t i = 0; i < ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(ConcurrentDefaultCatalog::QueryDefaultCatalog, &db, correct + i, i);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
REQUIRE(correct[i]);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Concurrent default function creation", "[interquery][.]") {
|
||||
duckdb::unique_ptr<MaterializedQueryResult> result;
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
|
||||
// enable detailed profiling
|
||||
con.Query("PRAGMA enable_profiling");
|
||||
auto detailed_profiling_output = TestCreatePath("detailed_profiling_output");
|
||||
con.Query("PRAGMA profiling_output='" + detailed_profiling_output + "'");
|
||||
con.Query("PRAGMA profiling_mode = detailed");
|
||||
|
||||
bool correct[ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT];
|
||||
thread threads[ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT];
|
||||
for (size_t i = 0; i < ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT; i++) {
|
||||
threads[i] = thread(ConcurrentDefaultCatalog::QueryDefaultCatalogFunctions, &db, correct + i, i);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < ConcurrentDefaultCatalog::CONCURRENT_DEFAULT_THREAD_COUNT; i++) {
|
||||
threads[i].join();
|
||||
REQUIRE(correct[i]);
|
||||
}
|
||||
}
|
||||
45
external/duckdb/test/sql/parallelism/interquery/tpch_concurrent_checkpoints.test_slow
vendored
Normal file
@@ -0,0 +1,45 @@
# name: test/sql/parallelism/interquery/tpch_concurrent_checkpoints.test_slow
# description: Run queries that reference the same table multiple times while doing checkpoints
# group: [interquery]

require tpch

statement ok
CALL dbgen(sf=0.1);

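# while thread 0 appends to lineitem/orders and forces checkpoints, threads 1-4 repeatedly run a query
# that references lineitem multiple times; checkpointing must not disturb data the readers depend on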
concurrentloop threadid 0 5

loop i 0 20

onlyif threadid=0
query I
INSERT INTO lineitem SELECT * FROM lineitem LIMIT 1000
----
1000

onlyif threadid=0
query I
INSERT INTO orders SELECT * FROM orders LIMIT 1000
----
1000

onlyif threadid=0
statement ok
CHECKPOINT

endloop

loop i 0 50

skipif threadid=0
statement ok
SELECT COUNT(*)
FROM lineitem
WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_shipdate >= DATE '1995-01-01') AND
      l_partkey IN (SELECT l_partkey FROM lineitem WHERE l_returnflag='R')

endloop

endloop

63
external/duckdb/test/sql/parallelism/interquery/tpch_concurrent_operations.test_slow
vendored
Normal file
@@ -0,0 +1,63 @@
# name: test/sql/parallelism/interquery/tpch_concurrent_operations.test_slow
# description: Run TPC-H queries while doing concurrent operations
# group: [interquery]

require tpch

statement ok
CALL dbgen(sf=1);

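# thread 0 runs a net-zero insert/update/delete cycle (the extra rows are tagged via l_comment and then
# removed again) while threads 1-4 run TPC-H queries; afterwards all answers must still match the sf1
# reference results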
concurrentloop threadid 0 5

loop i 0 20

onlyif threadid=0
query I
INSERT INTO lineitem SELECT * REPLACE ('this is an extra row' AS l_comment) FROM lineitem USING SAMPLE (1000);
----
1000

onlyif threadid=0
query I
UPDATE lineitem SET l_orderkey = l_orderkey + 100 WHERE l_comment = 'this is an extra row'
----
1000

onlyif threadid=0
query I
DELETE FROM lineitem WHERE l_comment = 'this is an extra row'
----
1000

endloop

loop i 0 30

skipif threadid=0
statement ok
PRAGMA tpch((${threadid} + ${i}) % 22 + 1)

endloop

endloop

# verify that all TPC-H results are correct after this

loop i 1 9

query I
PRAGMA tpch(${i})
----
<FILE>:extension/tpch/dbgen/answers/sf1/q0${i}.csv

endloop

loop i 10 23

query I
PRAGMA tpch(${i})
----
<FILE>:extension/tpch/dbgen/answers/sf1/q${i}.csv

endloop