should be it
13 external/duckdb/test/sqlite/CMakeLists.txt vendored Normal file
@@ -0,0 +1,13 @@
add_extension_definitions()

add_definitions(-DDUCKDB_ROOT_DIRECTORY="${PROJECT_SOURCE_DIR}"
                -DDUCKDB_BUILD_DIRECTORY="${PROJECT_BINARY_DIR}")

set(SQLITE_TEST_RUNNER_SOURCES
    result_helper.cpp sqllogic_command.cpp sqllogic_test_runner.cpp
    sqllogic_parser.cpp test_sqllogictest.cpp sqllogic_test_logger.cpp)

add_library_unity(test_sqlite OBJECT ${SQLITE_TEST_RUNNER_SOURCES})
set(ALL_OBJECT_FILES
    ${ALL_OBJECT_FILES} $<TARGET_OBJECTS:test_sqlite>
    PARENT_SCOPE)
42 external/duckdb/test/sqlite/README.md vendored Normal file
@@ -0,0 +1,42 @@
# SQLLogic Test Runner

## Origin

Here you'll find source code originating from
[SQLite's SQLLogicTest](https://sqlite.org/sqllogictest/doc/trunk/about.wiki).
DuckDB has extended the functionality in several ways, including several new expressions
(test_env, set/reset, tags).

## Usage Notes

### Environment: test_env and require-env

Environment variables can be managed in two ways: `test_env`, which allows a variable to be given a default value, and `require-env`, which is a select/skip predicate for a test file.

For examples of `test_env` usage, see the `duckdb/ducklake` extension tests.

When a file contains `require-env FOO` (or `require-env FOO=bar`), the test will only execute if `FOO` is set (or, in the latter case, set to `bar`); see the sketch below.
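
For illustration, here is a minimal sketch of a test file header using both mechanisms. The `require-env` lines follow the forms described above; the exact `test_env` argument layout and the variable substitution used in the query are assumptions for illustration, not taken from this README:

```text
# skip this whole file unless TEST_DATA is set
require-env TEST_DATA

# only run when TEST_FORMAT is set to exactly 'parquet'
require-env TEST_FORMAT=parquet

# assumed form: give REGION a default when the caller has not set it
test_env REGION us-east-1

query I
SELECT count(*) FROM read_parquet('${TEST_DATA}/people.parquet');
----
42
```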

### Tags: explicit and implicit

SQL test files also support a `tags` attribute of the form:

```text
tags optimization memory>=64GB
```

The tags are free-form, and can be used when executing tests for both selection and skipping, for example:

```bash
build/release/test/unittest --skip-tag 'slow' --select-tag-set "['memory>=64GB', 'env[TEST_DATA]']"
```

Tags can be specified individually, or as a set (which is treated as an `AND` predicate).
Each specification is an `OR`, and selects are processed before skips; the examples below sketch these combinations.
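
A hedged sketch of how these rules combine (the tag names are invented, and repeating a flag to `OR` several specifications together is an assumption based on the wording above, not something this README states explicitly):

```bash
# a tag set is an AND: only tests carrying BOTH tags are selected
build/release/test/unittest --select-tag-set "['optimization', 'memory>=64GB']"

# each specification is an OR: either set on its own selects a test
build/release/test/unittest --select-tag-set "['optimization']" --select-tag-set "['storage']"

# selects are processed before skips: pick the optimization tests, then drop the slow ones
build/release/test/unittest --select-tag-set "['optimization']" --skip-tag 'slow'
```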

Additionally, some implicit tags are computed when an SQL test file is parsed.
All `require-env` and `test_env` expressions will be added as tags of the form `env[VAR]` and
`env[VAR]=VALUE` (when a value is specified), as in the small example below.
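
For instance (a sketch with hypothetical variable names), a test file containing

```text
require-env TEST_DATA
require-env TEST_FORMAT=parquet
```

would carry the implicit tags `env[TEST_DATA]` and `env[TEST_FORMAT]=parquet`, so it could be selected with `--select-tag-set "['env[TEST_DATA]']"` just like in the example above.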

For an extensive example of tag matching expectations, see the file
`test/sqlite/validate_tags_usage.sh`, which unit tests these behaviors.
598 external/duckdb/test/sqlite/result_helper.cpp vendored Normal file
@@ -0,0 +1,598 @@
|
||||
#include "result_helper.hpp"
|
||||
|
||||
#include "catch.hpp"
|
||||
#include "duckdb/common/crypto/md5.hpp"
|
||||
#include "duckdb/parser/qualified_name.hpp"
|
||||
#include "re2/re2.h"
|
||||
#include "sqllogic_test_logger.hpp"
|
||||
#include "sqllogic_test_runner.hpp"
|
||||
#include "termcolor.hpp"
|
||||
#include "test_helpers.hpp"
|
||||
#include "test_config.hpp"
|
||||
|
||||
#include <thread>
|
||||
|
||||
namespace duckdb {
|
||||
|
||||
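// added note: sorts the flattened (row-major) result vector according to the requested sort style;
// NO_SORT leaves it untouched, VALUE_SORT sorts every value independently, and any other style
// sorts whole rows lexicographically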
void TestResultHelper::SortQueryResult(SortStyle sort_style, vector<string> &result, idx_t ncols) {
|
||||
if (sort_style == SortStyle::NO_SORT) {
|
||||
return;
|
||||
}
|
||||
if (sort_style == SortStyle::VALUE_SORT) {
|
||||
// sort values independently
|
||||
std::sort(result.begin(), result.end());
|
||||
return;
|
||||
}
|
||||
if (result.size() % ncols != 0) {
|
||||
// row-sort failed: result is not row-wise aligned, bail
|
||||
FAIL(StringUtil::Format("Failed to sort query result - result is not aligned. Found %d rows with %d columns",
|
||||
result.size(), ncols));
|
||||
return;
|
||||
}
|
||||
// row-oriented sorting
|
||||
idx_t nrows = result.size() / ncols;
|
||||
vector<vector<string>> rows;
|
||||
rows.reserve(nrows);
|
||||
for (idx_t row_idx = 0; row_idx < nrows; row_idx++) {
|
||||
vector<string> row;
|
||||
row.reserve(ncols);
|
||||
for (idx_t col_idx = 0; col_idx < ncols; col_idx++) {
|
||||
row.push_back(std::move(result[row_idx * ncols + col_idx]));
|
||||
}
|
||||
rows.push_back(std::move(row));
|
||||
}
|
||||
// sort the individual rows
|
||||
std::sort(rows.begin(), rows.end(), [](const vector<string> &a, const vector<string> &b) {
|
||||
for (idx_t col_idx = 0; col_idx < a.size(); col_idx++) {
|
||||
if (a[col_idx] != b[col_idx]) {
|
||||
return a[col_idx] < b[col_idx];
|
||||
}
|
||||
}
|
||||
return false;
|
||||
});
|
||||
|
||||
// now reconstruct the values from the rows
|
||||
for (idx_t row_idx = 0; row_idx < nrows; row_idx++) {
|
||||
for (idx_t col_idx = 0; col_idx < ncols; col_idx++) {
|
||||
result[row_idx * ncols + col_idx] = std::move(rows[row_idx][col_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool TestResultHelper::CheckQueryResult(const Query &query, ExecuteContext &context,
|
||||
duckdb::unique_ptr<MaterializedQueryResult> owned_result) {
|
||||
auto &result = *owned_result;
|
||||
auto &runner = query.runner;
|
||||
auto expected_column_count = query.expected_column_count;
|
||||
auto &values = query.values;
|
||||
auto sort_style = query.sort_style;
|
||||
auto query_has_label = query.query_has_label;
|
||||
auto &query_label = query.query_label;
|
||||
|
||||
SQLLogicTestLogger logger(context, query);
|
||||
if (result.HasError()) {
|
||||
if (SkipErrorMessage(result.GetError())) {
|
||||
runner.finished_processing_file = true;
|
||||
return true;
|
||||
}
|
||||
if (!FailureSummary::SkipLoggingSameError(context.error_file)) {
|
||||
logger.UnexpectedFailure(result);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
idx_t row_count = result.RowCount();
|
||||
idx_t column_count = result.ColumnCount();
|
||||
idx_t total_value_count = row_count * column_count;
|
||||
bool compare_hash =
|
||||
query_has_label || (runner.hash_threshold > 0 && total_value_count > idx_t(runner.hash_threshold));
|
||||
bool result_is_hash = false;
|
||||
// check if the current line (the first line of the result) is a hash value
|
||||
if (values.size() == 1 && ResultIsHash(values[0])) {
|
||||
compare_hash = true;
|
||||
result_is_hash = true;
|
||||
}
|
||||
|
||||
vector<string> result_values_string;
|
||||
try {
|
||||
DuckDBConvertResult(result, runner.original_sqlite_test, result_values_string);
|
||||
if (runner.output_result_mode) {
|
||||
logger.OutputResult(result, result_values_string);
|
||||
}
|
||||
} catch (std::exception &ex) {
|
||||
ErrorData error(ex);
|
||||
auto &original_error = error.Message();
|
||||
logger.LogFailure(original_error);
|
||||
return false;
|
||||
}
|
||||
|
||||
SortQueryResult(sort_style, result_values_string, column_count);
|
||||
|
||||
vector<string> comparison_values;
|
||||
if (values.size() == 1 && ResultIsFile(values[0])) {
|
||||
auto fname = StringUtil::Replace(values[0], "<FILE>:", "");
|
||||
fname = runner.ReplaceKeywords(fname);
|
||||
fname = runner.LoopReplacement(fname, context.running_loops);
|
||||
string csv_error;
|
||||
comparison_values = LoadResultFromFile(fname, result.names, expected_column_count, csv_error);
|
||||
if (!csv_error.empty()) {
|
||||
string log_message;
|
||||
logger.PrintErrorHeader(csv_error);
|
||||
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
comparison_values = values;
|
||||
}
|
||||
|
||||
// compute the hash of the results if there is a hash label or we are past the hash threshold
|
||||
string hash_value;
|
||||
if (runner.output_hash_mode || compare_hash) {
|
||||
MD5Context context;
|
||||
for (idx_t i = 0; i < total_value_count; i++) {
|
||||
context.Add(result_values_string[i]);
|
||||
context.Add("\n");
|
||||
}
|
||||
string digest = context.FinishHex();
|
||||
hash_value = to_string(total_value_count) + " values hashing to " + digest;
|
||||
if (runner.output_hash_mode) {
|
||||
logger.OutputHash(hash_value);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!compare_hash) {
|
||||
// check if the row/column count matches
|
||||
idx_t original_expected_columns = expected_column_count;
|
||||
bool column_count_mismatch = false;
|
||||
if (expected_column_count != result.ColumnCount()) {
|
||||
// expected column count is different from the count found in the result
|
||||
// we try to keep going with the number of columns in the result
|
||||
expected_column_count = result.ColumnCount();
|
||||
column_count_mismatch = true;
|
||||
}
|
||||
if (expected_column_count == 0) {
|
||||
return false;
|
||||
}
|
||||
idx_t expected_rows = comparison_values.size() / expected_column_count;
|
||||
// we first check the counts: if the number of values equals the number of rows, we expect the results to be row-wise
|
||||
bool row_wise = expected_column_count > 1 && comparison_values.size() == result.RowCount();
|
||||
if (!row_wise) {
|
||||
// the counts do not match up for it to be row-wise
|
||||
// however, this can also be because the query returned an incorrect # of rows
|
||||
// we make a guess: if everything contains tabs, we still treat the input as row wise
|
||||
bool all_tabs = true;
|
||||
for (auto &val : comparison_values) {
|
||||
if (val.find('\t') == string::npos) {
|
||||
all_tabs = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
row_wise = all_tabs;
|
||||
}
|
||||
if (row_wise) {
|
||||
// values are displayed row-wise, format row wise with a tab
|
||||
expected_rows = comparison_values.size();
|
||||
row_wise = true;
|
||||
} else if (comparison_values.size() % expected_column_count != 0) {
|
||||
if (column_count_mismatch) {
|
||||
logger.ColumnCountMismatch(result, query.values, original_expected_columns, row_wise);
|
||||
} else {
|
||||
logger.NotCleanlyDivisible(expected_column_count, comparison_values.size());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
if (expected_rows != result.RowCount()) {
|
||||
if (column_count_mismatch) {
|
||||
logger.ColumnCountMismatch(result, query.values, original_expected_columns, row_wise);
|
||||
} else {
|
||||
logger.WrongRowCount(expected_rows, result, comparison_values, expected_column_count, row_wise);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
if (row_wise) {
|
||||
// if the result is row-wise, turn it into a set of values by splitting it
|
||||
vector<string> expected_values;
|
||||
for (idx_t i = 0; i < total_value_count && i < comparison_values.size(); i++) {
|
||||
// split based on tab character
|
||||
auto splits = StringUtil::Split(comparison_values[i], "\t");
|
||||
if (splits.size() != expected_column_count) {
|
||||
if (column_count_mismatch) {
|
||||
logger.ColumnCountMismatch(result, query.values, original_expected_columns, row_wise);
|
||||
}
|
||||
logger.SplitMismatch(i + 1, expected_column_count, splits.size());
|
||||
return false;
|
||||
}
|
||||
for (auto &split : splits) {
|
||||
expected_values.push_back(std::move(split));
|
||||
}
|
||||
}
|
||||
comparison_values = std::move(expected_values);
|
||||
row_wise = false;
|
||||
}
|
||||
auto &test_config = TestConfiguration::Get();
|
||||
auto default_sort_style = test_config.GetDefaultSortStyle();
|
||||
idx_t check_it_count = column_count_mismatch || default_sort_style == SortStyle::NO_SORT ? 1 : 2;
|
||||
for (idx_t check_it = 0; check_it < check_it_count; check_it++) {
|
||||
bool final_iteration = check_it + 1 == check_it_count;
|
||||
idx_t current_row = 0, current_column = 0;
|
||||
bool success = true;
|
||||
for (idx_t i = 0; i < total_value_count && i < comparison_values.size(); i++) {
|
||||
success = CompareValues(logger, result,
|
||||
result_values_string[current_row * expected_column_count + current_column],
|
||||
comparison_values[i], current_row, current_column, comparison_values,
|
||||
expected_column_count, row_wise, result_values_string, final_iteration);
|
||||
if (!success) {
|
||||
break;
|
||||
}
|
||||
// we do this just to increment the assertion counter
|
||||
string success_log = StringUtil::Format("CheckQueryResult: %s:%d", query.file_name, query.query_line);
|
||||
REQUIRE(success_log.c_str());
|
||||
|
||||
current_column++;
|
||||
if (current_column == expected_column_count) {
|
||||
current_row++;
|
||||
current_column = 0;
|
||||
}
|
||||
}
|
||||
if (!success) {
|
||||
if (final_iteration) {
|
||||
return false;
|
||||
}
|
||||
SortQueryResult(default_sort_style, result_values_string, column_count);
|
||||
SortQueryResult(default_sort_style, comparison_values, query.expected_column_count);
|
||||
}
|
||||
}
|
||||
if (column_count_mismatch) {
|
||||
logger.ColumnCountMismatchCorrectResult(original_expected_columns, expected_column_count, result);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
bool hash_compare_error = false;
|
||||
if (query_has_label) {
|
||||
runner.hash_label_map.WithLock([&](unordered_map<string, CachedLabelData> &map) {
|
||||
// the query has a label: check if the hash has already been computed
|
||||
auto entry = map.find(query_label);
|
||||
if (entry == map.end()) {
|
||||
// not computed yet: add it to the map
|
||||
map.emplace(query_label, CachedLabelData(hash_value, std::move(owned_result)));
|
||||
} else {
|
||||
hash_compare_error = entry->second.hash != hash_value;
|
||||
}
|
||||
});
|
||||
}
|
||||
string expected_hash;
|
||||
if (result_is_hash) {
|
||||
expected_hash = values[0];
|
||||
D_ASSERT(values.size() == 1);
|
||||
hash_compare_error = expected_hash != hash_value;
|
||||
}
|
||||
if (hash_compare_error) {
|
||||
QueryResult *expected_result = nullptr;
|
||||
runner.hash_label_map.WithLock([&](unordered_map<string, CachedLabelData> &map) {
|
||||
auto it = map.find(query_label);
|
||||
if (it != map.end()) {
|
||||
expected_result = it->second.result.get();
|
||||
}
|
||||
logger.WrongResultHash(expected_result, result, expected_hash, hash_value);
|
||||
});
|
||||
return false;
|
||||
}
|
||||
REQUIRE(!hash_compare_error);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool TestResultHelper::CheckStatementResult(const Statement &statement, ExecuteContext &context,
|
||||
duckdb::unique_ptr<MaterializedQueryResult> owned_result) {
|
||||
auto &result = *owned_result;
|
||||
bool error = result.HasError();
|
||||
SQLLogicTestLogger logger(context, statement);
|
||||
if (runner.output_result_mode || runner.debug_mode) {
|
||||
result.Print();
|
||||
}
|
||||
|
||||
/* Check to see if we are expecting success or failure */
|
||||
auto expected_result = statement.expected_result;
|
||||
if (expected_result != ExpectedResult::RESULT_SUCCESS) {
|
||||
// even in the case of "statement error", we do not accept ALL errors
|
||||
// internal errors are never expected
|
||||
// neither are "unoptimized result differs from original result" errors
|
||||
|
||||
if (result.HasError() && TestIsInternalError(runner.always_fail_error_messages, result.GetError())) {
|
||||
logger.InternalException(result);
|
||||
return false;
|
||||
}
|
||||
if (expected_result == ExpectedResult::RESULT_UNKNOWN) {
|
||||
error = false;
|
||||
} else {
|
||||
error = !error;
|
||||
}
|
||||
if (result.HasError() && !statement.expected_error.empty()) {
|
||||
// We run both comparisons on purpose; we might move to only the second, but that might require some changes in
|
||||
// tests
|
||||
// This is due to some errors containing absolute paths and others containing relative ones
|
||||
if (!StringUtil::Contains(result.GetError(), statement.expected_error) &&
|
||||
!StringUtil::Contains(result.GetError(), runner.ReplaceKeywords(statement.expected_error))) {
|
||||
bool success = false;
|
||||
if (StringUtil::StartsWith(statement.expected_error, "<REGEX>:") ||
|
||||
StringUtil::StartsWith(statement.expected_error, "<!REGEX>:")) {
|
||||
success = MatchesRegex(logger, result.ToString(), statement.expected_error);
|
||||
}
|
||||
if (!success) {
|
||||
// don't log the same test failure many times:
|
||||
// e.g. log only the first failure in
|
||||
// `./build/debug/test/unittest --on-init "SET max_memory='400kb';"
|
||||
// test/fuzzer/pedro/concurrent_catalog_usage.test`
|
||||
if (!SkipErrorMessage(result.GetError()) &&
|
||||
!FailureSummary::SkipLoggingSameError(statement.file_name)) {
|
||||
logger.ExpectedErrorMismatch(statement.expected_error, result);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
string success_log =
|
||||
StringUtil::Format("CheckStatementResult: %s:%d", statement.file_name, statement.query_line);
|
||||
REQUIRE(success_log.c_str());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Report an error if the results do not match expectation */
|
||||
if (error) {
|
||||
if (expected_result == ExpectedResult::RESULT_SUCCESS && SkipErrorMessage(result.GetError())) {
|
||||
runner.finished_processing_file = true;
|
||||
return true;
|
||||
}
|
||||
if (!FailureSummary::SkipLoggingSameError(statement.file_name)) {
|
||||
logger.UnexpectedStatement(expected_result == ExpectedResult::RESULT_SUCCESS, result);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
if (error) {
|
||||
REQUIRE(false);
|
||||
} else {
|
||||
string success_log =
|
||||
StringUtil::Format("CheckStatementResult: %s:%d", statement.file_name, statement.query_line);
|
||||
REQUIRE(success_log.c_str());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
vector<string> TestResultHelper::LoadResultFromFile(string fname, vector<string> names, idx_t &expected_column_count,
|
||||
string &error) {
|
||||
DuckDB db(nullptr);
|
||||
Connection con(db);
|
||||
auto threads = MaxValue<idx_t>(std::thread::hardware_concurrency(), 1);
|
||||
con.Query("PRAGMA threads=" + to_string(threads));
|
||||
|
||||
string struct_definition = "STRUCT_PACK(";
|
||||
for (idx_t i = 0; i < names.size(); i++) {
|
||||
if (i > 0) {
|
||||
struct_definition += ", ";
|
||||
}
|
||||
struct_definition += StringUtil::Format("%s := VARCHAR", SQLIdentifier(names[i]));
|
||||
}
|
||||
struct_definition += ")";
|
||||
|
||||
auto csv_result = con.Query("SELECT * FROM read_csv('" + fname +
|
||||
"', header=1, sep='|', columns=" + struct_definition + ", auto_detect=false)");
|
||||
if (csv_result->HasError()) {
|
||||
error = StringUtil::Format("Could not read CSV File \"%s\": %s", fname, csv_result->GetError());
|
||||
return vector<string>();
|
||||
}
|
||||
expected_column_count = csv_result->ColumnCount();
|
||||
|
||||
vector<string> values;
|
||||
while (true) {
|
||||
auto chunk = csv_result->Fetch();
|
||||
if (!chunk || chunk->size() == 0) {
|
||||
break;
|
||||
}
|
||||
for (idx_t r = 0; r < chunk->size(); r++) {
|
||||
for (idx_t c = 0; c < chunk->ColumnCount(); c++) {
|
||||
values.push_back(chunk->GetValue(c, r).CastAs(*runner.con->context, LogicalType::VARCHAR).ToString());
|
||||
}
|
||||
}
|
||||
}
|
||||
return values;
|
||||
}
|
||||
|
||||
bool TestResultHelper::SkipErrorMessage(const string &message) {
|
||||
for (auto &error_message : runner.ignore_error_messages) {
|
||||
if (StringUtil::Contains(message, error_message)) {
|
||||
SKIP_TEST(string("skip on error_message matching '") + error_message + string("'"));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
string TestResultHelper::SQLLogicTestConvertValue(Value value, LogicalType sql_type, bool original_sqlite_test) {
|
||||
if (value.IsNull()) {
|
||||
return "NULL";
|
||||
} else {
|
||||
if (original_sqlite_test) {
|
||||
// sqlite test hashes want us to convert floating point numbers to integers
|
||||
switch (sql_type.id()) {
|
||||
case LogicalTypeId::DECIMAL:
|
||||
case LogicalTypeId::FLOAT:
|
||||
case LogicalTypeId::DOUBLE:
|
||||
return value.CastAs(*runner.con->context, LogicalType::BIGINT).ToString();
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
switch (sql_type.id()) {
|
||||
case LogicalTypeId::BOOLEAN:
|
||||
return BooleanValue::Get(value) ? "1" : "0";
|
||||
default: {
|
||||
string str = value.CastAs(*runner.con->context, LogicalType::VARCHAR).ToString();
|
||||
if (str.empty()) {
|
||||
return "(empty)";
|
||||
} else {
|
||||
return StringUtil::Replace(str, string("\0", 1), "\\0");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// standard result conversion: one line per value
|
||||
void TestResultHelper::DuckDBConvertResult(MaterializedQueryResult &result, bool original_sqlite_test,
|
||||
vector<string> &out_result) {
|
||||
size_t r, c;
|
||||
idx_t row_count = result.RowCount();
|
||||
idx_t column_count = result.ColumnCount();
|
||||
|
||||
out_result.resize(row_count * column_count);
|
||||
for (r = 0; r < row_count; r++) {
|
||||
for (c = 0; c < column_count; c++) {
|
||||
auto value = result.GetValue(c, r);
|
||||
auto converted_value = SQLLogicTestConvertValue(value, result.types[c], original_sqlite_test);
|
||||
out_result[r * column_count + c] = converted_value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
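// added note: returns true when the expected result consists of a single line of the sqllogictest
// hash form "<value count> values hashing to <32-character md5 digest>"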
bool TestResultHelper::ResultIsHash(const string &result) {
|
||||
idx_t pos = 0;
|
||||
// first parse the rows
|
||||
while (result[pos] >= '0' && result[pos] <= '9') {
|
||||
pos++;
|
||||
}
|
||||
if (pos == 0) {
|
||||
return false;
|
||||
}
|
||||
string constant_str = " values hashing to ";
|
||||
string example_hash = "acd848208cc35c7324ece9fcdd507823";
|
||||
if (pos + constant_str.size() + example_hash.size() != result.size()) {
|
||||
return false;
|
||||
}
|
||||
if (result.substr(pos, constant_str.size()) != constant_str) {
|
||||
return false;
|
||||
}
|
||||
pos += constant_str.size();
|
||||
// now parse the hash
|
||||
while ((result[pos] >= '0' && result[pos] <= '9') || (result[pos] >= 'a' && result[pos] <= 'z')) {
|
||||
pos++;
|
||||
}
|
||||
return pos == result.size();
|
||||
}
|
||||
|
||||
bool TestResultHelper::ResultIsFile(string result) {
|
||||
return StringUtil::StartsWith(result, "<FILE>:");
|
||||
}
|
||||
|
||||
bool TestResultHelper::CompareValues(SQLLogicTestLogger &logger, MaterializedQueryResult &result, string lvalue_str,
|
||||
string rvalue_str, idx_t current_row, idx_t current_column, vector<string> &values,
|
||||
idx_t expected_column_count, bool row_wise, vector<string> &result_values,
|
||||
bool print_error) {
|
||||
Value lvalue, rvalue;
|
||||
bool error = false;
|
||||
// simple first test: compare string value directly
|
||||
// We run both comparisons on purpose; we might move to only the second, but that might require some changes in tests
|
||||
// This is due to some results containing absolute paths and others containing relative ones
|
||||
if (lvalue_str == rvalue_str || lvalue_str == runner.ReplaceKeywords(rvalue_str)) {
|
||||
return true;
|
||||
}
|
||||
if (StringUtil::StartsWith(rvalue_str, "<REGEX>:") || StringUtil::StartsWith(rvalue_str, "<!REGEX>:")) {
|
||||
if (MatchesRegex(logger, lvalue_str, rvalue_str)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// some types require more checking (specifically floating point numbers because of inaccuracies)
|
||||
// if not equivalent we need to cast to the SQL type to verify
|
||||
auto sql_type = result.types[current_column];
|
||||
if (sql_type.IsNumeric()) {
|
||||
bool converted_lvalue = false;
|
||||
bool converted_rvalue = false;
|
||||
if (lvalue_str == "NULL") {
|
||||
lvalue = Value(sql_type);
|
||||
converted_lvalue = true;
|
||||
} else {
|
||||
lvalue = Value(lvalue_str);
|
||||
if (lvalue.TryCastAs(*runner.con->context, sql_type)) {
|
||||
converted_lvalue = true;
|
||||
}
|
||||
}
|
||||
if (rvalue_str == "NULL") {
|
||||
rvalue = Value(sql_type);
|
||||
converted_rvalue = true;
|
||||
} else {
|
||||
rvalue = Value(rvalue_str);
|
||||
if (rvalue.TryCastAs(*runner.con->context, sql_type)) {
|
||||
converted_rvalue = true;
|
||||
}
|
||||
}
|
||||
if (converted_lvalue && converted_rvalue) {
|
||||
error = !Value::ValuesAreEqual(*runner.con->context, lvalue, rvalue);
|
||||
} else {
|
||||
error = true;
|
||||
}
|
||||
} else if (sql_type == LogicalType::BOOLEAN) {
|
||||
auto low_r_val = StringUtil::Lower(rvalue_str);
|
||||
auto low_l_val = StringUtil::Lower(lvalue_str);
|
||||
|
||||
string true_str = "true";
|
||||
string false_str = "false";
|
||||
if (low_l_val == true_str || lvalue_str == "1") {
|
||||
lvalue = Value(1);
|
||||
} else if (low_l_val == false_str || lvalue_str == "0") {
|
||||
lvalue = Value(0);
|
||||
}
|
||||
if (low_r_val == true_str || rvalue_str == "1") {
|
||||
rvalue = Value(1);
|
||||
} else if (low_r_val == false_str || rvalue_str == "0") {
|
||||
rvalue = Value(0);
|
||||
}
|
||||
error = !Value::ValuesAreEqual(*runner.con->context, lvalue, rvalue);
|
||||
|
||||
} else {
|
||||
// for other types we just mark the result as incorrect
|
||||
error = true;
|
||||
}
|
||||
if (error) {
|
||||
if (print_error) {
|
||||
std::ostringstream oss;
|
||||
logger.PrintErrorHeader("Wrong result in query!");
|
||||
logger.PrintLineSep();
|
||||
logger.PrintSQL();
|
||||
logger.PrintLineSep();
|
||||
oss << termcolor::red << termcolor::bold << "Mismatch on row " << current_row + 1 << ", column "
|
||||
<< result.ColumnName(current_column) << "(index " << current_column + 1 << ")" << std::endl
|
||||
<< termcolor::reset;
|
||||
oss << lvalue_str << " <> " << rvalue_str << std::endl;
|
||||
logger.LogFailure(oss.str());
|
||||
logger.PrintLineSep();
|
||||
logger.PrintResultError(result_values, values, expected_column_count, row_wise);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool TestResultHelper::MatchesRegex(SQLLogicTestLogger &logger, string lvalue_str, string rvalue_str) {
|
||||
bool want_match = StringUtil::StartsWith(rvalue_str, "<REGEX>:");
|
||||
string regex_str = StringUtil::Replace(StringUtil::Replace(rvalue_str, "<REGEX>:", ""), "<!REGEX>:", "");
|
||||
RE2::Options options;
|
||||
options.set_dot_nl(true);
|
||||
RE2 re(regex_str, options);
|
||||
if (!re.ok()) {
|
||||
std::ostringstream oss;
|
||||
logger.PrintErrorHeader("Test error!");
|
||||
logger.PrintLineSep();
|
||||
oss << termcolor::red << termcolor::bold << "Failed to parse regex: " << re.error() << termcolor::reset
|
||||
<< std::endl;
|
||||
logger.LogFailure(oss.str());
|
||||
logger.PrintLineSep();
|
||||
return false;
|
||||
}
|
||||
bool regex_matches = RE2::FullMatch(lvalue_str, re);
|
||||
if ((want_match && regex_matches) || (!want_match && !regex_matches)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
} // namespace duckdb
|
||||
48 external/duckdb/test/sqlite/result_helper.hpp vendored Normal file
@@ -0,0 +1,48 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
// DuckDB
|
||||
//
|
||||
// result_helper.hpp
|
||||
//
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "duckdb.hpp"
|
||||
#include "sqllogic_command.hpp"
|
||||
|
||||
namespace duckdb {
|
||||
class SQLLogicTestRunner;
|
||||
class SQLLogicTestLogger;
|
||||
|
||||
class TestResultHelper {
|
||||
public:
|
||||
TestResultHelper(SQLLogicTestRunner &runner) : runner(runner) {
|
||||
}
|
||||
|
||||
SQLLogicTestRunner &runner;
|
||||
|
||||
public:
|
||||
bool CheckQueryResult(const Query &query, ExecuteContext &context,
|
||||
duckdb::unique_ptr<MaterializedQueryResult> owned_result);
|
||||
bool CheckStatementResult(const Statement &statement, ExecuteContext &context,
|
||||
duckdb::unique_ptr<MaterializedQueryResult> owned_result);
|
||||
string SQLLogicTestConvertValue(Value value, LogicalType sql_type, bool original_sqlite_test);
|
||||
void DuckDBConvertResult(MaterializedQueryResult &result, bool original_sqlite_test, vector<string> &out_result);
|
||||
|
||||
static bool ResultIsHash(const string &result);
|
||||
static bool ResultIsFile(string result);
|
||||
void SortQueryResult(SortStyle sort_style, vector<string> &result, idx_t ncols);
|
||||
|
||||
bool MatchesRegex(SQLLogicTestLogger &logger, string lvalue_str, string rvalue_str);
|
||||
bool CompareValues(SQLLogicTestLogger &logger, MaterializedQueryResult &result, string lvalue_str,
|
||||
string rvalue_str, idx_t current_row, idx_t current_column, vector<string> &values,
|
||||
idx_t expected_column_count, bool row_wise, vector<string> &result_values,
|
||||
bool print_error = true);
|
||||
bool SkipErrorMessage(const string &message);
|
||||
bool SkipLoggingSameError(const string &file_name);
|
||||
|
||||
vector<string> LoadResultFromFile(string fname, vector<string> names, idx_t &expected_column_count, string &error);
|
||||
};
|
||||
|
||||
} // namespace duckdb
|
||||
12191 external/duckdb/test/sqlite/select1.test_slow vendored Normal file (file diff suppressed because it is too large)
11221 external/duckdb/test/sqlite/select2.test_slow vendored Normal file (file diff suppressed because it is too large)
40772 external/duckdb/test/sqlite/select3.test_slow vendored Normal file (file diff suppressed because it is too large)
48302 external/duckdb/test/sqlite/select4.test_slow vendored Normal file (file diff suppressed because it is too large)
655 external/duckdb/test/sqlite/sqllogic_command.cpp vendored Normal file
@@ -0,0 +1,655 @@
|
||||
#include "sqllogic_command.hpp"
|
||||
#include "sqllogic_test_runner.hpp"
|
||||
#include "result_helper.hpp"
|
||||
#include "duckdb/main/connection_manager.hpp"
|
||||
#include "duckdb/parser/statement/create_statement.hpp"
|
||||
#include "duckdb/main/client_data.hpp"
|
||||
#include "duckdb/catalog/catalog_search_path.hpp"
|
||||
#include "duckdb/main/stream_query_result.hpp"
|
||||
#include "duckdb/main/attached_database.hpp"
|
||||
#include "duckdb/catalog/duck_catalog.hpp"
|
||||
#include "duckdb/catalog/catalog_entry/duck_schema_entry.hpp"
|
||||
#include "test_helpers.hpp"
|
||||
#include "test_config.hpp"
|
||||
#include "sqllogic_test_logger.hpp"
|
||||
#include "catch.hpp"
|
||||
#include <list>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
|
||||
namespace duckdb {
|
||||
|
||||
static void query_break(int line) {
|
||||
(void)line;
|
||||
}
|
||||
|
||||
static Connection *GetConnection(SQLLogicTestRunner &runner, DuckDB &db,
|
||||
unordered_map<string, duckdb::unique_ptr<Connection>> &named_connection_map,
|
||||
string con_name) {
|
||||
auto entry = named_connection_map.find(con_name);
|
||||
if (entry == named_connection_map.end()) {
|
||||
// not found: create a new connection
|
||||
auto con = make_uniq<Connection>(db);
|
||||
|
||||
auto &test_config = TestConfiguration::Get();
|
||||
auto init_cmd = test_config.OnConnectionCommand();
|
||||
if (!init_cmd.empty()) {
|
||||
auto res = con->Query(runner.ReplaceKeywords(init_cmd));
|
||||
if (res->HasError()) {
|
||||
FAIL("Startup queries provided via on_new_connection failed: " + res->GetError());
|
||||
}
|
||||
}
|
||||
auto res = con.get();
|
||||
|
||||
named_connection_map[con_name] = std::move(con);
|
||||
return res;
|
||||
}
|
||||
return entry->second.get();
|
||||
}
|
||||
|
||||
Command::Command(SQLLogicTestRunner &runner) : runner(runner) {
|
||||
}
|
||||
|
||||
Command::~Command() {
|
||||
}
|
||||
|
||||
Connection *Command::CommandConnection(ExecuteContext &context) const {
|
||||
if (connection_name.empty()) {
|
||||
if (context.is_parallel) {
|
||||
D_ASSERT(context.con);
|
||||
|
||||
auto &test_config = TestConfiguration::Get();
|
||||
auto init_cmd = test_config.OnConnectionCommand();
|
||||
if (!init_cmd.empty()) {
|
||||
auto res = context.con->Query(runner.ReplaceKeywords(init_cmd));
|
||||
if (res->HasError()) {
|
||||
string error_msg = "Startup queries provided via on_new_connection failed: " + res->GetError();
|
||||
if (context.is_parallel) {
|
||||
throw std::runtime_error(error_msg);
|
||||
} else {
|
||||
FAIL(error_msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return context.con;
|
||||
}
|
||||
D_ASSERT(!context.con);
|
||||
|
||||
return runner.con.get();
|
||||
} else {
|
||||
if (context.is_parallel) {
|
||||
throw std::runtime_error("Named connections not supported in parallel loop");
|
||||
}
|
||||
return GetConnection(runner, *runner.db, runner.named_connection_map, connection_name);
|
||||
}
|
||||
}
|
||||
|
||||
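// added note: a transparent restart is only considered safe when no extra databases are attached
// and no connection holds prepared statements, an open transaction, or temporary objects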
bool CanRestart(Connection &conn) {
|
||||
auto &connection_manager = conn.context->db->GetConnectionManager();
|
||||
auto &db_manager = DatabaseManager::Get(*conn.context->db);
|
||||
auto &connection_list = connection_manager.GetConnectionListReference();
|
||||
|
||||
// do we have any databases attached (aside from the main database)?
|
||||
auto databases = db_manager.GetDatabases();
|
||||
idx_t database_count = 0;
|
||||
for (auto &db_ref : databases) {
|
||||
auto &db = *db_ref;
|
||||
if (db.IsSystem()) {
|
||||
continue;
|
||||
}
|
||||
database_count++;
|
||||
}
|
||||
if (database_count > 1) {
|
||||
return false;
|
||||
}
|
||||
for (auto &conn_ref : connection_list) {
|
||||
auto &conn = conn_ref.first.get();
|
||||
// do we have any prepared statements?
|
||||
if (!conn.client_data->prepared_statements.empty()) {
|
||||
return false;
|
||||
}
|
||||
// are we currently inside a transaction?
|
||||
if (conn.transaction.HasActiveTransaction()) {
|
||||
return false;
|
||||
}
|
||||
// do we have any temporary objects?
|
||||
auto &temp = conn.client_data->temporary_objects;
|
||||
auto &temp_catalog = temp->GetCatalog().Cast<DuckCatalog>();
|
||||
vector<reference<DuckSchemaEntry>> schemas;
|
||||
temp_catalog.ScanSchemas(
|
||||
[&](SchemaCatalogEntry &schema) { schemas.push_back(schema.Cast<DuckSchemaEntry>()); });
|
||||
if (schemas.size() != 1) {
|
||||
return false;
|
||||
}
|
||||
auto &temp_schema = schemas[0].get();
|
||||
vector<CatalogType> catalog_types {CatalogType::TABLE_ENTRY, CatalogType::VIEW_ENTRY,
|
||||
CatalogType::INDEX_ENTRY, CatalogType::SEQUENCE_ENTRY,
|
||||
CatalogType::COLLATION_ENTRY, CatalogType::TYPE_ENTRY,
|
||||
CatalogType::MACRO_ENTRY, CatalogType::TABLE_MACRO_ENTRY};
|
||||
bool found_temp_object = false;
|
||||
for (auto &catalog_type : catalog_types) {
|
||||
temp_schema.Scan(catalog_type, [&](CatalogEntry &entry) {
|
||||
if (entry.internal) {
|
||||
return;
|
||||
}
|
||||
found_temp_object = true;
|
||||
});
|
||||
if (found_temp_object) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void Command::RestartDatabase(ExecuteContext &context, Connection *&connection, string sql_query) const {
|
||||
if (context.is_parallel) {
|
||||
// cannot restart in parallel
|
||||
return;
|
||||
}
|
||||
bool query_fail = false;
|
||||
try {
|
||||
connection->context->ParseStatements(sql_query);
|
||||
} catch (...) {
|
||||
query_fail = true;
|
||||
}
|
||||
bool can_restart = CanRestart(*connection);
|
||||
if (!query_fail && can_restart && !runner.skip_reload && !runner.dbpath.empty()) {
|
||||
// We basically restart the database if no transaction is active and if the query is valid
|
||||
auto command = make_uniq<RestartCommand>(runner, true);
|
||||
runner.ExecuteCommand(std::move(command));
|
||||
connection = CommandConnection(context);
|
||||
}
|
||||
}
|
||||
|
||||
unique_ptr<MaterializedQueryResult> Command::ExecuteQuery(ExecuteContext &context, Connection *connection,
|
||||
string file_name, idx_t query_line) const {
|
||||
query_break(query_line);
|
||||
|
||||
if (TestConfiguration::TestForceReload() && TestConfiguration::TestForceStorage()) {
|
||||
RestartDatabase(context, connection, context.sql_query);
|
||||
}
|
||||
|
||||
#ifdef DUCKDB_ALTERNATIVE_VERIFY
|
||||
auto ccontext = connection->context;
|
||||
auto result = ccontext->Query(context.sql_query, true);
|
||||
if (result->type == QueryResultType::STREAM_RESULT) {
|
||||
auto &stream_result = result->Cast<StreamQueryResult>();
|
||||
return stream_result.Materialize();
|
||||
} else {
|
||||
D_ASSERT(result->type == QueryResultType::MATERIALIZED_RESULT);
|
||||
return unique_ptr_cast<QueryResult, MaterializedQueryResult>(std::move(result));
|
||||
}
|
||||
#else
|
||||
return connection->Query(context.sql_query);
|
||||
#endif
|
||||
}
|
||||
|
||||
bool CheckLoopCondition(ExecuteContext &context, const vector<Condition> &conditions) {
|
||||
if (conditions.empty()) {
|
||||
// no conditions
|
||||
return true;
|
||||
}
|
||||
if (context.running_loops.empty()) {
|
||||
throw BinderException("Conditions (onlyif/skipif) on loop parameters can only occur within a loop");
|
||||
}
|
||||
for (auto &condition : conditions) {
|
||||
bool condition_holds = false;
|
||||
bool found_loop = false;
|
||||
for (auto &loop : context.running_loops) {
|
||||
if (loop.loop_iterator_name != condition.keyword) {
|
||||
continue;
|
||||
}
|
||||
found_loop = true;
|
||||
|
||||
string loop_value;
|
||||
if (loop.tokens.empty()) {
|
||||
loop_value = to_string(loop.loop_idx);
|
||||
} else {
|
||||
loop_value = loop.tokens[loop.loop_idx];
|
||||
}
|
||||
if (condition.comparison == ExpressionType::COMPARE_EQUAL ||
|
||||
condition.comparison == ExpressionType::COMPARE_NOTEQUAL) {
|
||||
// equality/non-equality is done on the string value
|
||||
if (condition.comparison == ExpressionType::COMPARE_EQUAL) {
|
||||
condition_holds = loop_value == condition.value;
|
||||
} else {
|
||||
condition_holds = loop_value != condition.value;
|
||||
}
|
||||
} else {
|
||||
// > >= < <= are done on numeric values
|
||||
int64_t loop_val = std::stoll(loop_value);
|
||||
int64_t condition_val = std::stoll(condition.value);
|
||||
switch (condition.comparison) {
|
||||
case ExpressionType::COMPARE_GREATERTHAN:
|
||||
condition_holds = loop_val > condition_val;
|
||||
break;
|
||||
case ExpressionType::COMPARE_LESSTHAN:
|
||||
condition_holds = loop_val < condition_val;
|
||||
break;
|
||||
case ExpressionType::COMPARE_GREATERTHANOREQUALTO:
|
||||
condition_holds = loop_val >= condition_val;
|
||||
break;
|
||||
case ExpressionType::COMPARE_LESSTHANOREQUALTO:
|
||||
condition_holds = loop_val <= condition_val;
|
||||
break;
|
||||
default:
|
||||
throw BinderException("Unrecognized comparison for loop condition");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!found_loop) {
|
||||
throw BinderException("Condition in onlyif/skipif not found: %s must be a loop iterator name",
|
||||
condition.keyword);
|
||||
}
|
||||
if (condition_holds) {
|
||||
// the condition holds
|
||||
if (condition.skip_if) {
|
||||
// skip on condition holding
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
// the condition does not hold
|
||||
if (!condition.skip_if) {
|
||||
// skip on condition not holding
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
// all conditions pass - execute
|
||||
return true;
|
||||
}
|
||||
|
||||
void Command::Execute(ExecuteContext &context) const {
|
||||
if (runner.finished_processing_file) {
|
||||
return;
|
||||
}
|
||||
if (!CheckLoopCondition(context, conditions)) {
|
||||
// condition excludes this file
|
||||
return;
|
||||
}
|
||||
if (context.running_loops.empty()) {
|
||||
context.sql_query = base_sql_query;
|
||||
ExecuteInternal(context);
|
||||
return;
|
||||
}
|
||||
// perform the string replacement
|
||||
context.sql_query = runner.LoopReplacement(base_sql_query, context.running_loops);
|
||||
// execute the iterated statement
|
||||
ExecuteInternal(context);
|
||||
}
|
||||
|
||||
Statement::Statement(SQLLogicTestRunner &runner) : Command(runner) {
|
||||
}
|
||||
|
||||
Query::Query(SQLLogicTestRunner &runner) : Command(runner) {
|
||||
}
|
||||
|
||||
ResetLabel::ResetLabel(SQLLogicTestRunner &runner) : Command(runner) {
|
||||
}
|
||||
|
||||
void ResetLabel::ExecuteInternal(ExecuteContext &context) const {
|
||||
runner.hash_label_map.WithLock([&](unordered_map<string, CachedLabelData> &map) {
|
||||
auto it = map.find(query_label);
|
||||
//! should we allow this to be missing at all?
|
||||
if (it == map.end()) {
|
||||
FAIL_LINE(file_name, query_line, 0);
|
||||
}
|
||||
map.erase(it);
|
||||
});
|
||||
}
|
||||
|
||||
RestartCommand::RestartCommand(SQLLogicTestRunner &runner, bool load_extensions_p)
|
||||
: Command(runner), load_extensions(load_extensions_p) {
|
||||
}
|
||||
|
||||
ReconnectCommand::ReconnectCommand(SQLLogicTestRunner &runner) : Command(runner) {
|
||||
}
|
||||
|
||||
LoopCommand::LoopCommand(SQLLogicTestRunner &runner, LoopDefinition definition_p)
|
||||
: Command(runner), definition(std::move(definition_p)) {
|
||||
}
|
||||
|
||||
ModeCommand::ModeCommand(SQLLogicTestRunner &runner, string parameter_p)
|
||||
: Command(runner), parameter(std::move(parameter_p)) {
|
||||
}
|
||||
|
||||
SleepCommand::SleepCommand(SQLLogicTestRunner &runner, idx_t duration, SleepUnit unit)
|
||||
: Command(runner), duration(duration), unit(unit) {
|
||||
}
|
||||
|
||||
UnzipCommand::UnzipCommand(SQLLogicTestRunner &runner, string &input, string &output)
|
||||
: Command(runner), input_path(input), extraction_path(output) {
|
||||
}
|
||||
|
||||
LoadCommand::LoadCommand(SQLLogicTestRunner &runner, string dbpath_p, bool readonly, const string &version)
|
||||
: Command(runner), dbpath(std::move(dbpath_p)), readonly(readonly), version(version) {
|
||||
}
|
||||
|
||||
struct ParallelExecuteContext {
|
||||
ParallelExecuteContext(SQLLogicTestRunner &runner, const vector<duckdb::unique_ptr<Command>> &loop_commands,
|
||||
LoopDefinition definition)
|
||||
: runner(runner), loop_commands(loop_commands), definition(std::move(definition)), success(true) {
|
||||
}
|
||||
|
||||
SQLLogicTestRunner &runner;
|
||||
const vector<duckdb::unique_ptr<Command>> &loop_commands;
|
||||
LoopDefinition definition;
|
||||
atomic<bool> success;
|
||||
string error_message;
|
||||
string error_file;
|
||||
int error_line;
|
||||
};
|
||||
|
||||
static void ParallelExecuteLoop(ParallelExecuteContext *execute_context) {
|
||||
try {
|
||||
auto &runner = execute_context->runner;
|
||||
|
||||
// construct a new connection to the database
|
||||
Connection con(*runner.db);
|
||||
// create a new parallel execute context
|
||||
vector<LoopDefinition> running_loops {execute_context->definition};
|
||||
ExecuteContext context(&con, std::move(running_loops));
|
||||
for (auto &command : execute_context->loop_commands) {
|
||||
execute_context->error_file = command->file_name;
|
||||
execute_context->error_line = command->query_line;
|
||||
command->Execute(context);
|
||||
}
|
||||
if (!context.error_file.empty()) {
|
||||
execute_context->error_message = string();
|
||||
execute_context->success = false;
|
||||
execute_context->error_file = context.error_file;
|
||||
execute_context->error_line = context.error_line;
|
||||
}
|
||||
} catch (std::exception &ex) {
|
||||
execute_context->error_message = StringUtil::Format("Failure at %s:%d: %s", execute_context->error_file,
|
||||
execute_context->error_line, ex.what());
|
||||
execute_context->success = false;
|
||||
} catch (...) {
|
||||
execute_context->error_message = StringUtil::Format("Failure at %s:%d: Unknown error message",
|
||||
execute_context->error_file, execute_context->error_line);
|
||||
execute_context->success = false;
|
||||
}
|
||||
}
|
||||
|
||||
void LoopCommand::ExecuteInternal(ExecuteContext &context) const {
|
||||
LoopDefinition loop_def = definition;
|
||||
loop_def.loop_idx = definition.loop_start;
|
||||
if (loop_def.is_parallel) {
|
||||
for (auto &running_loop : context.running_loops) {
|
||||
if (running_loop.is_parallel) {
|
||||
throw std::runtime_error("Nested parallel loop commands not allowed");
|
||||
}
|
||||
}
|
||||
for (auto &command : loop_commands) {
|
||||
if (!command->SupportsConcurrent()) {
|
||||
throw std::runtime_error("Concurrent loop is not supported over this command");
|
||||
}
|
||||
}
|
||||
// parallel loop: launch threads
|
||||
std::list<ParallelExecuteContext> contexts;
|
||||
while (true) {
|
||||
contexts.emplace_back(runner, loop_commands, loop_def);
|
||||
loop_def.loop_idx++;
|
||||
if (loop_def.loop_idx >= loop_def.loop_end) {
|
||||
// finished
|
||||
break;
|
||||
}
|
||||
}
|
||||
std::list<std::thread> threads;
|
||||
for (auto &context : contexts) {
|
||||
threads.emplace_back(ParallelExecuteLoop, &context);
|
||||
}
|
||||
for (auto &thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
for (auto &context : contexts) {
|
||||
if (!context.success) {
|
||||
if (!context.error_message.empty()) {
|
||||
FAIL(context.error_message);
|
||||
} else {
|
||||
FAIL_LINE(context.error_file, context.error_line, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bool finished = false;
|
||||
while (!finished && !runner.finished_processing_file) {
|
||||
// execute the current iteration of the loop
|
||||
context.running_loops.push_back(loop_def);
|
||||
for (auto &statement : loop_commands) {
|
||||
statement->Execute(context);
|
||||
}
|
||||
context.running_loops.pop_back();
|
||||
loop_def.loop_idx++;
|
||||
if (loop_def.loop_idx >= loop_def.loop_end) {
|
||||
// finished
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool LoopCommand::SupportsConcurrent() const {
|
||||
for (auto &command : loop_commands) {
|
||||
if (!command->SupportsConcurrent()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void Query::ExecuteInternal(ExecuteContext &context) const {
|
||||
auto connection = CommandConnection(context);
|
||||
|
||||
{
|
||||
SQLLogicTestLogger logger(context, *this);
|
||||
if (runner.output_result_mode || runner.debug_mode) {
|
||||
logger.PrintLineSep();
|
||||
logger.PrintFileHeader();
|
||||
logger.PrintSQLFormatted();
|
||||
logger.PrintLineSep();
|
||||
}
|
||||
|
||||
if (runner.output_sql) {
|
||||
logger.PrintSQL();
|
||||
return;
|
||||
}
|
||||
}
|
||||
auto result = ExecuteQuery(context, connection, file_name, query_line);
|
||||
|
||||
TestResultHelper helper(runner);
|
||||
if (!helper.CheckQueryResult(*this, context, std::move(result))) {
|
||||
if (context.is_parallel) {
|
||||
runner.finished_processing_file = true;
|
||||
context.error_file = file_name;
|
||||
context.error_line = query_line;
|
||||
} else {
|
||||
FAIL_LINE(file_name, query_line, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void RestartCommand::ExecuteInternal(ExecuteContext &context) const {
|
||||
if (context.is_parallel) {
|
||||
throw std::runtime_error("Cannot restart database in parallel");
|
||||
}
|
||||
if (runner.dbpath.empty()) {
|
||||
throw std::runtime_error("cannot restart an in-memory database, did you forget to call \"load\"?");
|
||||
}
|
||||
// We save the main connection's configuration so we can pass it to the new connection
|
||||
runner.config->options = runner.con->context->db->config.options;
|
||||
auto client_config = runner.con->context->config;
|
||||
auto catalog_search_paths = runner.con->context->client_data->catalog_search_path->GetSetPaths();
|
||||
string low_query_writer_path;
|
||||
if (runner.con->context->client_data->log_query_writer) {
|
||||
low_query_writer_path = runner.con->context->client_data->log_query_writer->path;
|
||||
}
|
||||
|
||||
runner.LoadDatabase(runner.dbpath, load_extensions);
|
||||
|
||||
runner.con->context->config = client_config;
|
||||
|
||||
runner.con->BeginTransaction();
|
||||
runner.con->context->client_data->catalog_search_path->Set(catalog_search_paths, CatalogSetPathType::SET_SCHEMAS);
|
||||
runner.con->Commit();
|
||||
if (!low_query_writer_path.empty()) {
|
||||
runner.con->context->client_data->log_query_writer = make_uniq<BufferedFileWriter>(
|
||||
FileSystem::GetFileSystem(*runner.con->context), low_query_writer_path, 1 << 1 | 1 << 5);
|
||||
}
|
||||
}
|
||||
|
||||
void ReconnectCommand::ExecuteInternal(ExecuteContext &context) const {
|
||||
if (context.is_parallel) {
|
||||
throw std::runtime_error("Cannot reconnect in parallel");
|
||||
}
|
||||
runner.Reconnect();
|
||||
}
|
||||
|
||||
void ModeCommand::ExecuteInternal(ExecuteContext &context) const {
|
||||
if (parameter == "output_hash") {
|
||||
runner.output_hash_mode = true;
|
||||
} else if (parameter == "output_result") {
|
||||
runner.output_result_mode = true;
|
||||
} else if (parameter == "no_output") {
|
||||
runner.output_hash_mode = false;
|
||||
runner.output_result_mode = false;
|
||||
} else if (parameter == "debug") {
|
||||
runner.debug_mode = true;
|
||||
} else {
|
||||
throw std::runtime_error("unrecognized mode: " + parameter);
|
||||
}
|
||||
}
|
||||
|
||||
void SleepCommand::ExecuteInternal(ExecuteContext &context) const {
|
||||
switch (unit) {
|
||||
case SleepUnit::NANOSECOND:
|
||||
std::this_thread::sleep_for(std::chrono::duration<double, std::nano>(duration));
|
||||
break;
|
||||
case SleepUnit::MICROSECOND:
|
||||
std::this_thread::sleep_for(std::chrono::duration<double, std::micro>(duration));
|
||||
break;
|
||||
case SleepUnit::MILLISECOND:
|
||||
std::this_thread::sleep_for(std::chrono::duration<double, std::milli>(duration));
|
||||
break;
|
||||
case SleepUnit::SECOND:
|
||||
std::this_thread::sleep_for(std::chrono::duration<double, std::milli>(duration * 1000));
|
||||
break;
|
||||
default:
|
||||
throw std::runtime_error("Unrecognized sleep unit");
|
||||
}
|
||||
}
|
||||
|
||||
SleepUnit SleepCommand::ParseUnit(const string &unit) {
|
||||
if (unit == "second" || unit == "seconds" || unit == "sec") {
|
||||
return SleepUnit::SECOND;
|
||||
} else if (unit == "millisecond" || unit == "milliseconds" || unit == "milli") {
|
||||
return SleepUnit::MILLISECOND;
|
||||
} else if (unit == "microsecond" || unit == "microseconds" || unit == "micro") {
|
||||
return SleepUnit::MICROSECOND;
|
||||
} else if (unit == "nanosecond" || unit == "nanoseconds" || unit == "nano") {
|
||||
return SleepUnit::NANOSECOND;
|
||||
} else {
|
||||
throw std::runtime_error("Unrecognized sleep mode - expected second/millisecond/microescond/nanosecond");
|
||||
}
|
||||
}
|
||||
|
||||
void Statement::ExecuteInternal(ExecuteContext &context) const {
|
||||
auto connection = CommandConnection(context);
|
||||
{
|
||||
SQLLogicTestLogger logger(context, *this);
|
||||
if (runner.output_result_mode || runner.debug_mode) {
|
||||
logger.PrintLineSep();
|
||||
logger.PrintFileHeader();
|
||||
logger.PrintSQLFormatted();
|
||||
logger.PrintLineSep();
|
||||
}
|
||||
|
||||
query_break(query_line);
|
||||
if (runner.output_sql) {
|
||||
logger.PrintSQL();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
auto result = ExecuteQuery(context, connection, file_name, query_line);
|
||||
|
||||
TestResultHelper helper(runner);
|
||||
if (!helper.CheckStatementResult(*this, context, std::move(result))) {
|
||||
if (context.is_parallel) {
|
||||
runner.finished_processing_file = true;
|
||||
context.error_file = file_name;
|
||||
context.error_line = query_line;
|
||||
} else {
|
||||
FAIL_LINE(file_name, query_line, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
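// added note: despite the name, this streams a single gzip-compressed input file into an
// uncompressed output file in BUFFER_SIZE chunks; it does not walk a zip archive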
void UnzipCommand::ExecuteInternal(ExecuteContext &context) const {
|
||||
VirtualFileSystem vfs;
|
||||
|
||||
// input
|
||||
FileOpenFlags in_flags(FileFlags::FILE_FLAGS_READ);
|
||||
in_flags.SetCompression(FileCompressionType::GZIP);
|
||||
auto compressed_file_handle = vfs.OpenFile(input_path, in_flags);
|
||||
if (compressed_file_handle == nullptr) {
|
||||
throw CatalogException("Cannot open the file \"%s\"", input_path);
|
||||
}
|
||||
|
||||
// output
|
||||
FileOpenFlags out_flags(FileOpenFlags::FILE_FLAGS_FILE_CREATE | FileOpenFlags::FILE_FLAGS_WRITE);
|
||||
auto output_file = vfs.OpenFile(extraction_path, out_flags);
|
||||
if (!output_file) {
|
||||
throw CatalogException("Cannot open the file \"%s\"", extraction_path);
|
||||
}
|
||||
|
||||
// read the compressed data from the file
|
||||
while (true) {
|
||||
duckdb::unique_ptr<char[]> compressed_buffer(new char[BUFFER_SIZE]);
|
||||
int64_t bytes_read = vfs.Read(*compressed_file_handle, compressed_buffer.get(), BUFFER_SIZE);
|
||||
if (bytes_read == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
vfs.Write(*output_file, compressed_buffer.get(), bytes_read);
|
||||
}
|
||||
}
|
||||
|
||||
void LoadCommand::ExecuteInternal(ExecuteContext &context) const {
|
||||
auto resolved_path = runner.LoopReplacement(dbpath, context.running_loops);
|
||||
if (!readonly) {
|
||||
// delete the target database file, if it exists
|
||||
DeleteDatabase(resolved_path);
|
||||
}
|
||||
runner.dbpath = resolved_path;
|
||||
|
||||
// set up the config file
|
||||
if (readonly) {
|
||||
runner.config->options.use_temporary_directory = false;
|
||||
runner.config->options.access_mode = AccessMode::READ_ONLY;
|
||||
} else {
|
||||
runner.config->options.use_temporary_directory = true;
|
||||
runner.config->options.access_mode = AccessMode::AUTOMATIC;
|
||||
}
|
||||
if (runner.db) {
|
||||
if (version.empty()) {
|
||||
//! No version was provided, use the default of the main db.
|
||||
runner.config->options.serialization_compatibility =
|
||||
runner.db->instance->config.options.serialization_compatibility;
|
||||
} else {
|
||||
try {
|
||||
runner.config->options.serialization_compatibility = SerializationCompatibility::FromString(version);
|
||||
} catch (std::exception &ex) {
|
||||
ErrorData err(ex);
|
||||
SQLLogicTestLogger::LoadDatabaseFail(runner.file_name, dbpath, err.Message());
|
||||
FAIL();
|
||||
}
|
||||
}
|
||||
}
|
||||
// now create the database file
|
||||
runner.LoadDatabase(resolved_path, true);
|
||||
}
|
||||
|
||||
} // namespace duckdb
|
||||
211 external/duckdb/test/sqlite/sqllogic_command.hpp vendored Normal file
@@ -0,0 +1,211 @@
//===----------------------------------------------------------------------===//
// DuckDB
//
// sqllogic_command.hpp
//
//
//===----------------------------------------------------------------------===//

#pragma once

#include "duckdb.hpp"
#include "duckdb/common/virtual_file_system.hpp"
#include "test_config.hpp"

namespace duckdb {
class SQLLogicTestRunner;

enum class ExpectedResult : uint8_t { RESULT_SUCCESS, RESULT_ERROR, RESULT_UNKNOWN };

struct LoopDefinition {
string loop_iterator_name;
int loop_idx;
int loop_start;
int loop_end;
bool is_parallel;
vector<string> tokens;
};

struct ExecuteContext {
ExecuteContext() : con(nullptr), is_parallel(false) {
}
ExecuteContext(Connection *con, vector<LoopDefinition> running_loops_p)
    : con(con), running_loops(std::move(running_loops_p)), is_parallel(true) {
}

Connection *con;
vector<LoopDefinition> running_loops;
bool is_parallel;
string sql_query;
string error_file;
int error_line;
};

struct Condition {
string keyword;
string value;
ExpressionType comparison;
bool skip_if;
};

class Command {
public:
Command(SQLLogicTestRunner &runner);
virtual ~Command();

SQLLogicTestRunner &runner;
string connection_name;
int query_line;
string base_sql_query;
string file_name;
vector<Condition> conditions;

public:
Connection *CommandConnection(ExecuteContext &context) const;

duckdb::unique_ptr<MaterializedQueryResult> ExecuteQuery(ExecuteContext &context, Connection *connection,
    string file_name, idx_t query_line) const;

virtual void ExecuteInternal(ExecuteContext &context) const = 0;
void Execute(ExecuteContext &context) const;

virtual bool SupportsConcurrent() const {
return false;
}

private:
void RestartDatabase(ExecuteContext &context, Connection *&connection, string sql_query) const;
};

class Statement : public Command {
public:
Statement(SQLLogicTestRunner &runner);

ExpectedResult expected_result;
string expected_error;

public:
void ExecuteInternal(ExecuteContext &context) const override;

bool SupportsConcurrent() const override {
return true;
}
};

class ResetLabel : public Command {
public:
ResetLabel(SQLLogicTestRunner &runner);

public:
void ExecuteInternal(ExecuteContext &context) const override;

bool SupportsConcurrent() const override {
return true;
}

public:
string query_label;
};

class Query : public Command {
public:
Query(SQLLogicTestRunner &runner);

idx_t expected_column_count = 0;
SortStyle sort_style = SortStyle::NO_SORT;
vector<string> values;
bool query_has_label = false;
string query_label;

public:
void ExecuteInternal(ExecuteContext &context) const override;

bool SupportsConcurrent() const override {
return true;
}
};

class RestartCommand : public Command {
public:
bool load_extensions;
RestartCommand(SQLLogicTestRunner &runner, bool load_extensions);

public:
void ExecuteInternal(ExecuteContext &context) const override;
};

class ReconnectCommand : public Command {
public:
ReconnectCommand(SQLLogicTestRunner &runner);

public:
void ExecuteInternal(ExecuteContext &context) const override;
};

class LoopCommand : public Command {
public:
LoopCommand(SQLLogicTestRunner &runner, LoopDefinition definition_p);

public:
LoopDefinition definition;
vector<duckdb::unique_ptr<Command>> loop_commands;

void ExecuteInternal(ExecuteContext &context) const override;

bool SupportsConcurrent() const override;
};

class ModeCommand : public Command {
public:
ModeCommand(SQLLogicTestRunner &runner, string parameter);

public:
string parameter;

void ExecuteInternal(ExecuteContext &context) const override;
};

enum class SleepUnit : uint8_t { NANOSECOND, MICROSECOND, MILLISECOND, SECOND };

class SleepCommand : public Command {
public:
SleepCommand(SQLLogicTestRunner &runner, idx_t duration, SleepUnit unit);

public:
void ExecuteInternal(ExecuteContext &context) const override;

static SleepUnit ParseUnit(const string &unit);

private:
idx_t duration;
SleepUnit unit;
};

class UnzipCommand : public Command {
public:
// 1 MB
static constexpr const int64_t BUFFER_SIZE = 1u << 20;

public:
UnzipCommand(SQLLogicTestRunner &runner, string &input, string &output);

void ExecuteInternal(ExecuteContext &context) const override;

private:
string input_path;
string extraction_path;
};

class LoadCommand : public Command {
public:
LoadCommand(SQLLogicTestRunner &runner, string dbpath, bool readonly, const string &version = "");

string dbpath;
bool readonly;
string version;

public:
void ExecuteInternal(ExecuteContext &context) const override;
};

} // namespace duckdb
275
external/duckdb/test/sqlite/sqllogic_parser.cpp
vendored
Normal file
@@ -0,0 +1,275 @@
#include "sqllogic_parser.hpp"
#include "catch.hpp"

#include <fstream>

namespace duckdb {

bool SQLLogicParser::OpenFile(const string &path) {
this->file_name = path;

std::ifstream infile(file_name);
if (infile.bad() || infile.fail()) {
return false;
}

string line;
while (std::getline(infile, line)) {
lines.push_back(StringUtil::Replace(line, "\r", ""));
}
return !infile.bad();
}

bool SQLLogicParser::EmptyOrComment(const string &line) {
return line.empty() || StringUtil::StartsWith(line, "#");
}

bool SQLLogicParser::NextLineEmptyOrComment() {
if (current_line + 1 >= lines.size()) {
return true;
} else {
return EmptyOrComment(lines[current_line + 1]);
}
}

bool SQLLogicParser::NextStatement() {
if (seen_statement) {
// skip the current statement
// but only if we have already seen a statement in the file
while (current_line < lines.size() && !EmptyOrComment(lines[current_line])) {
current_line++;
}
}
seen_statement = true;
// now look for the first non-empty line
while (current_line < lines.size() && EmptyOrComment(lines[current_line])) {
current_line++;
}
// return whether or not we reached the end of the file
return current_line < lines.size();
}

void SQLLogicParser::NextLine() {
current_line++;
}

string SQLLogicParser::ExtractStatement() {
string statement;

bool first_line = true;
while (current_line < lines.size() && !EmptyOrComment(lines[current_line])) {
if (lines[current_line] == "----") {
break;
}
if (!first_line) {
statement += "\n";
}
statement += lines[current_line];
first_line = false;

current_line++;
}

return statement;
}

vector<string> SQLLogicParser::ExtractExpectedResult() {
vector<string> result;
// skip the result line (----) if we are still reading that
if (current_line < lines.size() && lines[current_line] == "----") {
current_line++;
}
// read the expected result until we encounter a new line
while (current_line < lines.size() && !lines[current_line].empty()) {
result.push_back(lines[current_line]);
current_line++;
}
return result;
}

string SQLLogicParser::ExtractExpectedError(bool expect_ok, bool original_sqlite_test) {
// check if there is an expected error at all
if (current_line >= lines.size() || lines[current_line] != "----") {
if (!expect_ok && !original_sqlite_test) {
Fail("Failed to parse statement: statement error needs to have an expected error message");
}
return string();
}
if (expect_ok) {
Fail("Failed to parse statement: only statement error can have an expected error message, not statement ok");
}
current_line++;
string error;
vector<string> error_lines;
while (current_line < lines.size() && !lines[current_line].empty()) {
error_lines.push_back(lines[current_line]);
current_line++;
}
error = StringUtil::Join(error_lines, "\n");
return error;
}

void SQLLogicParser::FailRecursive(const string &msg, vector<ExceptionFormatValue> &values) {
auto error_message =
    file_name + ":" + to_string(current_line + 1) + ": " + ExceptionFormatValue::Format(msg, values);
FAIL(error_message.c_str());
}

SQLLogicToken SQLLogicParser::Tokenize() {
SQLLogicToken result;
if (current_line >= lines.size()) {
result.type = SQLLogicTokenType::SQLLOGIC_INVALID;
return result;
}

vector<string> argument_list;
auto &line = lines[current_line];
idx_t last_pos = 0;
for (idx_t i = 0; i < line.size(); i++) {
if (StringUtil::CharacterIsSpace(line[i])) {
if (i == last_pos) {
last_pos++;
} else {
argument_list.push_back(line.substr(last_pos, i - last_pos));
last_pos = i + 1;
}
}
}
if (last_pos != line.size()) {
argument_list.push_back(line.substr(last_pos, line.size() - last_pos));
}
if (argument_list.empty()) {
Fail("Empty line!?");
}
result.type = CommandToToken(argument_list[0]);
for (idx_t i = 1; i < argument_list.size(); i++) {
result.parameters.push_back(std::move(argument_list[i]));
}
return result;
}

// Single line statements should throw a parser error if the next line is not a comment or a newline
bool SQLLogicParser::IsSingleLineStatement(SQLLogicToken &token) {
switch (token.type) {
case SQLLogicTokenType::SQLLOGIC_HASH_THRESHOLD:
case SQLLogicTokenType::SQLLOGIC_HALT:
case SQLLogicTokenType::SQLLOGIC_MODE:
case SQLLogicTokenType::SQLLOGIC_SET:
case SQLLogicTokenType::SQLLOGIC_RESET:
case SQLLogicTokenType::SQLLOGIC_LOOP:
case SQLLogicTokenType::SQLLOGIC_FOREACH:
case SQLLogicTokenType::SQLLOGIC_CONCURRENT_LOOP:
case SQLLogicTokenType::SQLLOGIC_CONCURRENT_FOREACH:
case SQLLogicTokenType::SQLLOGIC_ENDLOOP:
case SQLLogicTokenType::SQLLOGIC_REQUIRE:
case SQLLogicTokenType::SQLLOGIC_REQUIRE_ENV:
case SQLLogicTokenType::SQLLOGIC_TEST_ENV:
case SQLLogicTokenType::SQLLOGIC_LOAD:
case SQLLogicTokenType::SQLLOGIC_RESTART:
case SQLLogicTokenType::SQLLOGIC_RECONNECT:
case SQLLogicTokenType::SQLLOGIC_SLEEP:
case SQLLogicTokenType::SQLLOGIC_UNZIP:
case SQLLogicTokenType::SQLLOGIC_TAGS:
return true;

case SQLLogicTokenType::SQLLOGIC_SKIP_IF:
case SQLLogicTokenType::SQLLOGIC_ONLY_IF:
case SQLLogicTokenType::SQLLOGIC_INVALID:
case SQLLogicTokenType::SQLLOGIC_STATEMENT:
case SQLLogicTokenType::SQLLOGIC_QUERY:
return false;

default:
throw std::runtime_error("Unknown SQLLogic token found!");
}
}

// (All) Context statements must precede all non-header statements
bool SQLLogicParser::IsTestCommand(SQLLogicTokenType &type) {
switch (type) {
case SQLLogicTokenType::SQLLOGIC_QUERY:
case SQLLogicTokenType::SQLLOGIC_STATEMENT:
return true;

case SQLLogicTokenType::SQLLOGIC_CONCURRENT_FOREACH:
case SQLLogicTokenType::SQLLOGIC_CONCURRENT_LOOP:
case SQLLogicTokenType::SQLLOGIC_ENDLOOP:
case SQLLogicTokenType::SQLLOGIC_FOREACH:
case SQLLogicTokenType::SQLLOGIC_HALT:
case SQLLogicTokenType::SQLLOGIC_HASH_THRESHOLD:
case SQLLogicTokenType::SQLLOGIC_INVALID:
case SQLLogicTokenType::SQLLOGIC_LOAD:
case SQLLogicTokenType::SQLLOGIC_LOOP:
case SQLLogicTokenType::SQLLOGIC_MODE:
case SQLLogicTokenType::SQLLOGIC_ONLY_IF:
case SQLLogicTokenType::SQLLOGIC_RECONNECT:
case SQLLogicTokenType::SQLLOGIC_REQUIRE:
case SQLLogicTokenType::SQLLOGIC_REQUIRE_ENV:
case SQLLogicTokenType::SQLLOGIC_RESET:
case SQLLogicTokenType::SQLLOGIC_RESTART:
case SQLLogicTokenType::SQLLOGIC_SET:
case SQLLogicTokenType::SQLLOGIC_SKIP_IF:
case SQLLogicTokenType::SQLLOGIC_SLEEP:
case SQLLogicTokenType::SQLLOGIC_TAGS:
case SQLLogicTokenType::SQLLOGIC_TEST_ENV:
case SQLLogicTokenType::SQLLOGIC_UNZIP:
return false;

default:
throw std::runtime_error("Unknown SQLLogic token found!");
}
}

SQLLogicTokenType SQLLogicParser::CommandToToken(const string &token) {
if (token == "skipif") {
return SQLLogicTokenType::SQLLOGIC_SKIP_IF;
} else if (token == "onlyif") {
return SQLLogicTokenType::SQLLOGIC_ONLY_IF;
} else if (token == "statement") {
return SQLLogicTokenType::SQLLOGIC_STATEMENT;
} else if (token == "query") {
return SQLLogicTokenType::SQLLOGIC_QUERY;
} else if (token == "hash-threshold") {
return SQLLogicTokenType::SQLLOGIC_HASH_THRESHOLD;
} else if (token == "halt") {
return SQLLogicTokenType::SQLLOGIC_HALT;
} else if (token == "mode") {
return SQLLogicTokenType::SQLLOGIC_MODE;
} else if (token == "set") {
return SQLLogicTokenType::SQLLOGIC_SET;
} else if (token == "reset") {
return SQLLogicTokenType::SQLLOGIC_RESET;
} else if (token == "loop") {
return SQLLogicTokenType::SQLLOGIC_LOOP;
} else if (token == "concurrentloop") {
return SQLLogicTokenType::SQLLOGIC_CONCURRENT_LOOP;
} else if (token == "foreach") {
return SQLLogicTokenType::SQLLOGIC_FOREACH;
} else if (token == "concurrentforeach") {
return SQLLogicTokenType::SQLLOGIC_CONCURRENT_FOREACH;
} else if (token == "endloop") {
return SQLLogicTokenType::SQLLOGIC_ENDLOOP;
} else if (token == "require") {
return SQLLogicTokenType::SQLLOGIC_REQUIRE;
} else if (token == "require-env") {
return SQLLogicTokenType::SQLLOGIC_REQUIRE_ENV;
} else if (token == "test-env") {
return SQLLogicTokenType::SQLLOGIC_TEST_ENV;
} else if (token == "load") {
return SQLLogicTokenType::SQLLOGIC_LOAD;
} else if (token == "restart") {
return SQLLogicTokenType::SQLLOGIC_RESTART;
} else if (token == "reconnect") {
return SQLLogicTokenType::SQLLOGIC_RECONNECT;
} else if (token == "sleep") {
return SQLLogicTokenType::SQLLOGIC_SLEEP;
} else if (token == "unzip") {
return SQLLogicTokenType::SQLLOGIC_UNZIP;
} else if (token == "tags") {
return SQLLogicTokenType::SQLLOGIC_TAGS;
}
Fail("Unrecognized parameter %s", token);
return SQLLogicTokenType::SQLLOGIC_INVALID;
}

} // namespace duckdb
112
external/duckdb/test/sqlite/sqllogic_parser.hpp
vendored
Normal file
@@ -0,0 +1,112 @@
//===----------------------------------------------------------------------===//
// DuckDB
//
// test_parser.hpp
//
//
//===----------------------------------------------------------------------===//

#pragma once

#include "duckdb.hpp"
#include "duckdb/common/types.hpp"
#include "duckdb/common/exception_format_value.hpp"

namespace duckdb {

enum class SQLLogicTokenType {
SQLLOGIC_INVALID,
SQLLOGIC_SKIP_IF,
SQLLOGIC_ONLY_IF,
SQLLOGIC_STATEMENT,
SQLLOGIC_QUERY,
SQLLOGIC_HASH_THRESHOLD,
SQLLOGIC_HALT,
SQLLOGIC_MODE,
SQLLOGIC_SET,
SQLLOGIC_RESET,
SQLLOGIC_LOOP,
SQLLOGIC_FOREACH,
SQLLOGIC_CONCURRENT_LOOP,
SQLLOGIC_CONCURRENT_FOREACH,
SQLLOGIC_ENDLOOP,
SQLLOGIC_REQUIRE,
SQLLOGIC_REQUIRE_ENV,
SQLLOGIC_TEST_ENV,
SQLLOGIC_LOAD,
SQLLOGIC_RESTART,
SQLLOGIC_RECONNECT,
SQLLOGIC_SLEEP,
SQLLOGIC_UNZIP,
SQLLOGIC_TAGS
};

class SQLLogicToken {
public:
SQLLogicTokenType type;
vector<string> parameters;
};

class SQLLogicParser {
public:
string file_name;
//! The lines of the current text file
vector<string> lines;
//! The current line number
idx_t current_line = 0;
//! Whether or not the input should be printed to stdout as it is executed
bool print_input = false;
//! Whether or not we have seen a statement
bool seen_statement = false;

public:
static bool EmptyOrComment(const string &line);
static bool IsSingleLineStatement(SQLLogicToken &token);
static bool IsTestCommand(SQLLogicTokenType &type);

//! Does the next line contain a comment, empty line, or is the end of the file
bool NextLineEmptyOrComment();

//! Opens the file, returns whether or not reading was successful
bool OpenFile(const string &path);

//! Moves the current line to the beginning of the next statement
//! Returns false if there is no next statement (i.e. we reached the end of the file)
bool NextStatement();

//! Move to the next line
void NextLine();

//! Extract a statement and move the current_line pointer forward
//! if "is_query" is false, the statement stops at the next empty line
//! if "is_query" is true, the statement stops at the next empty line or the next ----
string ExtractStatement();

//! Extract the expected result
vector<string> ExtractExpectedResult();

//! Extract the expected error (in case of statement error)
string ExtractExpectedError(bool expect_ok, bool original_sqlite_test);

//! Tokenize the current line
SQLLogicToken Tokenize();

template <typename... Args>
void Fail(const string &msg, Args... params) {
vector<ExceptionFormatValue> values;
FailRecursive(msg, values, params...);
}

private:
SQLLogicTokenType CommandToToken(const string &token);

void FailRecursive(const string &msg, vector<ExceptionFormatValue> &values);

template <class T, typename... Args>
void FailRecursive(const string &msg, vector<ExceptionFormatValue> &values, T param, Args... params) {
values.push_back(ExceptionFormatValue::CreateFormatValue<T>(param));
FailRecursive(msg, values, params...);
}
};

} // namespace duckdb
350
external/duckdb/test/sqlite/sqllogic_test_logger.cpp
vendored
Normal file
@@ -0,0 +1,350 @@
|
||||
#include "sqllogic_test_logger.hpp"
|
||||
#include "duckdb/parser/parser.hpp"
|
||||
#include "termcolor.hpp"
|
||||
#include "result_helper.hpp"
|
||||
#include "sqllogic_test_runner.hpp"
|
||||
#include "test_helpers.hpp"
|
||||
|
||||
namespace duckdb {
|
||||
|
||||
SQLLogicTestLogger::SQLLogicTestLogger(ExecuteContext &context, const Command &command)
|
||||
: log_lock(command.runner.log_lock), file_name(command.file_name), query_line(command.query_line),
|
||||
sql_query(context.sql_query) {
|
||||
}
|
||||
|
||||
SQLLogicTestLogger::~SQLLogicTestLogger() {
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::Log(const string &annotation, const string &str) {
|
||||
std::cerr << annotation << str;
|
||||
AppendFailure(str);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::AppendFailure(const string &log_message) {
|
||||
FailureSummary::Log(log_message);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::LogFailure(const string &log_message) {
|
||||
Log("", log_message);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::LogFailureAnnotation(const string &log_message) {
|
||||
const char *ci = std::getenv("CI");
|
||||
// check the value is "true" otherwise you'll see the prefix in local run outputs
|
||||
auto prefix = (ci && string(ci) == "true") ? "\n::error::" : "";
|
||||
Log(prefix, log_message);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintSummaryHeader(const std::string &file_name, idx_t query_line) {
|
||||
auto failures_count = to_string(FailureSummary::GetSummaryCounter());
|
||||
if (std::getenv("NO_DUPLICATING_HEADERS") == 0) {
|
||||
LogFailure("\n" + failures_count + ". " + file_name + ":" + to_string(query_line) + "\n");
|
||||
PrintLineSep();
|
||||
}
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintExpectedResult(const vector<string> &values, idx_t columns, bool row_wise) {
|
||||
if (row_wise) {
|
||||
for (idx_t r = 0; r < values.size(); r++) {
|
||||
LogFailure("\n" + values[r]);
|
||||
}
|
||||
} else {
|
||||
idx_t c = 0;
|
||||
for (idx_t r = 0; r < values.size(); r++) {
|
||||
if (c != 0) {
|
||||
LogFailure("\t");
|
||||
}
|
||||
LogFailure(values[r]);
|
||||
c++;
|
||||
if (c >= columns) {
|
||||
LogFailure("\n");
|
||||
c = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
LogFailure("\n");
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintLineSep() {
|
||||
string line_sep = string(80, '=');
|
||||
std::ostringstream oss;
|
||||
oss << termcolor::color<128, 128, 128> << line_sep << termcolor::reset << std::endl;
|
||||
LogFailure(oss.str());
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintHeader(string header) {
|
||||
std::ostringstream oss;
|
||||
oss << termcolor::bold << header << termcolor::reset << std::endl;
|
||||
LogFailure(oss.str());
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintFileHeader() {
|
||||
PrintHeader("File " + file_name + ":" + to_string(query_line) + ")");
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintSQL() {
|
||||
string query = sql_query;
|
||||
if (StringUtil::EndsWith(sql_query, "\n")) {
|
||||
// ends with a newline: don't add one
|
||||
if (!StringUtil::EndsWith(sql_query, ";\n")) {
|
||||
// no semicolon though
|
||||
query[query.size() - 1] = ';';
|
||||
}
|
||||
} else {
|
||||
if (!StringUtil::EndsWith(sql_query, ";")) {
|
||||
query += ";";
|
||||
}
|
||||
}
|
||||
Log("", query + "\n");
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintSQLFormatted() {
|
||||
std::cerr << termcolor::bold << "SQL Query" << termcolor::reset << std::endl;
|
||||
auto tokens = Parser::Tokenize(sql_query);
|
||||
for (idx_t i = 0; i < tokens.size(); i++) {
|
||||
auto &token = tokens[i];
|
||||
idx_t next = i + 1 < tokens.size() ? tokens[i + 1].start : sql_query.size();
|
||||
// adjust the highlighting based on the type
|
||||
switch (token.type) {
|
||||
case SimplifiedTokenType::SIMPLIFIED_TOKEN_IDENTIFIER:
|
||||
case SimplifiedTokenType::SIMPLIFIED_TOKEN_ERROR:
|
||||
break;
|
||||
case SimplifiedTokenType::SIMPLIFIED_TOKEN_NUMERIC_CONSTANT:
|
||||
case SimplifiedTokenType::SIMPLIFIED_TOKEN_STRING_CONSTANT:
|
||||
std::cerr << termcolor::yellow;
|
||||
break;
|
||||
case SimplifiedTokenType::SIMPLIFIED_TOKEN_OPERATOR:
|
||||
break;
|
||||
case SimplifiedTokenType::SIMPLIFIED_TOKEN_KEYWORD:
|
||||
std::cerr << termcolor::green << termcolor::bold;
|
||||
break;
|
||||
case SimplifiedTokenType::SIMPLIFIED_TOKEN_COMMENT:
|
||||
std::cerr << termcolor::grey;
|
||||
break;
|
||||
}
|
||||
// print the current token
|
||||
std::cerr << sql_query.substr(token.start, next - token.start);
|
||||
// reset and move to the next token
|
||||
std::cerr << termcolor::reset;
|
||||
}
|
||||
std::cerr << std::endl;
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintErrorHeader(const string &file_name, idx_t query_line, const string &description) {
|
||||
std::ostringstream oss;
|
||||
PrintSummaryHeader(file_name, query_line);
|
||||
oss << termcolor::red << termcolor::bold << description << " " << termcolor::reset;
|
||||
if (!file_name.empty()) {
|
||||
oss << termcolor::bold << "(" << file_name << ":" << query_line << ")!" << termcolor::reset;
|
||||
}
|
||||
LogFailureAnnotation(oss.str() + "\n");
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintErrorHeader(const string &description) {
|
||||
PrintErrorHeader(file_name, query_line, description);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintResultError(const vector<string> &result_values, const vector<string> &values,
|
||||
idx_t expected_column_count, bool row_wise) {
|
||||
PrintHeader("Expected result:");
|
||||
PrintLineSep();
|
||||
PrintExpectedResult(values, expected_column_count, row_wise);
|
||||
PrintLineSep();
|
||||
PrintHeader("Actual result:");
|
||||
PrintLineSep();
|
||||
PrintExpectedResult(result_values, expected_column_count, false);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintResultString(MaterializedQueryResult &result) {
|
||||
LogFailure(result.ToString());
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::PrintResultError(MaterializedQueryResult &result, const vector<string> &values,
|
||||
idx_t expected_column_count, bool row_wise) {
|
||||
PrintHeader("Expected result:");
|
||||
PrintLineSep();
|
||||
PrintExpectedResult(values, expected_column_count, row_wise);
|
||||
PrintLineSep();
|
||||
PrintHeader("Actual result:");
|
||||
PrintLineSep();
|
||||
PrintResultString(result);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::UnexpectedFailure(MaterializedQueryResult &result) {
|
||||
std::ostringstream oss;
|
||||
PrintErrorHeader("Query unexpectedly failed (" + file_name + ":" + to_string(query_line) + ")\n");
|
||||
LogFailure(oss.str());
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
PrintHeader("Actual result:");
|
||||
PrintLineSep();
|
||||
PrintResultString(result);
|
||||
}
|
||||
void SQLLogicTestLogger::OutputResult(MaterializedQueryResult &result, const vector<string> &result_values_string) {
|
||||
// names
|
||||
for (idx_t c = 0; c < result.ColumnCount(); c++) {
|
||||
if (c != 0) {
|
||||
LogFailure("\t");
|
||||
}
|
||||
LogFailure(result.names[c]);
|
||||
}
|
||||
LogFailure("\n");
|
||||
// types
|
||||
for (idx_t c = 0; c < result.ColumnCount(); c++) {
|
||||
if (c != 0) {
|
||||
LogFailure("\t");
|
||||
}
|
||||
LogFailure(result.types[c].ToString());
|
||||
}
|
||||
LogFailure("\n");
|
||||
PrintLineSep();
|
||||
for (idx_t r = 0; r < result.RowCount(); r++) {
|
||||
for (idx_t c = 0; c < result.ColumnCount(); c++) {
|
||||
if (c != 0) {
|
||||
LogFailure("\t");
|
||||
}
|
||||
LogFailure(result_values_string[r * result.ColumnCount() + c]);
|
||||
}
|
||||
LogFailure("\n");
|
||||
}
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::OutputHash(const string &hash_value) {
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
LogFailure(hash_value + "\n");
|
||||
PrintLineSep();
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::ColumnCountMismatch(MaterializedQueryResult &result,
|
||||
const vector<string> &result_values_string, idx_t expected_column_count,
|
||||
bool row_wise) {
|
||||
std::ostringstream oss;
|
||||
PrintErrorHeader("Wrong column count in query!");
|
||||
oss << "Expected " << termcolor::bold << expected_column_count << termcolor::reset << " columns, but got "
|
||||
<< termcolor::bold << result.ColumnCount() << termcolor::reset << " columns" << std::endl;
|
||||
LogFailure(oss.str());
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
PrintResultError(result, result_values_string, expected_column_count, row_wise);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::NotCleanlyDivisible(idx_t expected_column_count, idx_t actual_column_count) {
|
||||
PrintLineSep();
|
||||
PrintErrorHeader("Error in test!");
|
||||
PrintLineSep();
|
||||
LogFailure("Expected " + to_string(expected_column_count) + " columns, but " + to_string(actual_column_count) +
|
||||
" values were supplied\nThis is not cleanly divisible (i.e. the last row does not have enough values)");
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::WrongRowCount(idx_t expected_rows, MaterializedQueryResult &result,
|
||||
const vector<string> &comparison_values, idx_t expected_column_count,
|
||||
bool row_wise) {
|
||||
std::ostringstream oss;
|
||||
PrintErrorHeader("Wrong row count in query!");
|
||||
oss << "Expected " << termcolor::bold << expected_rows << termcolor::reset << " rows, but got " << termcolor::bold
|
||||
<< result.RowCount() << termcolor::reset << " rows" << std::endl;
|
||||
LogFailure(oss.str());
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
PrintResultError(result, comparison_values, expected_column_count, row_wise);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::ColumnCountMismatchCorrectResult(idx_t original_expected_columns, idx_t expected_column_count,
|
||||
MaterializedQueryResult &result) {
|
||||
|
||||
std::ostringstream oss;
|
||||
PrintErrorHeader("Wrong column count in query!");
|
||||
oss << "Expected " << termcolor::bold << original_expected_columns << termcolor::reset << " columns, but got "
|
||||
<< termcolor::bold << expected_column_count << termcolor::reset << " columns" << std::endl;
|
||||
LogFailure(oss.str());
|
||||
oss.str("");
|
||||
oss.clear();
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
oss << "The expected result " << termcolor::bold << "matched" << termcolor::reset << " the query result."
|
||||
<< std::endl;
|
||||
LogFailure(oss.str());
|
||||
oss.str("");
|
||||
oss.clear();
|
||||
oss << termcolor::bold << "Suggested fix: modify header to \"" << termcolor::green << "query "
|
||||
<< string(result.ColumnCount(), 'I') << termcolor::reset << termcolor::bold << "\"" << termcolor::reset
|
||||
<< std::endl;
|
||||
LogFailure(oss.str());
|
||||
PrintLineSep();
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::SplitMismatch(idx_t row_number, idx_t expected_column_count, idx_t split_count) {
|
||||
|
||||
std::ostringstream oss;
|
||||
PrintLineSep();
|
||||
PrintErrorHeader("Error in test! Column count mismatch after splitting on tab on row " + to_string(row_number) +
|
||||
"!");
|
||||
oss << "Expected " << termcolor::bold << expected_column_count << termcolor::reset << " columns, but got "
|
||||
<< termcolor::bold << split_count << termcolor::reset << " columns" << std::endl;
|
||||
LogFailure(oss.str());
|
||||
LogFailure("Does the result contain tab values? In that case, place every value on a single row.\n");
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::WrongResultHash(QueryResult *expected_result, MaterializedQueryResult &result,
|
||||
const string &expected_hash, const string &actual_hash) {
|
||||
if (expected_result) {
|
||||
expected_result->Print();
|
||||
expected_result->ToString();
|
||||
} else {
|
||||
LogFailure("???\n");
|
||||
}
|
||||
PrintErrorHeader("Wrong result hash!");
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
PrintHeader("Expected result:");
|
||||
PrintLineSep();
|
||||
PrintHeader("Actual result:");
|
||||
PrintLineSep();
|
||||
PrintResultString(result);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::UnexpectedStatement(bool expect_ok, MaterializedQueryResult &result) {
|
||||
PrintErrorHeader(!expect_ok ? "Query unexpectedly succeeded!" : "Query unexpectedly failed!");
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
PrintResultString(result);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::ExpectedErrorMismatch(const string &expected_error, MaterializedQueryResult &result) {
|
||||
PrintErrorHeader("Query failed, but error message did not match expected error message: " + expected_error);
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintLineSep();
|
||||
PrintHeader("Actual result:");
|
||||
PrintLineSep();
|
||||
PrintResultString(result);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::InternalException(MaterializedQueryResult &result) {
|
||||
PrintErrorHeader("Query failed with internal exception!");
|
||||
PrintLineSep();
|
||||
PrintSQL();
|
||||
PrintHeader("Actual result:");
|
||||
PrintLineSep();
|
||||
PrintResultString(result);
|
||||
}
|
||||
|
||||
void SQLLogicTestLogger::LoadDatabaseFail(const string &file_name, const string &dbpath, const string &message) {
|
||||
PrintErrorHeader(file_name, 0, "Failed to load database " + dbpath);
|
||||
PrintLineSep();
|
||||
LogFailure("Error message: " + message + "\n");
|
||||
PrintLineSep();
|
||||
}
|
||||
|
||||
} // namespace duckdb
|
||||
68
external/duckdb/test/sqlite/sqllogic_test_logger.hpp
vendored
Normal file
@@ -0,0 +1,68 @@
//===----------------------------------------------------------------------===//
// DuckDB
//
// sqllogic_test_logger.hpp
//
//
//===----------------------------------------------------------------------===//

#pragma once

#include "duckdb.hpp"
#include "duckdb/common/mutex.hpp"
#include "sqllogic_command.hpp"

namespace duckdb {

class Command;
class LoopCommand;

class SQLLogicTestLogger {
public:
SQLLogicTestLogger(ExecuteContext &context, const Command &command);
~SQLLogicTestLogger();

static void Log(const string &annotation, const string &str);
void PrintExpectedResult(const vector<string> &values, idx_t columns, bool row_wise);
static void PrintLineSep();
static void PrintHeader(string header);
void PrintFileHeader();
void PrintSQL();
void PrintSQLFormatted();
void PrintErrorHeader(const string &description);
static void PrintErrorHeader(const string &file_name, idx_t query_line, const string &description);
void PrintResultError(const vector<string> &result_values, const vector<string> &values,
    idx_t expected_column_count, bool row_wise);
static void PrintSummaryHeader(const std::string &file_name, idx_t query_line);
void PrintResultError(MaterializedQueryResult &result, const vector<string> &values, idx_t expected_column_count,
    bool row_wise);
void PrintResultString(MaterializedQueryResult &result);
void UnexpectedFailure(MaterializedQueryResult &result);
void OutputResult(MaterializedQueryResult &result, const vector<string> &result_values_string);
void OutputHash(const string &hash_value);
void ColumnCountMismatch(MaterializedQueryResult &result, const vector<string> &result_values_string,
    idx_t expected_column_count, bool row_wise);
void NotCleanlyDivisible(idx_t expected_column_count, idx_t actual_column_count);
void WrongRowCount(idx_t expected_rows, MaterializedQueryResult &result, const vector<string> &comparison_values,
    idx_t expected_column_count, bool row_wise);
void ColumnCountMismatchCorrectResult(idx_t original_expected_columns, idx_t expected_column_count,
    MaterializedQueryResult &result);
void SplitMismatch(idx_t row_number, idx_t expected_column_count, idx_t split_count);
void WrongResultHash(QueryResult *expected_result, MaterializedQueryResult &result, const string &expected_hash,
    const string &actual_hash);
void UnexpectedStatement(bool expect_ok, MaterializedQueryResult &result);
void ExpectedErrorMismatch(const string &expected_error, MaterializedQueryResult &result);
void InternalException(MaterializedQueryResult &result);
static void LoadDatabaseFail(const string &file_name, const string &dbpath, const string &message);

static void AppendFailure(const string &log_message);
static void LogFailure(const string &log_message);
static void LogFailureAnnotation(const string &log_message);

private:
lock_guard<mutex> log_lock;
string file_name;
int query_line;
string sql_query;
};
} // namespace duckdb
1237
external/duckdb/test/sqlite/sqllogic_test_runner.cpp
vendored
Normal file
File diff suppressed because it is too large
106
external/duckdb/test/sqlite/sqllogic_test_runner.hpp
vendored
Normal file
@@ -0,0 +1,106 @@
//===----------------------------------------------------------------------===//
// DuckDB
//
// sqllogic_test_runner.hpp
//
//
//===----------------------------------------------------------------------===//

#pragma once

#include "duckdb.hpp"
#include "duckdb/common/mutex.hpp"
#include "sqllogic_command.hpp"
#include "test_config.hpp"

namespace duckdb {

class Command;
class LoopCommand;
class SQLLogicParser;

enum class RequireResult { PRESENT, MISSING };

struct CachedLabelData {
public:
CachedLabelData(const string &hash, unique_ptr<QueryResult> result) : hash(hash), result(std::move(result)) {
}

public:
string hash;
unique_ptr<QueryResult> result;
};

struct HashLabelMap {
public:
void WithLock(std::function<void(unordered_map<string, CachedLabelData> &map)> cb) {
std::lock_guard<std::mutex> guard(lock);
cb(map);
}

public:
std::mutex lock;
unordered_map<string, CachedLabelData> map;
};

class SQLLogicTestRunner {
public:
SQLLogicTestRunner(string dbpath);
~SQLLogicTestRunner();

string file_name;
string dbpath;
vector<string> loaded_databases;
duckdb::unique_ptr<DuckDB> db;
duckdb::unique_ptr<Connection> con;
duckdb::unique_ptr<DBConfig> config;
unordered_set<string> extensions;
unordered_map<string, duckdb::unique_ptr<Connection>> named_connection_map;
bool output_hash_mode = false;
bool output_result_mode = false;
bool debug_mode = false;
atomic<bool> finished_processing_file;
int32_t hash_threshold = 0;
vector<LoopCommand *> active_loops;
duckdb::unique_ptr<Command> top_level_loop;
bool original_sqlite_test = false;
bool output_sql = false;
bool enable_verification = false;
bool skip_reload = false;
unordered_map<string, string> environment_variables;
string local_extension_repo;
TestConfiguration::ExtensionAutoLoadingMode autoloading_mode;
bool autoinstall_is_checked;

// If these error msgs occur in a test, the test will abort but still count as passed
unordered_set<string> ignore_error_messages = {"HTTP", "Unable to connect"};
// If these error msgs occur in a statement that is expected to fail, the test will fail
unordered_set<string> always_fail_error_messages = {"differs from original result!", "INTERNAL"};

//! The map converting the labels to the hash values
HashLabelMap hash_label_map;
mutex log_lock;

public:
void ExecuteFile(string script);
virtual void LoadDatabase(string dbpath, bool load_extensions);

string ReplaceKeywords(string input);

bool InLoop() {
return !active_loops.empty();
}
void ExecuteCommand(unique_ptr<Command> command);
void Reconnect();
void StartLoop(LoopDefinition loop);
void EndLoop();
string ReplaceLoopIterator(string text, string loop_iterator_name, string replacement);
string LoopReplacement(string text, const vector<LoopDefinition> &loops);
bool ForEachTokenReplace(const string &parameter, vector<string> &result);
static ExtensionLoadResult LoadExtension(DuckDB &db, const std::string &extension);

private:
RequireResult CheckRequire(SQLLogicParser &parser, const vector<string> &params);
};

} // namespace duckdb
9
external/duckdb/test/sqlite/tags/tags-1-2-3.test_slow
vendored
Normal file
@@ -0,0 +1,9 @@
# name: test/sqlite/tags/tags-1-2-3.test_slow
# group: [tags]

tags 1 2 3

require-env VALIDATE_TAGS

statement ok
SELECT 'tagged 1 2 3';
9
external/duckdb/test/sqlite/tags/tags-1-2.test
vendored
Normal file
@@ -0,0 +1,9 @@
# name: test/sqlite/tags/tags-1-2.test
# group: [tags]

tags 1 2

require-env VALIDATE_TAGS

statement ok
SELECT 'tagged 1 2';
9
external/duckdb/test/sqlite/tags/tags-1.test
vendored
Normal file
@@ -0,0 +1,9 @@
# name: test/sqlite/tags/tags-1.test
# group: [tags]

tags 1

require-env VALIDATE_TAGS

statement ok
SELECT 'tagged 1';
9
external/duckdb/test/sqlite/tags/tags-a.test
vendored
Normal file
@@ -0,0 +1,9 @@
# name: test/sqlite/tags/tags-a.test
# group: [tags]

tags a

require-env VALIDATE_TAGS

statement ok
SELECT 'tagged a';
494
external/duckdb/test/sqlite/termcolor.hpp
vendored
Normal file
@@ -0,0 +1,494 @@
|
||||
//!
|
||||
//! termcolor
|
||||
//! ~~~~~~~~~
|
||||
//!
|
||||
//! termcolor is a header-only c++ library for printing colored messages
|
||||
//! to the terminal. Written just for fun with a help of the Force.
|
||||
//!
|
||||
//! :copyright: (c) 2013 by Ihor Kalnytskyi
|
||||
//! :license: BSD, see LICENSE for details
|
||||
//!
|
||||
|
||||
#ifndef TERMCOLOR_HPP_
|
||||
#define TERMCOLOR_HPP_
|
||||
|
||||
// the following snippet of code detects the current OS and
|
||||
// defines the appropriate macro that is used to wrap some
|
||||
// platform specific things
|
||||
#if defined(_WIN32) || defined(_WIN64)
|
||||
#define TERMCOLOR_OS_WINDOWS
|
||||
#elif defined(__APPLE__)
|
||||
#define TERMCOLOR_OS_MACOS
|
||||
#elif defined(__unix__) || defined(__unix) || defined(__MVS__)
|
||||
#define TERMCOLOR_OS_LINUX
|
||||
#else
|
||||
#error unsupported platform
|
||||
#endif
|
||||
|
||||
// This headers provides the `isatty()`/`fileno()` functions,
|
||||
// which are used for testing whether a standard stream refers
|
||||
// to the terminal. As for Windows, we also need WinApi funcs
|
||||
// for changing colors attributes of the terminal.
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
#include <unistd.h>
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#include <io.h>
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdio>
|
||||
|
||||
namespace termcolor {
|
||||
// Forward declaration of the `_internal` namespace.
|
||||
// All comments are below.
|
||||
namespace _internal {
|
||||
// An index to be used to access a private storage of I/O streams. See
|
||||
// colorize / nocolorize I/O manipulators for details.
|
||||
static int colorize_index = std::ios_base::xalloc();
|
||||
|
||||
inline FILE *get_standard_stream(const std::ostream &stream);
|
||||
inline bool is_colorized(std::ostream &stream);
|
||||
inline bool is_atty(const std::ostream &stream);
|
||||
|
||||
#if defined(TERMCOLOR_OS_WINDOWS)
|
||||
inline void win_change_attributes(std::ostream &stream, int foreground, int background = -1);
|
||||
#endif
|
||||
} // namespace _internal
|
||||
|
||||
inline std::ostream &colorize(std::ostream &stream) {
|
||||
stream.iword(_internal::colorize_index) = 1L;
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &nocolorize(std::ostream &stream) {
|
||||
stream.iword(_internal::colorize_index) = 0L;
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &reset(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[00m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, -1);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &bold(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[1m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &dark(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[2m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &italic(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[3m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &underline(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[4m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &blink(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[5m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &reverse(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[7m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &concealed(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[8m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &crossed(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[9m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
template <uint8_t code> inline std::ostream &color(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
char command[12];
|
||||
std::snprintf(command, sizeof(command), "\033[38;5;%dm", code);
|
||||
stream << command;
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
template <uint8_t code> inline std::ostream &on_color(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
char command[12];
|
||||
std::snprintf(command, sizeof(command), "\033[48;5;%dm", code);
|
||||
stream << command;
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
template <uint8_t r, uint8_t g, uint8_t b> inline std::ostream &color(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
char command[20];
|
||||
std::snprintf(command, sizeof(command), "\033[38;2;%d;%d;%dm", r, g, b);
|
||||
stream << command;
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
template <uint8_t r, uint8_t g, uint8_t b> inline std::ostream &on_color(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
char command[20];
|
||||
std::snprintf(command, sizeof(command), "\033[48;2;%d;%d;%dm", r, g, b);
|
||||
stream << command;
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &grey(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[30m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream,
|
||||
0 // grey (black)
|
||||
);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &red(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[31m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, FOREGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &green(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[32m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, FOREGROUND_GREEN);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &yellow(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[33m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, FOREGROUND_GREEN | FOREGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &blue(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[34m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, FOREGROUND_BLUE);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &magenta(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[35m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, FOREGROUND_BLUE | FOREGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &cyan(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[36m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, FOREGROUND_BLUE | FOREGROUND_GREEN);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &white(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[37m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_grey(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[40m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1,
|
||||
0 // grey (black)
|
||||
);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_red(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[41m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, BACKGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_green(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[42m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, BACKGROUND_GREEN);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_yellow(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[43m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, BACKGROUND_GREEN | BACKGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_blue(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[44m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, BACKGROUND_BLUE);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_magenta(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[45m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, BACKGROUND_BLUE | BACKGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_cyan(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[46m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, BACKGROUND_GREEN | BACKGROUND_BLUE);
|
||||
#endif
|
||||
}
|
||||
return stream;
|
||||
}
|
||||
|
||||
inline std::ostream &on_white(std::ostream &stream) {
|
||||
if (_internal::is_colorized(stream)) {
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
stream << "\033[47m";
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
_internal::win_change_attributes(stream, -1, BACKGROUND_GREEN | BACKGROUND_BLUE | BACKGROUND_RED);
|
||||
#endif
|
||||
}
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
//! Since C++ hasn't a way to hide something in the header from
|
||||
//! the outer access, I have to introduce this namespace which
|
||||
//! is used for internal purpose and should't be access from
|
||||
//! the user code.
|
||||
namespace _internal {
|
||||
//! Since C++ hasn't a true way to extract stream handler
|
||||
//! from the a given `std::ostream` object, I have to write
|
||||
//! this kind of hack.
|
||||
inline FILE *get_standard_stream(const std::ostream &stream) {
|
||||
if (&stream == &std::cout)
|
||||
return stdout;
|
||||
else if ((&stream == &std::cerr) || (&stream == &std::clog))
|
||||
return stderr;
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Say whether a given stream should be colorized or not. It's always
|
||||
// true for ATTY streams and may be true for streams marked with
|
||||
// colorize flag.
|
||||
inline bool is_colorized(std::ostream &stream) {
|
||||
return is_atty(stream) || static_cast<bool>(stream.iword(colorize_index));
|
||||
}
|
||||
|
||||
//! Test whether a given `std::ostream` object refers to
|
||||
//! a terminal.
|
||||
inline bool is_atty(const std::ostream &stream) {
|
||||
FILE *std_stream = get_standard_stream(stream);
|
||||
|
||||
// Unfortunately, fileno() ends with segmentation fault
|
||||
// if invalid file descriptor is passed. So we need to
|
||||
// handle this case gracefully and assume it's not a tty
|
||||
// if standard stream is not detected, and 0 is returned.
|
||||
if (!std_stream)
|
||||
return false;
|
||||
|
||||
#if defined(TERMCOLOR_OS_MACOS) || defined(TERMCOLOR_OS_LINUX)
|
||||
return ::isatty(fileno(std_stream));
|
||||
#elif defined(TERMCOLOR_OS_WINDOWS)
|
||||
return ::_isatty(_fileno(std_stream));
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(TERMCOLOR_OS_WINDOWS)
|
||||
//! Change Windows Terminal colors attribute. If some
|
||||
//! parameter is `-1` then attribute won't changed.
|
||||
inline void win_change_attributes(std::ostream &stream, int foreground, int background) {
|
||||
// yeah, i know.. it's ugly, it's windows.
|
||||
static WORD defaultAttributes = 0;
|
||||
|
||||
// Windows doesn't have ANSI escape sequences and so we use special
|
||||
// API to change Terminal output color. That means we can't
|
||||
// manipulate colors by means of "std::stringstream" and hence
|
||||
// should do nothing in this case.
|
||||
if (!_internal::is_atty(stream))
|
||||
return;
|
||||
|
||||
// get terminal handle
|
||||
HANDLE hTerminal = INVALID_HANDLE_VALUE;
|
||||
if (&stream == &std::cout)
|
||||
hTerminal = GetStdHandle(STD_OUTPUT_HANDLE);
|
||||
else if (&stream == &std::cerr)
|
||||
hTerminal = GetStdHandle(STD_ERROR_HANDLE);
|
||||
|
||||
// save default terminal attributes if it unsaved
|
||||
if (!defaultAttributes) {
|
||||
CONSOLE_SCREEN_BUFFER_INFO info;
|
||||
if (!GetConsoleScreenBufferInfo(hTerminal, &info))
|
||||
return;
|
||||
defaultAttributes = info.wAttributes;
|
||||
}
|
||||
|
||||
// restore all default settings
|
||||
if (foreground == -1 && background == -1) {
|
||||
SetConsoleTextAttribute(hTerminal, defaultAttributes);
|
||||
return;
|
||||
}
|
||||
|
||||
// get current settings
|
||||
CONSOLE_SCREEN_BUFFER_INFO info;
|
||||
if (!GetConsoleScreenBufferInfo(hTerminal, &info))
|
||||
return;
|
||||
|
||||
if (foreground != -1) {
|
||||
info.wAttributes &= ~(info.wAttributes & 0x0F);
|
||||
info.wAttributes |= static_cast<WORD>(foreground);
|
||||
}
|
||||
|
||||
if (background != -1) {
|
||||
info.wAttributes &= ~(info.wAttributes & 0xF0);
|
||||
info.wAttributes |= static_cast<WORD>(background);
|
||||
}
|
||||
|
||||
SetConsoleTextAttribute(hTerminal, info.wAttributes);
|
||||
}
|
||||
#endif // TERMCOLOR_OS_WINDOWS
|
||||
|
||||
} // namespace _internal
|
||||
|
||||
} // namespace termcolor
|
||||
|
||||
#undef TERMCOLOR_OS_WINDOWS
|
||||
#undef TERMCOLOR_OS_MACOS
|
||||
#undef TERMCOLOR_OS_LINUX
|
||||
|
||||
#endif // TERMCOLOR_HPP_
|
||||
267
external/duckdb/test/sqlite/test_sqllogictest.cpp
vendored
Normal file
@@ -0,0 +1,267 @@
|
||||
#include "catch.hpp"
|
||||
#include "duckdb.hpp"
|
||||
#include "duckdb/common/string_util.hpp"
|
||||
#include "duckdb/main/extension/generated_extension_loader.hpp"
|
||||
#include "duckdb/parser/parser.hpp"
|
||||
#include "sqllogic_test_runner.hpp"
|
||||
#include "test_helpers.hpp"
|
||||
#include "test_config.hpp"
|
||||
|
||||
#include <functional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
using namespace duckdb;
|
||||
using namespace std;
|
||||
|
||||
// code below traverses the test directory and makes individual test cases out
|
||||
// of each script
|
||||
static void listFiles(FileSystem &fs, const string &path, std::function<void(const string &)> cb) {
|
||||
fs.ListFiles(path, [&](string fname, bool is_dir) {
|
||||
string full_path = fs.JoinPath(path, fname);
|
||||
if (is_dir) {
|
||||
// recurse into directory
|
||||
listFiles(fs, full_path, cb);
|
||||
} else {
|
||||
cb(full_path);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
static bool endsWith(const string &mainStr, const string &toMatch) {
|
||||
return (mainStr.size() >= toMatch.size() &&
|
||||
mainStr.compare(mainStr.size() - toMatch.size(), toMatch.size(), toMatch) == 0);
|
||||
}
|
||||
|
||||
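// Descriptive note (added for clarity, not part of the upstream source): every
// registered SQLLogic test case runs through testRunner. VERIFICATION turns on
// DuckDB's internal query verification for the whole script, while
// AUTO_SWITCH_TEST_DIR makes the runner chdir to the directory above "test/sql"
// so that out-of-tree extension tests can resolve their relative paths.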
template <bool VERIFICATION, bool AUTO_SWITCH_TEST_DIR = false>
|
||||
static void testRunner() {
|
||||
// this is an ugly hack that uses the test case name to pass the script file
// name; if someone has a better idea...
|
||||
auto name = Catch::getResultCapture().getCurrentTestName();
|
||||
|
||||
auto &test_config = TestConfiguration::Get();
|
||||
|
||||
string initial_dbpath = test_config.GetInitialDBPath();
|
||||
test_config.ProcessPath(initial_dbpath, name);
|
||||
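// If a custom starting database path was configured, pre-create its parent
// directories inside the test directory so the runner can open it
// (comment added for clarity).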
if (!initial_dbpath.empty()) {
|
||||
auto test_path = StringUtil::Replace(initial_dbpath, TestDirectoryPath(), string());
|
||||
test_path = StringUtil::Replace(test_path, "\\", "/");
|
||||
auto components = StringUtil::Split(test_path, "/");
|
||||
components.pop_back();
|
||||
string total_path = TestDirectoryPath();
|
||||
for (auto &component : components) {
|
||||
if (component.empty()) {
|
||||
continue;
|
||||
}
|
||||
total_path = TestJoinPath(total_path, component);
|
||||
TestCreateDirectory(total_path);
|
||||
}
|
||||
}
|
||||
SQLLogicTestRunner runner(std::move(initial_dbpath));
|
||||
runner.output_sql = Catch::getCurrentContext().getConfig()->outputSQL();
|
||||
runner.enable_verification = VERIFICATION;
|
||||
|
||||
// Copy configured env vars
|
||||
for (auto &kv : test_config.GetTestEnvMap()) {
|
||||
runner.environment_variables[kv.first] = kv.second;
|
||||
}
|
||||
|
||||
string prev_directory;
|
||||
|
||||
// We assume the test working dir for extensions to be one directory above test/sql. Note that this is very hacky;
// however, for now it suffices: we use it to run tests from out-of-tree extensions that are based on the extension
// template, which adheres to this convention.
|
||||
if (AUTO_SWITCH_TEST_DIR) {
|
||||
prev_directory = TestGetCurrentDirectory();
|
||||
|
||||
std::size_t found = name.rfind("test/sql");
|
||||
if (found == std::string::npos) {
|
||||
throw InvalidInputException("Failed to auto detect working dir for test '" + name +
|
||||
"' because a non-standard path was used!");
|
||||
}
|
||||
auto test_working_dir = name.substr(0, found);
|
||||
|
||||
// Parse the test dir automatically
|
||||
TestChangeDirectory(test_working_dir);
|
||||
}
|
||||
try {
|
||||
runner.ExecuteFile(name);
|
||||
} catch (...) {
|
||||
// This is to allow cleanup to be executed; the failure has already been logged
|
||||
}
|
||||
|
||||
if (AUTO_SWITCH_TEST_DIR) {
|
||||
TestChangeDirectory(prev_directory);
|
||||
}
|
||||
|
||||
auto on_cleanup = test_config.OnCleanupCommand();
|
||||
if (!on_cleanup.empty()) {
|
||||
// perform clean-up if any is defined
|
||||
try {
|
||||
if (!runner.con) {
|
||||
runner.Reconnect();
|
||||
}
|
||||
auto res = runner.con->Query(on_cleanup);
|
||||
if (res->HasError()) {
|
||||
res->GetErrorObject().Throw();
|
||||
}
|
||||
} catch (std::exception &ex) {
|
||||
string cleanup_failure = "Error while running clean-up routine:\n";
|
||||
ErrorData error(ex);
|
||||
cleanup_failure += error.Message();
|
||||
FAIL(cleanup_failure);
|
||||
}
|
||||
}
|
||||
|
||||
// clear test directory after running tests
|
||||
ClearTestDirectory();
|
||||
}
|
||||
|
||||
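// Worked example (added for illustration, not part of the upstream source):
// ParseGroupFromPath("test/sql/join/test_join.test") takes the parent directory
// between the last two path separators and returns "[join]"; a ".test_slow"
// file additionally gets "[.]" appended, which hides it from the default run.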
static string ParseGroupFromPath(string file) {
|
||||
string extension = "";
|
||||
if (file.find(".test_slow") != std::string::npos) {
|
||||
// "slow" in the name indicates a slow test (i.e. only run as part of allunit)
|
||||
extension = "[.]";
|
||||
}
|
||||
if (file.find(".test_coverage") != std::string::npos) {
|
||||
// "coverage" in the name indicates a coverage test (i.e. only run as part of coverage)
|
||||
return "[coverage][.]";
|
||||
}
|
||||
// move backwards to the last slash
|
||||
int group_begin = -1, group_end = -1;
|
||||
for (idx_t i = file.size(); i > 0; i--) {
|
||||
if (file[i - 1] == '/' || file[i - 1] == '\\') {
|
||||
if (group_end == -1) {
|
||||
group_end = i - 1;
|
||||
} else {
|
||||
group_begin = i;
|
||||
return "[" + file.substr(group_begin, group_end - group_begin) + "]" + extension;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (group_end == -1) {
|
||||
return "[" + file + "]" + extension;
|
||||
}
|
||||
return "[" + file.substr(0, group_end) + "]" + extension;
|
||||
}
|
||||
|
||||
namespace duckdb {
|
||||
|
||||
void RegisterSqllogictests() {
|
||||
vector<string> enable_verification_excludes = {
|
||||
// too slow for verification
|
||||
"test/select5.test",
|
||||
"test/index",
|
||||
// optimization masks int32 overflow
|
||||
"test/random/aggregates/slt_good_102.test",
|
||||
"test/random/aggregates/slt_good_11.test",
|
||||
"test/random/aggregates/slt_good_115.test",
|
||||
"test/random/aggregates/slt_good_116.test",
|
||||
"test/random/aggregates/slt_good_118.test",
|
||||
"test/random/aggregates/slt_good_119.test",
|
||||
"test/random/aggregates/slt_good_122.test",
|
||||
"test/random/aggregates/slt_good_17.test",
|
||||
"test/random/aggregates/slt_good_20.test",
|
||||
"test/random/aggregates/slt_good_23.test",
|
||||
"test/random/aggregates/slt_good_25.test",
|
||||
"test/random/aggregates/slt_good_3.test",
|
||||
"test/random/aggregates/slt_good_30.test",
|
||||
"test/random/aggregates/slt_good_31.test",
|
||||
"test/random/aggregates/slt_good_38.test",
|
||||
"test/random/aggregates/slt_good_39.test",
|
||||
"test/random/aggregates/slt_good_4.test",
|
||||
"test/random/aggregates/slt_good_43.test",
|
||||
"test/random/aggregates/slt_good_46.test",
|
||||
"test/random/aggregates/slt_good_51.test",
|
||||
"test/random/aggregates/slt_good_56.test",
|
||||
"test/random/aggregates/slt_good_66.test",
|
||||
"test/random/aggregates/slt_good_7.test",
|
||||
"test/random/aggregates/slt_good_72.test",
|
||||
"test/random/aggregates/slt_good_82.test",
|
||||
"test/random/aggregates/slt_good_84.test",
|
||||
"test/random/aggregates/slt_good_85.test",
|
||||
"test/random/aggregates/slt_good_91.test",
|
||||
"test/random/expr/slt_good_15.test",
|
||||
"test/random/expr/slt_good_66.test",
|
||||
"test/random/expr/slt_good_91.test",
|
||||
};
|
||||
vector<string> excludes = {
|
||||
// tested separately
|
||||
"test/select1.test", "test/select2.test", "test/select3.test", "test/select4.test",
|
||||
// feature not supported
|
||||
"evidence/slt_lang_replace.test", // INSERT OR REPLACE
|
||||
"evidence/slt_lang_reindex.test", // REINDEX
|
||||
"evidence/slt_lang_update.test", // Multiple assignments to same column "x" in update
|
||||
"evidence/slt_lang_createtrigger.test", // TRIGGER
|
||||
"evidence/slt_lang_droptrigger.test", // TRIGGER
|
||||
// no + for varchar columns
|
||||
"test/index/random/10/slt_good_14.test", "test/index/random/10/slt_good_1.test",
|
||||
"test/index/random/10/slt_good_0.test", "test/index/random/10/slt_good_12.test",
|
||||
"test/index/random/10/slt_good_6.test", "test/index/random/10/slt_good_13.test",
|
||||
"test/index/random/10/slt_good_5.test", "test/index/random/10/slt_good_10.test",
|
||||
"test/index/random/10/slt_good_11.test", "test/index/random/10/slt_good_4.test",
|
||||
"test/index/random/10/slt_good_8.test", "test/index/random/10/slt_good_3.test",
|
||||
"test/index/random/10/slt_good_2.test", "test/index/random/100/slt_good_1.test",
|
||||
"test/index/random/100/slt_good_0.test", "test/index/random/1000/slt_good_0.test",
|
||||
"test/index/random/1000/slt_good_7.test", "test/index/random/1000/slt_good_6.test",
|
||||
"test/index/random/1000/slt_good_5.test", "test/index/random/1000/slt_good_8.test",
|
||||
// overflow in 32-bit integer multiplication (sqlite does automatic upcasting)
|
||||
"test/random/aggregates/slt_good_96.test", "test/random/aggregates/slt_good_75.test",
|
||||
"test/random/aggregates/slt_good_64.test", "test/random/aggregates/slt_good_9.test",
|
||||
"test/random/aggregates/slt_good_110.test", "test/random/aggregates/slt_good_101.test",
|
||||
"test/random/expr/slt_good_55.test", "test/random/expr/slt_good_115.test", "test/random/expr/slt_good_103.test",
|
||||
"test/random/expr/slt_good_80.test", "test/random/expr/slt_good_75.test", "test/random/expr/slt_good_42.test",
|
||||
"test/random/expr/slt_good_49.test", "test/random/expr/slt_good_24.test", "test/random/expr/slt_good_30.test",
|
||||
"test/random/expr/slt_good_8.test", "test/random/expr/slt_good_61.test",
|
||||
// dependencies between tables/views prevent dropping in DuckDB without CASCADE
|
||||
"test/index/view/1000/slt_good_0.test", "test/index/view/100/slt_good_0.test",
|
||||
"test/index/view/100/slt_good_5.test", "test/index/view/100/slt_good_1.test",
|
||||
"test/index/view/100/slt_good_3.test", "test/index/view/100/slt_good_4.test",
|
||||
"test/index/view/100/slt_good_2.test", "test/index/view/10000/slt_good_0.test",
|
||||
"test/index/view/10/slt_good_5.test", "test/index/view/10/slt_good_7.test",
|
||||
"test/index/view/10/slt_good_1.test", "test/index/view/10/slt_good_3.test",
|
||||
"test/index/view/10/slt_good_4.test", "test/index/view/10/slt_good_6.test",
|
||||
"test/index/view/10/slt_good_2.test",
|
||||
// strange error in hash comparison, results appear correct...
|
||||
"test/index/random/10/slt_good_7.test", "test/index/random/10/slt_good_9.test"};
|
||||
duckdb::unique_ptr<FileSystem> fs = FileSystem::CreateLocal();
|
||||
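// Register every script from the bundled sqllogictest suite as a Catch test
// case, skipping excluded files and disabling query verification for the
// files listed above (comment added for clarity).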
listFiles(*fs, fs->JoinPath(fs->JoinPath("third_party", "sqllogictest"), "test"), [&](const string &path) {
|
||||
if (endsWith(path, ".test")) {
|
||||
for (auto &excl : excludes) {
|
||||
if (path.find(excl) != string::npos) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
bool enable_verification = true;
|
||||
for (auto &excl : enable_verification_excludes) {
|
||||
if (path.find(excl) != string::npos) {
|
||||
enable_verification = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (enable_verification) {
|
||||
REGISTER_TEST_CASE(testRunner<true>, StringUtil::Replace(path, "\\", "/"), "[sqlitelogic][.]");
|
||||
} else {
|
||||
REGISTER_TEST_CASE(testRunner<false>, StringUtil::Replace(path, "\\", "/"), "[sqlitelogic][.]");
|
||||
}
|
||||
}
|
||||
});
|
||||
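// Register DuckDB's own test scripts; the Catch group is derived from the
// parent directory, and slow/coverage variants are hidden from the default
// run (comment added for clarity).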
listFiles(*fs, "test", [&](const string &path) {
|
||||
if (endsWith(path, ".test") || endsWith(path, ".test_slow") || endsWith(path, ".test_coverage")) {
|
||||
// parse the name / group from the test
|
||||
REGISTER_TEST_CASE(testRunner<false>, StringUtil::Replace(path, "\\", "/"), ParseGroupFromPath(path));
|
||||
}
|
||||
});
|
||||
|
||||
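// When extensions are statically linked in, also register their bundled test
// scripts; these switch the working directory automatically, as described
// above (comment added for clarity).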
#if defined(GENERATED_EXTENSION_HEADERS) && GENERATED_EXTENSION_HEADERS && !defined(DUCKDB_AMALGAMATION)
|
||||
for (const auto &extension_test_path : LoadedExtensionTestPaths()) {
|
||||
listFiles(*fs, extension_test_path, [&](const string &path) {
|
||||
if (endsWith(path, ".test") || endsWith(path, ".test_slow") || endsWith(path, ".test_coverage")) {
|
||||
auto fun = testRunner<false, true>;
|
||||
REGISTER_TEST_CASE(fun, StringUtil::Replace(path, "\\", "/"), ParseGroupFromPath(path));
|
||||
}
|
||||
});
|
||||
}
|
||||
#endif
|
||||
}
|
||||
} // namespace duckdb
|
||||
111
external/duckdb/test/sqlite/validate_tags_usage.sh
vendored
Executable file
@@ -0,0 +1,111 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
##
|
||||
# assumes $SCRIPT_DIR/../../build/debug/test/unittest to be ready to run
|
||||
#
|
||||
|
||||
ROOT=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/../.." &>/dev/null && pwd)
|
||||
cd "$ROOT"
|
||||
|
||||
: ${UNITTEST:="build/debug/test/unittest --output-sql=true"}
|
||||
: ${TESTS_SPEC:='test/sqlite/tags/*'}
|
||||
export VALIDATE_TAGS=1
|
||||
|
||||
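# run() invokes the unittest binary with the given tag flags against the tag
# fixture tests and keeps only the marker queries that each selected test
# prints, so expect() can verify exactly which tests were executed
# (comment added for clarity).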
run() {
|
||||
$UNITTEST "$@" "$TESTS_SPEC" 2>&1 | grep "SELECT 'tagged"
|
||||
}
|
||||
|
||||
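# expect() reads run()'s output from stdin; with no arguments it asserts that
# nothing matched, otherwise it asserts that every listed tag marker appears
# (comment added for clarity).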
expect() {
|
||||
output=$(cat)
|
||||
local errs=0
|
||||
if [[ "$#" -eq 0 ]]; then
|
||||
[[ $(echo -n "$output" | wc -l) -eq 0 ]] && {
|
||||
echo -n "✅ - ok"
|
||||
} || {
|
||||
printf "\n ❌ - error - matches found but none expected:\n%s" "$output"
|
||||
}
|
||||
else
|
||||
for elt in "$@"; do
|
||||
echo -n "$output" | grep -q "'tagged $elt'" || {
|
||||
printf "\n ❌ - error - missing %s" "$elt"
|
||||
errs=$(($errs + 1))
|
||||
}
|
||||
done
|
||||
[[ $errs -eq 0 ]] && {
|
||||
echo -n " ✅ - ok"
|
||||
}
|
||||
fi
|
||||
}
|
||||
|
||||
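# test() takes the unittest flags as its first argument and the expected tag
# markers as the remaining arguments, forwarding them to run and expect
# (comment added for clarity).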
test() {
|
||||
local args="$1"
|
||||
shift
|
||||
echo -n "test $args -- "
|
||||
run $args | expect "$@"
|
||||
echo
|
||||
}
|
||||
|
||||
test_select() {
|
||||
# select tags
|
||||
test "--select-tag 1" "1" "1 2" "1 2 3"
|
||||
test "--select-tag-set ['1']" "1" "1 2" "1 2 3"
|
||||
|
||||
test "--select-tag 2" "1 2" "1 2 3"
|
||||
test "--select-tag-set ['2']" "1 2" "1 2 3"
|
||||
|
||||
test "--select-tag 3" "1 2 3"
|
||||
test "--select-tag-set ['3']" "1 2 3"
|
||||
|
||||
test "--select-tag-set ['1','3']" "1 2 3"
|
||||
test "--select-tag-set ['2','3']" "1 2 3"
|
||||
test "--select-tag-set ['1','2']" "1 2" "1 2 3"
|
||||
test "--select-tag-set ['1','2','3']" "1 2 3"
|
||||
test "--select-tag-set ['1','2','3','4']"
|
||||
}
|
||||
|
||||
test_skip() {
|
||||
# skip tags
|
||||
test "--skip-tag 1"
|
||||
test "--skip-tag 1"
|
||||
|
||||
test "--skip-tag 1"
|
||||
test "--skip-tag-set ['1']"
|
||||
|
||||
test "--skip-tag 2" "1"
|
||||
test "--skip-tag-set ['2']" "1"
|
||||
|
||||
test "--skip-tag 3" "1" "1 2"
|
||||
test "--skip-tag-set ['3']" "1" "1 2"
|
||||
|
||||
test "--skip-tag-set ['1','3']" "1" "1 2"
|
||||
test "--skip-tag-set ['2','3']" "1" "1 2"
|
||||
test "--skip-tag-set ['1','2']" "1"
|
||||
test "--skip-tag-set ['1','2','3']" "1" "1 2"
|
||||
test "--skip-tag-set ['1','2','3','a']" "1" "1 2" "1 2 3" "a"
|
||||
}
|
||||
|
||||
test_combo() {
|
||||
# crossover
|
||||
test "--select-tag 1 --skip-tag 2" "1"
|
||||
test "--skip-tag-set ['1','2','3'] --select-tag 2" "1 2"
|
||||
test "--select-tag 1 --skip-tag 1"
|
||||
test "--select-tag noexist --skip-tag 1"
|
||||
test "--select-tag 3 --skip-tag noexist" "1 2 3"
|
||||
|
||||
# confirm BNF behavior
|
||||
test "--select-tag 3 --select-tag a" "1 2 3" "a"
|
||||
test "--skip-tag 3 --skip-tag a" "1" "1 2"
|
||||
}
|
||||
|
||||
test_implicit_env() {
|
||||
test "--select-tag env[VALIDATE_TAGS]" "1" "1" "1 2" "1 2 3" "a"
|
||||
test "--select-tag env[VALIDATE_TAGS]=0"
|
||||
# NOTE: =1 not set because it's a require, not a test-env
|
||||
test "--select-tag env[VALIDATE_TAGS]=1"
|
||||
}
|
||||
|
||||
test "" "1" "1 2" "1 2 3"
|
||||
test_select
|
||||
test_skip
|
||||
test_combo
|
||||
test_implicit_env