should be it

This commit is contained in:
2025-10-24 19:21:19 -05:00
parent a4b23fc57c
commit f09560c7b1
14047 changed files with 3161551 additions and 1 deletions

55
external/duckdb/test/api/CMakeLists.txt vendored Normal file
View File

@@ -0,0 +1,55 @@
# Build configuration for the DuckDB C++ API tests.
# The ADBC tests are excluded when running under clang-tidy.
if(NOT CLANG_TIDY)
  add_subdirectory(adbc)
endif()
# Propagate the thread-less build configuration to the test sources.
if(${DISABLE_THREADS})
  add_definitions(-DDUCKDB_NO_THREADS)
endif()
add_subdirectory(capi)
add_subdirectory(udf_function)
# Sources common to all platforms.
set(TEST_API_OBJECTS
    test_api.cpp
    test_config.cpp
    test_custom_allocator.cpp
    test_extension_setting_autoload.cpp
    test_instance_cache.cpp
    test_results.cpp
    test_reset.cpp
    test_get_table_names.cpp
    test_prepared_api.cpp
    test_table_info.cpp
    test_appender_api.cpp
    test_lifecycle_hooks.cpp
    test_pending_query.cpp
    test_plan_serialization.cpp
    test_relation_api.cpp
    test_query_profiler.cpp
    test_dbdir.cpp
    test_pending_with_parameters.cpp
    test_progress_bar.cpp
    test_uuid.cpp
    test_insertion_order_preserving_map.cpp
    test_bignum.cpp
    test_threads.cpp
    test_windows_header_compatibility.cpp
    test_windows_unicode_path.cpp
    test_object_cache.cpp)
# The read-only test is not supported on Windows.
if(NOT WIN32)
  set(TEST_API_OBJECTS ${TEST_API_OBJECTS} test_read_only.cpp)
endif()
# TPC-H-dependent tests are only built when the TPC-H extension is linked in.
if(DUCKDB_EXTENSION_TPCH_SHOULD_LINK)
  include_directories(../../extension/tpch/include)
  set(TEST_API_OBJECTS
      ${TEST_API_OBJECTS} test_tpch_with_relations.cpp
      test_tpch_with_streaming.cpp
      serialized_plans/test_plan_serialization_bwc.cpp)
endif()
add_library(test_api OBJECT ${TEST_API_OBJECTS})
# Export the object files to the parent scope so they are linked into the
# unified test binary.
set(ALL_OBJECT_FILES
    ${ALL_OBJECT_FILES} $<TARGET_OBJECTS:test_api>
    PARENT_SCOPE)

View File

@@ -0,0 +1,5 @@
# ADBC API test sources, compiled as a unity-build object library.
add_library_unity(test_sql_adbc OBJECT test_adbc.cpp)
# Export the object files to the parent scope so they are linked into the
# unified test binary.
set(ALL_OBJECT_FILES
    ${ALL_OBJECT_FILES} $<TARGET_OBJECTS:test_sql_adbc>
    PARENT_SCOPE)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,32 @@
# C API test sources, compiled as a unity-build object library.
add_library_unity(
  test_sql_capi
  OBJECT
  capi_aggregate_functions.cpp
  capi_custom_type.cpp
  capi_file_system.cpp
  capi_scalar_functions.cpp
  capi_table_functions.cpp
  test_capi.cpp
  test_capi_any_invalid_type.cpp
  test_capi_append_data_chunk.cpp
  test_starting_database.cpp
  test_capi_appender.cpp
  test_capi_arrow.cpp
  test_capi_data_chunk.cpp
  test_capi_extract.cpp
  test_capi_instance_cache.cpp
  test_capi_pending.cpp
  test_capi_prepared.cpp
  test_capi_profiling.cpp
  test_capi_website.cpp
  test_capi_complex_types.cpp
  test_capi_to_decimal.cpp
  test_capi_values.cpp
  test_capi_vector.cpp
  test_capi_replacement_scan.cpp
  test_capi_streaming.cpp
  test_capi_table_description.cpp
  test_without_disabled_functions.cpp)
# Export the object files to the parent scope so they are linked into the
# unified test binary.
set(ALL_OBJECT_FILES
    ${ALL_OBJECT_FILES} $<TARGET_OBJECTS:test_sql_capi>
    PARENT_SCOPE)

View File

@@ -0,0 +1,440 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
// Aggregate state for my_weighted_sum: a running sum plus the number of valid
// (non-NULL) rows folded into it. count == 0 marks "no input seen", which the
// finalize step turns into NULL.
struct WeightedSumState {
	int64_t sum;    // running (optionally weighted) sum of the inputs
	uint64_t count; // number of valid rows accumulated so far
};
// Size callback: tells DuckDB how many bytes to allocate per aggregate state.
idx_t WeightedSumSize(duckdb_function_info info) {
	return sizeof(WeightedSumState);
}
// Init callback: reset a freshly allocated state to the empty aggregate.
void WeightedSumInit(duckdb_function_info info, duckdb_aggregate_state state_p) {
	auto &state = *reinterpret_cast<WeightedSumState *>(state_p);
	state.sum = 0;
	state.count = 0;
}
// Update callback: fold one chunk of input rows into the per-row aggregate
// states. 'states' carries one WeightedSumState* per row (the state of the
// group that row belongs to). With a single argument this behaves like SUM;
// with two arguments it sums value * weight. Rows where any argument is NULL
// are skipped and do not bump the count.
void WeightedSumUpdate(duckdb_function_info info, duckdb_data_chunk input, duckdb_aggregate_state *states) {
	auto state = reinterpret_cast<WeightedSumState **>(states);
	auto row_count = duckdb_data_chunk_get_size(input);
	auto input_vector = duckdb_data_chunk_get_vector(input, 0);
	auto input_data = static_cast<int64_t *>(duckdb_vector_get_data(input_vector));
	auto input_validity = duckdb_vector_get_validity(input_vector);
	if (duckdb_data_chunk_get_column_count(input) == 1) {
		// single argument: plain sum over the valid rows
		for (idx_t i = 0; i < row_count; i++) {
			if (duckdb_validity_row_is_valid(input_validity, i)) {
				state[i]->sum += input_data[i];
				state[i]->count++;
			}
		}
	} else {
		// two arguments: weighted sum; both the value and the weight must be valid
		auto weight_vector = duckdb_data_chunk_get_vector(input, 1);
		auto weight_data = static_cast<int64_t *>(duckdb_vector_get_data(weight_vector));
		auto weight_validity = duckdb_vector_get_validity(weight_vector);
		for (idx_t i = 0; i < row_count; i++) {
			if (duckdb_validity_row_is_valid(input_validity, i) && duckdb_validity_row_is_valid(weight_validity, i)) {
				state[i]->sum += input_data[i] * weight_data[i];
				state[i]->count++;
			}
		}
	}
}
// Combine callback: merge 'count' source states into the matching target
// states pairwise (sums and row counts simply add up).
void WeightedSumCombine(duckdb_function_info info, duckdb_aggregate_state *source_p, duckdb_aggregate_state *target_p,
                        idx_t count) {
	auto src = reinterpret_cast<WeightedSumState **>(source_p);
	auto dst = reinterpret_cast<WeightedSumState **>(target_p);
	for (idx_t row = 0; row < count; ++row) {
		dst[row]->sum += src[row]->sum;
		dst[row]->count += src[row]->count;
	}
}
// Finalize callback: write the final value of 'count' states into 'result'
// starting at 'offset'. Groups that never accumulated a valid row
// (count == 0) produce NULL instead of 0.
void WeightedSumFinalize(duckdb_function_info info, duckdb_aggregate_state *source_p, duckdb_vector result, idx_t count,
                         idx_t offset) {
	auto source = reinterpret_cast<WeightedSumState **>(source_p);
	auto result_data = static_cast<int64_t *>(duckdb_vector_get_data(result));
	duckdb_vector_ensure_validity_writable(result);
	auto result_validity = duckdb_vector_get_validity(result);
	for (idx_t i = 0; i < count; i++) {
		if (source[i]->count == 0) {
			// no valid input rows -> NULL
			duckdb_validity_set_row_invalid(result_validity, offset + i);
		} else {
			result_data[offset + i] = source[i]->sum;
		}
	}
}
// Build a weighted-sum aggregate function with 'parameter_count' BIGINT
// parameters and a BIGINT return type. The calls that pass nullptr (and the
// repeated set_name calls) deliberately exercise the C API's handling of
// invalid/duplicate arguments; the test only relies on them not crashing.
static duckdb_aggregate_function CAPIGetAggregateFunction(duckdb_connection connection, const char *name,
                                                          idx_t parameter_count = 2) {
	// create an aggregate function
	auto function = duckdb_create_aggregate_function();
	// null function / null name arguments should be tolerated; the final valid
	// call establishes the actual name
	duckdb_aggregate_function_set_name(nullptr, name);
	duckdb_aggregate_function_set_name(function, nullptr);
	duckdb_aggregate_function_set_name(function, name);
	duckdb_aggregate_function_set_name(function, name);
	// add 'parameter_count' BIGINT parameters (null arguments again tolerated)
	auto type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_aggregate_function_add_parameter(nullptr, type);
	duckdb_aggregate_function_add_parameter(function, nullptr);
	for (idx_t idx = 0; idx < parameter_count; idx++) {
		duckdb_aggregate_function_add_parameter(function, type);
	}
	// set the return type to bigint
	duckdb_aggregate_function_set_return_type(nullptr, type);
	duckdb_aggregate_function_set_return_type(function, nullptr);
	duckdb_aggregate_function_set_return_type(function, type);
	duckdb_destroy_logical_type(&type);
	// set up the callbacks; the nullptr variants must not crash
	duckdb_aggregate_function_set_functions(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
	duckdb_aggregate_function_set_functions(function, nullptr, nullptr, nullptr, nullptr, nullptr);
	duckdb_aggregate_function_set_functions(function, WeightedSumSize, WeightedSumInit, WeightedSumUpdate,
	                                        WeightedSumCombine, WeightedSumFinalize);
	return function;
}
// Register the weighted-sum aggregate under 'name' and verify the expected
// registration outcome. The function is destroyed twice, plus a nullptr
// destroy, to verify that destruction is idempotent and null-safe.
static void CAPIRegisterWeightedSum(duckdb_connection connection, const char *name, duckdb_state expected_outcome) {
	duckdb_state status;
	// create an aggregate function
	auto function = CAPIGetAggregateFunction(connection, name);
	// register and cleanup
	status = duckdb_register_aggregate_function(connection, function);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_aggregate_function(&function);
	duckdb_destroy_aggregate_function(&function);
	duckdb_destroy_aggregate_function(nullptr);
}
// Bundle of aggregate callbacks passed through extra_info so the generic
// Callback* wrappers below can forward to them at runtime. Used to test
// duckdb_aggregate_function_get_extra_info.
struct CAPICallbacks {
	duckdb_aggregate_state_size state_size; // per-state allocation size
	duckdb_aggregate_init_t init;           // state initialization
	duckdb_aggregate_update_t update;       // fold a chunk into the states
	duckdb_aggregate_combine_t combine;     // merge partial states
	duckdb_aggregate_finalize_t finalize;   // emit final values
};
// Forward the state-size query to the callbacks stored in extra_info.
idx_t CallbackSize(duckdb_function_info info) {
	auto callbacks = static_cast<CAPICallbacks *>(duckdb_aggregate_function_get_extra_info(info));
	return callbacks->state_size(info);
}
// Forward state initialization to the callbacks stored in extra_info.
void CallbackInit(duckdb_function_info info, duckdb_aggregate_state state_p) {
	auto callbacks = static_cast<CAPICallbacks *>(duckdb_aggregate_function_get_extra_info(info));
	callbacks->init(info, state_p);
}
// Forward the update step to the callbacks stored in extra_info.
void CallbackUpdate(duckdb_function_info info, duckdb_data_chunk input, duckdb_aggregate_state *states) {
	auto callbacks = static_cast<CAPICallbacks *>(duckdb_aggregate_function_get_extra_info(info));
	callbacks->update(info, input, states);
}
// Forward the combine step to the callbacks stored in extra_info.
void CallbackCombine(duckdb_function_info info, duckdb_aggregate_state *source_p, duckdb_aggregate_state *target_p,
                     idx_t count) {
	auto callbacks = static_cast<CAPICallbacks *>(duckdb_aggregate_function_get_extra_info(info));
	callbacks->combine(info, source_p, target_p, count);
}
// Forward the finalize step to the callbacks stored in extra_info.
void CallbackFinalize(duckdb_function_info info, duckdb_aggregate_state *source_p, duckdb_vector result, idx_t count,
                      idx_t offset) {
	auto callbacks = static_cast<CAPICallbacks *>(duckdb_aggregate_function_get_extra_info(info));
	callbacks->finalize(info, source_p, result, count, offset);
}
// Register the weighted-sum aggregate, but route every callback through the
// generic Callback* wrappers and a heap-allocated CAPICallbacks struct stored
// as extra_info. Ownership of the struct transfers to DuckDB, which releases
// it via the 'free' destructor passed to set_extra_info.
static void CAPIRegisterWeightedSumExtraInfo(duckdb_connection connection, const char *name,
                                             duckdb_state expected_outcome) {
	duckdb_state status;
	// create an aggregate function
	auto function = duckdb_create_aggregate_function();
	duckdb_aggregate_function_set_name(function, name);
	// add two bigint parameters
	auto type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_aggregate_function_add_parameter(function, type);
	duckdb_aggregate_function_add_parameter(function, type);
	// set the return type to bigint
	duckdb_aggregate_function_set_return_type(function, type);
	duckdb_destroy_logical_type(&type);
	// fill the callback table that the wrappers will look up at runtime
	auto callback_ptr = malloc(sizeof(CAPICallbacks));
	auto callback_struct = (CAPICallbacks *)callback_ptr;
	callback_struct->state_size = WeightedSumSize;
	callback_struct->init = WeightedSumInit;
	callback_struct->update = WeightedSumUpdate;
	callback_struct->combine = WeightedSumCombine;
	callback_struct->finalize = WeightedSumFinalize;
	duckdb_aggregate_function_set_extra_info(function, callback_ptr, free);
	// set up the function
	duckdb_aggregate_function_set_functions(function, CallbackSize, CallbackInit, CallbackUpdate, CallbackCombine,
	                                        CallbackFinalize);
	// register and cleanup
	status = duckdb_register_aggregate_function(connection, function);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_aggregate_function(&function);
}
// Run the same functional checks against both registration styles (direct
// callbacks and extra_info-routed callbacks): duplicate registration fails,
// NULL handling, ungrouped and grouped aggregation.
TEST_CASE("Test Aggregate Functions C API", "[capi]") {
	typedef void (*register_function_t)(duckdb_connection, const char *, duckdb_state);
	duckdb::vector<register_function_t> register_functions {CAPIRegisterWeightedSum, CAPIRegisterWeightedSumExtraInfo};
	for (auto &register_function : register_functions) {
		CAPITester tester;
		duckdb::unique_ptr<CAPIResult> result;
		REQUIRE(tester.OpenDatabase(nullptr));
		register_function(tester.connection, "my_weighted_sum", DuckDBSuccess);
		// try to register it again - this should be an error
		register_function(tester.connection, "my_weighted_sum", DuckDBError);
		// now call it
		result = tester.Query("SELECT my_weighted_sum(40, 2)");
		REQUIRE_NO_FAIL(*result);
		REQUIRE(result->Fetch<int64_t>(0, 0) == 80);
		// a NULL in either argument makes the row a no-op; no rows -> NULL result
		result = tester.Query("SELECT my_weighted_sum(40, NULL)");
		REQUIRE_NO_FAIL(*result);
		REQUIRE(result->IsNull(0, 0));
		result = tester.Query("SELECT my_weighted_sum(NULL, 2)");
		REQUIRE_NO_FAIL(*result);
		REQUIRE(result->IsNull(0, 0));
		// 2 * sum(0..99) = 2 * 4950 = 9900
		result = tester.Query("SELECT my_weighted_sum(i, 2) FROM range(100) t(i)");
		REQUIRE_NO_FAIL(*result);
		REQUIRE(result->Fetch<int64_t>(0, 0) == 9900);
		// grouped: evens sum to 2*2450=4900, odds to 2*2500=5000
		result = tester.Query("SELECT i % 2 AS gr, my_weighted_sum(i, 2) FROM range(100) t(i) GROUP BY gr ORDER BY gr");
		REQUIRE_NO_FAIL(*result);
		REQUIRE(result->Fetch<int64_t>(0, 0) == 0);
		REQUIRE(result->Fetch<int64_t>(1, 0) == 4900);
		REQUIRE(result->Fetch<int64_t>(0, 1) == 1);
		REQUIRE(result->Fetch<int64_t>(1, 1) == 5000);
	}
}
// Aggregate state for repeated_string_agg: an owned malloc'd buffer and its
// length. data == nullptr means no valid input was seen (finalizes to NULL).
// The buffer is freed by the destructor callback, not by finalize.
struct RepeatedStringAggState {
	char *data; // malloc'd, null-terminated concatenation result (or nullptr)
	idx_t size; // bytes in data, excluding the trailing '\0'
};
// Size callback: bytes to allocate per aggregate state.
idx_t RepeatedStringAggSize(duckdb_function_info info) {
	return sizeof(RepeatedStringAggState);
}
// Init callback: start with an empty (NULL-producing) state.
void RepeatedStringAggInit(duckdb_function_info info, duckdb_aggregate_state state_p) {
	auto &state = *reinterpret_cast<RepeatedStringAggState *>(state_p);
	state.data = nullptr;
	state.size = 0;
}
// Update callback: for each valid (string, weight) row, append 'weight'
// copies of the string to that row's state buffer. Rows with a NULL in
// either argument are skipped; a negative weight reports an error through
// the function info and aborts the chunk. The buffer is grown manually with
// malloc/memcpy/free and kept null-terminated.
void RepeatedStringAggUpdate(duckdb_function_info info, duckdb_data_chunk input, duckdb_aggregate_state *states) {
	auto state = reinterpret_cast<RepeatedStringAggState **>(states);
	auto row_count = duckdb_data_chunk_get_size(input);
	auto input_vector = duckdb_data_chunk_get_vector(input, 0);
	auto input_data = static_cast<duckdb_string_t *>(duckdb_vector_get_data(input_vector));
	auto input_validity = duckdb_vector_get_validity(input_vector);
	auto weight_vector = duckdb_data_chunk_get_vector(input, 1);
	auto weight_data = static_cast<int64_t *>(duckdb_vector_get_data(weight_vector));
	auto weight_validity = duckdb_vector_get_validity(weight_vector);
	for (idx_t i = 0; i < row_count; i++) {
		// skip rows where either argument is NULL
		if (!duckdb_validity_row_is_valid(input_validity, i) || !duckdb_validity_row_is_valid(weight_validity, i)) {
			continue;
		}
		auto length = duckdb_string_t_length(input_data[i]);
		auto data = duckdb_string_t_data(input_data + i);
		auto weight = weight_data[i];
		if (weight < 0) {
			duckdb_aggregate_function_set_error(info, "Weight must be >= 0");
			return;
		}
		// grow the buffer: old content + 'weight' copies of the string + '\0'
		auto new_data = (char *)malloc(state[i]->size + length * weight + 1);
		if (state[i]->size > 0) {
			memcpy((void *)(new_data), state[i]->data, state[i]->size);
		}
		if (state[i]->data) {
			free((void *)(state[i]->data));
		}
		idx_t offset = state[i]->size;
		for (idx_t rep_idx = 0; rep_idx < static_cast<idx_t>(weight); rep_idx++) {
			memcpy((void *)(new_data + offset), data, length);
			offset += length;
		}
		state[i]->data = new_data;
		state[i]->size = offset;
		// keep the buffer null-terminated
		state[i]->data[state[i]->size] = '\0';
	}
}
// Combine callback: concatenate each source state's buffer onto the matching
// target state's buffer. Source buffers are left intact; the destructor
// callback frees them later.
void RepeatedStringAggCombine(duckdb_function_info info, duckdb_aggregate_state *source_p,
                              duckdb_aggregate_state *target_p, idx_t count) {
	auto source = reinterpret_cast<RepeatedStringAggState **>(source_p);
	auto target = reinterpret_cast<RepeatedStringAggState **>(target_p);
	for (idx_t i = 0; i < count; i++) {
		if (source[i]->size == 0) {
			// nothing to append
			continue;
		}
		// allocate room for both strings plus the terminator
		auto new_data = (char *)malloc(target[i]->size + source[i]->size + 1);
		if (target[i]->size > 0) {
			memcpy((void *)new_data, target[i]->data, target[i]->size);
		}
		if (target[i]->data) {
			free((void *)target[i]->data);
		}
		memcpy((void *)(new_data + target[i]->size), source[i]->data, source[i]->size);
		target[i]->data = new_data;
		target[i]->size += source[i]->size;
		target[i]->data[target[i]->size] = '\0';
	}
}
// Finalize callback: emit each group's concatenated string into 'result'
// starting at 'offset'. A state that never received a valid row
// (data == nullptr) becomes NULL.
void RepeatedStringAggFinalize(duckdb_function_info info, duckdb_aggregate_state *source_p, duckdb_vector result,
                               idx_t count, idx_t offset) {
	auto source = reinterpret_cast<RepeatedStringAggState **>(source_p);
	duckdb_vector_ensure_validity_writable(result);
	auto result_validity = duckdb_vector_get_validity(result);
	for (idx_t i = 0; i < count; i++) {
		if (!source[i]->data) {
			duckdb_validity_set_row_invalid(result_validity, offset + i);
		} else {
			// copies the bytes into the result vector; the state keeps ownership
			duckdb_vector_assign_string_element_len(result, offset + i, reinterpret_cast<const char *>(source[i]->data),
			                                        source[i]->size);
		}
	}
}
// Destructor callback: release each state's malloc'd buffer.
void RepeatedStringAggDestructor(duckdb_aggregate_state *states, idx_t count) {
	auto typed_states = reinterpret_cast<RepeatedStringAggState **>(states);
	for (idx_t row = 0; row < count; ++row) {
		auto &state = *typed_states[row];
		if (state.data) {
			free(state.data);
		}
	}
}
// Register repeated_string_agg(VARCHAR, BIGINT) -> VARCHAR, including a
// destructor callback so the per-state malloc'd buffers are released.
static void CAPIRegisterRepeatedStringAgg(duckdb_connection connection) {
	duckdb_state status;
	// create an aggregate function
	auto function = duckdb_create_aggregate_function();
	duckdb_aggregate_function_set_name(function, "repeated_string_agg");
	// add a varchar/bigint parameter
	auto varchar_type = duckdb_create_logical_type(DUCKDB_TYPE_VARCHAR);
	auto bigint_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_aggregate_function_add_parameter(function, varchar_type);
	duckdb_aggregate_function_add_parameter(function, bigint_type);
	// set the return type to varchar
	duckdb_aggregate_function_set_return_type(function, varchar_type);
	duckdb_destroy_logical_type(&varchar_type);
	duckdb_destroy_logical_type(&bigint_type);
	// set up the function
	duckdb_aggregate_function_set_functions(function, RepeatedStringAggSize, RepeatedStringAggInit,
	                                        RepeatedStringAggUpdate, RepeatedStringAggCombine,
	                                        RepeatedStringAggFinalize);
	duckdb_aggregate_function_set_destructor(function, RepeatedStringAggDestructor);
	// register and cleanup
	status = duckdb_register_aggregate_function(connection, function);
	REQUIRE(status == DuckDBSuccess);
	duckdb_destroy_aggregate_function(&function);
}
// Functional checks for repeated_string_agg: repetition, empty strings,
// NULL handling, negative-weight error, and aggregation over many rows.
TEST_CASE("Test String Aggregate Function", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterRepeatedStringAgg(tester.connection);
	// now call it
	result = tester.Query("SELECT repeated_string_agg('x', 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<string>(0, 0) == "xx");
	result = tester.Query("SELECT repeated_string_agg('', 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<string>(0, 0) == "");
	result = tester.Query("SELECT repeated_string_agg('abcdefgh', 3)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<string>(0, 0) == "abcdefghabcdefghabcdefgh");
	// NULL input contributes nothing; with no valid rows the result is NULL
	result = tester.Query("SELECT repeated_string_agg(NULL, 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	// negative weights must raise the error set in the update callback
	REQUIRE_FAIL(tester.Query("SELECT repeated_string_agg('x', -1)"));
	// only multiples of 10 produce text: each of 0,10,...,90 repeated twice
	result = tester.Query(
	    "SELECT repeated_string_agg(CASE WHEN i%10=0 THEN i::VARCHAR ELSE '' END, 2) FROM range(100) t(i)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<string>(0, 0) == "00101020203030404050506060707080809090");
}
// Register two overloads of 'name' through a function set: one with a single
// BIGINT parameter and one with two. The double destroy of the set (plus the
// nullptr destroy) checks that destruction is idempotent and null-safe.
static void CAPIRegisterWeightedSumOverloads(duckdb_connection connection, const char *name,
                                             duckdb_state expected_outcome) {
	duckdb_state status;
	auto function_set = duckdb_create_aggregate_function_set(name);
	// create an aggregate function with 1 parameter
	auto function = CAPIGetAggregateFunction(connection, name, 1);
	duckdb_add_aggregate_function_to_set(function_set, function);
	duckdb_destroy_aggregate_function(&function);
	// create an aggregate function with 2 parameters
	function = CAPIGetAggregateFunction(connection, name, 2);
	duckdb_add_aggregate_function_to_set(function_set, function);
	duckdb_destroy_aggregate_function(&function);
	// register and cleanup
	status = duckdb_register_aggregate_function_set(connection, function_set);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_aggregate_function_set(&function_set);
	duckdb_destroy_aggregate_function_set(&function_set);
	duckdb_destroy_aggregate_function_set(nullptr);
}
// Verify that both overloads registered in the function set resolve: the
// one-argument form sums the values, the two-argument form applies weights.
TEST_CASE("Test Aggregate Function Overloads C API", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterWeightedSumOverloads(tester.connection, "my_weighted_sum", DuckDBSuccess);
	// try to register it again - this should be an error
	CAPIRegisterWeightedSumOverloads(tester.connection, "my_weighted_sum", DuckDBError);
	// now call it
	result = tester.Query("SELECT my_weighted_sum(40)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 40);
	result = tester.Query("SELECT my_weighted_sum(40, 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 80);
}

View File

@@ -0,0 +1,260 @@
#include "capi_tester.hpp"
#include <cstdio>
using namespace duckdb;
using namespace std;
// Register an aliased logical type built on 'duckdb_type' and verify the
// expected outcome. The double destroy (plus the nullptr destroy) checks
// that type destruction is idempotent and null-safe.
static void CAPIRegisterCustomType(duckdb_connection connection, const char *name, duckdb_type duckdb_type,
                                   duckdb_state expected_outcome) {
	duckdb_state status;
	auto base_type = duckdb_create_logical_type(duckdb_type);
	duckdb_logical_type_set_alias(base_type, name);
	status = duckdb_register_logical_type(connection, base_type, nullptr);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_logical_type(&base_type);
	duckdb_destroy_logical_type(&base_type);
	duckdb_destroy_logical_type(nullptr);
}
// Scalar function body for vec3d_add: element-wise addition of two VEC3D
// (FLOAT[3] array) columns. The array children are laid out contiguously, so
// row i occupies flat indices [i*3, i*3+3).
static void Vec3DAddFunction(duckdb_function_info info, duckdb_data_chunk input, duckdb_vector output) {
	const auto row_count = duckdb_data_chunk_get_size(input);
	const auto lhs_vector = duckdb_data_chunk_get_vector(input, 0);
	const auto rhs_vector = duckdb_data_chunk_get_vector(input, 1);
	const auto lhs_data = static_cast<float *>(duckdb_vector_get_data(duckdb_array_vector_get_child(lhs_vector)));
	const auto rhs_data = static_cast<float *>(duckdb_vector_get_data(duckdb_array_vector_get_child(rhs_vector)));
	const auto out_data = static_cast<float *>(duckdb_vector_get_data(duckdb_array_vector_get_child(output)));
	// one flat pass over all row_count * 3 float components
	const auto total = row_count * 3;
	for (idx_t idx = 0; idx < total; idx++) {
		out_data[idx] = lhs_data[idx] + rhs_data[idx];
	}
}
// Cast VEC3D (FLOAT[3]) -> VARCHAR by formatting each row as "<x, y, z>".
// Always succeeds, so it returns true unconditionally.
static bool Vec3DToVarcharCastFunction(duckdb_function_info info, idx_t count, duckdb_vector input,
                                       duckdb_vector output) {
	const auto input_data = static_cast<float *>(duckdb_vector_get_data(duckdb_array_vector_get_child(input)));
	for (idx_t i = 0; i < count; i++) {
		const auto x = input_data[i * 3];
		const auto y = input_data[i * 3 + 1];
		const auto z = input_data[i * 3 + 2];
		const auto res = StringUtil::Format("<%f, %f, %f>", x, y, z);
		duckdb_vector_assign_string_element_len(output, i, res.c_str(), res.size());
	}
	return true;
}
// Parse a string of the form "<x, y, z>" into three float components.
// Returns true only when the entire input (exactly 'len' bytes) is consumed
// and well-formed; on failure the outputs may be partially written.
//
// The input is NOT guaranteed to be null-terminated (it comes from a
// duckdb_string_t payload), but std::strtof requires a C string and could
// otherwise read — and even parse digits — past 'len'. We therefore copy the
// bytes into a null-terminated std::string and parse the copy; the parsing
// logic itself is unchanged.
static bool TryParseVec3D(char *str, idx_t len, float &x, float &y, float &z) {
	std::string buffer(str, len);
	char *cursor = &buffer[0];
	char *end = cursor + len;
	// skip everything up to (and including) the opening '<'
	while (cursor < end && *cursor != '<') {
		cursor++;
	}
	cursor++;
	if (cursor >= end) {
		return false;
	}
	x = std::strtof(cursor, &cursor);
	if (cursor >= end || *cursor != ',') {
		return false;
	}
	cursor++;
	y = std::strtof(cursor, &cursor);
	if (cursor >= end || *cursor != ',') {
		return false;
	}
	cursor++;
	z = std::strtof(cursor, &cursor);
	if (cursor >= end || *cursor != '>') {
		return false;
	}
	cursor++;
	// the closing '>' must be the final character of the input
	return cursor == end;
}
// Cast VARCHAR -> VEC3D (FLOAT[3]) by parsing "<x, y, z>" text. On a parse
// failure the behavior depends on the cast mode: TRY_CAST marks the row
// invalid and keeps going, a strict CAST short-circuits and fails the whole
// cast. Also verifies that the extra_info registered with the cast function
// arrives intact.
static bool Vec3DFromVarcharCastFunction(duckdb_function_info info, idx_t count, duckdb_vector input,
                                         duckdb_vector output) {
	const auto cast_mode = duckdb_cast_function_get_cast_mode(info);
	// For testing purposes, check that we got the custom data
	auto custom_data = reinterpret_cast<string *>(duckdb_cast_function_get_extra_info(info));
	REQUIRE(*custom_data == "foobar");
	const auto input_data = static_cast<duckdb_string_t *>(duckdb_vector_get_data(input));
	const auto output_data = static_cast<float *>(duckdb_vector_get_data(duckdb_array_vector_get_child(output)));
	bool success = true;
	for (idx_t i = 0; i < count; i++) {
		auto x = 0.0f, y = 0.0f, z = 0.0f;
		auto str = input_data[i];
		auto str_len = duckdb_string_t_length(str);
		// inlined strings live in the struct itself, longer ones behind a pointer;
		// note the bytes are not necessarily null-terminated
		char *str_ptr = duckdb_string_is_inlined(str) ? str.value.inlined.inlined : str.value.pointer.ptr;
		if (TryParseVec3D(str_ptr, str_len, x, y, z)) {
			// Success
			output_data[i * 3] = x;
			output_data[i * 3 + 1] = y;
			output_data[i * 3 + 2] = z;
		} else {
			// Error
			duckdb_cast_function_set_row_error(info, "Failed to parse VEC3D", i, output);
			if (cast_mode == DUCKDB_CAST_TRY) {
				// Try cast, continue with the next row
				success = false;
			} else {
				// Strict cast, short-circuit and return false
				return false;
			}
		}
	}
	return success;
}
// Registration-only checks: invalid base types and duplicate names must be
// rejected, and a successfully registered type shows up in duckdb_types.
TEST_CASE("Test Custom Type Registration", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	// try to register a custom type with an invalid base type
	CAPIRegisterCustomType(tester.connection, "NUMBER", DUCKDB_TYPE_INVALID, DuckDBError);
	// try to register a custom type with a valid base type
	CAPIRegisterCustomType(tester.connection, "NUMBER", DUCKDB_TYPE_INTEGER, DuckDBSuccess);
	// try to register it again - this should be an error
	CAPIRegisterCustomType(tester.connection, "NUMBER", DUCKDB_TYPE_INTEGER, DuckDBError);
	// check that it is in the catalog
	result = tester.Query("SELECT type_name FROM duckdb_types WHERE type_name = 'NUMBER'");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 1);
	REQUIRE(result->Fetch<string>(0, 0) == "NUMBER");
}
// End-to-end custom type test: register VEC3D (an alias for FLOAT[3]), a
// scalar function over it, and casts in both directions; then exercise
// binding strictness, both cast directions, and TRY_CAST error handling.
TEST_CASE("Test Custom Type Function", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	// Register a custom type: VEC3D, an alias for the FLOAT[3] array type
	auto element_type = duckdb_create_logical_type(DUCKDB_TYPE_FLOAT);
	auto vector_type = duckdb_create_array_type(element_type, 3);
	duckdb_logical_type_set_alias(vector_type, "VEC3D");
	REQUIRE(duckdb_register_logical_type(tester.connection, vector_type, nullptr) == DuckDBSuccess);
	// Register a scalar function that adds two vectors
	auto function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(function, "vec3d_add");
	duckdb_scalar_function_set_return_type(function, vector_type);
	duckdb_scalar_function_add_parameter(function, vector_type);
	duckdb_scalar_function_add_parameter(function, vector_type);
	duckdb_scalar_function_set_function(function, Vec3DAddFunction);
	REQUIRE(duckdb_register_scalar_function(tester.connection, function) == DuckDBSuccess);
	// Also add a cast function to convert VEC3D to VARCHAR
	auto varchar_type = duckdb_create_logical_type(DUCKDB_TYPE_VARCHAR);
	auto to_varchar_cast_function = duckdb_create_cast_function();
	duckdb_cast_function_set_implicit_cast_cost(to_varchar_cast_function, 0);
	duckdb_cast_function_set_source_type(to_varchar_cast_function, vector_type);
	duckdb_cast_function_set_target_type(to_varchar_cast_function, varchar_type);
	duckdb_cast_function_set_function(to_varchar_cast_function, Vec3DToVarcharCastFunction);
	REQUIRE(duckdb_register_cast_function(tester.connection, to_varchar_cast_function) == DuckDBSuccess);
	// ... and the reverse cast, VARCHAR to VEC3D, carrying extra_info that the
	// cast implementation checks for; ownership passes via the delete callback
	auto from_varchar_cast_function = duckdb_create_cast_function();
	duckdb_cast_function_set_implicit_cast_cost(from_varchar_cast_function, 0);
	duckdb_cast_function_set_source_type(from_varchar_cast_function, varchar_type);
	duckdb_cast_function_set_target_type(from_varchar_cast_function, vector_type);
	duckdb_cast_function_set_function(from_varchar_cast_function, Vec3DFromVarcharCastFunction);
	auto cast_custom_data = new string("foobar");
	auto cast_custom_data_delete = [](void *data) {
		delete reinterpret_cast<string *>(data);
	};
	duckdb_cast_function_set_extra_info(from_varchar_cast_function, cast_custom_data, cast_custom_data_delete);
	REQUIRE(duckdb_register_cast_function(tester.connection, from_varchar_cast_function) == DuckDBSuccess);
	// Cleanup the custom type and functions
	duckdb_destroy_scalar_function(&function);
	duckdb_destroy_cast_function(&to_varchar_cast_function);
	duckdb_destroy_cast_function(&from_varchar_cast_function);
	duckdb_destroy_logical_type(&varchar_type);
	duckdb_destroy_logical_type(&element_type);
	duckdb_destroy_logical_type(&vector_type);
	// Ensure that we can free the casts multiple times without issue
	duckdb_destroy_cast_function(&from_varchar_cast_function);
	duckdb_destroy_cast_function(nullptr);
	// Create a table with the custom type
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE vec3d_table (a VEC3D)"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO vec3d_table VALUES ([1.0, 2.0, 3.0]::FLOAT[3])"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO vec3d_table VALUES ([4.0, 5.0, 6.0]::FLOAT[3])"));
	// Query the table
	result = tester.Query("SELECT vec3d_add(a, a) FROM vec3d_table");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 2);
	// Check that the result is correct (each component doubled)
	auto chunk = result->FetchChunk(0);
	auto data = static_cast<float *>(duckdb_vector_get_data(duckdb_array_vector_get_child(chunk->GetVector(0))));
	REQUIRE(data[0] == 2.0f);
	REQUIRE(data[1] == 4.0f);
	REQUIRE(data[2] == 6.0f);
	REQUIRE(data[3] == 8.0f);
	REQUIRE(data[4] == 10.0f);
	REQUIRE(data[5] == 12.0f);
	// But we can't execute the function with a non-VEC3D type
	result = tester.Query("SELECT vec3d_add([0,0,0]::FLOAT[3], [1,1,1]::FLOAT[3])");
	REQUIRE_FAIL(result);
	REQUIRE(result->ErrorType() == DUCKDB_ERROR_BINDER);
	// But we can cast the base type to the custom type
	result = tester.Query("SELECT vec3d_add(CAST([0,0,0] AS VEC3D), CAST([1,1,1] AS VEC3D))");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 1);
	// Speaking of casts, let's test the VEC3D to VARCHAR cast
	result = tester.Query("SELECT CAST(a AS VARCHAR) FROM vec3d_table");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 2);
	// Check that the result is correct
	REQUIRE(result->Fetch<string>(0, 0) == "<1.000000, 2.000000, 3.000000>");
	REQUIRE(result->Fetch<string>(0, 1) == "<4.000000, 5.000000, 6.000000>");
	// Now cast from varchar to VEC3D
	result = tester.Query("SELECT CAST('<1.0, 3.0, 4.0>' AS VEC3D)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 1);
	// Check that the result is correct
	chunk = result->FetchChunk(0);
	data = static_cast<float *>(duckdb_vector_get_data(duckdb_array_vector_get_child(chunk->GetVector(0))));
	REQUIRE(data[0] == 1.0f);
	REQUIRE(data[1] == 3.0f);
	REQUIRE(data[2] == 4.0f);
	// Try a faulty cast: strict CAST must surface a conversion error
	result = tester.Query("SELECT CAST('<1.0, 3.0, abc' AS VEC3D)");
	REQUIRE_FAIL(result);
	REQUIRE(result->ErrorType() == DUCKDB_ERROR_CONVERSION);
	REQUIRE_THAT(result->ErrorMessage(), Catch::Matchers::StartsWith("Conversion Error: Failed to parse VEC3D"));
	// Try a faulty cast with TRY_CAST: the row becomes NULL instead
	result = tester.Query("SELECT TRY_CAST('<1.0, 3.0, abc' AS FLOAT[3]) IS NULL");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 1);
	REQUIRE(result->Fetch<bool>(0, 0) == true);
}

View File

@@ -0,0 +1,159 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
// Exercise the C file-system API end to end against a file in the test
// directory: failed-open error reporting, create+open, write, tell/size,
// sync, seek, read, EOF behavior, reopen, and idempotent destruction of
// handles and options.
//
// Fix: 'file' is now initialized to nullptr. The first open below is expected
// to fail; if the API does not assign the out-parameter on failure, the
// subsequent write on 'file' would otherwise read an indeterminate pointer.
static void test_file_system(duckdb_file_system fs, string file_name) {
	REQUIRE(fs != nullptr);
	duckdb_state state = DuckDBSuccess;
	duckdb_file_handle file = nullptr;
	auto file_path = TestDirectoryPath() + "/" + file_name;
	auto options = duckdb_create_file_open_options();
	state = duckdb_file_open_options_set_flag(options, DUCKDB_FILE_FLAG_WRITE, true);
	REQUIRE(state == DuckDBSuccess);
	state = duckdb_file_open_options_set_flag(options, DUCKDB_FILE_FLAG_READ, true);
	REQUIRE(state == DuckDBSuccess);
	// Try to open non-existing file without create flag: must fail with an IO error
	state = duckdb_file_system_open(fs, file_path.c_str(), options, &file);
	REQUIRE(state != DuckDBSuccess);
	auto error_data = duckdb_file_system_error_data(fs);
	auto error_type = duckdb_error_data_error_type(error_data);
	REQUIRE(error_type == DUCKDB_ERROR_IO);
	duckdb_destroy_error_data(&error_data);
	// Try to write to a null file handle: returns -1, but the (null) handle
	// itself carries no error data
	auto failed_bytes_written = duckdb_file_handle_write(file, "data", 4);
	REQUIRE(failed_bytes_written == -1);
	auto file_error_data = duckdb_file_handle_error_data(file);
	auto has_error = duckdb_error_data_has_error(file_error_data);
	REQUIRE(has_error == false);
	duckdb_destroy_error_data(&file_error_data);
	// Set create flag
	state = duckdb_file_open_options_set_flag(options, DUCKDB_FILE_FLAG_CREATE, true);
	REQUIRE(state == DuckDBSuccess);
	// Create and open a file
	state = duckdb_file_system_open(fs, file_path.c_str(), options, &file);
	REQUIRE(state == DuckDBSuccess);
	REQUIRE(file != nullptr);
	// Write to the file; position and size should both advance to the byte count
	const char *data = "Hello, DuckDB File System!";
	auto bytes_written = duckdb_file_handle_write(file, data, strlen(data));
	REQUIRE(bytes_written == (int64_t)strlen(data));
	auto position = duckdb_file_handle_tell(file);
	REQUIRE(position == bytes_written);
	auto size = duckdb_file_handle_size(file);
	REQUIRE(size == bytes_written);
	// Sync
	state = duckdb_file_handle_sync(file);
	// Seek to the beginning
	state = duckdb_file_handle_seek(file, 0);
	REQUIRE(state == DuckDBSuccess);
	position = duckdb_file_handle_tell(file);
	REQUIRE(position == 0);
	// Read from the file and verify the contents round-tripped
	char buffer[30];
	memset(buffer, 0, sizeof(buffer));
	auto bytes_read = duckdb_file_handle_read(file, buffer, sizeof(buffer) - 1);
	REQUIRE(bytes_read == bytes_written);
	REQUIRE(strcmp(buffer, data) == 0);
	position = duckdb_file_handle_tell(file);
	REQUIRE(position == bytes_read);
	size = duckdb_file_handle_size(file);
	REQUIRE(size == bytes_written);
	// Seek to the end
	state = duckdb_file_handle_seek(file, bytes_written);
	REQUIRE(state == DuckDBSuccess);
	position = duckdb_file_handle_tell(file);
	REQUIRE(position == bytes_written);
	size = duckdb_file_handle_size(file);
	REQUIRE(size == bytes_written);
	// Try to read from the end of the file: zero bytes, position unchanged
	memset(buffer, 0, sizeof(buffer));
	bytes_read = duckdb_file_handle_read(file, buffer, sizeof(buffer) - 1);
	REQUIRE(bytes_read == 0); // EOF
	position = duckdb_file_handle_tell(file);
	REQUIRE(position == bytes_written);
	size = duckdb_file_handle_size(file);
	REQUIRE(size == bytes_written);
	// Seek back to the beginning
	state = duckdb_file_handle_seek(file, 0);
	REQUIRE(state == DuckDBSuccess);
	position = duckdb_file_handle_tell(file);
	REQUIRE(position == 0);
	size = duckdb_file_handle_size(file);
	REQUIRE(size == bytes_written);
	// Close the file
	duckdb_file_handle_close(file);
	duckdb_destroy_file_handle(&file);
	// Open file again for reading
	state = duckdb_file_system_open(fs, file_path.c_str(), options, &file);
	REQUIRE(state == DuckDBSuccess);
	REQUIRE(file != nullptr);
	size = duckdb_file_handle_size(file);
	REQUIRE(size == bytes_written);
	// Check that the data is still there
	memset(buffer, 0, sizeof(buffer));
	bytes_read = duckdb_file_handle_read(file, buffer, sizeof(buffer) - 1);
	REQUIRE(bytes_read == bytes_written);
	REQUIRE(strcmp(buffer, data) == 0);
	position = duckdb_file_handle_tell(file);
	REQUIRE(position == bytes_read);
	size = duckdb_file_handle_size(file);
	REQUIRE(size == bytes_written);
	// Close the file again
	duckdb_file_handle_close(file);
	duckdb_destroy_file_handle(&file);
	duckdb_destroy_file_open_options(&options);
	REQUIRE(file == nullptr);
	REQUIRE(options == nullptr);
	// Try destroy again for good measure: destroying null pointers must be safe
	duckdb_destroy_file_handle(&file);
	duckdb_destroy_file_open_options(&options);
	REQUIRE(file == nullptr);
	REQUIRE(options == nullptr);
}
TEST_CASE("Test File System in C API", "[capi]") {
	// End-to-end check of the file system C API: obtain a file system from the
	// client context, exercise read/write/seek via test_file_system, and verify
	// that destroying an already-nulled handle is a safe no-op.
	CAPITester tester;
	duckdb_client_context context;
	duckdb_file_system fs;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	// get a file system from the client context
	duckdb_connection_get_client_context(tester.connection, &context);
	fs = duckdb_client_context_get_file_system(context);
	REQUIRE(fs != nullptr);
	test_file_system(fs, "test_file_capi_1.txt");
	duckdb_destroy_file_system(&fs);
	REQUIRE(fs == nullptr);
	duckdb_destroy_client_context(&context);
	// Try to destroy fs again - destroying a nulled handle must be a no-op
	duckdb_destroy_file_system(&fs);
	REQUIRE(fs == nullptr);
}

View File

@@ -0,0 +1,646 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
void AddVariadicNumbersTogether(duckdb_function_info, duckdb_data_chunk input, duckdb_vector output) {
	// Sums any number of BIGINT input columns per row.
	// A NULL in any column makes the result row NULL; zero columns yields all-NULL.
	auto row_count = duckdb_data_chunk_get_size(input);
	auto col_count = duckdb_data_chunk_get_column_count(input);

	auto out_data = (int64_t *)duckdb_vector_get_data(output);
	duckdb_vector_ensure_validity_writable(output);
	auto out_validity = duckdb_vector_get_validity(output);

	// early-out: with no input columns, every row becomes NULL
	if (col_count == 0) {
		for (idx_t row = 0; row < row_count; row++) {
			duckdb_validity_set_row_invalid(out_validity, row);
		}
		return;
	}

	// gather the data and validity pointers of each input column
	std::vector<int64_t *> col_data;
	std::vector<uint64_t *> col_validity;
	for (idx_t col = 0; col < col_count; col++) {
		auto vec = duckdb_data_chunk_get_vector(input, col);
		col_data.push_back((int64_t *)duckdb_vector_get_data(vec));
		col_validity.push_back(duckdb_vector_get_validity(vec));
	}

	// sum each row, propagating NULLs
	for (idx_t row = 0; row < row_count; row++) {
		int64_t sum = 0;
		bool has_null = false;
		for (idx_t col = 0; col < col_count; col++) {
			if (!duckdb_validity_row_is_valid(col_validity[col], row)) {
				has_null = true;
				break;
			}
			sum += col_data[col][row];
		}
		if (has_null) {
			duckdb_validity_set_row_invalid(out_validity, row);
		} else {
			out_data[row] = sum;
		}
	}
}
static duckdb_scalar_function CAPIGetScalarFunction(duckdb_connection connection, const char *name,
                                                    idx_t parameter_count = 2) {
	// Builds (but does not register) a scalar function with `parameter_count`
	// BIGINT parameters and a BIGINT return type. The calls with nullptr
	// arguments are deliberate: they verify the C API handles invalid input
	// gracefully. NOTE: `connection` is currently unused.
	auto function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(nullptr, name);     // nullptr function: must be a no-op
	duckdb_scalar_function_set_name(function, nullptr); // nullptr name: must be a no-op
	duckdb_scalar_function_set_name(function, name);
	duckdb_scalar_function_set_name(function, name); // setting the name twice is allowed
	// add `parameter_count` BIGINT parameters
	auto type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_scalar_function_add_parameter(nullptr, type);     // nullptr function: no-op
	duckdb_scalar_function_add_parameter(function, nullptr); // nullptr type: no-op
	for (idx_t idx = 0; idx < parameter_count; idx++) {
		duckdb_scalar_function_add_parameter(function, type);
	}
	// set the return type to bigint
	duckdb_scalar_function_set_return_type(nullptr, type);     // nullptr function: no-op
	duckdb_scalar_function_set_return_type(function, nullptr); // nullptr type: no-op
	duckdb_scalar_function_set_return_type(function, type);
	duckdb_destroy_logical_type(&type);
	// set up the execution callback
	duckdb_scalar_function_set_function(nullptr, AddVariadicNumbersTogether); // nullptr function: no-op
	duckdb_scalar_function_set_function(function, nullptr);                   // nullptr callback: no-op
	duckdb_scalar_function_set_function(function, AddVariadicNumbersTogether);
	return function;
}
static void CAPIRegisterAddition(duckdb_connection connection, const char *name, duckdb_state expected_outcome) {
	// Registers the variadic addition function under `name` and checks the
	// registration outcome against `expected_outcome`.
	duckdb_state status;
	// create a scalar function
	auto function = CAPIGetScalarFunction(connection, name);
	// register and cleanup
	status = duckdb_register_scalar_function(connection, function);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_scalar_function(&function);
	// destroying twice and destroying nullptr must both be safe no-ops
	duckdb_destroy_scalar_function(&function);
	duckdb_destroy_scalar_function(nullptr);
}
TEST_CASE("Test Scalar Functions C API", "[capi]") {
	// Registers a two-argument BIGINT addition function and verifies its result,
	// NULL propagation, and execution over a larger vector of rows.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterAddition(tester.connection, "my_addition", DuckDBSuccess);
	// try to register it again - this should not be an error
	CAPIRegisterAddition(tester.connection, "my_addition", DuckDBSuccess);
	// now call it
	result = tester.Query("SELECT my_addition(40, 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	// a NULL in either argument propagates to a NULL result
	result = tester.Query("SELECT my_addition(40, NULL)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	result = tester.Query("SELECT my_addition(NULL, 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	// call it over a vector of values
	result = tester.Query("SELECT my_addition(1000000, i) FROM range(10000) t(i)");
	REQUIRE_NO_FAIL(*result);
	for (idx_t row = 0; row < 10000; row++) {
		REQUIRE(result->Fetch<int64_t>(0, row) == static_cast<int64_t>(1000000 + row));
	}
}
void ReturnStringInfo(duckdb_function_info info, duckdb_data_chunk input, duckdb_vector output) {
	// Prefixes every input string with the extra-info string plus '_'.
	// NULL inputs produce NULL outputs.
	auto extra_info = string((const char *)duckdb_scalar_function_get_extra_info(info));
	// get the total number of rows in this chunk
	auto input_size = duckdb_data_chunk_get_size(input);
	// extract the single VARCHAR input vector
	auto input_vector = duckdb_data_chunk_get_vector(input, 0);
	// get the string data pointer for the input vector
	auto input_data = (duckdb_string_t *)duckdb_vector_get_data(input_vector);
	// get the validity vectors
	auto input_validity = duckdb_vector_get_validity(input_vector);
	duckdb_vector_ensure_validity_writable(output);
	auto result_validity = duckdb_vector_get_validity(output);
	for (idx_t row = 0; row < input_size; row++) {
		if (duckdb_validity_row_is_valid(input_validity, row)) {
			// not null - concatenate extra_info, '_' and the input string
			auto input_string = input_data[row];
			string result = extra_info + "_";
			// duckdb_string_t stores short strings inline, long strings via pointer
			if (duckdb_string_is_inlined(input_string)) {
				result += string(input_string.value.inlined.inlined, input_string.value.inlined.length);
			} else {
				result += string(input_string.value.pointer.ptr, input_string.value.pointer.length);
			}
			duckdb_vector_assign_string_element_len(output, row, result.c_str(), result.size());
		} else {
			// the input is NULL - set the result row to NULL
			duckdb_validity_set_row_invalid(result_validity, row);
		}
	}
}
static void CAPIRegisterStringInfo(duckdb_connection connection, const char *name, duckdb_function_info info,
                                   duckdb_delete_callback_t destroy_func) {
	// Registers a VARCHAR -> VARCHAR scalar function whose callback reads
	// `info` as extra info; `destroy_func` releases the extra info later.
	auto function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(function, name);

	// single VARCHAR parameter, VARCHAR return type
	auto varchar_type = duckdb_create_logical_type(DUCKDB_TYPE_VARCHAR);
	duckdb_scalar_function_add_parameter(function, varchar_type);
	duckdb_scalar_function_set_return_type(function, varchar_type);
	duckdb_destroy_logical_type(&varchar_type);

	// attach the execution callback and the extra info
	duckdb_scalar_function_set_function(function, ReturnStringInfo);
	duckdb_scalar_function_set_extra_info(function, info, destroy_func);

	// register with the connection and release the local handle
	auto status = duckdb_register_scalar_function(connection, function);
	REQUIRE(status == DuckDBSuccess);
	duckdb_destroy_scalar_function(&function);
}
TEST_CASE("Test Scalar Functions - strings & extra_info", "[capi]") {
	// Verifies that extra info set at registration is visible to the execution
	// callback. `string_data` ownership passes to DuckDB (freed via `free`).
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	auto string_data = reinterpret_cast<char *>(malloc(100));
	strcpy(string_data, "my_prefix");
	auto extra_info = reinterpret_cast<duckdb_function_info>(string_data);
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterStringInfo(tester.connection, "my_prefix", extra_info, free);
	// now call it
	result = tester.Query("SELECT my_prefix('hello_world')");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<string>(0, 0) == "my_prefix_hello_world");
	// NULL input yields NULL output
	result = tester.Query("SELECT my_prefix(NULL)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
}
static void CAPIRegisterVarargsFun(duckdb_connection connection, const char *name, duckdb_state expected_outcome) {
	// Registers a scalar function taking a variable number of BIGINT arguments
	// and returning BIGINT; checks the registration outcome.
	auto function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(function, name);

	// variadic BIGINT arguments, BIGINT return type
	auto bigint_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_scalar_function_set_varargs(function, bigint_type);
	duckdb_scalar_function_set_return_type(function, bigint_type);
	duckdb_destroy_logical_type(&bigint_type);

	// attach the execution callback
	duckdb_scalar_function_set_function(function, AddVariadicNumbersTogether);

	// register with the connection and release the local handle
	auto status = duckdb_register_scalar_function(connection, function);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_scalar_function(&function);
}
TEST_CASE("Test Scalar Functions - variadic number of input parameters", "[capi]") {
	// Exercises the varargs addition function with differing argument counts,
	// NULL propagation, zero arguments, and type-mismatched arguments.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterVarargsFun(tester.connection, "my_addition", DuckDBSuccess);
	result = tester.Query("SELECT my_addition(40, 2, 100, 3)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 145);
	// any NULL argument makes the result NULL
	result = tester.Query("SELECT my_addition(40, 42, NULL)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	result = tester.Query("SELECT my_addition(NULL, 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	// zero arguments: result is NULL (see the early-out in the callback)
	result = tester.Query("SELECT my_addition()");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	// arguments that cannot be cast to BIGINT fail at bind time
	result = tester.Query("SELECT my_addition('hello', [1])");
	REQUIRE_FAIL(result);
}
void CountNULLValues(duckdb_function_info, duckdb_data_chunk input, duckdb_vector output) {
	// Counts, per row, how many input columns are NULL. NULL detection is done
	// twice - once via validity masks and once via the SQLNULL logical type -
	// and both counts are asserted to agree.
	// Get the total number of rows and columns in this chunk.
	auto input_size = duckdb_data_chunk_get_size(input);
	auto column_count = duckdb_data_chunk_get_column_count(input);
	// Extract the validity masks.
	std::vector<uint64_t *> validity_masks;
	for (idx_t col_idx = 0; col_idx < column_count; col_idx++) {
		auto col = duckdb_data_chunk_get_vector(input, col_idx);
		auto validity_mask = duckdb_vector_get_validity(col);
		validity_masks.push_back(validity_mask);
	}
	// Execute the function.
	auto result_data = (uint64_t *)duckdb_vector_get_data(output);
	for (idx_t row_idx = 0; row_idx < input_size; row_idx++) {
		idx_t null_count = 0;
		idx_t other_null_count = 0;
		for (idx_t col_idx = 0; col_idx < column_count; col_idx++) {
			if (!duckdb_validity_row_is_valid(validity_masks[col_idx], row_idx)) {
				null_count++;
			}
			// Alternative code path using SQLNULL. Intentionally re-fetches the
			// vector and its type inside the row loop to exercise the type API.
			auto duckdb_vector = duckdb_data_chunk_get_vector(input, col_idx);
			auto logical_type = duckdb_vector_get_column_type(duckdb_vector);
			auto type_id = duckdb_get_type_id(logical_type);
			if (type_id == DUCKDB_TYPE_SQLNULL) {
				other_null_count++;
			}
			duckdb_destroy_logical_type(&logical_type);
		}
		// both detection strategies must agree
		REQUIRE(null_count == other_null_count);
		result_data[row_idx] = null_count;
	}
}
static void CAPIRegisterANYFun(duckdb_connection connection, const char *name, duckdb_state expected_outcome) {
	// Registers a variadic ANY-typed function with special NULL handling and
	// volatility; the nullptr calls verify those setters are safe no-ops.
	duckdb_state status;
	// create a scalar function
	auto function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(function, name);
	// set the variable arguments
	auto any_type = duckdb_create_logical_type(DUCKDB_TYPE_ANY);
	duckdb_scalar_function_set_varargs(function, any_type);
	duckdb_destroy_logical_type(&any_type);
	// Set special null handling (so NULL rows reach the callback) and volatility.
	duckdb_scalar_function_set_special_handling(function);
	duckdb_scalar_function_set_volatile(function);
	duckdb_scalar_function_set_special_handling(nullptr); // nullptr: no-op
	duckdb_scalar_function_set_volatile(nullptr);         // nullptr: no-op
	// set the return type to UBIGINT
	auto return_type = duckdb_create_logical_type(DUCKDB_TYPE_UBIGINT);
	duckdb_scalar_function_set_return_type(function, return_type);
	duckdb_destroy_logical_type(&return_type);
	// set up the function
	duckdb_scalar_function_set_function(function, CountNULLValues);
	// register and cleanup
	status = duckdb_register_scalar_function(connection, function);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_scalar_function(&function);
}
TEST_CASE("Test Scalar Functions - variadic number of ANY parameters", "[capi]") {
	// Counts NULL arguments across mixed-type argument lists, including the
	// all-NULL and zero-argument cases.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterANYFun(tester.connection, "my_null_count", DuckDBSuccess);
	result = tester.Query("SELECT my_null_count(40, [1], 'hello', 3)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 0);
	result = tester.Query("SELECT my_null_count([1], 42, NULL)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 1);
	result = tester.Query("SELECT my_null_count(NULL, NULL, NULL)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 3);
	result = tester.Query("SELECT my_null_count()");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 0);
}
static void CAPIRegisterAdditionOverloads(duckdb_connection connection, const char *name,
                                          duckdb_state expected_outcome) {
	// Registers a function set containing two overloads of the addition
	// function (2 and 3 BIGINT parameters) under the same name.
	duckdb_state status;
	auto function_set = duckdb_create_scalar_function_set(name);
	// create a scalar function with 2 parameters
	auto function = CAPIGetScalarFunction(connection, name, 2);
	duckdb_add_scalar_function_to_set(function_set, function);
	duckdb_destroy_scalar_function(&function);
	// create a scalar function with 3 parameters
	function = CAPIGetScalarFunction(connection, name, 3);
	duckdb_add_scalar_function_to_set(function_set, function);
	duckdb_destroy_scalar_function(&function);
	// register and cleanup
	status = duckdb_register_scalar_function_set(connection, function_set);
	REQUIRE(status == expected_outcome);
	duckdb_destroy_scalar_function_set(&function_set);
	// destroying twice and destroying nullptr must both be safe no-ops
	duckdb_destroy_scalar_function_set(&function_set);
	duckdb_destroy_scalar_function_set(nullptr);
}
TEST_CASE("Test Scalar Function Overloads C API", "[capi]") {
	// Verifies that both overloads (2-arg and 3-arg) of the registered function
	// set resolve and execute correctly, including over a larger vector.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterAdditionOverloads(tester.connection, "my_addition", DuckDBSuccess);
	// try to register it again - this should not be an error
	CAPIRegisterAdditionOverloads(tester.connection, "my_addition", DuckDBSuccess);
	// now call it
	result = tester.Query("SELECT my_addition(40, 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	result = tester.Query("SELECT my_addition(40, 2, 2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 44);
	// call it over a vector of values
	result = tester.Query("SELECT my_addition(1000000, i, i) FROM range(10000) t(i)");
	REQUIRE_NO_FAIL(*result);
	for (idx_t row = 0; row < 10000; row++) {
		REQUIRE(result->Fetch<int64_t>(0, row) == static_cast<int64_t>(1000000 + row + row));
	}
}
struct ConnectionIdStruct {
	// Bind data for get_connection_id.
	// id of the connection that bound the function
	idx_t connection_id;
	// constant-folded value of the function argument
	idx_t folded_value;
};
void *CopyConnectionIdStruct(void *in_data_ptr) {
	// Copy callback for the bind data: allocate a fresh struct and copy both fields.
	auto src = static_cast<ConnectionIdStruct *>(in_data_ptr);
	auto dst = static_cast<ConnectionIdStruct *>(malloc(sizeof(ConnectionIdStruct)));
	*dst = *src;
	return dst;
}
void GetConnectionIdBind(duckdb_bind_info info) {
	// Bind callback: constant-folds the single argument, reads the connection id
	// from the client context, and stores both in the bind data for execution.
	// Sets a bind error if the argument is not foldable or folding fails.
	// Get the extra info (set via duckdb_scalar_function_set_extra_info at registration).
	auto extra_info_ptr = duckdb_scalar_function_bind_get_extra_info(info);
	auto extra_info = string(reinterpret_cast<const char *>(extra_info_ptr));
	if (extra_info.empty()) {
		return;
	}
	// Get the connection ID.
	duckdb_client_context context;
	duckdb_scalar_function_get_client_context(info, &context);
	auto connection_id = duckdb_client_context_get_connection_id(context);
	// Get the expression of the single argument.
	auto argument_count = duckdb_scalar_function_bind_get_argument_count(info);
	REQUIRE(argument_count == 1);
	auto expr = duckdb_scalar_function_bind_get_argument(info, 0);
	auto foldable = duckdb_expression_is_foldable(expr);
	if (!foldable) {
		duckdb_scalar_function_bind_set_error(info, "input argument must be foldable");
		duckdb_destroy_expression(&expr);
		duckdb_destroy_client_context(&context);
		return;
	}
	// Fold the expression into a constant value.
	duckdb_value value;
	auto error_data = duckdb_expression_fold(context, expr, &value);
	auto has_error = duckdb_error_data_has_error(error_data);
	if (has_error) {
		auto error_msg = duckdb_error_data_message(error_data);
		duckdb_scalar_function_bind_set_error(info, error_msg);
		duckdb_destroy_expression(&expr);
		duckdb_destroy_client_context(&context);
		duckdb_destroy_error_data(&error_data);
		return;
	}
	// BUGFIX: the error data must also be destroyed on the success path,
	// otherwise the duckdb_error_data returned by duckdb_expression_fold leaks.
	duckdb_destroy_error_data(&error_data);
	auto value_type = duckdb_get_value_type(value);
	auto value_type_id = duckdb_get_type_id(value_type);
	REQUIRE(value_type_id == DUCKDB_TYPE_UBIGINT);
	auto uint64_value = duckdb_get_uint64(value);
	duckdb_destroy_value(&value);
	duckdb_destroy_expression(&expr);
	duckdb_destroy_client_context(&context);
	// Store the connection id and the folded constant as bind data (freed via free,
	// copied via CopyConnectionIdStruct).
	auto bind_data = reinterpret_cast<ConnectionIdStruct *>(malloc(sizeof(ConnectionIdStruct)));
	bind_data->connection_id = connection_id;
	bind_data->folded_value = uint64_value;
	duckdb_scalar_function_set_bind_data(info, bind_data, free);
	duckdb_scalar_function_set_bind_data_copy(info, CopyConnectionIdStruct);
}
void GetConnectionId(duckdb_function_info info, duckdb_data_chunk input, duckdb_vector output) {
	// Execution callback: writes (connection_id + folded_value) into every output row.
	auto raw_bind_data = duckdb_scalar_function_get_bind_data(info);
	if (!raw_bind_data) {
		duckdb_scalar_function_set_error(info, "empty bind data");
		return;
	}
	auto bind_data = static_cast<ConnectionIdStruct *>(raw_bind_data);
	auto row_count = duckdb_data_chunk_get_size(input);
	auto out_data = static_cast<uint64_t *>(duckdb_vector_get_data(output));
	// the value is constant for the whole chunk
	auto constant = bind_data->connection_id + bind_data->folded_value;
	for (idx_t row = 0; row < row_count; row++) {
		out_data[row] = constant;
	}
}
static void CAPIRegisterGetConnectionId(duckdb_connection connection, bool is_volatile, string name) {
	// Registers a UBIGINT -> UBIGINT scalar function with a bind callback,
	// optionally marked volatile, and with a string extra info for the bind
	// callback to read.
	duckdb_state status;
	auto function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(function, name.c_str());
	// One UBIGINT parameter; the return type is UBIGINT as well.
	auto type = duckdb_create_logical_type(DUCKDB_TYPE_UBIGINT);
	duckdb_scalar_function_add_parameter(function, type);
	duckdb_scalar_function_set_return_type(function, type);
	duckdb_destroy_logical_type(&type);
	if (is_volatile) {
		duckdb_scalar_function_set_volatile(function);
	}
	// Set up the bind and function callbacks.
	duckdb_scalar_function_set_bind(function, GetConnectionIdBind);
	duckdb_scalar_function_set_function(function, GetConnectionId);
	// Set some extra info to retrieve during binding (freed via free).
	auto string_data = reinterpret_cast<char *>(malloc(100));
	strcpy(string_data, "my_prefix");
	auto extra_info = reinterpret_cast<duckdb_function_info>(string_data);
	duckdb_scalar_function_set_extra_info(function, extra_info, free);
	// Register and cleanup.
	status = duckdb_register_scalar_function(connection, function);
	REQUIRE(status == DuckDBSuccess);
	duckdb_destroy_scalar_function(&function);
}
TEST_CASE("Test Scalar Function with Bind Info", "[capi]") {
	// Verifies per-connection binding: the result is connection_id + folded
	// argument, so it must change after switching connections. Also checks the
	// bind-time error paths (non-foldable argument, failing fold).
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterGetConnectionId(tester.connection, false, "get_connection_id");
	duckdb_client_context context;
	duckdb_connection_get_client_context(tester.connection, &context);
	auto first_conn_id = duckdb_client_context_get_connection_id(context);
	duckdb_destroy_client_context(&context);
	result = tester.Query("SELECT get_connection_id((40 + 2)::UBIGINT)");
	REQUIRE_NO_FAIL(*result);
	auto first_result = result->Fetch<uint64_t>(0, 0);
	REQUIRE(first_result == first_conn_id + 42);
	// switch to a new connection - the bind callback must observe the new id
	tester.ChangeConnection();
	duckdb_connection_get_client_context(tester.connection, &context);
	auto second_conn_id = duckdb_client_context_get_connection_id(context);
	duckdb_destroy_client_context(&context);
	result = tester.Query("SELECT get_connection_id((44 - 2)::UBIGINT)");
	REQUIRE_NO_FAIL(*result);
	auto second_result = result->Fetch<uint64_t>(0, 0);
	REQUIRE(second_conn_id + 42 == second_result);
	REQUIRE(first_result != second_result);
	// non-foldable argument: the bind callback reports an error
	result = tester.Query("SELECT get_connection_id(random()::UBIGINT)");
	REQUIRE_FAIL(result);
	REQUIRE(StringUtil::Contains(result->ErrorMessage(), "input argument must be foldable"));
	// the fold itself fails (UTINYINT overflow): the fold error is propagated
	result = tester.Query("SELECT get_connection_id(200::UTINYINT + 200::UTINYINT)");
	REQUIRE_FAIL(result);
	REQUIRE(StringUtil::Contains(result->ErrorMessage(), "Overflow in addition of"));
}
TEST_CASE("Test volatile scalar function with bind in WHERE clause", "[capi]") {
	// A volatile function with a bind callback must still execute correctly when
	// used inside a WHERE clause (it must not be constant-folded away).
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterGetConnectionId(tester.connection, true, "my_volatile_fun");
	result = tester.Query("SELECT true WHERE my_volatile_fun((40 + 2)::UBIGINT) != 0");
	REQUIRE(!result->HasError());
	REQUIRE(result->Fetch<bool>(0, 0));
}
void ListSum(duckdb_function_info, duckdb_data_chunk input, duckdb_vector output) {
	// Sums the UBIGINT elements of each input list.
	// A NULL list yields a NULL result; NULL elements are skipped.
	auto list_vector = duckdb_data_chunk_get_vector(input, 0);
	auto row_count = duckdb_data_chunk_get_size(input);
	auto list_validity = duckdb_vector_get_validity(list_vector);
	auto entries = reinterpret_cast<duckdb_list_entry *>(duckdb_vector_get_data(list_vector));

	// child vector holding the flattened list elements
	auto child = duckdb_list_vector_get_child(list_vector);
	auto child_validity = duckdb_vector_get_validity(child);
	auto child_data = reinterpret_cast<uint64_t *>(duckdb_vector_get_data(child));

	auto out_data = reinterpret_cast<uint64_t *>(duckdb_vector_get_data(output));
	duckdb_vector_ensure_validity_writable(output);
	auto out_validity = duckdb_vector_get_validity(output);

	for (idx_t row = 0; row < row_count; row++) {
		if (!duckdb_validity_row_is_valid(list_validity, row)) {
			// NULL input list -> NULL result
			duckdb_validity_set_row_invalid(out_validity, row);
			continue;
		}
		const auto entry = entries[row];
		uint64_t total = 0;
		for (idx_t child_idx = entry.offset; child_idx < entry.offset + entry.length; child_idx++) {
			if (duckdb_validity_row_is_valid(child_validity, child_idx)) {
				total += child_data[child_idx];
			}
		}
		out_data[row] = total;
	}
}
static void CAPIRegisterListSum(duckdb_connection connection, const char *name) {
	// Registers a scalar function mapping LIST(UBIGINT) -> UBIGINT.
	auto function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(function, name);

	// parameter: LIST(UBIGINT); return type: UBIGINT
	auto element_type = duckdb_create_logical_type(DUCKDB_TYPE_UBIGINT);
	auto list_type = duckdb_create_list_type(element_type);
	duckdb_scalar_function_add_parameter(function, list_type);
	duckdb_scalar_function_set_return_type(function, element_type);
	duckdb_destroy_logical_type(&list_type);
	duckdb_destroy_logical_type(&element_type);

	duckdb_scalar_function_set_function(function, ListSum);

	auto status = duckdb_register_scalar_function(connection, function);
	REQUIRE(status == DuckDBSuccess);
	duckdb_destroy_scalar_function(&function);
}
TEST_CASE("Test Scalar Functions - LIST", "[capi]") {
	// Verifies the list-sum function for a single-element list, a NULL list,
	// and an empty list.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	CAPIRegisterListSum(tester.connection, "my_list_sum");
	result = tester.Query("SELECT my_list_sum([1::uint64])");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 1);
	// a NULL list yields a NULL result
	result = tester.Query("SELECT my_list_sum(NULL)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	// an empty list sums to zero
	result = tester.Query("SELECT my_list_sum([])");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 0);
}

View File

@@ -0,0 +1,322 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
struct my_bind_data_struct {
	// total number of rows the table function should produce
	int64_t size;
};
void my_bind(duckdb_bind_info info) {
	// Bind: declares one BIGINT result column "forty_two" and stores the first
	// (positional BIGINT) parameter as the requested row count.
	REQUIRE(duckdb_bind_get_parameter_count(info) == 1);
	duckdb_logical_type type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_bind_add_result_column(info, "forty_two", type);
	duckdb_destroy_logical_type(&type);
	auto my_bind_data = (my_bind_data_struct *)malloc(sizeof(my_bind_data_struct));
	auto param = duckdb_bind_get_parameter(info, 0);
	my_bind_data->size = duckdb_get_int64(param);
	duckdb_destroy_value(&param);
	// ownership of my_bind_data passes to DuckDB; it is released via free()
	duckdb_bind_set_bind_data(info, my_bind_data, free);
}
struct my_init_data_struct {
	// number of rows emitted so far by the current scan
	int64_t pos;
};
void my_init(duckdb_init_info info) {
	// Init: allocates per-scan state with the emit position reset to zero.
	// The nullptr call verifies that querying bind data on a null info returns null.
	REQUIRE(duckdb_init_get_bind_data(info) != nullptr);
	REQUIRE(duckdb_init_get_bind_data(nullptr) == nullptr);
	auto my_init_data = (my_init_data_struct *)malloc(sizeof(my_init_data_struct));
	my_init_data->pos = 0;
	duckdb_init_set_init_data(info, my_init_data, free);
}
void my_function(duckdb_function_info info, duckdb_data_chunk output) {
	// Emits up to STANDARD_VECTOR_SIZE rows per call, alternating 42 and 84,
	// until bind_data->size rows have been produced in total.
	auto bind_data = (my_bind_data_struct *)duckdb_function_get_bind_data(info);
	auto init_data = (my_init_data_struct *)duckdb_function_get_init_data(info);
	auto out = (int64_t *)duckdb_vector_get_data(duckdb_data_chunk_get_vector(output, 0));
	idx_t count = 0;
	while (count < STANDARD_VECTOR_SIZE && init_data->pos < bind_data->size) {
		out[count++] = (init_data->pos % 2 == 0) ? 42 : 84;
		init_data->pos++;
	}
	// a chunk size of 0 signals the end of the scan
	duckdb_data_chunk_set_size(output, count);
}
static void capi_register_table_function(duckdb_connection connection, const char *name,
                                         duckdb_table_function_bind_t bind, duckdb_table_function_init_t init,
                                         duckdb_table_function_t f, duckdb_state expected_state = DuckDBSuccess) {
	// Registers a table function with one positional BIGINT parameter and a
	// named BIGINT parameter "my_parameter". The nullptr calls and the double
	// destroy are deliberate robustness checks - all must be safe no-ops.
	duckdb_state status;
	// create a table function
	auto function = duckdb_create_table_function();
	duckdb_table_function_set_name(nullptr, name);     // nullptr function: no-op
	duckdb_table_function_set_name(function, nullptr); // nullptr name: no-op
	duckdb_table_function_set_name(function, name);
	duckdb_table_function_set_name(function, name); // setting the name twice is allowed
	// add a positional BIGINT parameter
	duckdb_logical_type type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_table_function_add_parameter(function, type);
	duckdb_destroy_logical_type(&type);
	// add a named parameter
	duckdb_logical_type itype = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_table_function_add_named_parameter(function, "my_parameter", itype);
	duckdb_destroy_logical_type(&itype);
	// set up the function pointers
	duckdb_table_function_set_bind(function, bind);
	duckdb_table_function_set_init(function, init);
	duckdb_table_function_set_function(function, f);
	// register and cleanup
	status = duckdb_register_table_function(connection, function);
	duckdb_destroy_table_function(&function);
	duckdb_destroy_table_function(&function); // double destroy: no-op
	duckdb_destroy_table_function(nullptr);   // nullptr destroy: no-op
	REQUIRE(status == expected_state);
}
TEST_CASE("Test Table Functions C API", "[capi]") {
	// Exercises the registered table function: positional and named parameters,
	// invalid named-parameter values and names, multi-row output, aggregation.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	capi_register_table_function(tester.connection, "my_function", my_bind, my_init, my_function);
	// registering again does not cause error, because we overload
	capi_register_table_function(tester.connection, "my_function", my_bind, my_init, my_function);
	// now call it
	result = tester.Query("SELECT * FROM my_function(1)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	result = tester.Query("SELECT * FROM my_function(1, my_parameter=3)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	// a non-BIGINT value for the named parameter must fail
	result = tester.Query("SELECT * FROM my_function(1, my_parameter=\"val\")");
	REQUIRE(result->HasError());
	// an unknown named parameter must fail
	result = tester.Query("SELECT * FROM my_function(1, nota_parameter=\"val\")");
	REQUIRE(result->HasError());
	// output alternates between 42 and 84
	result = tester.Query("SELECT * FROM my_function(3)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 84);
	REQUIRE(result->Fetch<int64_t>(0, 2) == 42);
	// 10000 rows: exactly half are 42 and half are 84
	result = tester.Query("SELECT forty_two, COUNT(*) FROM my_function(10000) GROUP BY 1 ORDER BY 1");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 84);
	REQUIRE(result->Fetch<int64_t>(1, 0) == 5000);
	REQUIRE(result->Fetch<int64_t>(1, 1) == 5000);
}
void my_error_bind(duckdb_bind_info info) {
	// Bind callback that always fails; the nullptr call checks it is a safe no-op.
	duckdb_bind_set_error(nullptr, nullptr);
	duckdb_bind_set_error(info, "My error message");
}
void my_error_init(duckdb_init_info info) {
	// Init callback that always fails; the nullptr call checks it is a safe no-op.
	duckdb_init_set_error(nullptr, nullptr);
	duckdb_init_set_error(info, "My error message");
}
void my_error_function(duckdb_function_info info, duckdb_data_chunk output) {
	// Execution callback that always fails; the nullptr call checks it is a safe no-op.
	duckdb_function_set_error(nullptr, nullptr);
	duckdb_function_set_error(info, "My error message");
}
TEST_CASE("Test Table Function errors in C API", "[capi]") {
	// Errors raised from the bind, init, and execution callbacks must all
	// surface as query errors.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	capi_register_table_function(tester.connection, "my_error_bind", my_error_bind, my_init, my_function);
	capi_register_table_function(tester.connection, "my_error_init", my_bind, my_error_init, my_function);
	capi_register_table_function(tester.connection, "my_error_function", my_bind, my_init, my_error_function);
	result = tester.Query("SELECT * FROM my_error_bind(1)");
	REQUIRE(result->HasError());
	result = tester.Query("SELECT * FROM my_error_init(1)");
	REQUIRE(result->HasError());
	result = tester.Query("SELECT * FROM my_error_function(1)");
	REQUIRE(result->HasError());
}
TEST_CASE("Test Table Function register errors in C API", "[capi]") {
	// Registering the same table function name twice must succeed (overloading).
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	capi_register_table_function(tester.connection, "x", my_error_bind, my_init, my_function, DuckDBSuccess);
	// Try to register it again with the same name, is ok (because of overloading)
	capi_register_table_function(tester.connection, "x", my_error_bind, my_init, my_function, DuckDBSuccess);
}
struct my_named_bind_data_struct {
	// total number of rows the table function should produce
	int64_t size;
	// factor applied to each emitted value (from named parameter "my_parameter")
	int64_t multiplier;
};
void my_named_bind(duckdb_bind_info info) {
	// Bind: one positional BIGINT (the row count) plus an optional named BIGINT
	// parameter "my_parameter" used as a multiplier (defaults to 1 when absent).
	REQUIRE(duckdb_bind_get_parameter_count(info) == 1);
	duckdb_logical_type bigint_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_bind_add_result_column(info, "forty_two", bigint_type);
	duckdb_destroy_logical_type(&bigint_type);

	auto bind_data = (my_named_bind_data_struct *)malloc(sizeof(my_named_bind_data_struct));
	auto size_param = duckdb_bind_get_parameter(info, 0);
	bind_data->size = duckdb_get_int64(size_param);
	duckdb_destroy_value(&size_param);

	// the named parameter is optional; a null handle means it was not supplied
	auto multiplier_param = duckdb_bind_get_named_parameter(info, "my_parameter");
	bind_data->multiplier = multiplier_param ? duckdb_get_int64(multiplier_param) : 1;
	duckdb_destroy_value(&multiplier_param);

	duckdb_bind_set_bind_data(info, bind_data, free);
}
void my_named_init(duckdb_init_info info) {
	// Init: allocates per-scan state with the emit position reset to zero
	// (same behavior as my_init). The nullptr call verifies that querying
	// bind data on a null info returns null.
	REQUIRE(duckdb_init_get_bind_data(info) != nullptr);
	REQUIRE(duckdb_init_get_bind_data(nullptr) == nullptr);
	auto my_init_data = (my_init_data_struct *)malloc(sizeof(my_init_data_struct));
	my_init_data->pos = 0;
	duckdb_init_set_init_data(info, my_init_data, free);
}
void my_named_function(duckdb_function_info info, duckdb_data_chunk output) {
	// Emits up to STANDARD_VECTOR_SIZE rows per call, alternating
	// 42 * multiplier and 84 * multiplier, until `size` rows are produced.
	auto bind_data = (my_named_bind_data_struct *)duckdb_function_get_bind_data(info);
	auto init_data = (my_init_data_struct *)duckdb_function_get_init_data(info);
	auto out = (int64_t *)duckdb_vector_get_data(duckdb_data_chunk_get_vector(output, 0));
	idx_t count = 0;
	while (count < STANDARD_VECTOR_SIZE && init_data->pos < bind_data->size) {
		auto base = (init_data->pos % 2 == 0) ? int64_t(42) : int64_t(84);
		out[count++] = base * bind_data->multiplier;
		init_data->pos++;
	}
	// a chunk size of 0 signals the end of the scan
	duckdb_data_chunk_set_size(output, count);
}
TEST_CASE("Test Table Function named parameters in C API", "[capi]") {
	// Verifies the default multiplier (1) and explicit multipliers passed via
	// the named parameter "my_parameter".
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	capi_register_table_function(tester.connection, "my_multiplier_function", my_named_bind, my_named_init,
	                             my_named_function);
	// no named parameter: multiplier defaults to 1
	result = tester.Query("SELECT * FROM my_multiplier_function(3)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 84);
	REQUIRE(result->Fetch<int64_t>(0, 2) == 42);
	// multiplier 2: 84, 168
	result = tester.Query("SELECT * FROM my_multiplier_function(2, my_parameter=2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 84);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 168);
	// multiplier 3: 126, 252
	result = tester.Query("SELECT * FROM my_multiplier_function(2, my_parameter=3)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 126);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 252);
}
// Bind data for the connection-id table function below: records which
// connection bound the function and how many rows the query asked for.
struct my_bind_connection_id_data {
	idx_t connection_id;  // id of the connection observed at bind time
	idx_t rows_requested; // row count taken from the first function parameter
};
// Bind callback: declares two BIGINT result columns ("connection_id" and
// "forty_two") and captures both the requested row count (parameter 0) and
// the id of the connection performing the bind.
void my_bind_connection_id(duckdb_bind_info info) {
	REQUIRE(duckdb_bind_get_parameter_count(info) == 1);
	// result column 0: the connection id observed at bind time
	auto col_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_bind_add_result_column(info, "connection_id", col_type);
	duckdb_destroy_logical_type(&col_type);
	// result column 1: the constant 42
	col_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	duckdb_bind_add_result_column(info, "forty_two", col_type);
	duckdb_destroy_logical_type(&col_type);
	// read the requested row count from the first positional parameter
	auto row_count_param = duckdb_bind_get_parameter(info, 0);
	auto requested = duckdb_get_int64(row_count_param);
	duckdb_destroy_value(&row_count_param);
	// fetch the connection id via the client context of the binding connection
	duckdb_client_context ctx;
	duckdb_table_function_get_client_context(info, &ctx);
	auto conn_id = duckdb_client_context_get_connection_id(ctx);
	duckdb_destroy_client_context(&ctx);
	auto data = reinterpret_cast<my_bind_connection_id_data *>(malloc(sizeof(my_bind_connection_id_data)));
	data->rows_requested = requested;
	data->connection_id = conn_id;
	// ownership transfers to DuckDB; free() runs when the bind data is dropped
	duckdb_bind_set_bind_data(info, data, free);
}
// Init callback for the connection-id table function: validates bind data
// access and allocates a scan cursor starting at position 0.
void my_init_connection_id(duckdb_init_info info) {
	REQUIRE(duckdb_init_get_bind_data(info) != nullptr);
	// a null info handle must be tolerated and yield a null pointer
	REQUIRE(duckdb_init_get_bind_data(nullptr) == nullptr);
	auto cursor = reinterpret_cast<my_init_data_struct *>(malloc(sizeof(my_init_data_struct)));
	cursor->pos = 0;
	duckdb_init_set_init_data(info, cursor, free);
}
// Execution callback: fills column 0 with the connection id captured at bind
// time and column 1 with the constant 42, until rows_requested rows have been
// produced across all chunks.
void my_function_connection_id(duckdb_function_info info, duckdb_data_chunk output) {
	auto bind_data = reinterpret_cast<my_bind_connection_id_data *>(duckdb_function_get_bind_data(info));
	auto cursor = reinterpret_cast<my_init_data_struct *>(duckdb_function_get_init_data(info));
	auto id_col = reinterpret_cast<int64_t *>(duckdb_vector_get_data(duckdb_data_chunk_get_vector(output, 0)));
	auto const_col = reinterpret_cast<int64_t *>(duckdb_vector_get_data(duckdb_data_chunk_get_vector(output, 1)));
	idx_t row = 0;
	while (row < STANDARD_VECTOR_SIZE && cursor->pos < bind_data->rows_requested) {
		id_col[row] = bind_data->connection_id;
		const_col[row] = 42;
		cursor->pos++;
		row++;
	}
	duckdb_data_chunk_set_size(output, row);
}
// Verifies that a table function can retrieve the client context of the
// executing connection: the connection id reported by the function must match
// the one obtained directly from the connection.
// Fix: added the "[capi]" tag for consistency with every other test case in
// this file, so the test is selectable via the [capi] tag filter.
TEST_CASE("Table function client context return", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	capi_register_table_function(tester.connection, "my_connection_id_function", my_bind_connection_id,
	                             my_init_connection_id, my_function_connection_id);
	// fetch the connection id directly from the connection for comparison
	duckdb_client_context context;
	duckdb_connection_get_client_context(tester.connection, &context);
	auto first_conn_id = duckdb_client_context_get_connection_id(context);
	duckdb_destroy_client_context(&context);
	result = tester.Query("SELECT * FROM my_connection_id_function(3)");
	REQUIRE_NO_FAIL(*result);
	// column 0: connection id observed inside the table function's bind phase
	REQUIRE(result->Fetch<int64_t>(0, 0) == first_conn_id);
	REQUIRE(result->Fetch<int64_t>(0, 1) == first_conn_id);
	REQUIRE(result->Fetch<int64_t>(0, 2) == first_conn_id);
	// column 1: constant 42 marker emitted by the function
	REQUIRE(result->Fetch<int64_t>(1, 0) == 42);
	REQUIRE(result->Fetch<int64_t>(1, 1) == 42);
	REQUIRE(result->Fetch<int64_t>(1, 2) == 42);
}

View File

@@ -0,0 +1,737 @@
#include "capi_tester.hpp"
#include <regex>
using namespace duckdb;
using namespace std;
// Assert bitwise equality of two duckdb_hugeint values (both 64-bit halves).
static void require_hugeint_eq(duckdb_hugeint left, duckdb_hugeint right) {
	REQUIRE(left.lower == right.lower);
	REQUIRE(left.upper == right.upper);
}
// Assert that `left` equals the hugeint composed from the given halves.
static void require_hugeint_eq(duckdb_hugeint left, uint64_t lower, int64_t upper) {
	duckdb_hugeint expected;
	expected.lower = lower;
	expected.upper = upper;
	require_hugeint_eq(left, expected);
}
// Assert bitwise equality of two duckdb_uhugeint values (both 64-bit halves).
static void require_uhugeint_eq(duckdb_uhugeint left, duckdb_uhugeint right) {
	REQUIRE(left.lower == right.lower);
	REQUIRE(left.upper == right.upper);
}
// Assert that `left` equals the unsigned hugeint composed from the given halves.
static void require_uhugeint_eq(duckdb_uhugeint left, uint64_t lower, uint64_t upper) {
	duckdb_uhugeint expected;
	expected.lower = lower;
	expected.upper = upper;
	require_uhugeint_eq(left, expected);
}
// Smoke test of the deprecated value-based C API: scalar queries, NULL
// handling, out-of-range row/column indices, rows_changed semantics, and
// nullptr tolerance of the result accessor functions.
TEST_CASE("Basic test of C API", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("SET default_null_order='nulls_first'"));
	// select scalar value
	result = tester.Query("SELECT CAST(42 AS BIGINT)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->ColumnType(0) == DUCKDB_TYPE_BIGINT);
	REQUIRE(result->ColumnData<int64_t>(0)[0] == 42);
	REQUIRE(result->ColumnCount() == 1);
	REQUIRE(result->row_count() == 1);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	REQUIRE(!result->IsNull(0, 0));
	// out of range fetch: out-of-bounds indices return the type's zero value
	REQUIRE(result->Fetch<int64_t>(1, 0) == 0);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 0);
	// cannot fetch data chunk after using the value API
	REQUIRE(result->FetchChunk(0) == nullptr);
	// select scalar NULL: fetched through the value API it reads as 0
	result = tester.Query("SELECT NULL");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->ColumnCount() == 1);
	REQUIRE(result->row_count() == 1);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 0);
	REQUIRE(result->IsNull(0, 0));
	// select scalar string
	result = tester.Query("SELECT 'hello'");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->ColumnCount() == 1);
	REQUIRE(result->row_count() == 1);
	REQUIRE(result->Fetch<string>(0, 0) == "hello");
	REQUIRE(!result->IsNull(0, 0));
	// scalar boolean results
	result = tester.Query("SELECT 1=1");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->ColumnCount() == 1);
	REQUIRE(result->row_count() == 1);
	REQUIRE(result->Fetch<bool>(0, 0) == true);
	REQUIRE(!result->IsNull(0, 0));
	result = tester.Query("SELECT 1=0");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->ColumnCount() == 1);
	REQUIRE(result->row_count() == 1);
	REQUIRE(result->Fetch<bool>(0, 0) == false);
	REQUIRE(!result->IsNull(0, 0));
	// grouped and ordered booleans: false sorts before true
	result = tester.Query("SELECT i FROM (values (true), (false)) tbl(i) group by i order by i");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->ColumnCount() == 1);
	REQUIRE(result->row_count() == 2);
	REQUIRE(result->Fetch<bool>(0, 0) == false);
	REQUIRE(result->Fetch<bool>(0, 1) == true);
	REQUIRE(!result->IsNull(0, 0));
	// multiple insertions
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE test (a INTEGER, b INTEGER);"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO test VALUES (11, 22)"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO test VALUES (NULL, 21)"));
	result = tester.Query("INSERT INTO test VALUES (13, 22)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->rows_changed() == 1);
	// NULL selection; NULLs sort first per the setting above
	result = tester.Query("SELECT a, b FROM test ORDER BY a");
	REQUIRE_NO_FAIL(*result);
	// rows_changed is 0 for a plain SELECT
	REQUIRE(result->rows_changed() == 0);
	// NULL, 11, 13
	REQUIRE(result->IsNull(0, 0));
	REQUIRE(result->Fetch<int32_t>(0, 1) == 11);
	REQUIRE(result->Fetch<int32_t>(0, 2) == 13);
	// 21, 22, 22
	REQUIRE(result->Fetch<int32_t>(1, 0) == 21);
	REQUIRE(result->Fetch<int32_t>(1, 1) == 22);
	REQUIRE(result->Fetch<int32_t>(1, 2) == 22);
	REQUIRE(result->ColumnName(0) == "a");
	REQUIRE(result->ColumnName(1) == "b");
	// out-of-range column index yields an empty column name
	REQUIRE(result->ColumnName(2) == "");
	result = tester.Query("UPDATE test SET a = 1 WHERE b=22");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->rows_changed() == 2);
	// several error conditions: accessors must tolerate a nullptr result
	REQUIRE(duckdb_value_is_null(nullptr, 0, 0) == false);
	REQUIRE(duckdb_column_type(nullptr, 0) == DUCKDB_TYPE_INVALID);
	REQUIRE(duckdb_column_count(nullptr) == 0);
	REQUIRE(duckdb_row_count(nullptr) == 0);
	REQUIRE(duckdb_rows_changed(nullptr) == 0);
	REQUIRE(duckdb_result_error(nullptr) == nullptr);
	REQUIRE(duckdb_nullmask_data(nullptr, 0) == nullptr);
	REQUIRE(duckdb_column_data(nullptr, 0) == nullptr);
	REQUIRE(duckdb_result_error_type(nullptr) == DUCKDB_ERROR_INVALID);
}
// Walks the deprecated value-based C API across the major type families
// (integers, floats, dates, times, blobs, booleans and decimals) checking
// fetch conversions, NULL handling and overflow behavior for each.
// Fix: the blob content check previously was REQUIRE(memcmp(blob.data,
// "hello\012world", 11)) — memcmp returns 0 on equality, so that asserted the
// buffers were UNEQUAL (and compared against octal \012 = '\n' rather than
// the inserted hex byte \x12), making the check vacuous. It now asserts
// equality against the actual inserted bytes.
TEST_CASE("Test different types of C API", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("SET default_null_order='nulls_first'"));
	// integer columns
	duckdb::vector<string> types = {"TINYINT", "SMALLINT", "INTEGER", "BIGINT", "HUGEINT",
	                                "UTINYINT", "USMALLINT", "UINTEGER", "UBIGINT", "UHUGEINT"};
	for (auto &type : types) {
		// create the table and insert values
		REQUIRE_NO_FAIL(tester.Query("BEGIN TRANSACTION"));
		REQUIRE_NO_FAIL(tester.Query("CREATE TABLE integers(i " + type + ")"));
		REQUIRE_NO_FAIL(tester.Query("INSERT INTO integers VALUES (1), (NULL)"));
		result = tester.Query("SELECT * FROM integers ORDER BY i");
		REQUIRE_NO_FAIL(*result);
		// NULL sorts first (configured above); fetching NULL yields zero values
		REQUIRE(result->IsNull(0, 0));
		REQUIRE(result->Fetch<int8_t>(0, 0) == 0);
		REQUIRE(result->Fetch<int16_t>(0, 0) == 0);
		REQUIRE(result->Fetch<int32_t>(0, 0) == 0);
		REQUIRE(result->Fetch<int64_t>(0, 0) == 0);
		REQUIRE(result->Fetch<uint8_t>(0, 0) == 0);
		REQUIRE(result->Fetch<uint16_t>(0, 0) == 0);
		REQUIRE(result->Fetch<uint32_t>(0, 0) == 0);
		REQUIRE(result->Fetch<uint64_t>(0, 0) == 0);
		REQUIRE(duckdb_uhugeint_to_double(result->Fetch<duckdb_uhugeint>(0, 0)) == 0);
		REQUIRE(duckdb_hugeint_to_double(result->Fetch<duckdb_hugeint>(0, 0)) == 0);
		REQUIRE(result->Fetch<string>(0, 0) == "");
		REQUIRE(ApproxEqual(result->Fetch<float>(0, 0), 0.0f));
		REQUIRE(ApproxEqual(result->Fetch<double>(0, 0), 0.0));
		// the value 1 converts cleanly to every numeric type and to string
		REQUIRE(!result->IsNull(0, 1));
		REQUIRE(result->Fetch<int8_t>(0, 1) == 1);
		REQUIRE(result->Fetch<int16_t>(0, 1) == 1);
		REQUIRE(result->Fetch<int32_t>(0, 1) == 1);
		REQUIRE(result->Fetch<int64_t>(0, 1) == 1);
		REQUIRE(result->Fetch<uint8_t>(0, 1) == 1);
		REQUIRE(result->Fetch<uint16_t>(0, 1) == 1);
		REQUIRE(result->Fetch<uint32_t>(0, 1) == 1);
		REQUIRE(result->Fetch<uint64_t>(0, 1) == 1);
		REQUIRE(duckdb_uhugeint_to_double(result->Fetch<duckdb_uhugeint>(0, 1)) == 1);
		REQUIRE(duckdb_hugeint_to_double(result->Fetch<duckdb_hugeint>(0, 1)) == 1);
		REQUIRE(ApproxEqual(result->Fetch<float>(0, 1), 1.0f));
		REQUIRE(ApproxEqual(result->Fetch<double>(0, 1), 1.0));
		REQUIRE(result->Fetch<string>(0, 1) == "1");
		REQUIRE_NO_FAIL(tester.Query("ROLLBACK"));
	}
	// real/double columns
	types = {"REAL", "DOUBLE"};
	for (auto &type : types) {
		// create the table and insert values
		REQUIRE_NO_FAIL(tester.Query("BEGIN TRANSACTION"));
		REQUIRE_NO_FAIL(tester.Query("CREATE TABLE doubles(i " + type + ")"));
		REQUIRE_NO_FAIL(tester.Query("INSERT INTO doubles VALUES (1), (NULL)"));
		result = tester.Query("SELECT * FROM doubles ORDER BY i");
		REQUIRE_NO_FAIL(*result);
		REQUIRE(result->IsNull(0, 0));
		REQUIRE(result->Fetch<int8_t>(0, 0) == 0);
		REQUIRE(result->Fetch<int16_t>(0, 0) == 0);
		REQUIRE(result->Fetch<int32_t>(0, 0) == 0);
		REQUIRE(result->Fetch<int64_t>(0, 0) == 0);
		REQUIRE(result->Fetch<string>(0, 0) == "");
		REQUIRE(ApproxEqual(result->Fetch<float>(0, 0), 0.0f));
		REQUIRE(ApproxEqual(result->Fetch<double>(0, 0), 0.0));
		REQUIRE(!result->IsNull(0, 1));
		REQUIRE(result->Fetch<int8_t>(0, 1) == 1);
		REQUIRE(result->Fetch<int16_t>(0, 1) == 1);
		REQUIRE(result->Fetch<int32_t>(0, 1) == 1);
		REQUIRE(result->Fetch<int64_t>(0, 1) == 1);
		REQUIRE(ApproxEqual(result->Fetch<float>(0, 1), 1.0f));
		REQUIRE(ApproxEqual(result->Fetch<double>(0, 1), 1.0));
		REQUIRE_NO_FAIL(tester.Query("ROLLBACK"));
	}
	// date columns
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE dates(d DATE)"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO dates VALUES ('1992-09-20'), (NULL), ('30000-09-20')"));
	result = tester.Query("SELECT * FROM dates ORDER BY d");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	duckdb_date_struct date = duckdb_from_date(result->Fetch<duckdb_date>(0, 1));
	REQUIRE(date.year == 1992);
	REQUIRE(date.month == 9);
	REQUIRE(date.day == 20);
	REQUIRE(result->Fetch<string>(0, 1) == Value::DATE(1992, 9, 20).ToString());
	// far-future dates survive the round trip as well
	date = duckdb_from_date(result->Fetch<duckdb_date>(0, 2));
	REQUIRE(date.year == 30000);
	REQUIRE(date.month == 9);
	REQUIRE(date.day == 20);
	REQUIRE(result->Fetch<string>(0, 2) == Value::DATE(30000, 9, 20).ToString());
	// time columns
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE times(d TIME)"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO times VALUES ('12:00:30.1234'), (NULL), ('02:30:01')"));
	result = tester.Query("SELECT * FROM times ORDER BY d");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	duckdb_time_struct time_val = duckdb_from_time(result->Fetch<duckdb_time>(0, 1));
	REQUIRE(time_val.hour == 2);
	REQUIRE(time_val.min == 30);
	REQUIRE(time_val.sec == 1);
	REQUIRE(time_val.micros == 0);
	REQUIRE(result->Fetch<string>(0, 1) == Value::TIME(2, 30, 1, 0).ToString());
	time_val = duckdb_from_time(result->Fetch<duckdb_time>(0, 2));
	REQUIRE(time_val.hour == 12);
	REQUIRE(time_val.min == 0);
	REQUIRE(time_val.sec == 30);
	REQUIRE(time_val.micros == 123400);
	REQUIRE(result->Fetch<string>(0, 2) == Value::TIME(12, 0, 30, 123400).ToString());
	// blob columns
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE blobs(b BLOB)"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO blobs VALUES ('hello\\x12world'), ('\\x00'), (NULL)"));
	result = tester.Query("SELECT * FROM blobs");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(!result->IsNull(0, 0));
	duckdb_blob blob = result->Fetch<duckdb_blob>(0, 0);
	REQUIRE(blob.size == 11);
	// the fetched bytes must equal the inserted ones ('\x12' is the byte 0x12)
	REQUIRE(memcmp(blob.data, "hello\x12world", 11) == 0);
	REQUIRE(result->Fetch<string>(0, 1) == "\\x00");
	// a NULL blob fetches as an empty {nullptr, 0} blob
	REQUIRE(result->IsNull(0, 2));
	blob = result->Fetch<duckdb_blob>(0, 2);
	REQUIRE(blob.data == nullptr);
	REQUIRE(blob.size == 0);
	// boolean columns; ordered result is NULL, false, true
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE booleans(b BOOLEAN)"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO booleans VALUES (42 > 60), (42 > 20), (42 > NULL)"));
	result = tester.Query("SELECT * FROM booleans ORDER BY b");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	REQUIRE(!result->Fetch<bool>(0, 0));
	REQUIRE(!result->Fetch<bool>(0, 1));
	REQUIRE(result->Fetch<bool>(0, 2));
	REQUIRE(result->Fetch<string>(0, 2) == "true");
	// decimal columns
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE decimals(dec DECIMAL(18, 4) NULL)"));
	REQUIRE_NO_FAIL(tester.Query("INSERT INTO decimals VALUES (NULL), (12.3)"));
	result = tester.Query("SELECT * FROM decimals ORDER BY dec");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->IsNull(0, 0));
	duckdb_decimal decimal = result->Fetch<duckdb_decimal>(0, 1);
	REQUIRE(duckdb_decimal_to_double(decimal) == 12.3);
	// test more decimal physical types
	result = tester.Query("SELECT "
	                      "1.2::DECIMAL(4,1),"
	                      "100.3::DECIMAL(9,1),"
	                      "-320938.4298::DECIMAL(18,4),"
	                      "49082094824.904820482094::DECIMAL(30,12),"
	                      "NULL::DECIMAL");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(duckdb_decimal_to_double(result->Fetch<duckdb_decimal>(0, 0)) == 1.2);
	REQUIRE(duckdb_decimal_to_double(result->Fetch<duckdb_decimal>(1, 0)) == 100.3);
	REQUIRE(duckdb_decimal_to_double(result->Fetch<duckdb_decimal>(2, 0)) == -320938.4298);
	REQUIRE(duckdb_decimal_to_double(result->Fetch<duckdb_decimal>(3, 0)) == 49082094824.904820482094);
	REQUIRE(duckdb_decimal_to_double(result->Fetch<duckdb_decimal>(4, 0)) == 0.0);
	REQUIRE(!result->IsNull(0, 0));
	REQUIRE(!result->IsNull(1, 0));
	REQUIRE(!result->IsNull(2, 0));
	REQUIRE(!result->IsNull(3, 0));
	REQUIRE(result->IsNull(4, 0));
	// non-NULL decimals convert to true, NULL converts to false
	REQUIRE(result->Fetch<bool>(0, 0) == true);
	REQUIRE(result->Fetch<bool>(1, 0) == true);
	REQUIRE(result->Fetch<bool>(2, 0) == true);
	REQUIRE(result->Fetch<bool>(3, 0) == true);
	REQUIRE(result->Fetch<bool>(4, 0) == false);
	// values that do not fit the target integer type fetch as 0
	REQUIRE(result->Fetch<int8_t>(0, 0) == 1);
	REQUIRE(result->Fetch<int8_t>(1, 0) == 100);
	REQUIRE(result->Fetch<int8_t>(2, 0) == 0); // overflow
	REQUIRE(result->Fetch<int8_t>(3, 0) == 0); // overflow
	REQUIRE(result->Fetch<int8_t>(4, 0) == 0);
	REQUIRE(result->Fetch<uint8_t>(0, 0) == 1);
	REQUIRE(result->Fetch<uint8_t>(1, 0) == 100);
	REQUIRE(result->Fetch<uint8_t>(2, 0) == 0); // overflow
	REQUIRE(result->Fetch<uint8_t>(3, 0) == 0); // overflow
	REQUIRE(result->Fetch<uint8_t>(4, 0) == 0);
	REQUIRE(result->Fetch<int16_t>(0, 0) == 1);
	REQUIRE(result->Fetch<int16_t>(1, 0) == 100);
	REQUIRE(result->Fetch<int16_t>(2, 0) == 0); // overflow
	REQUIRE(result->Fetch<int16_t>(3, 0) == 0); // overflow
	REQUIRE(result->Fetch<int16_t>(4, 0) == 0);
	REQUIRE(result->Fetch<uint16_t>(0, 0) == 1);
	REQUIRE(result->Fetch<uint16_t>(1, 0) == 100);
	REQUIRE(result->Fetch<uint16_t>(2, 0) == 0); // overflow
	REQUIRE(result->Fetch<uint16_t>(3, 0) == 0); // overflow
	REQUIRE(result->Fetch<uint16_t>(4, 0) == 0);
	REQUIRE(result->Fetch<int32_t>(0, 0) == 1);
	REQUIRE(result->Fetch<int32_t>(1, 0) == 100);
	REQUIRE(result->Fetch<int32_t>(2, 0) == -320938);
	REQUIRE(result->Fetch<int32_t>(3, 0) == 0); // overflow
	REQUIRE(result->Fetch<int32_t>(4, 0) == 0);
	REQUIRE(result->Fetch<uint32_t>(0, 0) == 1);
	REQUIRE(result->Fetch<uint32_t>(1, 0) == 100);
	REQUIRE(result->Fetch<uint32_t>(2, 0) == 0); // overflow
	REQUIRE(result->Fetch<uint32_t>(3, 0) == 0); // overflow
	REQUIRE(result->Fetch<uint32_t>(4, 0) == 0);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 1);
	REQUIRE(result->Fetch<int64_t>(1, 0) == 100);
	REQUIRE(result->Fetch<int64_t>(2, 0) == -320938);
	REQUIRE(result->Fetch<int64_t>(3, 0) == 49082094825); // ceiling
	REQUIRE(result->Fetch<int64_t>(4, 0) == 0);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 1);
	REQUIRE(result->Fetch<uint64_t>(1, 0) == 100);
	REQUIRE(result->Fetch<uint64_t>(2, 0) == 0); // overflow
	REQUIRE(result->Fetch<uint64_t>(3, 0) == 49082094825);
	REQUIRE(result->Fetch<uint64_t>(4, 0) == 0);
	require_hugeint_eq(result->Fetch<duckdb_hugeint>(0, 0), 1, 0);
	require_hugeint_eq(result->Fetch<duckdb_hugeint>(1, 0), 100, 0);
	require_hugeint_eq(result->Fetch<duckdb_hugeint>(2, 0), 18446744073709230678ul, -1);
	require_hugeint_eq(result->Fetch<duckdb_hugeint>(3, 0), 49082094825, 0);
	require_hugeint_eq(result->Fetch<duckdb_hugeint>(4, 0), 0, 0);
	require_uhugeint_eq(result->Fetch<duckdb_uhugeint>(0, 0), 1, 0);
	require_uhugeint_eq(result->Fetch<duckdb_uhugeint>(1, 0), 100, 0);
	require_uhugeint_eq(result->Fetch<duckdb_uhugeint>(2, 0), 0, 0); // overflow
	require_uhugeint_eq(result->Fetch<duckdb_uhugeint>(3, 0), 49082094825, 0);
	require_uhugeint_eq(result->Fetch<duckdb_uhugeint>(4, 0), 0, 0);
	REQUIRE(result->Fetch<float>(0, 0) == 1.2f);
	REQUIRE(result->Fetch<float>(1, 0) == 100.3f);
	REQUIRE(floor(result->Fetch<float>(2, 0)) == -320939);
	REQUIRE((int64_t)floor(result->Fetch<float>(3, 0)) == 49082093568);
	REQUIRE(result->Fetch<float>(4, 0) == 0.0);
	REQUIRE(result->Fetch<double>(0, 0) == 1.2);
	REQUIRE(result->Fetch<double>(1, 0) == 100.3);
	REQUIRE(result->Fetch<double>(2, 0) == -320938.4298);
	REQUIRE(result->Fetch<double>(3, 0) == 49082094824.904820482094);
	REQUIRE(result->Fetch<double>(4, 0) == 0.0);
	REQUIRE(result->Fetch<string>(0, 0) == "1.2");
	REQUIRE(result->Fetch<string>(1, 0) == "100.3");
	REQUIRE(result->Fetch<string>(2, 0) == "-320938.4298");
	REQUIRE(result->Fetch<string>(3, 0) == "49082094824.904820482094");
	REQUIRE(result->Fetch<string>(4, 0) == "");
	// negative decimal conversions: unsigned targets overflow to 0
	result = tester.Query("SELECT -123.45::DECIMAL(5,2)");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->Fetch<bool>(0, 0) == true);
	REQUIRE(result->Fetch<int8_t>(0, 0) == -123);
	REQUIRE(result->Fetch<uint8_t>(0, 0) == 0);
	REQUIRE(result->Fetch<int16_t>(0, 0) == -123);
	REQUIRE(result->Fetch<uint16_t>(0, 0) == 0);
	REQUIRE(result->Fetch<int32_t>(0, 0) == -123);
	REQUIRE(result->Fetch<uint32_t>(0, 0) == 0);
	REQUIRE(result->Fetch<int64_t>(0, 0) == -123);
	REQUIRE(result->Fetch<uint64_t>(0, 0) == 0);
	hugeint_t expected_hugeint_val;
	Hugeint::TryConvert(-123, expected_hugeint_val);
	duckdb_hugeint expected_val;
	expected_val.lower = expected_hugeint_val.lower;
	expected_val.upper = expected_hugeint_val.upper;
	require_hugeint_eq(result->Fetch<duckdb_hugeint>(0, 0), expected_val);
	REQUIRE(result->Fetch<float>(0, 0) == -123.45f);
	REQUIRE(result->Fetch<double>(0, 0) == -123.45);
	REQUIRE(result->Fetch<string>(0, 0) == "-123.45");
}
// A TIMETZ value decomposes into micros-since-midnight plus a UTC offset.
TEST_CASE("decompose timetz with duckdb_from_time_tz", "[capi]") {
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	auto query_result = tester.Query("SELECT TIMETZ '11:30:00.123456-02:00'");
	REQUIRE(query_result->success);
	auto chunk = query_result->FetchChunk(0);
	REQUIRE(chunk->ColumnCount() == 1);
	REQUIRE(query_result->ColumnType(0) == DUCKDB_TYPE_TIME_TZ);
	auto column = reinterpret_cast<duckdb_time_tz *>(chunk->GetData(0));
	auto decomposed = duckdb_from_time_tz(column[0]);
	REQUIRE(decomposed.time.hour == 11);
	REQUIRE(decomposed.time.min == 30);
	REQUIRE(decomposed.time.sec == 0);
	REQUIRE(decomposed.time.micros == 123456);
	// the -02:00 zone is reported in seconds
	REQUIRE(decomposed.offset == -7200);
}
// Round-trip test: compose a TIMETZ from micros + offset with
// duckdb_create_time_tz and decompose it again with duckdb_from_time_tz.
// Fix: added the "[capi]" tag for consistency with the other test cases in
// this file, so the test is selectable via the [capi] tag filter.
TEST_CASE("create time_tz value", "[capi]") {
	duckdb_time_struct time;
	time.hour = 4;
	time.min = 2;
	time.sec = 6;
	time.micros = 9;
	int offset = 8000; // offset in seconds
	auto micros = duckdb_to_time(time);
	auto res = duckdb_create_time_tz(micros.micros, offset);
	// and back again
	auto inverse = duckdb_from_time_tz(res);
	REQUIRE(offset == inverse.offset);
	REQUIRE(inverse.time.hour == 4);
	REQUIRE(inverse.time.min == 2);
	REQUIRE(inverse.time.sec == 6);
	REQUIRE(inverse.time.micros == 9);
}
// Exercises the C API's error paths: failing opens, syntax/bind errors,
// nullptr tolerance of the prepare and arrow interfaces, and the epoch
// default returned when fetching a date from a non-date column.
TEST_CASE("Test errors in C API", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	// cannot open database in random directory
	REQUIRE(!tester.OpenDatabase("/bla/this/directory/should/not/exist/hopefully/awerar333"));
	REQUIRE(tester.OpenDatabase(nullptr));
	// syntax error in query
	REQUIRE_FAIL(tester.Query("SELEC * FROM TABLE"));
	// bind error
	REQUIRE_FAIL(tester.Query("SELECT * FROM TABLE"));
	duckdb_result res;
	duckdb_prepared_statement stmt = nullptr;
	// fail prepare API calls with nullptr connection/query
	REQUIRE(duckdb_prepare(NULL, "SELECT 42", &stmt) == DuckDBError);
	REQUIRE(duckdb_prepare(tester.connection, NULL, &stmt) == DuckDBError);
	REQUIRE(stmt == nullptr);
	// a bind error still yields a statement handle carrying the error message
	REQUIRE(duckdb_prepare(tester.connection, "SELECT * from INVALID_TABLE", &stmt) == DuckDBError);
	REQUIRE(duckdb_prepare_error(nullptr) == nullptr);
	REQUIRE(stmt != nullptr);
	REQUIRE(duckdb_prepare_error(stmt) != nullptr);
	duckdb_destroy_prepare(&stmt);
	REQUIRE(duckdb_bind_boolean(NULL, 0, true) == DuckDBError);
	REQUIRE(duckdb_execute_prepared(NULL, &res) == DuckDBError);
	// destroying a NULL statement is a safe no-op
	duckdb_destroy_prepare(NULL);
	// fail to query arrow
	duckdb_arrow out_arrow;
	REQUIRE(duckdb_query_arrow(tester.connection, "SELECT * from INVALID_TABLE", &out_arrow) == DuckDBError);
	REQUIRE(duckdb_query_arrow_error(out_arrow) != nullptr);
	duckdb_destroy_arrow(&out_arrow);
	// various edge cases/nullptrs (out_arrow is nulled by the destroy above)
	REQUIRE(duckdb_query_arrow_schema(out_arrow, nullptr) == DuckDBSuccess);
	REQUIRE(duckdb_query_arrow_array(out_arrow, nullptr) == DuckDBSuccess);
	// default duckdb_value_date on invalid date: non-date columns yield the epoch
	result = tester.Query("SELECT 1, true, 'a'");
	REQUIRE_NO_FAIL(*result);
	duckdb_date_struct d = result->Fetch<duckdb_date_struct>(0, 0);
	REQUIRE(d.year == 1970);
	REQUIRE(d.month == 1);
	REQUIRE(d.day == 1);
	d = result->Fetch<duckdb_date_struct>(1, 0);
	REQUIRE(d.year == 1970);
	REQUIRE(d.month == 1);
	REQUIRE(d.day == 1);
	d = result->Fetch<duckdb_date_struct>(2, 0);
	REQUIRE(d.year == 1970);
	REQUIRE(d.month == 1);
	REQUIRE(d.day == 1);
}
// Covers duckdb_config: option enumeration, valid/invalid settings, read-only
// open semantics against a file-backed database, unrecognized options, config
// destruction, and API misuse with nullptr arguments.
TEST_CASE("Test C API config", "[capi]") {
	duckdb_database db = nullptr;
	duckdb_connection con = nullptr;
	duckdb_config config = nullptr;
	duckdb_result result;
	// enumerate config options: every flag has a name and a description
	auto config_count = duckdb_config_count();
	for (size_t i = 0; i < config_count; i++) {
		const char *name = nullptr;
		const char *description = nullptr;
		duckdb_get_config_flag(i, &name, &description);
		REQUIRE(strlen(name) > 0);
		REQUIRE(strlen(description) > 0);
	}
	// test config creation: invalid values are rejected at set time
	REQUIRE(duckdb_create_config(&config) == DuckDBSuccess);
	REQUIRE(duckdb_set_config(config, "access_mode", "invalid_access_mode") == DuckDBError);
	REQUIRE(duckdb_set_config(config, "access_mode", "read_only") == DuckDBSuccess);
	auto dbdir = TestCreatePath("capi_read_only_db");
	// open the database & connection
	// cannot open an in-memory database in read-only mode
	char *error = nullptr;
	REQUIRE(duckdb_open_ext(":memory:", &db, config, &error) == DuckDBError);
	REQUIRE(strlen(error) > 0);
	duckdb_free(error);
	// now without the error-message out-parameter
	REQUIRE(duckdb_open_ext(":memory:", &db, config, nullptr) == DuckDBError);
	// cannot open a database file that does not exist in read-only mode
	REQUIRE(duckdb_open_ext(dbdir.c_str(), &db, config, &error) == DuckDBError);
	REQUIRE(strlen(error) > 0);
	duckdb_free(error);
	// we can create the database and add some tables via the C++ API
	{
		DuckDB cppdb(dbdir);
		Connection cppcon(cppdb);
		cppcon.Query("CREATE TABLE integers(i INTEGER)");
		cppcon.Query("INSERT INTO integers VALUES (42)");
	}
	// now we can connect
	REQUIRE(duckdb_open_ext(dbdir.c_str(), &db, config, &error) == DuckDBSuccess);
	// test unrecognized configuration: the set succeeds, opening fails
	REQUIRE(duckdb_set_config(config, "aaaa_invalidoption", "read_only") == DuckDBSuccess);
	REQUIRE(((DBConfig *)config)->options.unrecognized_options["aaaa_invalidoption"] == "read_only");
	REQUIRE(duckdb_open_ext(dbdir.c_str(), &db, config, &error) == DuckDBError);
	REQUIRE_THAT(error, Catch::Matchers::Contains("The following options were not recognized"));
	duckdb_free(error);
	// we can destroy the config right after duckdb_open
	duckdb_destroy_config(&config);
	// we can spam this (repeated destruction is a safe no-op)
	duckdb_destroy_config(&config);
	duckdb_destroy_config(&config);
	REQUIRE(duckdb_connect(db, nullptr) == DuckDBError);
	REQUIRE(duckdb_connect(nullptr, &con) == DuckDBError);
	REQUIRE(duckdb_connect(db, &con) == DuckDBSuccess);
	// we can query
	REQUIRE(duckdb_query(con, "SELECT 42::INT", &result) == DuckDBSuccess);
	REQUIRE(duckdb_value_int32(&result, 0, 0) == 42);
	duckdb_destroy_result(&result);
	REQUIRE(duckdb_query(con, "SELECT i::INT FROM integers", &result) == DuckDBSuccess);
	REQUIRE(duckdb_value_int32(&result, 0, 0) == 42);
	duckdb_destroy_result(&result);
	// but we cannot create new tables (read-only mode)
	REQUIRE(duckdb_query(con, "CREATE TABLE new_table(i INTEGER)", nullptr) == DuckDBError);
	duckdb_disconnect(&con);
	duckdb_close(&db);
	// api abuse: all entry points must reject nullptr arguments
	REQUIRE(duckdb_create_config(nullptr) == DuckDBError);
	REQUIRE(duckdb_get_config_flag(9999999, nullptr, nullptr) == DuckDBError);
	REQUIRE(duckdb_set_config(nullptr, nullptr, nullptr) == DuckDBError);
	REQUIRE(duckdb_create_config(nullptr) == DuckDBError);
	duckdb_destroy_config(nullptr);
	duckdb_destroy_config(nullptr);
}
// Regression test for issue #2058: destroying the result of a failed query
// must not crash.
TEST_CASE("Issue #2058: Cleanup after execution of invalid SQL statement causes segmentation fault", "[capi]") {
	duckdb_database db;
	duckdb_connection con;
	duckdb_result result;
	duckdb_result result_count;
	REQUIRE(duckdb_open(NULL, &db) != DuckDBError);
	REQUIRE(duckdb_connect(db, &con) != DuckDBError);
	REQUIRE(duckdb_query(con, "CREATE TABLE integers(i INTEGER, j INTEGER);", NULL) != DuckDBError);
	REQUIRE((duckdb_query(con, "SELECT count(*) FROM integers;", &result_count) != DuckDBError));
	duckdb_destroy_result(&result_count);
	// the failed query must still initialize 'result' enough to destroy it safely
	REQUIRE(duckdb_query(con, "non valid SQL", &result) == DuckDBError);
	duckdb_destroy_result(&result); // segmentation failure happens here
	duckdb_disconnect(&con);
	duckdb_close(&db);
}
// Regression test: a negative DECIMAL result must convert correctly to both
// double and string through the value API.
TEST_CASE("Decimal -> Double casting issue", "[capi]") {
	CAPITester tester;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	auto query_result = tester.Query("select -0.5;");
	REQUIRE_NO_FAIL(*query_result);
	REQUIRE(query_result->ColumnType(0) == DUCKDB_TYPE_DECIMAL);
	// the decimal survives conversion to double and to its string form
	REQUIRE(query_result->Fetch<double>(0, 0) == (double)-0.5);
	REQUIRE(query_result->Fetch<string>(0, 0) == "-0.5");
}
// Verifies the 'custom_user_agent' setting: by default the user agent matches
// "duckdb/<version> ... capi"; a configured custom string round-trips through
// current_setting() and is appended to the full user agent.
TEST_CASE("Test custom_user_agent config", "[capi]") {
	{
		duckdb_database db;
		duckdb_connection con;
		duckdb_result result;
		// Default custom_user_agent value
		REQUIRE(duckdb_open_ext(NULL, &db, nullptr, NULL) != DuckDBError);
		REQUIRE(duckdb_connect(db, &con) != DuckDBError);
		duckdb_query(con, "PRAGMA user_agent", &result);
		REQUIRE(duckdb_row_count(&result) == 1);
		// duckdb_value_varchar allocates; the caller must duckdb_free it
		char *user_agent_value = duckdb_value_varchar(&result, 0, 0);
		REQUIRE_THAT(user_agent_value, Catch::Matchers::Matches("duckdb/.*(.*) capi"));
		duckdb_free(user_agent_value);
		duckdb_destroy_result(&result);
		duckdb_disconnect(&con);
		duckdb_close(&db);
	}
	{
		// Custom custom_user_agent value
		duckdb_database db;
		duckdb_connection con;
		duckdb_result result_custom_user_agent;
		duckdb_result result_full_user_agent;
		duckdb_config config;
		REQUIRE(duckdb_create_config(&config) != DuckDBError);
		REQUIRE(duckdb_set_config(config, "custom_user_agent", "CUSTOM_STRING") != DuckDBError);
		REQUIRE(duckdb_open_ext(NULL, &db, config, NULL) != DuckDBError);
		REQUIRE(duckdb_connect(db, &con) != DuckDBError);
		duckdb_query(con, "SELECT current_setting('custom_user_agent')", &result_custom_user_agent);
		duckdb_query(con, "PRAGMA user_agent", &result_full_user_agent);
		REQUIRE(duckdb_row_count(&result_custom_user_agent) == 1);
		REQUIRE(duckdb_row_count(&result_full_user_agent) == 1);
		// the raw setting round-trips unchanged
		char *custom_user_agent_value = duckdb_value_varchar(&result_custom_user_agent, 0, 0);
		REQUIRE(string(custom_user_agent_value) == "CUSTOM_STRING");
		// and is appended to the full user agent string
		char *full_user_agent_value = duckdb_value_varchar(&result_full_user_agent, 0, 0);
		REQUIRE_THAT(full_user_agent_value, Catch::Matchers::Matches("duckdb/.*(.*) capi CUSTOM_STRING"));
		duckdb_destroy_config(&config);
		duckdb_free(custom_user_agent_value);
		duckdb_free(full_user_agent_value);
		duckdb_destroy_result(&result_custom_user_agent);
		duckdb_destroy_result(&result_full_user_agent);
		duckdb_disconnect(&con);
		duckdb_close(&db);
	}
}
// The deprecated value API cannot represent nested types (here a DECIMAL
// list): such columns return null data pointers while supported columns in
// the same result still work.
TEST_CASE("Test unsupported types in the deprecated C API", "[capi]") {
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	string query_1 = R"EOF(
	CREATE TABLE test(
		id BIGINT,
		one DECIMAL(18,3)[]
	);
	)EOF";
	string query_2 = "INSERT INTO test VALUES (410, '[]');";
	string query_3 = "INSERT INTO test VALUES (412, '[]');";
	string query_4 = "SELECT id, one FROM test;";
	REQUIRE_NO_FAIL(tester.Query(query_1));
	REQUIRE_NO_FAIL(tester.Query(query_2));
	REQUIRE_NO_FAIL(tester.Query(query_3));
	// Passes, but does return invalid data for unsupported types.
	auto result = tester.Query(query_4);
	auto &result_c = result->InternalResult();
	// BIGINT column: values come back as strings; caller must duckdb_free them
	auto first_bigint_row = duckdb_value_string(&result_c, 0, 0).data;
	REQUIRE(!string(first_bigint_row).compare("410"));
	duckdb_free(first_bigint_row);
	// LIST column: unsupported in this API, yields a null data pointer
	REQUIRE(duckdb_value_string(&result_c, 1, 0).data == nullptr);
	auto second_bigint_row = duckdb_value_string(&result_c, 0, 1).data;
	REQUIRE(!string(second_bigint_row).compare("412"));
	duckdb_free(second_bigint_row);
	REQUIRE(duckdb_value_string(&result_c, 1, 1).data == nullptr);
}

View File

@@ -0,0 +1,262 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
// duckdb_create_logical_type only handles primitive type ids; composite and
// parameterized ids must go through their dedicated constructors and are
// expected to yield an INVALID logical type here.
TEST_CASE("Test logical type creation with unsupported types", "[capi]") {
	const duckdb_type rejected_ids[] = {
	    DUCKDB_TYPE_INVALID, DUCKDB_TYPE_DECIMAL, DUCKDB_TYPE_ENUM,  DUCKDB_TYPE_LIST,
	    DUCKDB_TYPE_STRUCT,  DUCKDB_TYPE_MAP,     DUCKDB_TYPE_ARRAY, DUCKDB_TYPE_UNION,
	};
	for (auto type_id : rejected_ids) {
		auto created = duckdb_create_logical_type(type_id);
		REQUIRE(duckdb_get_type_id(created) == DUCKDB_TYPE_INVALID);
		duckdb_destroy_logical_type(&created);
	}
}
// Verifies that SQLNULL, ANY and INVALID logical types can be created, and
// that nested type constructors (LIST/ARRAY/MAP/UNION) accept ANY and INVALID
// child types while still reporting their own nested type id.
// Fix: the "MAP with INVALID" branch previously repeated the ANY case
// (duckdb_create_map_type(any_type, any_type)), so the INVALID key/value
// combination was never exercised; it now uses invalid_type.
TEST_CASE("Test INVALID, ANY and SQLNULL", "[capi]") {
	// SQLNULL can be created and destroyed like any other primitive type
	auto sql_null_type = duckdb_create_logical_type(DUCKDB_TYPE_SQLNULL);
	duckdb_destroy_logical_type(&sql_null_type);
	auto any_type = duckdb_create_logical_type(DUCKDB_TYPE_ANY);
	auto invalid_type = duckdb_create_logical_type(DUCKDB_TYPE_INVALID);
	auto result_type_id = duckdb_get_type_id(any_type);
	REQUIRE(result_type_id == DUCKDB_TYPE_ANY);
	result_type_id = duckdb_get_type_id(invalid_type);
	REQUIRE(result_type_id == DUCKDB_TYPE_INVALID);
	// LIST with ANY
	auto list = duckdb_create_list_type(any_type);
	result_type_id = duckdb_get_type_id(list);
	REQUIRE(result_type_id == DUCKDB_TYPE_LIST);
	duckdb_destroy_logical_type(&list);
	// LIST with INVALID
	list = duckdb_create_list_type(invalid_type);
	result_type_id = duckdb_get_type_id(list);
	REQUIRE(result_type_id == DUCKDB_TYPE_LIST);
	duckdb_destroy_logical_type(&list);
	// ARRAY with ANY
	auto array = duckdb_create_array_type(any_type, 2);
	result_type_id = duckdb_get_type_id(array);
	REQUIRE(result_type_id == DUCKDB_TYPE_ARRAY);
	duckdb_destroy_logical_type(&array);
	// ARRAY with INVALID
	array = duckdb_create_array_type(invalid_type, 2);
	result_type_id = duckdb_get_type_id(array);
	REQUIRE(result_type_id == DUCKDB_TYPE_ARRAY);
	duckdb_destroy_logical_type(&array);
	// MAP with ANY
	auto map = duckdb_create_map_type(any_type, any_type);
	result_type_id = duckdb_get_type_id(map);
	REQUIRE(result_type_id == DUCKDB_TYPE_MAP);
	duckdb_destroy_logical_type(&map);
	// MAP with INVALID
	map = duckdb_create_map_type(invalid_type, invalid_type);
	result_type_id = duckdb_get_type_id(map);
	REQUIRE(result_type_id == DUCKDB_TYPE_MAP);
	duckdb_destroy_logical_type(&map);
	// UNION with ANY and INVALID
	std::vector<const char *> member_names {"any", "invalid"};
	duckdb::vector<duckdb_logical_type> types = {any_type, invalid_type};
	auto union_type = duckdb_create_union_type(types.data(), member_names.data(), member_names.size());
	result_type_id = duckdb_get_type_id(union_type);
	REQUIRE(result_type_id == DUCKDB_TYPE_UNION);
	duckdb_destroy_logical_type(&union_type);
	// Clean-up.
	duckdb_destroy_logical_type(&any_type);
	duckdb_destroy_logical_type(&invalid_type);
}
TEST_CASE("Test LIST and ARRAY with INVALID and ANY", "[capi]") {
	auto int_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	auto any_type = duckdb_create_logical_type(DUCKDB_TYPE_ANY);
	auto invalid_type = duckdb_create_logical_type(DUCKDB_TYPE_INVALID);

	auto value = duckdb_create_int64(42);
	duckdb::vector<duckdb_value> list_values {value, value};

	// Value creation works with a concrete child type.
	auto int_list = duckdb_create_list_value(int_type, list_values.data(), list_values.size());
	auto result = duckdb_get_varchar(int_list);
	REQUIRE(string(result).compare("[42, 42]") == 0);
	duckdb_free(result);
	duckdb_destroy_value(&int_list);

	auto int_array = duckdb_create_array_value(int_type, list_values.data(), list_values.size());
	result = duckdb_get_varchar(int_array);
	REQUIRE(string(result).compare("[42, 42]") == 0);
	duckdb_free(result);
	duckdb_destroy_value(&int_array);

	// Value creation must fail (return nullptr) for incomplete child types.
	// FIX: these previously passed any_type, so the INVALID case was never tested
	// and invalid_type was unused.
	auto invalid_list = duckdb_create_list_value(invalid_type, list_values.data(), list_values.size());
	REQUIRE(invalid_list == nullptr);
	auto invalid_array = duckdb_create_array_value(invalid_type, list_values.data(), list_values.size());
	REQUIRE(invalid_array == nullptr);

	auto any_list = duckdb_create_list_value(any_type, list_values.data(), list_values.size());
	REQUIRE(any_list == nullptr);
	auto any_array = duckdb_create_array_value(any_type, list_values.data(), list_values.size());
	REQUIRE(any_array == nullptr);

	// Clean-up.
	duckdb_destroy_value(&value);
	duckdb_destroy_logical_type(&int_type);
	duckdb_destroy_logical_type(&any_type);
	duckdb_destroy_logical_type(&invalid_type);
}
TEST_CASE("Test STRUCT with INVALID and ANY", "[capi]") {
	// STRUCT types containing ANY/INVALID members can be created,
	// but struct values with such a type cannot.
	auto bigint_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	auto any_type = duckdb_create_logical_type(DUCKDB_TYPE_ANY);
	auto invalid_type = duckdb_create_logical_type(DUCKDB_TYPE_INVALID);

	auto member_value = duckdb_create_int64(42);
	duckdb::vector<duckdb_value> member_values {member_value, member_value};
	std::vector<const char *> field_names {"int", "other"};

	// ANY member: type creation succeeds, value creation must fail.
	duckdb::vector<duckdb_logical_type> field_types = {bigint_type, any_type};
	auto struct_type = duckdb_create_struct_type(field_types.data(), field_names.data(), field_names.size());
	REQUIRE(struct_type != nullptr);
	auto struct_value = duckdb_create_struct_value(struct_type, member_values.data());
	REQUIRE(struct_value == nullptr);
	duckdb_destroy_logical_type(&struct_type);

	// INVALID member: same expectations.
	field_types = {bigint_type, invalid_type};
	struct_type = duckdb_create_struct_type(field_types.data(), field_names.data(), field_names.size());
	REQUIRE(struct_type != nullptr);
	struct_value = duckdb_create_struct_value(struct_type, member_values.data());
	REQUIRE(struct_value == nullptr);
	duckdb_destroy_logical_type(&struct_type);

	// Clean-up.
	duckdb_destroy_value(&member_value);
	duckdb_destroy_logical_type(&bigint_type);
	duckdb_destroy_logical_type(&any_type);
	duckdb_destroy_logical_type(&invalid_type);
}
TEST_CASE("Test data chunk creation with INVALID and ANY types", "[capi]") {
	// Data chunks cannot be materialized for incomplete types:
	// ANY, INVALID, or any nested type containing them.
	auto any_type = duckdb_create_logical_type(DUCKDB_TYPE_ANY);
	auto invalid_type = duckdb_create_logical_type(DUCKDB_TYPE_INVALID);
	auto list_type = duckdb_create_list_type(any_type);

	std::vector<duckdb_logical_type> test_types = {any_type, invalid_type, list_type};
	for (auto &test_type : test_types) {
		duckdb_logical_type column_types[1] = {test_type};
		auto data_chunk = duckdb_create_data_chunk(column_types, 1);
		REQUIRE(data_chunk == nullptr);
	}

	// Clean-up.
	duckdb_destroy_logical_type(&list_type);
	duckdb_destroy_logical_type(&any_type);
	duckdb_destroy_logical_type(&invalid_type);
}
// No-op scalar function body; only used to register functions whose
// signatures (not behavior) are under test.
void DummyScalar(duckdb_function_info, duckdb_data_chunk, duckdb_vector) {
}
// Builds a scalar function named "hello" wired to the no-op DummyScalar body.
// The caller owns the returned handle.
static duckdb_scalar_function DummyScalarFunction() {
	auto scalar_function = duckdb_create_scalar_function();
	duckdb_scalar_function_set_name(scalar_function, "hello");
	duckdb_scalar_function_set_function(scalar_function, DummyScalar);
	return scalar_function;
}
// Registering a scalar function with an incomplete signature must fail;
// the function handle is destroyed afterwards either way.
static void TestScalarFunction(duckdb_scalar_function function, duckdb_connection connection) {
	REQUIRE(duckdb_register_scalar_function(connection, function) == DuckDBError);
	duckdb_destroy_scalar_function(&function);
}
TEST_CASE("Test scalar functions with INVALID and ANY types", "[capi]") {
	// Scalar function registration must reject INVALID parameters and
	// INVALID/ANY return types.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));

	auto bigint_type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	auto any_type = duckdb_create_logical_type(DUCKDB_TYPE_ANY);
	auto invalid_type = duckdb_create_logical_type(DUCKDB_TYPE_INVALID);

	// INVALID as a parameter type.
	auto scalar_function = DummyScalarFunction();
	duckdb_scalar_function_add_parameter(scalar_function, invalid_type);
	duckdb_scalar_function_set_return_type(scalar_function, bigint_type);
	TestScalarFunction(scalar_function, tester.connection);

	// INVALID as the return type.
	scalar_function = DummyScalarFunction();
	duckdb_scalar_function_set_return_type(scalar_function, invalid_type);
	TestScalarFunction(scalar_function, tester.connection);

	// ANY as the return type.
	scalar_function = DummyScalarFunction();
	duckdb_scalar_function_set_return_type(scalar_function, any_type);
	TestScalarFunction(scalar_function, tester.connection);

	// Clean-up.
	duckdb_destroy_logical_type(&bigint_type);
	duckdb_destroy_logical_type(&any_type);
	duckdb_destroy_logical_type(&invalid_type);
}
// No-op bind callback for the dummy table function below.
void my_dummy_bind(duckdb_bind_info) {
}
// No-op init callback for the dummy table function below.
void my_dummy_init(duckdb_init_info) {
}
// No-op execution callback for the dummy table function below.
void my_dummy_function(duckdb_function_info, duckdb_data_chunk) {
}
// Builds a table function named "hello" wired to no-op bind/init/execute
// callbacks. The caller owns the returned handle.
static duckdb_table_function DummyTableFunction() {
	auto table_function = duckdb_create_table_function();
	duckdb_table_function_set_name(table_function, "hello");
	duckdb_table_function_set_bind(table_function, my_dummy_bind);
	duckdb_table_function_set_init(table_function, my_dummy_init);
	duckdb_table_function_set_function(table_function, my_dummy_function);
	return table_function;
}
// Registering a table function with an incomplete signature must fail;
// the function handle is destroyed afterwards either way.
static void TestTableFunction(duckdb_table_function function, duckdb_connection connection) {
	REQUIRE(duckdb_register_table_function(connection, function) == DuckDBError);
	duckdb_destroy_table_function(&function);
}
TEST_CASE("Test table functions with INVALID and ANY types", "[capi]") {
	// Table function registration must reject INVALID positional and named parameters.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));

	auto invalid_type = duckdb_create_logical_type(DUCKDB_TYPE_INVALID);

	// INVALID as a positional parameter.
	auto table_function = DummyTableFunction();
	duckdb_table_function_add_parameter(table_function, invalid_type);
	TestTableFunction(table_function, tester.connection);

	// INVALID as a named parameter.
	table_function = DummyTableFunction();
	duckdb_table_function_add_named_parameter(table_function, "my_parameter", invalid_type);
	TestTableFunction(table_function, tester.connection);

	duckdb_destroy_logical_type(&invalid_type);
}

View File

@@ -0,0 +1,140 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
TEST_CASE("Test casting columns in AppendDataChunk in C API", "[capi]") {
	// The appender should cast the SMALLINT/BOOLEAN source columns of the
	// chunk to each table's target column types (BIGINT plus VARCHAR/BOOLEAN).
	duckdb::vector<string> create_statements;
	create_statements.push_back("CREATE TABLE test(i BIGINT, j VARCHAR);");
	create_statements.push_back("CREATE TABLE test(i BIGINT, j BOOLEAN);");

	for (auto &create_statement : create_statements) {
		// Each iteration runs against a fresh in-memory database.
		CAPITester tester;
		REQUIRE(tester.OpenDatabase(nullptr));
		REQUIRE(duckdb_vector_size() == STANDARD_VECTOR_SIZE);
		tester.Query(create_statement);

		duckdb_logical_type chunk_types[2];
		chunk_types[0] = duckdb_create_logical_type(DUCKDB_TYPE_SMALLINT);
		chunk_types[1] = duckdb_create_logical_type(DUCKDB_TYPE_BOOLEAN);
		auto chunk = duckdb_create_data_chunk(chunk_types, 2);
		REQUIRE(chunk);

		// Fill two rows of source data.
		auto smallint_vector = duckdb_data_chunk_get_vector(chunk, 0);
		auto boolean_vector = duckdb_data_chunk_get_vector(chunk, 1);
		auto smallint_ptr = reinterpret_cast<int16_t *>(duckdb_vector_get_data(smallint_vector));
		smallint_ptr[0] = 15;
		smallint_ptr[1] = -15;
		auto boolean_ptr = reinterpret_cast<bool *>(duckdb_vector_get_data(boolean_vector));
		boolean_ptr[0] = false;
		boolean_ptr[1] = true;
		duckdb_data_chunk_set_size(chunk, 2);

		duckdb_appender appender;
		REQUIRE(duckdb_appender_create(tester.connection, nullptr, "test", &appender) == DuckDBSuccess);
		REQUIRE(duckdb_append_data_chunk(appender, chunk) == DuckDBSuccess);
		duckdb_appender_close(appender);

		// Verify the casted values round-tripped through the table.
		auto result = tester.Query("SELECT i, j FROM test;");
		REQUIRE(result->Fetch<int64_t>(0, 0) == 15);
		REQUIRE(result->Fetch<int64_t>(0, 1) == -15);
		REQUIRE(result->Fetch<string>(1, 0) == "false");
		REQUIRE(result->Fetch<string>(1, 1) == "true");

		duckdb_appender_destroy(&appender);
		duckdb_destroy_data_chunk(&chunk);
		duckdb_destroy_logical_type(&chunk_types[0]);
		duckdb_destroy_logical_type(&chunk_types[1]);
	}
}
TEST_CASE("Test casting error in AppendDataChunk in C API", "[capi]") {
	// A BOOLEAN source column cannot be cast to a BOOLEAN[] target column:
	// the append must fail and report a descriptive error message.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(duckdb_vector_size() == STANDARD_VECTOR_SIZE);
	tester.Query("CREATE TABLE test(i BIGINT, j BOOLEAN[]);");

	duckdb_logical_type chunk_types[2];
	chunk_types[0] = duckdb_create_logical_type(DUCKDB_TYPE_SMALLINT);
	chunk_types[1] = duckdb_create_logical_type(DUCKDB_TYPE_BOOLEAN);
	auto chunk = duckdb_create_data_chunk(chunk_types, 2);
	REQUIRE(chunk);

	// Fill two rows of source data.
	auto smallint_ptr = reinterpret_cast<int16_t *>(duckdb_vector_get_data(duckdb_data_chunk_get_vector(chunk, 0)));
	smallint_ptr[0] = 15;
	smallint_ptr[1] = -15;
	auto boolean_ptr = reinterpret_cast<bool *>(duckdb_vector_get_data(duckdb_data_chunk_get_vector(chunk, 1)));
	boolean_ptr[0] = false;
	boolean_ptr[1] = true;
	duckdb_data_chunk_set_size(chunk, 2);

	duckdb_appender appender;
	REQUIRE(duckdb_appender_create(tester.connection, nullptr, "test", &appender) == DuckDBSuccess);
	REQUIRE(duckdb_append_data_chunk(appender, chunk) == DuckDBError);
	REQUIRE(string(duckdb_appender_error(appender)) ==
	        "type mismatch in AppendDataChunk, expected BOOLEAN[], got BOOLEAN for column 1");

	duckdb_appender_close(appender);
	duckdb_appender_destroy(&appender);
	duckdb_destroy_data_chunk(&chunk);
	duckdb_destroy_logical_type(&chunk_types[0]);
	duckdb_destroy_logical_type(&chunk_types[1]);
}
TEST_CASE("Test casting timestamps in AppendDataChunk in C API", "[capi]") {
	// VARCHAR source columns should be cast to the TIMESTAMP and DATE target columns.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(duckdb_vector_size() == STANDARD_VECTOR_SIZE);
	tester.Query("CREATE TABLE test(i TIMESTAMP, j DATE);");

	duckdb_logical_type chunk_types[2];
	chunk_types[0] = duckdb_create_logical_type(DUCKDB_TYPE_VARCHAR);
	chunk_types[1] = duckdb_create_logical_type(DUCKDB_TYPE_VARCHAR);
	auto chunk = duckdb_create_data_chunk(chunk_types, 2);
	REQUIRE(chunk);

	// One row of string-typed source data.
	duckdb_vector_assign_string_element(duckdb_data_chunk_get_vector(chunk, 0), 0, "2017-07-23 13:10:11");
	duckdb_vector_assign_string_element(duckdb_data_chunk_get_vector(chunk, 1), 0, "1993-08-14");
	duckdb_data_chunk_set_size(chunk, 1);

	duckdb_appender appender;
	REQUIRE(duckdb_appender_create(tester.connection, nullptr, "test", &appender) == DuckDBSuccess);
	REQUIRE(duckdb_append_data_chunk(appender, chunk) == DuckDBSuccess);
	duckdb_appender_close(appender);

	// Cast back to VARCHAR and verify the values survived the roundtrip.
	auto result = tester.Query("SELECT i::VARCHAR, j::VARCHAR FROM test;");
	REQUIRE(result->Fetch<string>(0, 0) == "2017-07-23 13:10:11");
	REQUIRE(result->Fetch<string>(1, 0) == "1993-08-14");

	duckdb_appender_destroy(&appender);
	duckdb_destroy_data_chunk(&chunk);
	duckdb_destroy_logical_type(&chunk_types[0]);
	duckdb_destroy_logical_type(&chunk_types[1]);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,463 @@
#include "capi_tester.hpp"
#include "duckdb/common/arrow/arrow_appender.hpp"
#include "duckdb/common/arrow/arrow_converter.hpp"
using namespace duckdb;
using namespace std;
// Exercises the (deprecated) duckdb_query_arrow result API end-to-end:
// rows-changed reporting, schema/array retrieval, streaming over multiple
// chunks, Arrow output of prepared statements, and registering external
// Arrow data as a view via duckdb_arrow_array_scan.
TEST_CASE("Test arrow in C API", "[capi][arrow]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	duckdb_prepared_statement stmt = nullptr;
	duckdb_arrow arrow_result = nullptr;

	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));

	SECTION("test rows changed") {
		REQUIRE_NO_FAIL(tester.Query("CREATE TABLE test(a INTEGER);"));
		auto state = duckdb_query_arrow(tester.connection, "INSERT INTO test VALUES (1), (2);", &arrow_result);
		REQUIRE(state == DuckDBSuccess);
		// an INSERT reports the number of affected rows
		REQUIRE(duckdb_arrow_rows_changed(arrow_result) == 2);
		duckdb_destroy_arrow(&arrow_result);
		REQUIRE_NO_FAIL(tester.Query("DROP TABLE test;"));
	}

	SECTION("test query arrow") {
		auto state = duckdb_query_arrow(tester.connection, "SELECT 42 AS VALUE, [1,2,3,4,5] AS LST;", &arrow_result);
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(duckdb_arrow_row_count(arrow_result) == 1);
		REQUIRE(duckdb_arrow_column_count(arrow_result) == 2);
		// a SELECT changes no rows
		REQUIRE(duckdb_arrow_rows_changed(arrow_result) == 0);

		// query the arrow schema
		ArrowSchema arrow_schema;
		arrow_schema.Init();
		auto arrow_schema_ptr = &arrow_schema;
		state = duckdb_query_arrow_schema(arrow_result, reinterpret_cast<duckdb_arrow_schema *>(&arrow_schema_ptr));
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(string(arrow_schema.name) == "duckdb_query_result");
		// per the Arrow C data interface, release via the schema's own callback
		if (arrow_schema.release) {
			arrow_schema.release(arrow_schema_ptr);
		}

		// query array data
		ArrowArray arrow_array;
		arrow_array.Init();
		auto arrow_array_ptr = &arrow_array;
		state = duckdb_query_arrow_array(arrow_result, reinterpret_cast<duckdb_arrow_array *>(&arrow_array_ptr));
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(arrow_array.length == 1);
		REQUIRE(arrow_array.release != nullptr);
		arrow_array.release(arrow_array_ptr);

		// the single-row result is exhausted: a second fetch succeeds but leaves
		// the output null
		duckdb_arrow_array null_array = nullptr;
		state = duckdb_query_arrow_array(arrow_result, &null_array);
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(null_array == nullptr);

		// destroy the arrow result
		duckdb_destroy_arrow(&arrow_result);
	}

	SECTION("test multiple chunks") {
		// create a table that consists of multiple chunks
		REQUIRE_NO_FAIL(tester.Query("CREATE TABLE test(a INTEGER);"));
		REQUIRE_NO_FAIL(
		    tester.Query("INSERT INTO test SELECT i FROM (VALUES (1), (2), (3), (4), (5)) t(i), range(500);"));
		auto state = duckdb_query_arrow(tester.connection, "SELECT CAST(a AS INTEGER) AS a FROM test ORDER BY a;",
		                                &arrow_result);
		REQUIRE(state == DuckDBSuccess);

		// query the arrow schema
		ArrowSchema arrow_schema;
		arrow_schema.Init();
		auto arrow_schema_ptr = &arrow_schema;
		state = duckdb_query_arrow_schema(arrow_result, reinterpret_cast<duckdb_arrow_schema *>(&arrow_schema_ptr));
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(arrow_schema.release != nullptr);
		arrow_schema.release(arrow_schema_ptr);

		// stream arrays until exhaustion: 5 values x 500 rows = 2500 in total
		int total_count = 0;
		while (true) {
			// query array data
			ArrowArray arrow_array;
			arrow_array.Init();
			auto arrow_array_ptr = &arrow_array;
			state = duckdb_query_arrow_array(arrow_result, reinterpret_cast<duckdb_arrow_array *>(&arrow_array_ptr));
			REQUIRE(state == DuckDBSuccess);
			if (arrow_array.length == 0) {
				// nothing to release
				REQUIRE(total_count == 2500);
				break;
			}
			REQUIRE(arrow_array.length > 0);
			total_count += arrow_array.length;
			REQUIRE(arrow_array.release != nullptr);
			arrow_array.release(arrow_array_ptr);
		}
		// destroy the arrow result
		duckdb_destroy_arrow(&arrow_result);
		REQUIRE_NO_FAIL(tester.Query("DROP TABLE test;"));
	}

	SECTION("test prepare query arrow") {
		auto state = duckdb_prepare(tester.connection, "SELECT CAST($1 AS BIGINT)", &stmt);
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(stmt != nullptr);
		REQUIRE(duckdb_bind_int64(stmt, 1, 42) == DuckDBSuccess);

		// prepare and execute the arrow schema; the schema is available from
		// the prepared statement before execution
		ArrowSchema prepared_schema;
		prepared_schema.Init();
		auto prepared_schema_ptr = &prepared_schema;
		state = duckdb_prepared_arrow_schema(stmt, reinterpret_cast<duckdb_arrow_schema *>(&prepared_schema_ptr));
		REQUIRE(state == DuckDBSuccess);
		// "+s" is the Arrow format string for a struct (the top-level result schema)
		REQUIRE(string(prepared_schema.format) == "+s");
		// executing without an output result pointer must fail
		REQUIRE(duckdb_execute_prepared_arrow(stmt, nullptr) == DuckDBError);
		REQUIRE(duckdb_execute_prepared_arrow(stmt, &arrow_result) == DuckDBSuccess);
		REQUIRE(prepared_schema.release != nullptr);
		prepared_schema.release(prepared_schema_ptr);

		// query the arrow schema
		ArrowSchema arrow_schema;
		arrow_schema.Init();
		auto arrow_schema_ptr = &arrow_schema;
		state = duckdb_query_arrow_schema(arrow_result, reinterpret_cast<duckdb_arrow_schema *>(&arrow_schema_ptr));
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(string(arrow_schema.format) == "+s");
		REQUIRE(arrow_schema.release != nullptr);
		arrow_schema.release(arrow_schema_ptr);

		ArrowArray arrow_array;
		arrow_array.Init();
		auto arrow_array_ptr = &arrow_array;
		state = duckdb_query_arrow_array(arrow_result, reinterpret_cast<duckdb_arrow_array *>(&arrow_array_ptr));
		REQUIRE(state == DuckDBSuccess);
		REQUIRE(arrow_array.length == 1);
		REQUIRE(arrow_array.release);
		arrow_array.release(arrow_array_ptr);

		duckdb_destroy_arrow(&arrow_result);
		duckdb_destroy_prepare(&stmt);
	}

	SECTION("test scan") {
		const auto logical_types = duckdb::vector<LogicalType> {LogicalType(LogicalTypeId::INTEGER)};
		const auto column_names = duckdb::vector<string> {"value"};

		// arrow schema, release after use
		ArrowSchema arrow_schema;
		arrow_schema.Init();
		auto arrow_schema_ptr = &arrow_schema;
		ClientProperties options = (reinterpret_cast<Connection *>(tester.connection)->context->GetClientProperties());
		duckdb::ArrowConverter::ToArrowSchema(arrow_schema_ptr, logical_types, column_names, options);

		ArrowArray arrow_array;
		arrow_array.Init();
		auto arrow_array_ptr = &arrow_array;

		SECTION("empty array") {
			// Create an empty view with a `value` column.
			string view_name = "foo_empty_table";
			// arrow array scan, destroy out_stream after use
			ArrowArrayStream *arrow_array_stream;
			auto out_stream = reinterpret_cast<duckdb_arrow_stream *>(&arrow_array_stream);
			auto state = duckdb_arrow_array_scan(tester.connection, view_name.c_str(),
			                                     reinterpret_cast<duckdb_arrow_schema>(arrow_schema_ptr),
			                                     reinterpret_cast<duckdb_arrow_array>(arrow_array_ptr), out_stream);
			REQUIRE(state == DuckDBSuccess);

			// get the created view from the DB
			auto get_query = "SELECT * FROM " + view_name + ";";
			state = duckdb_prepare(tester.connection, get_query.c_str(), &stmt);
			REQUIRE(state == DuckDBSuccess);
			REQUIRE(stmt != nullptr);
			state = duckdb_execute_prepared_arrow(stmt, &arrow_result);
			REQUIRE(state == DuckDBSuccess);

			// recover the arrow array from the arrow result
			ArrowArray out_array;
			out_array.Init();
			auto out_array_ptr = &out_array;
			state = duckdb_query_arrow_array(arrow_result, reinterpret_cast<duckdb_arrow_array *>(&out_array_ptr));
			REQUIRE(state == DuckDBSuccess);
			// empty result: nothing produced, so no release callback is set
			REQUIRE(out_array.length == 0);
			REQUIRE(out_array.release == nullptr);
			duckdb_destroy_arrow_stream(out_stream);
			// the scan consumed (released) the input array
			REQUIRE(arrow_array.release == nullptr);
		}

		SECTION("big array") {
			// Create a view with a `value` column containing 4096 values.
			int num_buffers = 2, size = STANDARD_VECTOR_SIZE * num_buffers;
			unordered_map<idx_t, const duckdb::shared_ptr<ArrowTypeExtensionData>> extension_type_cast;
			ArrowAppender appender(logical_types, size, options, extension_type_cast);
			Allocator allocator;
			auto data_chunks = std::vector<DataChunk>(num_buffers);
			for (int i = 0; i < num_buffers; i++) {
				auto data_chunk = &data_chunks[i];
				data_chunk->Initialize(allocator, logical_types, STANDARD_VECTOR_SIZE);
				data_chunk->SetCardinality(STANDARD_VECTOR_SIZE);
				for (idx_t row = 0; row < STANDARD_VECTOR_SIZE; row++) {
					data_chunk->SetValue(0, row, duckdb::Value(i));
				}
				appender.Append(*data_chunk, 0, data_chunk->size(), data_chunk->size());
			}
			*arrow_array_ptr = appender.Finalize();

			// Create the view.
			string view_name = "foo_table";
			// arrow array scan, destroy out_stream after use
			ArrowArrayStream *arrow_array_stream;
			auto out_stream = reinterpret_cast<duckdb_arrow_stream *>(&arrow_array_stream);
			auto state = duckdb_arrow_array_scan(tester.connection, view_name.c_str(),
			                                     reinterpret_cast<duckdb_arrow_schema>(arrow_schema_ptr),
			                                     reinterpret_cast<duckdb_arrow_array>(arrow_array_ptr), out_stream);
			REQUIRE(state == DuckDBSuccess);

			// Get the created view from the DB.
			auto get_query = "SELECT * FROM " + view_name + ";";
			state = duckdb_prepare(tester.connection, get_query.c_str(), &stmt);
			REQUIRE(state == DuckDBSuccess);
			REQUIRE(stmt != nullptr);
			state = duckdb_execute_prepared_arrow(stmt, &arrow_result);
			REQUIRE(state == DuckDBSuccess);

			// Recover the arrow array from the arrow result.
			// Two full vectors were appended: expect two arrays of
			// STANDARD_VECTOR_SIZE followed by an empty (exhausted) fetch.
			ArrowArray out_array;
			out_array.Init();
			auto out_array_ptr = &out_array;
			state = duckdb_query_arrow_array(arrow_result, reinterpret_cast<duckdb_arrow_array *>(&out_array_ptr));
			REQUIRE(state == DuckDBSuccess);
			REQUIRE(out_array.length == STANDARD_VECTOR_SIZE);
			REQUIRE(out_array.release != nullptr);
			out_array.release(out_array_ptr);

			out_array.Init();
			state = duckdb_query_arrow_array(arrow_result, reinterpret_cast<duckdb_arrow_array *>(&out_array_ptr));
			REQUIRE(state == DuckDBSuccess);
			REQUIRE(out_array.length == STANDARD_VECTOR_SIZE);
			REQUIRE(out_array.release != nullptr);
			out_array.release(out_array_ptr);

			out_array.Init();
			state = duckdb_query_arrow_array(arrow_result, reinterpret_cast<duckdb_arrow_array *>(&out_array_ptr));
			REQUIRE(state == DuckDBSuccess);
			REQUIRE(out_array.length == 0);
			REQUIRE(out_array.release == nullptr);
			duckdb_destroy_arrow_stream(out_stream);
			// NOTE(review): here the input array is expected to still own its
			// release callback, unlike the empty-array case above.
			REQUIRE(arrow_array.release != nullptr);
		}

		SECTION("null schema") {
			// Creating a view with a null schema should fail gracefully.
			string view_name = "foo_empty_table_null_schema";
			// arrow array scan, destroy out_stream after use
			ArrowArrayStream *arrow_array_stream;
			auto out_stream = reinterpret_cast<duckdb_arrow_stream *>(&arrow_array_stream);
			auto state = duckdb_arrow_array_scan(tester.connection, view_name.c_str(), nullptr,
			                                     reinterpret_cast<duckdb_arrow_array>(arrow_array_ptr), out_stream);
			REQUIRE(state == DuckDBError);
			duckdb_destroy_arrow_stream(out_stream);
		}

		// release whatever schema/array this scope still owns
		if (arrow_schema.release) {
			arrow_schema.release(arrow_schema_ptr);
		}
		if (arrow_array.release) {
			arrow_array.release(arrow_array_ptr);
		}
		duckdb_destroy_arrow(&arrow_result);
		duckdb_destroy_prepare(&stmt);
	}
	// FIXME: needs test for scanning a fixed size list
	// this likely requires nanoarrow to create the array to scan
}
// Tests the modern C-API Arrow conversion entry points
// (duckdb_to_arrow_schema, duckdb_data_chunk_to_arrow, duckdb_schema_from_arrow,
// duckdb_data_chunk_from_arrow): a full 10000-row roundtrip plus null-pointer
// argument handling. A null duckdb_error_data return signals success.
TEST_CASE("Test C-API Arrow conversion functions", "[capi][arrow]") {
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));

	SECTION("roundtrip: duckdb table -> arrow -> duckdb chunk, validate correctness") {
		// 1. Create and populate table
		REQUIRE_NO_FAIL(tester.Query("CREATE TABLE big_table(i INTEGER);"));
		REQUIRE_NO_FAIL(tester.Query("INSERT INTO big_table SELECT i FROM range(10000) tbl(i);"));

		// 2. Query the table and fetch all results as data chunks
		duckdb_result result;
		REQUIRE(duckdb_query(tester.connection, "SELECT i FROM big_table ORDER BY i", &result) == DuckDBSuccess);
		idx_t chunk_count = duckdb_result_chunk_count(result);
		idx_t total_rows = 0;
		std::vector<ArrowArray> arrow_arrays;
		std::vector<duckdb_data_chunk> duckdb_chunks;
		std::vector<int32_t> all_duckdb_values;
		std::vector<int32_t> all_arrow_values;

		// 3. For each chunk, convert to Arrow Array and collect values
		for (idx_t chunk_idx = 0; chunk_idx < chunk_count; chunk_idx++) {
			duckdb_data_chunk chunk = duckdb_result_get_chunk(result, chunk_idx);
			duckdb_chunks.push_back(chunk); // for later roundtrip
			idx_t chunk_size = duckdb_data_chunk_get_size(chunk);
			total_rows += chunk_size;
			auto vec = duckdb_data_chunk_get_vector(chunk, 0);
			auto data = static_cast<int32_t *>(duckdb_vector_get_data(vec));
			for (idx_t i = 0; i < chunk_size; i++) {
				all_duckdb_values.push_back(data[i]);
			}
			ArrowArray duckdb_arrow_array;
			duckdb_arrow_options arrow_options;
			duckdb_connection_get_arrow_options(tester.connection, &arrow_options);
			duckdb_error_data err = duckdb_data_chunk_to_arrow(arrow_options, chunk, &duckdb_arrow_array);
			duckdb_destroy_arrow_options(&arrow_options);
			// null error object == success
			REQUIRE(err == nullptr);
			arrow_arrays.push_back(duckdb_arrow_array);
		}
		REQUIRE(total_rows == 10000);
		REQUIRE(all_duckdb_values.size() == 10000);

		// 4. Prepare Arrow schema for roundtrip
		duckdb_logical_type type = duckdb_create_logical_type(DUCKDB_TYPE_INTEGER);
		duckdb_logical_type types[1] = {type};
		const char *names[1] = {strdup("i")};
		ArrowSchemaWrapper arrow_schema_wrapper;
		duckdb_arrow_options arrow_options;
		duckdb_connection_get_arrow_options(tester.connection, &arrow_options);
		duckdb_error_data err =
		    duckdb_to_arrow_schema(arrow_options, types, names, 1, &arrow_schema_wrapper.arrow_schema);
		duckdb_destroy_arrow_options(&arrow_options);
		REQUIRE(err == nullptr);
		duckdb_arrow_converted_schema converted_schema = nullptr;
		// Convert schema (simulate real use)
		err = duckdb_schema_from_arrow(tester.connection, &arrow_schema_wrapper.arrow_schema, &converted_schema);
		REQUIRE(err == nullptr);

		// 5. For each Arrow array, convert back to DuckDB chunk and validate
		// `offset` tracks the global row position across chunks.
		for (size_t idx = 0, offset = 0; idx < arrow_arrays.size(); idx++) {
			ArrowArray *duckdb_arrow_array = &arrow_arrays[idx];
			// Prepare output chunk
			duckdb_data_chunk out_chunk;
			// Convert Arrow array to DuckDB chunk
			err = duckdb_data_chunk_from_arrow(tester.connection, duckdb_arrow_array, converted_schema, &out_chunk);
			REQUIRE(err == nullptr);
			idx_t chunk_size = duckdb_data_chunk_get_size(out_chunk);
			auto vec = duckdb_data_chunk_get_vector(out_chunk, 0);
			auto data = static_cast<int32_t *>(duckdb_vector_get_data(vec));
			for (idx_t i = 0; i < chunk_size; i++, offset++) {
				REQUIRE(data[i] == all_duckdb_values[offset]);
				all_arrow_values.push_back(data[i]);
			}
			duckdb_destroy_data_chunk(&out_chunk);
		}
		REQUIRE(all_arrow_values.size() == 10000);
		REQUIRE(all_arrow_values == all_duckdb_values);

		// 6. Cleanup
		free((void *)names[0]);
		duckdb_destroy_arrow_converted_schema(&converted_schema);
		for (auto arrow_array : arrow_arrays) {
			if (arrow_array.release) {
				arrow_array.release(&arrow_array);
			}
		}
		for (auto chunk : duckdb_chunks) {
			duckdb_destroy_data_chunk(&chunk);
		}
		duckdb_destroy_logical_type(&type);
		duckdb_destroy_result(&result);
	}

	// NOTE(review): "Tess" is a typo for "Test" in the section name; left as-is
	// here because it is a runtime string used for test filtering.
	SECTION("C-API Arrow Tess Null pointer inputs") {
		// Each conversion entry point must return an error object (not crash)
		// when handed null pointers.
		duckdb_error_data err;
		duckdb_logical_type type = duckdb_create_logical_type(DUCKDB_TYPE_INTEGER);
		const char *names[1] = {strdup("i")};

		// Test duckdb_to_arrow_schema
		ArrowSchema duckdb_arrow_schema;
		err = duckdb_to_arrow_schema(nullptr, &type, names, 1, &duckdb_arrow_schema);
		duckdb_arrow_options arrow_options;
		duckdb_connection_get_arrow_options(tester.connection, &arrow_options);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		err = duckdb_to_arrow_schema(arrow_options, nullptr, names, 1, &duckdb_arrow_schema);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		err = duckdb_to_arrow_schema(arrow_options, &type, nullptr, 1, &duckdb_arrow_schema);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		// Zero columns
		err = duckdb_to_arrow_schema(arrow_options, &type, names, 0, &duckdb_arrow_schema);
		REQUIRE(err == nullptr); // zero columns is allowed, but produces an empty schema
		if (duckdb_arrow_schema.release) {
			duckdb_arrow_schema.release(&duckdb_arrow_schema);
		}
		duckdb_destroy_logical_type(&type);

		// Test duckdb_data_chunk_to_arrow
		ArrowArray duckdb_arrow_array;
		// err is null here; destroying a null error object is a no-op
		duckdb_destroy_error_data(&err);
		err = duckdb_data_chunk_to_arrow(arrow_options, nullptr, &duckdb_arrow_array);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		err = duckdb_data_chunk_to_arrow(nullptr, nullptr, &duckdb_arrow_array);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);

		// Test duckdb_schema_from_arrow
		// NOTE(review): `schema` is never initialized; these calls appear to
		// fail on the null connection/output before reading it — confirm.
		ArrowSchema schema;
		duckdb_arrow_converted_schema converted_schema = nullptr;
		err = duckdb_schema_from_arrow(nullptr, &schema, &converted_schema);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		err = duckdb_schema_from_arrow(tester.connection, &schema, nullptr);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);

		// Test duckdb_data_chunk_from_arrow
		ArrowArray arr;
		duckdb_data_chunk out_chunk = nullptr;
		err = duckdb_data_chunk_from_arrow(nullptr, &arr, converted_schema, &out_chunk);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		err = duckdb_data_chunk_from_arrow(tester.connection, &arr, nullptr, &out_chunk);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		err = duckdb_data_chunk_from_arrow(tester.connection, &arr, converted_schema, nullptr);
		REQUIRE(err != nullptr);
		duckdb_destroy_error_data(&err);
		duckdb_destroy_arrow_options(&arrow_options);
		free((void *)names[0]);
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,549 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
TEST_CASE("Test table_info incorrect 'is_valid' value for 'dflt_value' column", "[capi]") {
	// Regression test: PRAGMA table_info must mark 'dflt_value' (column 4)
	// as NULL (invalid) for every column without a default, across all result chunks.
	duckdb_database db;
	duckdb_connection con;
	duckdb_result result;
	// nullptr path opens an in-memory database (idiom fix: nullptr over NULL)
	REQUIRE(duckdb_open(nullptr, &db) != DuckDBError);
	REQUIRE(duckdb_connect(db, &con) != DuckDBError);

	//! Create a table with 40 columns
	REQUIRE(duckdb_query(con,
	                     "CREATE TABLE foo (c00 varchar, c01 varchar, c02 varchar, c03 varchar, c04 varchar, c05 "
	                     "varchar, c06 varchar, c07 varchar, c08 varchar, c09 varchar, c10 varchar, c11 varchar, c12 "
	                     "varchar, c13 varchar, c14 varchar, c15 varchar, c16 varchar, c17 varchar, c18 varchar, c19 "
	                     "varchar, c20 varchar, c21 varchar, c22 varchar, c23 varchar, c24 varchar, c25 varchar, c26 "
	                     "varchar, c27 varchar, c28 varchar, c29 varchar, c30 varchar, c31 varchar, c32 varchar, c33 "
	                     "varchar, c34 varchar, c35 varchar, c36 varchar, c37 varchar, c38 varchar, c39 varchar);",
	                     nullptr) != DuckDBError);

	//! Get table info for the created table
	REQUIRE(duckdb_query(con, "PRAGMA table_info(foo);", &result) != DuckDBError);

	//! Columns ({cid, name, type, notnull, dflt_value, pk}}
	idx_t col_count = duckdb_column_count(&result);
	REQUIRE(col_count == 6);

	idx_t chunk_count = duckdb_result_chunk_count(result);
	// Loop over the produced chunks; only the 'dflt_value' column (index 4)
	// carries an assertion, so we skip fetching the validity of the other columns.
	for (idx_t chunk_idx = 0; chunk_idx < chunk_count; chunk_idx++) {
		duckdb_data_chunk chunk = duckdb_result_get_chunk(result, chunk_idx);
		idx_t row_count = duckdb_data_chunk_get_size(chunk);
		duckdb_vector dflt_value_vector = duckdb_data_chunk_get_vector(chunk, 4);
		uint64_t *validity = duckdb_vector_get_validity(dflt_value_vector);
		for (idx_t row_idx = 0; row_idx < row_count; row_idx++) {
			//! 'dflt_value' must be NULL for every column of foo
			REQUIRE(duckdb_validity_row_is_valid(validity, row_idx) == false);
		}
		duckdb_destroy_data_chunk(&chunk);
	}
	duckdb_destroy_result(&result);
	duckdb_disconnect(&con);
	duckdb_close(&db);
}
// Verifies logical-type creation/destruction, including that destroying an
// already-destroyed handle or a NULL pointer is safe, and that nested-type
// accessors return fresh handles the caller must destroy separately.
TEST_CASE("Test Logical Types C API", "[capi]") {
	duckdb_logical_type type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	REQUIRE(type);
	REQUIRE(duckdb_get_type_id(type) == DUCKDB_TYPE_BIGINT);
	duckdb_destroy_logical_type(&type);
	// Intentional double-destroy: the second call must be a safe no-op.
	duckdb_destroy_logical_type(&type);

	// list type
	duckdb_logical_type elem_type = duckdb_create_logical_type(DUCKDB_TYPE_INTEGER);
	duckdb_logical_type list_type = duckdb_create_list_type(elem_type);
	REQUIRE(duckdb_get_type_id(list_type) == DUCKDB_TYPE_LIST);
	// child_type returns a distinct handle, not the original elem_type
	duckdb_logical_type elem_type_dup = duckdb_list_type_child_type(list_type);
	REQUIRE(elem_type_dup != elem_type);
	REQUIRE(duckdb_get_type_id(elem_type_dup) == duckdb_get_type_id(elem_type));
	duckdb_destroy_logical_type(&elem_type);
	duckdb_destroy_logical_type(&list_type);
	duckdb_destroy_logical_type(&elem_type_dup);

	// map type
	duckdb_logical_type key_type = duckdb_create_logical_type(DUCKDB_TYPE_SMALLINT);
	duckdb_logical_type value_type = duckdb_create_logical_type(DUCKDB_TYPE_DOUBLE);
	duckdb_logical_type map_type = duckdb_create_map_type(key_type, value_type);
	REQUIRE(duckdb_get_type_id(map_type) == DUCKDB_TYPE_MAP);
	// key/value accessors also return fresh handles that need their own destroy
	duckdb_logical_type key_type_dup = duckdb_map_type_key_type(map_type);
	duckdb_logical_type value_type_dup = duckdb_map_type_value_type(map_type);
	REQUIRE(key_type_dup != key_type);
	REQUIRE(value_type_dup != value_type);
	REQUIRE(duckdb_get_type_id(key_type_dup) == duckdb_get_type_id(key_type));
	REQUIRE(duckdb_get_type_id(value_type_dup) == duckdb_get_type_id(value_type));
	duckdb_destroy_logical_type(&key_type);
	duckdb_destroy_logical_type(&value_type);
	duckdb_destroy_logical_type(&map_type);
	duckdb_destroy_logical_type(&key_type_dup);
	duckdb_destroy_logical_type(&value_type_dup);

	// Intentional: destroying a NULL pointer must also be a safe no-op.
	duckdb_destroy_logical_type(nullptr);
}
TEST_CASE("Test DataChunk C API", "[capi]") {
	// End-to-end coverage of the data chunk C API: chunk creation, per-vector type
	// inspection, appending a chunk through the appender, NULL handling via the
	// validity mask, and (repeated/null) destruction.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	duckdb_state status;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(duckdb_vector_size() == STANDARD_VECTOR_SIZE);
	// create column types
	const idx_t COLUMN_COUNT = 3;
	duckdb_type duckdbTypes[COLUMN_COUNT];
	duckdbTypes[0] = DUCKDB_TYPE_BIGINT;
	duckdbTypes[1] = DUCKDB_TYPE_SMALLINT;
	duckdbTypes[2] = DUCKDB_TYPE_BLOB;
	duckdb_logical_type types[COLUMN_COUNT];
	for (idx_t i = 0; i < COLUMN_COUNT; i++) {
		types[i] = duckdb_create_logical_type(duckdbTypes[i]);
	}
	// create data chunk
	auto data_chunk = duckdb_create_data_chunk(types, COLUMN_COUNT);
	REQUIRE(data_chunk);
	REQUIRE(duckdb_data_chunk_get_column_count(data_chunk) == COLUMN_COUNT);
	// test duckdb_vector_get_column_type - each vector reports the type it was created with
	for (idx_t i = 0; i < COLUMN_COUNT; i++) {
		auto vector = duckdb_data_chunk_get_vector(data_chunk, i);
		auto type = duckdb_vector_get_column_type(vector);
		REQUIRE(duckdb_get_type_id(type) == duckdbTypes[i]);
		duckdb_destroy_logical_type(&type);
	}
	// out-of-bounds indices and null handles are handled gracefully, not crashed on
	REQUIRE(duckdb_data_chunk_get_vector(data_chunk, 999) == nullptr);
	REQUIRE(duckdb_data_chunk_get_vector(nullptr, 0) == nullptr);
	REQUIRE(duckdb_vector_get_column_type(nullptr) == nullptr);
	// a fresh chunk starts out empty
	REQUIRE(duckdb_data_chunk_get_size(data_chunk) == 0);
	REQUIRE(duckdb_data_chunk_get_size(nullptr) == 0);
	// create table
	tester.Query("CREATE TABLE test(i BIGINT, j SMALLINT, k BLOB)");
	// use the appender to insert values using the data chunk API
	duckdb_appender appender;
	status = duckdb_appender_create(tester.connection, nullptr, "test", &appender);
	REQUIRE(status == DuckDBSuccess);
	// get the column types from the appender
	REQUIRE(duckdb_appender_column_count(appender) == COLUMN_COUNT);
	// test duckdb_appender_column_type - must match the table's schema
	for (idx_t i = 0; i < COLUMN_COUNT; i++) {
		auto type = duckdb_appender_column_type(appender, i);
		REQUIRE(duckdb_get_type_id(type) == duckdbTypes[i]);
		duckdb_destroy_logical_type(&type);
	}
	// append BIGINT - write row 0 directly through the vector's data pointer
	auto bigint_vector = duckdb_data_chunk_get_vector(data_chunk, 0);
	auto int64_ptr = (int64_t *)duckdb_vector_get_data(bigint_vector);
	*int64_ptr = 42;
	// append SMALLINT
	auto smallint_vector = duckdb_data_chunk_get_vector(data_chunk, 1);
	auto int16_ptr = (int16_t *)duckdb_vector_get_data(smallint_vector);
	*int16_ptr = 84;
	// append BLOB - string/blob values go through the assign helper, not the raw data pointer
	string s = "this is my blob";
	auto blob_vector = duckdb_data_chunk_get_vector(data_chunk, 2);
	duckdb_vector_assign_string_element_len(blob_vector, 0, s.c_str(), s.length());
	REQUIRE(duckdb_vector_get_data(nullptr) == nullptr);
	// the chunk's size must be set explicitly before appending
	duckdb_data_chunk_set_size(data_chunk, 1);
	REQUIRE(duckdb_data_chunk_get_size(data_chunk) == 1);
	REQUIRE(duckdb_append_data_chunk(appender, data_chunk) == DuckDBSuccess);
	REQUIRE(duckdb_append_data_chunk(appender, nullptr) == DuckDBError);
	REQUIRE(duckdb_append_data_chunk(nullptr, data_chunk) == DuckDBError);
	// append nulls - reset the chunk, then mark row 0 invalid in every column's validity mask
	duckdb_data_chunk_reset(data_chunk);
	REQUIRE(duckdb_data_chunk_get_size(data_chunk) == 0);
	for (idx_t i = 0; i < COLUMN_COUNT; i++) {
		auto vector = duckdb_data_chunk_get_vector(data_chunk, i);
		// the validity mask may be lazily allocated; make it writable first
		duckdb_vector_ensure_validity_writable(vector);
		auto validity = duckdb_vector_get_validity(vector);
		REQUIRE(duckdb_validity_row_is_valid(validity, 0));
		duckdb_validity_set_row_validity(validity, 0, false);
		REQUIRE(!duckdb_validity_row_is_valid(validity, 0));
	}
	duckdb_data_chunk_set_size(data_chunk, 1);
	REQUIRE(duckdb_data_chunk_get_size(data_chunk) == 1);
	REQUIRE(duckdb_append_data_chunk(appender, data_chunk) == DuckDBSuccess);
	REQUIRE(duckdb_vector_get_validity(nullptr) == nullptr);
	// destroying the appender flushes the appended rows to the table
	duckdb_appender_destroy(&appender);
	result = tester.Query("SELECT * FROM test");
	REQUIRE_NO_FAIL(*result);
	// row 0: the values written above; row 1: all NULLs
	REQUIRE(result->Fetch<int64_t>(0, 0) == 42);
	REQUIRE(result->Fetch<int16_t>(1, 0) == 84);
	REQUIRE(result->Fetch<string>(2, 0) == "this is my blob");
	REQUIRE(result->IsNull(0, 1));
	REQUIRE(result->IsNull(1, 1));
	REQUIRE(result->IsNull(2, 1));
	duckdb_data_chunk_reset(data_chunk);
	duckdb_data_chunk_reset(nullptr);
	REQUIRE(duckdb_data_chunk_get_size(data_chunk) == 0);
	// double-destroy and null-destroy must be safe no-ops
	duckdb_destroy_data_chunk(&data_chunk);
	duckdb_destroy_data_chunk(&data_chunk);
	duckdb_destroy_data_chunk(nullptr);
	for (idx_t i = 0; i < COLUMN_COUNT; i++) {
		duckdb_destroy_logical_type(&types[i]);
	}
}
TEST_CASE("Test DataChunk varchar result fetch in C API", "[capi]") {
	// Fetches a multi-chunk VARCHAR result and verifies both inlined and
	// pointer-backed duckdb_string_t representations, plus NULL validity.
	if (duckdb_vector_size() < 64) {
		return;
	}
	duckdb_database database;
	duckdb_connection connection;
	duckdb_state state;
	state = duckdb_open(nullptr, &database);
	REQUIRE(state == DuckDBSuccess);
	state = duckdb_connect(database, &connection);
	REQUIRE(state == DuckDBSuccess);
	// Row i: NULL when i is a non-zero multiple of 42; otherwise the character
	// 'A' + (i % 26) repeated 4 + (i % 12) times (lengths 4..15 span the inlined
	// and non-inlined string representations).
	constexpr const char *VARCHAR_TEST_QUERY = "select case when i != 0 and i % 42 = 0 then NULL else repeat(chr((65 + "
	                                           "(i % 26))::INTEGER), (4 + (i % 12))) end from range(5000) tbl(i);";
	// fetch a small result set
	duckdb_result result;
	state = duckdb_query(connection, VARCHAR_TEST_QUERY, &result);
	REQUIRE(state == DuckDBSuccess);
	REQUIRE(duckdb_column_count(&result) == 1);
	REQUIRE(duckdb_row_count(&result) == 5000);
	REQUIRE(duckdb_result_error(&result) == nullptr);
	// 5000 rows split into full vectors plus one partial trailing chunk
	idx_t expected_chunk_count = (5000 / STANDARD_VECTOR_SIZE) + (5000 % STANDARD_VECTOR_SIZE != 0);
	REQUIRE(duckdb_result_chunk_count(result) == expected_chunk_count);
	auto chunk = duckdb_result_get_chunk(result, 0);
	REQUIRE(duckdb_data_chunk_get_column_count(chunk) == 1);
	REQUIRE(STANDARD_VECTOR_SIZE < 5000);
	REQUIRE(duckdb_data_chunk_get_size(chunk) == STANDARD_VECTOR_SIZE);
	duckdb_destroy_data_chunk(&chunk);
	idx_t tuple_index = 0;
	auto chunk_amount = duckdb_result_chunk_count(result);
	for (idx_t chunk_index = 0; chunk_index < chunk_amount; chunk_index++) {
		chunk = duckdb_result_get_chunk(result, chunk_index);
		// Our result only has one column
		auto vector = duckdb_data_chunk_get_vector(chunk, 0);
		auto validity = duckdb_vector_get_validity(vector);
		auto string_data = (duckdb_string_t *)duckdb_vector_get_data(vector);
		auto tuples_in_chunk = duckdb_data_chunk_get_size(chunk);
		for (idx_t i = 0; i < tuples_in_chunk; i++, tuple_index++) {
			if (!duckdb_validity_row_is_valid(validity, i)) {
				// This entry is NULL - must be a non-zero multiple of 42 per the query above
				REQUIRE((tuple_index != 0 && tuple_index % 42 == 0));
				continue;
			}
			idx_t expected_length = (tuple_index % 12) + 4;
			char expected_character = (tuple_index % 26) + 'A';
			// TODO: how does the c-api handle non-flat vectors?
			auto tuple = string_data[i];
			// NOTE: reading the length through 'inlined' relies on the length prefix being
			// shared by both union variants of duckdb_string_t - confirm against duckdb.h
			auto length = tuple.value.inlined.length;
			REQUIRE(length == expected_length);
			if (duckdb_string_is_inlined(tuple)) {
				// The data is small enough to fit in the string_t, it does not have a separate allocation
				for (idx_t string_index = 0; string_index < length; string_index++) {
					REQUIRE(tuple.value.inlined.inlined[string_index] == expected_character);
				}
			} else {
				for (idx_t string_index = 0; string_index < length; string_index++) {
					REQUIRE(tuple.value.pointer.ptr[string_index] == expected_character);
				}
			}
		}
		duckdb_destroy_data_chunk(&chunk);
	}
	duckdb_destroy_result(&result);
	duckdb_disconnect(&connection);
	duckdb_close(&database);
}
TEST_CASE("Test DataChunk result fetch in C API", "[capi]") {
	// Verifies chunk-based result fetching: data/validity access, that the
	// value-based API is disabled after chunk fetching begins, and exhaustion.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	if (duckdb_vector_size() < 64) {
		return;
	}
	REQUIRE(tester.OpenDatabase(nullptr));
	// fetch a small result set - rows 0..2, with row 1 forced to NULL
	result = tester.Query("SELECT CASE WHEN i=1 THEN NULL ELSE i::INTEGER END i FROM range(3) tbl(i)");
	REQUIRE(NO_FAIL(*result));
	REQUIRE(result->ColumnCount() == 1);
	REQUIRE(result->row_count() == 3);
	REQUIRE(result->ErrorMessage() == nullptr);
	// fetch the first chunk
	REQUIRE(result->ChunkCount() == 1);
	auto chunk = result->FetchChunk(0);
	REQUIRE(chunk);
	REQUIRE(chunk->ColumnCount() == 1);
	REQUIRE(chunk->size() == 3);
	auto data = (int32_t *)chunk->GetData(0);
	auto validity = chunk->GetValidity(0);
	REQUIRE(data[0] == 0);
	REQUIRE(data[2] == 2);
	REQUIRE(duckdb_validity_row_is_valid(validity, 0));
	REQUIRE(!duckdb_validity_row_is_valid(validity, 1));
	REQUIRE(duckdb_validity_row_is_valid(validity, 2));
	// after fetching a chunk, we cannot use the old API anymore
	REQUIRE(result->ColumnData<int32_t>(0) == nullptr);
	REQUIRE(result->Fetch<int32_t>(0, 1) == 0);
	// result set is exhausted!
	chunk = result->FetchChunk(1);
	REQUIRE(!chunk);
}
TEST_CASE("Test duckdb_result_return_type", "[capi]") {
	// Each statement class is reported with its own duckdb_result_type.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	// DDL produces no result rows.
	auto ddl_result = tester.Query("CREATE TABLE t (id INT)");
	REQUIRE(duckdb_result_return_type(ddl_result->InternalResult()) == DUCKDB_RESULT_TYPE_NOTHING);
	// DML reports the number of changed rows.
	auto dml_result = tester.Query("INSERT INTO t VALUES (42)");
	REQUIRE(duckdb_result_return_type(dml_result->InternalResult()) == DUCKDB_RESULT_TYPE_CHANGED_ROWS);
	// A query yields a regular result set.
	auto query_result = tester.Query("FROM t");
	REQUIRE(duckdb_result_return_type(query_result->InternalResult()) == DUCKDB_RESULT_TYPE_QUERY_RESULT);
}
TEST_CASE("Test DataChunk populate ListVector in C API", "[capi]") {
	// Builds an INTEGER[] column by hand: reserve the child vector, fill the
	// flattened elements, set list entries, and verify via the C++ Vector.
	if (duckdb_vector_size() < 3) {
		return;
	}
	// null handles must be rejected, not crashed on
	REQUIRE(duckdb_list_vector_reserve(nullptr, 100) == duckdb_state::DuckDBError);
	REQUIRE(duckdb_list_vector_set_size(nullptr, 200) == duckdb_state::DuckDBError);
	auto elem_type = duckdb_create_logical_type(duckdb_type::DUCKDB_TYPE_INTEGER);
	auto list_type = duckdb_create_list_type(elem_type);
	duckdb_logical_type schema[] = {list_type};
	auto chunk = duckdb_create_data_chunk(schema, 1);
	auto list_vector = duckdb_data_chunk_get_vector(chunk, 0);
	duckdb_data_chunk_set_size(chunk, 3);
	// reserving capacity does not change the list's logical size
	REQUIRE(duckdb_list_vector_reserve(list_vector, 123) == duckdb_state::DuckDBSuccess);
	REQUIRE(duckdb_list_vector_get_size(list_vector) == 0);
	auto child = duckdb_list_vector_get_child(list_vector);
	// fill the flattened child elements with 0..122
	for (int i = 0; i < 123; i++) {
		((int *)duckdb_vector_get_data(child))[i] = i;
	}
	REQUIRE(duckdb_list_vector_set_size(list_vector, 123) == duckdb_state::DuckDBSuccess);
	REQUIRE(duckdb_list_vector_get_size(list_vector) == 123);
	// the list vector's data is an array of (offset, length) entries into the child;
	// three lists of lengths 20, 80, and 23 cover all 123 elements
	auto entries = (duckdb_list_entry *)duckdb_vector_get_data(list_vector);
	entries[0].offset = 0;
	entries[0].length = 20;
	entries[1].offset = 20;
	entries[1].length = 80;
	entries[2].offset = 100;
	entries[2].length = 23;
	auto child_data = (int *)duckdb_vector_get_data(child);
	int count = 0;
	// walking the entries in order must visit 0..122 consecutively
	for (idx_t i = 0; i < duckdb_data_chunk_get_size(chunk); i++) {
		for (idx_t j = 0; j < entries[i].length; j++) {
			REQUIRE(child_data[entries[i].offset + j] == count);
			count++;
		}
	}
	// cross-check through the C++ Vector interface backing the C handle
	auto &vector = (Vector &)(*list_vector);
	for (int i = 0; i < 123; i++) {
		REQUIRE(ListVector::GetEntry(vector).GetValue(i) == i);
	}
	duckdb_destroy_data_chunk(&chunk);
	duckdb_destroy_logical_type(&list_type);
	duckdb_destroy_logical_type(&elem_type);
}
TEST_CASE("Test DataChunk populate ArrayVector in C API", "[capi]") {
	// Builds a chunk with a single INTEGER[3] column, writes directly into the
	// array vector's flattened child, then verifies the values round-trip
	// through the C++ Vector interface.
	auto elem_type = duckdb_create_logical_type(duckdb_type::DUCKDB_TYPE_INTEGER);
	auto array_type = duckdb_create_array_type(elem_type, 3);
	duckdb_logical_type schema[] = {array_type};
	auto chunk = duckdb_create_data_chunk(schema, 1);
	duckdb_data_chunk_set_size(chunk, 2);
	auto array_vector = duckdb_data_chunk_get_vector(chunk, 0);
	// the child stores the flattened elements: 2 rows * 3 elements each
	auto child = duckdb_array_vector_get_child(array_vector);
	for (int i = 0; i < 6; i++) {
		((int *)duckdb_vector_get_data(child))[i] = i;
	}
	// bind a reference instead of copying the Vector, consistent with the
	// ListVector test (`auto &vector = ...`) - the plain `auto` here copied it
	auto &vec = (Vector &)(*array_vector);
	for (int i = 0; i < 2; i++) {
		auto child_vals = ArrayValue::GetChildren(vec.GetValue(i));
		for (int j = 0; j < 3; j++) {
			REQUIRE(child_vals[j].GetValue<int>() == i * 3 + j);
		}
	}
	duckdb_destroy_data_chunk(&chunk);
	duckdb_destroy_logical_type(&array_type);
	duckdb_destroy_logical_type(&elem_type);
}
TEST_CASE("Test PK violation in the C API appender", "[capi]") {
	// Appending duplicate primary-key values must fail at flush time (close/flush),
	// leave the table unchanged, and still allow the appender to be destroyed.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(duckdb_vector_size() == STANDARD_VECTOR_SIZE);
	// Create column types.
	const idx_t COLUMN_COUNT = 1;
	duckdb_logical_type types[COLUMN_COUNT];
	types[0] = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	// Create data chunk with two rows carrying the same key (42).
	auto data_chunk = duckdb_create_data_chunk(types, COLUMN_COUNT);
	auto bigint_vector = duckdb_data_chunk_get_vector(data_chunk, 0);
	auto int64_ptr = reinterpret_cast<int64_t *>(duckdb_vector_get_data(bigint_vector));
	int64_ptr[0] = 42;
	int64_ptr[1] = 42;
	duckdb_data_chunk_set_size(data_chunk, 2);
	// Use the appender to append the data chunk.
	tester.Query("CREATE TABLE test(i BIGINT PRIMARY KEY)");
	duckdb_appender appender;
	REQUIRE(duckdb_appender_create(tester.connection, nullptr, "test", &appender) == DuckDBSuccess);
	// We only flush when destroying the appender. Thus, we expect this to succeed, as we only
	// detect constraint violations when flushing the results.
	REQUIRE(duckdb_append_data_chunk(appender, data_chunk) == DuckDBSuccess);
	// duckdb_appender_close attempts to flush the data and fails.
	auto state = duckdb_appender_close(appender);
	REQUIRE(state == DuckDBError);
	auto error = duckdb_appender_error(appender);
	REQUIRE(duckdb::StringUtil::Contains(error, "PRIMARY KEY or UNIQUE constraint violation"));
	// Destroy the appender despite the error to avoid leaks.
	state = duckdb_appender_destroy(&appender);
	REQUIRE(state == DuckDBError);
	// Clean-up.
	duckdb_destroy_data_chunk(&data_chunk);
	for (idx_t i = 0; i < COLUMN_COUNT; i++) {
		duckdb_destroy_logical_type(&types[i]);
	}
	// Ensure that no rows were appended.
	result = tester.Query("SELECT * FROM test;");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 0);
	// Try again by appending rows and flushing.
	REQUIRE(duckdb_appender_create(tester.connection, nullptr, "test", &appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_begin_row(appender) == DuckDBSuccess);
	REQUIRE(duckdb_append_int64(appender, 42) == DuckDBSuccess);
	REQUIRE(duckdb_appender_end_row(appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_begin_row(appender) == DuckDBSuccess);
	REQUIRE(duckdb_append_int64(appender, 42) == DuckDBSuccess);
	REQUIRE(duckdb_appender_end_row(appender) == DuckDBSuccess);
	// An explicit flush also fails on the duplicate key.
	state = duckdb_appender_flush(appender);
	REQUIRE(state == DuckDBError);
	error = duckdb_appender_error(appender);
	REQUIRE(duckdb::StringUtil::Contains(error, "PRIMARY KEY or UNIQUE constraint violation"));
	REQUIRE(duckdb_appender_destroy(&appender) == DuckDBError);
	// Ensure that no rows were appended - the failed flush discards the appended rows.
	result = tester.Query("SELECT * FROM test;");
	REQUIRE_NO_FAIL(*result);
	REQUIRE(result->row_count() == 0);
}
TEST_CASE("Test DataChunk write BLOB", "[capi]") {
	// Raw (non-UTF8) bytes written into a BLOB vector must round-trip unchanged.
	auto blob_type = duckdb_create_logical_type(DUCKDB_TYPE_BLOB);
	REQUIRE(blob_type);
	REQUIRE(duckdb_get_type_id(blob_type) == DUCKDB_TYPE_BLOB);
	duckdb_logical_type column_types[] = {blob_type};
	auto chunk = duckdb_create_data_chunk(column_types, 1);
	duckdb_data_chunk_set_size(chunk, 1);
	auto blob_vector = duckdb_data_chunk_get_vector(chunk, 0);
	// the vector reports the column type it was created with
	auto reported_type = duckdb_vector_get_column_type(blob_vector);
	REQUIRE(duckdb_get_type_id(reported_type) == DUCKDB_TYPE_BLOB);
	duckdb_destroy_logical_type(&reported_type);
	// payload deliberately contains 0x80 (invalid UTF-8) and an embedded NUL byte
	const uint8_t payload[] = {0x80, 0x00, 0x01, 0x2a};
	duckdb_vector_assign_string_element_len(blob_vector, 0, (const char *)payload, 4);
	auto stored = static_cast<duckdb_string_t *>(duckdb_vector_get_data(blob_vector));
	auto stored_bytes = duckdb_string_t_data(stored);
	REQUIRE(duckdb_string_t_length(*stored) == 4);
	for (idx_t byte_idx = 0; byte_idx < 4; byte_idx++) {
		REQUIRE(stored_bytes[byte_idx] == (char)payload[byte_idx]);
	}
	duckdb_destroy_data_chunk(&chunk);
	duckdb_destroy_logical_type(&blob_type);
}
TEST_CASE("Test DataChunk write BIGNUM", "[capi]") {
	// BIGNUM values are written through the string/blob assignment API; the raw
	// encoded bytes must round-trip unchanged.
	duckdb_logical_type type = duckdb_create_logical_type(DUCKDB_TYPE_BIGNUM);
	REQUIRE(type);
	REQUIRE(duckdb_get_type_id(type) == DUCKDB_TYPE_BIGNUM);
	duckdb_logical_type types[] = {type};
	auto chunk = duckdb_create_data_chunk(types, 1);
	duckdb_data_chunk_set_size(chunk, 1);
	duckdb_vector vector = duckdb_data_chunk_get_vector(chunk, 0);
	// the vector reports the column type it was created with
	auto column_type = duckdb_vector_get_column_type(vector);
	REQUIRE(duckdb_get_type_id(column_type) == DUCKDB_TYPE_BIGNUM);
	duckdb_destroy_logical_type(&column_type);
	uint8_t bytes[] = {0x80, 0x00, 0x01, 0x2a}; // BIGNUM 42
	duckdb_vector_assign_string_element_len(vector, 0, (const char *)bytes, 4);
	// read the stored value back through the duckdb_string_t accessors
	auto string_data = static_cast<duckdb_string_t *>(duckdb_vector_get_data(vector));
	auto string_value = duckdb_string_t_data(string_data);
	REQUIRE(duckdb_string_t_length(*string_data) == 4);
	REQUIRE(string_value[0] == (char)0x80);
	REQUIRE(string_value[1] == (char)0x00);
	REQUIRE(string_value[2] == (char)0x01);
	REQUIRE(string_value[3] == (char)0x2a);
	duckdb_destroy_data_chunk(&chunk);
	duckdb_destroy_logical_type(&type);
}

View File

@@ -0,0 +1,86 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
TEST_CASE("Test extract statements in C API", "[capi]") {
	// Splits a multi-statement SQL string into individual statements, executes
	// them, and covers the empty / invalid / out-of-bounds error paths.
	CAPITester tester;
	duckdb_result res;
	duckdb_extracted_statements stmts = nullptr;
	duckdb_state status;
	const char *error;
	duckdb_prepared_statement prepared = nullptr;
	REQUIRE(tester.OpenDatabase(nullptr));
	idx_t size = duckdb_extract_statements(tester.connection,
	                                       "CREATE TABLE tbl (col INT); INSERT INTO tbl VALUES (1), (2), (3), (4); "
	                                       "SELECT COUNT(col) FROM tbl WHERE col > $1",
	                                       &stmts);
	REQUIRE(size == 3);
	REQUIRE(stmts != nullptr);
	// execute all but the last statement (they take no parameters)
	for (idx_t i = 0; i + 1 < size; i++) {
		status = duckdb_prepare_extracted_statement(tester.connection, stmts, i, &prepared);
		REQUIRE(status == DuckDBSuccess);
		status = duckdb_execute_prepared(prepared, &res);
		REQUIRE(status == DuckDBSuccess);
		duckdb_destroy_prepare(&prepared);
		duckdb_destroy_result(&res);
	}
	// the final statement takes $1; bind it and check the count (values 2, 3, 4 are > 1)
	duckdb_prepared_statement stmt = nullptr;
	status = duckdb_prepare_extracted_statement(tester.connection, stmts, size - 1, &stmt);
	REQUIRE(status == DuckDBSuccess);
	duckdb_bind_int32(stmt, 1, 1);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 3);
	duckdb_destroy_prepare(&stmt);
	duckdb_destroy_result(&res);
	duckdb_destroy_extracted(&stmts);
	// test empty statement is not an error
	size = duckdb_extract_statements(tester.connection, "", &stmts);
	REQUIRE(size == 0);
	error = duckdb_extract_statements_error(stmts);
	REQUIRE(error == nullptr);
	duckdb_destroy_extracted(&stmts);
	// test incorrect statement cannot be extracted
	size = duckdb_extract_statements(tester.connection, "This is not valid SQL", &stmts);
	REQUIRE(size == 0);
	error = duckdb_extract_statements_error(stmts);
	REQUIRE(error != nullptr);
	duckdb_destroy_extracted(&stmts);
	// test out of bounds - preparing statement index 2 of a 1-statement batch fails
	size = duckdb_extract_statements(tester.connection, "SELECT CAST($1 AS BIGINT)", &stmts);
	REQUIRE(size == 1);
	status = duckdb_prepare_extracted_statement(tester.connection, stmts, 2, &prepared);
	REQUIRE(status == DuckDBError);
	duckdb_destroy_extracted(&stmts);
}
TEST_CASE("Test invalid PRAGMA in C API", "[capi]") {
	// Extracting statements from an unknown PRAGMA yields zero statements and
	// surfaces a catalog error through the extraction error accessor.
	duckdb_database database;
	duckdb_connection connection;
	REQUIRE(duckdb_open(nullptr, &database) == DuckDBSuccess);
	REQUIRE(duckdb_connect(database, &connection) == DuckDBSuccess);
	duckdb_extracted_statements statements;
	auto statement_count = duckdb_extract_statements(connection, "PRAGMA something;", &statements);
	REQUIRE(statement_count == 0);
	const char *message = duckdb_extract_statements_error(statements);
	REQUIRE(message != nullptr);
	REQUIRE(string(message).find("Catalog Error") != std::string::npos);
	duckdb_destroy_extracted(&statements);
	duckdb_disconnect(&connection);
	duckdb_close(&database);
}

View File

@@ -0,0 +1,67 @@
#include "capi_tester.hpp"
#include "duckdb.h"
#include <chrono>
#include <thread>
using namespace duckdb;
using namespace std;
static void background_thread_connect(duckdb_instance_cache instance_cache, const char *path) {
	// Opens (or reuses) the cached database at 'path' from a background thread
	// and immediately closes it again. Any C++ exception is reported as a test
	// failure instead of escaping the thread.
	try {
		duckdb_database database;
		REQUIRE(duckdb_get_or_create_from_cache(instance_cache, path, &database, nullptr, nullptr) == DuckDBSuccess);
		duckdb_close(&database);
	} catch (std::exception &ex) {
		FAIL(ex.what());
	}
}
TEST_CASE("Test the database instance cache in the C API", "[api][.]") {
	// Stress test: repeatedly race a main-thread close against a background-thread
	// open of the same cached database instance.
	auto instance_cache = duckdb_create_instance_cache();
	for (idx_t i = 0; i < 30; i++) {
		auto path = TestCreatePath("shared_db.db");
		duckdb_database shared_out_database;
		auto state =
		    duckdb_get_or_create_from_cache(instance_cache, path.c_str(), &shared_out_database, nullptr, nullptr);
		REQUIRE(state == DuckDBSuccess);
		// open the same path from a second thread while this thread closes its handle
		thread background_thread(background_thread_connect, instance_cache, path.c_str());
		duckdb_close(&shared_out_database);
		// join before deleting the file so the background handle is closed first
		background_thread.join();
		TestDeleteFile(path);
		REQUIRE(1);
	}
	duckdb_destroy_instance_cache(&instance_cache);
}
TEST_CASE("Test the database instance cache in the C API with a null path", "[capi]") {
	// A null path must open successfully through the cache (in-memory semantics).
	auto cache = duckdb_create_instance_cache();
	duckdb_database database;
	auto open_state = duckdb_get_or_create_from_cache(cache, nullptr, &database, nullptr, nullptr);
	REQUIRE(open_state == DuckDBSuccess);
	duckdb_close(&database);
	duckdb_destroy_instance_cache(&cache);
}
TEST_CASE("Test the database instance cache in the C API with an empty path", "[capi]") {
	// An empty path must open successfully through the cache as well.
	auto cache = duckdb_create_instance_cache();
	duckdb_database database;
	auto open_state = duckdb_get_or_create_from_cache(cache, "", &database, nullptr, nullptr);
	REQUIRE(open_state == DuckDBSuccess);
	duckdb_close(&database);
	duckdb_destroy_instance_cache(&cache);
}
TEST_CASE("Test the database instance cache in the C API with a memory path", "[capi]") {
	// The explicit ":memory:" path must also work through the instance cache.
	auto cache = duckdb_create_instance_cache();
	duckdb_database database;
	auto open_state = duckdb_get_or_create_from_cache(cache, ":memory:", &database, nullptr, nullptr);
	REQUIRE(open_state == DuckDBSuccess);
	duckdb_close(&database);
	duckdb_destroy_instance_cache(&cache);
}

View File

@@ -0,0 +1,30 @@
#include "capi_tester.hpp"
#include "duckdb.h"
using namespace duckdb;
using namespace std;
TEST_CASE("Test pending statements in C API", "[capi]") {
	// Drive a pending query to completion one task at a time, then materialize
	// the final result.
	CAPITester tester;
	CAPIPrepared prepared;
	CAPIPending pending;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(prepared.Prepare(tester, "SELECT SUM(i) FROM range(1000000) tbl(i)"));
	REQUIRE(pending.Pending(prepared));
	// execute tasks until the pending query reports completion
	for (;;) {
		const auto exec_state = pending.ExecuteTask();
		REQUIRE(exec_state != DUCKDB_PENDING_ERROR);
		if (duckdb_pending_execution_is_finished(exec_state)) {
			break;
		}
	}
	auto result = pending.Execute();
	REQUIRE(result);
	REQUIRE(!result->HasError());
	// sum of 0..999999
	REQUIRE(result->Fetch<int64_t>(0, 0) == 499999500000LL);
}

View File

@@ -0,0 +1,609 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
TEST_CASE("Test prepared statements in C API", "[capi]") {
	// Exercises the prepared-statement C API end to end: preparing, inspecting
	// result-column metadata, binding every supported parameter type, clearing
	// bindings, parameter introspection, and the error paths.
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	duckdb_result res;
	duckdb_prepared_statement stmt = nullptr;
	duckdb_state status;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	status = duckdb_prepare(tester.connection, "SELECT CAST($1 AS BIGINT)", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);
	// result-column metadata is available before execution
	REQUIRE(duckdb_prepared_statement_column_count(stmt) == 1);
	REQUIRE(duckdb_prepared_statement_column_type(stmt, 0) == DUCKDB_TYPE_BIGINT);
	auto logical_type = duckdb_prepared_statement_column_logical_type(stmt, 0);
	REQUIRE(logical_type);
	REQUIRE(duckdb_get_type_id(logical_type) == DUCKDB_TYPE_BIGINT);
	duckdb_destroy_logical_type(&logical_type);
	status = duckdb_bind_boolean(stmt, 1, true);
	REQUIRE(status == DuckDBSuccess);
	// Parameter index 2 is out of bounds
	status = duckdb_bind_boolean(stmt, 2, true);
	REQUIRE(status == DuckDBError);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 1);
	duckdb_destroy_result(&res);
	// bind every signed integer width; each value round-trips through the BIGINT cast
	duckdb_bind_int8(stmt, 1, 8);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 8);
	duckdb_destroy_result(&res);
	duckdb_bind_int16(stmt, 1, 16);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 16);
	duckdb_destroy_result(&res);
	duckdb_bind_int32(stmt, 1, 32);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 32);
	duckdb_destroy_result(&res);
	duckdb_bind_int64(stmt, 1, 64);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 64);
	duckdb_destroy_result(&res);
	// hugeint / uhugeint round-trip via the double conversion helpers
	duckdb_bind_hugeint(stmt, 1, duckdb_double_to_hugeint(64));
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_hugeint_to_double(duckdb_value_hugeint(&res, 0, 0)) == 64.0);
	duckdb_destroy_result(&res);
	duckdb_bind_uhugeint(stmt, 1, duckdb_double_to_uhugeint(64));
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_uhugeint_to_double(duckdb_value_uhugeint(&res, 0, 0)) == 64.0);
	duckdb_destroy_result(&res);
	// Fetching a DECIMAL from a non-DECIMAL result returns 0
	duckdb_decimal decimal = duckdb_double_to_decimal(634.3453, 7, 4);
	duckdb_bind_decimal(stmt, 1, decimal);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	duckdb_decimal result_decimal = duckdb_value_decimal(&res, 0, 0);
	REQUIRE(result_decimal.scale == 0);
	REQUIRE(result_decimal.width == 0);
	REQUIRE(result_decimal.value.upper == 0);
	REQUIRE(result_decimal.value.lower == 0);
	duckdb_destroy_result(&res);
	// unsigned integer widths
	duckdb_bind_uint8(stmt, 1, 8);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_uint8(&res, 0, 0) == 8);
	duckdb_destroy_result(&res);
	duckdb_bind_uint16(stmt, 1, 8);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_uint16(&res, 0, 0) == 8);
	duckdb_destroy_result(&res);
	duckdb_bind_uint32(stmt, 1, 8);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_uint32(&res, 0, 0) == 8);
	duckdb_destroy_result(&res);
	duckdb_bind_uint64(stmt, 1, 8);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_uint64(&res, 0, 0) == 8);
	duckdb_destroy_result(&res);
	// floating-point parameters cast down to BIGINT
	duckdb_bind_float(stmt, 1, 42.0);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 42);
	duckdb_destroy_result(&res);
	duckdb_bind_double(stmt, 1, 43.0);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 43);
	duckdb_destroy_result(&res);
	// NaN can be bound, but casting it to BIGINT fails at execution time
	REQUIRE(duckdb_bind_float(stmt, 1, NAN) == DuckDBSuccess);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBError);
	duckdb_destroy_result(&res);
	REQUIRE(duckdb_bind_double(stmt, 1, NAN) == DuckDBSuccess);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBError);
	duckdb_destroy_result(&res);
	// binding an invalid UTF-8 string is rejected at bind time
	REQUIRE(duckdb_bind_varchar(stmt, 1, "\x80\x40\x41") == DuckDBError);
	duckdb_bind_varchar(stmt, 1, "44");
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 44);
	duckdb_destroy_result(&res);
	duckdb_bind_null(stmt, 1);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_nullmask_data(&res, 0)[0] == true);
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&stmt);
	// again to make sure it does not crash
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&stmt);
	status = duckdb_prepare(tester.connection, "SELECT CAST($1 AS VARCHAR)", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);
	// invalid unicode
	REQUIRE(duckdb_bind_varchar_length(stmt, 1, "\x80", 1) == DuckDBError);
	// we can bind null values, though!
	REQUIRE(duckdb_bind_varchar_length(stmt, 1, "\x00\x40\x41", 3) == DuckDBSuccess);
	// only the first 5 bytes are bound
	duckdb_bind_varchar_length(stmt, 1, "hello world", 5);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	auto value = duckdb_value_varchar(&res, 0, 0);
	REQUIRE(string(value) == "hello");
	REQUIRE(duckdb_value_int8(&res, 0, 0) == 0);
	duckdb_free(value);
	duckdb_destroy_result(&res);
	// blobs keep embedded NUL bytes; cast to VARCHAR they are escaped as \x00
	duckdb_bind_blob(stmt, 1, "hello\0world", 11);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	value = duckdb_value_varchar(&res, 0, 0);
	REQUIRE(string(value) == "hello\\x00world");
	REQUIRE(duckdb_value_int8(&res, 0, 0) == 0);
	duckdb_free(value);
	duckdb_destroy_result(&res);
	// temporal types: DATE, TIME, TIMESTAMP, TIMESTAMPTZ, INTERVAL
	duckdb_date_struct date_struct;
	date_struct.year = 1992;
	date_struct.month = 9;
	date_struct.day = 3;
	duckdb_bind_date(stmt, 1, duckdb_to_date(date_struct));
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	value = duckdb_value_varchar(&res, 0, 0);
	REQUIRE(string(value) == "1992-09-03");
	duckdb_free(value);
	duckdb_destroy_result(&res);
	duckdb_time_struct time_struct;
	time_struct.hour = 12;
	time_struct.min = 22;
	time_struct.sec = 33;
	time_struct.micros = 123400;
	duckdb_bind_time(stmt, 1, duckdb_to_time(time_struct));
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	value = duckdb_value_varchar(&res, 0, 0);
	REQUIRE(string(value) == "12:22:33.1234");
	duckdb_free(value);
	duckdb_destroy_result(&res);
	duckdb_timestamp_struct ts;
	ts.date = date_struct;
	ts.time = time_struct;
	duckdb_bind_timestamp(stmt, 1, duckdb_to_timestamp(ts));
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	value = duckdb_value_varchar(&res, 0, 0);
	REQUIRE(string(value) == "1992-09-03 12:22:33.1234");
	duckdb_free(value);
	duckdb_destroy_result(&res);
	// the TZ rendering depends on the session time zone, so only check the date prefix
	duckdb_bind_timestamp_tz(stmt, 1, duckdb_to_timestamp(ts));
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	value = duckdb_value_varchar(&res, 0, 0);
	REQUIRE(StringUtil::Contains(string(value), "1992-09"));
	duckdb_free(value);
	duckdb_destroy_result(&res);
	duckdb_interval interval;
	interval.months = 3;
	interval.days = 0;
	interval.micros = 0;
	duckdb_bind_interval(stmt, 1, interval);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	value = duckdb_value_varchar(&res, 0, 0);
	REQUIRE(string(value) == "3 months");
	duckdb_free(value);
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&stmt);
	// parameter introspection: duckdb_nparams / duckdb_param_type are 1-indexed
	// and return INVALID for null statements and out-of-range indices
	status = duckdb_query(tester.connection, "CREATE TABLE a (i INTEGER)", NULL);
	REQUIRE(status == DuckDBSuccess);
	status = duckdb_prepare(tester.connection, "INSERT INTO a VALUES (?)", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);
	REQUIRE(duckdb_nparams(nullptr) == 0);
	REQUIRE(duckdb_nparams(stmt) == 1);
	REQUIRE(duckdb_param_type(nullptr, 0) == DUCKDB_TYPE_INVALID);
	REQUIRE(duckdb_param_type(stmt, 0) == DUCKDB_TYPE_INVALID);
	REQUIRE(duckdb_param_type(stmt, 1) == DUCKDB_TYPE_INTEGER);
	REQUIRE(duckdb_param_type(stmt, 2) == DUCKDB_TYPE_INVALID);
	// re-executing the same prepared statement with different bindings
	for (int32_t i = 1; i <= 1000; i++) {
		duckdb_bind_int32(stmt, 1, i);
		status = duckdb_execute_prepared(stmt, nullptr);
		REQUIRE(status == DuckDBSuccess);
	}
	duckdb_destroy_prepare(&stmt);
	status = duckdb_prepare(tester.connection, "SELECT SUM(i)*$1-$2 FROM a", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);
	// clear bindings
	duckdb_bind_int32(stmt, 1, 2);
	REQUIRE(duckdb_clear_bindings(stmt) == DuckDBSuccess);
	// bind again will succeed
	duckdb_bind_int32(stmt, 1, 2);
	duckdb_bind_int32(stmt, 2, 1000);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	// SUM(1..1000) = 500500; 500500 * 2 - 1000 = 1000000
	REQUIRE(duckdb_value_int32(&res, 0, 0) == 1000000);
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&stmt);
	// not-so-happy path
	status = duckdb_prepare(tester.connection, "SELECT XXXXX", &stmt);
	REQUIRE(status == DuckDBError);
	duckdb_destroy_prepare(&stmt);
	// executing without binding the required parameter fails
	status = duckdb_prepare(tester.connection, "SELECT CAST($1 AS INTEGER)", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);
	REQUIRE(duckdb_prepared_statement_column_count(stmt) == 1);
	REQUIRE(duckdb_prepared_statement_column_type(stmt, 0) == DUCKDB_TYPE_INTEGER);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBError);
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&stmt);
	// test duckdb_malloc explicitly
	auto malloced_data = duckdb_malloc(100);
	memcpy(malloced_data, "hello\0", 6);
	REQUIRE(string((char *)malloced_data) == "hello");
	duckdb_free(malloced_data);
	// SUM over INTEGER widens the result column to HUGEINT
	status = duckdb_prepare(tester.connection, "SELECT sum(i) FROM a WHERE i > ?", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);
	REQUIRE(duckdb_nparams(stmt) == 1);
	REQUIRE(duckdb_param_type(nullptr, 0) == DUCKDB_TYPE_INVALID);
	REQUIRE(duckdb_param_type(stmt, 1) == DUCKDB_TYPE_INTEGER);
	REQUIRE(duckdb_prepared_statement_column_count(stmt) == 1);
	REQUIRE(duckdb_prepared_statement_column_type(stmt, 0) == DUCKDB_TYPE_HUGEINT);
	duckdb_destroy_prepare(&stmt);
}
TEST_CASE("Test duckdb_prepared_statement return value APIs", "[capi]") {
	// Exercises the result-column introspection API of prepared statements:
	// duckdb_prepared_statement_column_count / _column_type / _column_logical_type /
	// _column_name, both when all return types resolve and when an untyped
	// parameter leaves them ambiguous.
	duckdb_database db;
	duckdb_connection conn;
	duckdb_prepared_statement stmt;
	REQUIRE(duckdb_open("", &db) == DuckDBSuccess);
	REQUIRE(duckdb_connect(db, &conn) == DuckDBSuccess);

	// Unambiguous return column types
	REQUIRE(duckdb_prepare(conn, "select $1::TEXT, $2::integer, $3::BOOLEAN, $4::FLOAT, $5::DOUBLE", &stmt) ==
	        DuckDBSuccess);
	REQUIRE(duckdb_prepared_statement_column_count(stmt) == 5);
	auto expected_types = {DUCKDB_TYPE_VARCHAR, DUCKDB_TYPE_INTEGER, DUCKDB_TYPE_BOOLEAN, DUCKDB_TYPE_FLOAT,
	                       DUCKDB_TYPE_DOUBLE};
	for (idx_t i = 0; i < 5; i++) {
		// The plain type id and the logical type must both match the cast target.
		REQUIRE(duckdb_prepared_statement_column_type(stmt, i) == *next(expected_types.begin(), i));
		auto logical_type = duckdb_prepared_statement_column_logical_type(stmt, i);
		REQUIRE(logical_type);
		REQUIRE(duckdb_get_type_id(logical_type) == *next(expected_types.begin(), i));
		// Logical types returned by the C API must be destroyed by the caller.
		duckdb_destroy_logical_type(&logical_type);
	}
	// Column names are heap-allocated by the C API; the caller frees them.
	auto column_name = duckdb_prepared_statement_column_name(stmt, 0);
	std::string col_name_str = column_name;
	duckdb_free((void *)column_name);
	REQUIRE(col_name_str == "CAST($1 AS VARCHAR)");
	duckdb_destroy_prepare(&stmt);

	// Return columns contain ambiguous types
	// $3 carries no cast, so the statement cannot be fully bound: the API reports
	// a single column with INVALID type and the name "unknown".
	REQUIRE(duckdb_prepare(conn, "select $1::TEXT, $2::integer, $3, $4::BOOLEAN, $5::FLOAT, $6::DOUBLE", &stmt) ==
	        DuckDBSuccess);
	REQUIRE(duckdb_prepared_statement_column_count(stmt) == 1);
	REQUIRE(duckdb_prepared_statement_column_type(stmt, 0) == DUCKDB_TYPE_INVALID);
	auto logical_type = duckdb_prepared_statement_column_logical_type(stmt, 0);
	REQUIRE(logical_type);
	REQUIRE(duckdb_get_type_id(logical_type) == DUCKDB_TYPE_INVALID);
	duckdb_destroy_logical_type(&logical_type);
	auto col_name_ptr = duckdb_prepared_statement_column_name(stmt, 0);
	col_name_str = col_name_ptr;
	duckdb_free((void *)col_name_ptr);
	REQUIRE(col_name_str == "unknown");
	// Out-of-range column indexes yield a null name.
	REQUIRE(duckdb_prepared_statement_column_name(stmt, 1) == nullptr);
	REQUIRE(duckdb_prepared_statement_column_name(stmt, 5) == nullptr);
	duckdb_destroy_prepare(&stmt);

	duckdb_disconnect(&conn);
	duckdb_close(&db);
}
TEST_CASE("Test duckdb_param_type and duckdb_param_logical_type", "[capi]") {
	// Checks parameter-type introspection, including null-statement and
	// out-of-range handling, before and after execution.
	duckdb_database db;
	duckdb_connection conn;
	duckdb_prepared_statement stmt;
	REQUIRE(duckdb_open("", &db) == DuckDBSuccess);
	REQUIRE(duckdb_connect(db, &conn) == DuckDBSuccess);
	REQUIRE(duckdb_prepare(conn, "select $1::integer, $2::integer", &stmt) == DuckDBSuccess);

	// Parameter indexes are 1-based.
	auto logical_type = duckdb_param_logical_type(stmt, 2);
	REQUIRE(logical_type);
	REQUIRE(duckdb_get_type_id(logical_type) == DUCKDB_TYPE_INTEGER);
	duckdb_destroy_logical_type(&logical_type);
	REQUIRE(duckdb_param_type(stmt, 2) == DUCKDB_TYPE_INTEGER);

	REQUIRE(duckdb_bind_null(stmt, 1) == DuckDBSuccess);
	REQUIRE(duckdb_bind_int32(stmt, 2, 10) == DuckDBSuccess);

	// A null statement or an out-of-range index yields null / INVALID.
	REQUIRE(!duckdb_param_logical_type(nullptr, 2));
	REQUIRE(duckdb_param_type(nullptr, 2) == DUCKDB_TYPE_INVALID);
	REQUIRE(!duckdb_param_logical_type(stmt, 2000));
	REQUIRE(duckdb_param_type(stmt, 2000) == DUCKDB_TYPE_INVALID);

	duckdb_result result;
	REQUIRE(duckdb_execute_prepared(stmt, &result) == DuckDBSuccess);
	// The parameter type stays available after execution.
	REQUIRE(duckdb_param_type(stmt, 2) == DUCKDB_TYPE_INTEGER);
	duckdb_clear_bindings(stmt);
	duckdb_destroy_result(&result);
	duckdb_destroy_prepare(&stmt);
	duckdb_disconnect(&conn);
	duckdb_close(&db);
}
TEST_CASE("Test prepared statements with named parameters in C API", "[capi]") {
	// Named parameters: looking up the index by name, enumerating parameter
	// names, and re-binding after duckdb_clear_bindings.
	CAPITester tester;
	duckdb_result res;
	duckdb_prepared_statement stmt = nullptr;
	duckdb_state status;

	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));

	status = duckdb_prepare(tester.connection, "SELECT CAST($my_val AS BIGINT)", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);

	idx_t parameter_index;
	// test invalid name
	status = duckdb_bind_parameter_index(stmt, &parameter_index, "invalid");
	REQUIRE(status == DuckDBError);
	status = duckdb_bind_parameter_index(stmt, &parameter_index, "my_val");
	REQUIRE(status == DuckDBSuccess);

	REQUIRE(duckdb_param_type(stmt, 1) == DUCKDB_TYPE_BIGINT);
	auto logical_type = duckdb_param_logical_type(stmt, 1);
	REQUIRE(logical_type);
	REQUIRE(duckdb_get_type_id(logical_type) == DUCKDB_TYPE_BIGINT);
	duckdb_destroy_logical_type(&logical_type);

	// Collect all parameter names; duckdb_parameter_name allocates, so free each.
	idx_t param_count = duckdb_nparams(stmt);
	duckdb::vector<string> names;
	for (idx_t i = 0; i < param_count; i++) {
		auto name = duckdb_parameter_name(stmt, i + 1);
		names.push_back(std::string(name));
		duckdb_free((void *)name);
	}
	// Parameter indexes are 1-based; 0 and out-of-range return null.
	REQUIRE(duckdb_parameter_name(stmt, 0) == (const char *)NULL);
	REQUIRE(duckdb_parameter_name(stmt, 2) == (const char *)NULL);
	REQUIRE(duckdb_prepared_statement_column_count(stmt) == 1);
	REQUIRE(duckdb_prepared_statement_column_type(stmt, 0) == DUCKDB_TYPE_BIGINT);

	duckdb::vector<string> expected_names = {"my_val"};
	REQUIRE(names.size() == expected_names.size());
	for (idx_t i = 0; i < expected_names.size(); i++) {
		auto &name = names[i];
		auto &expected_name = expected_names[i];
		REQUIRE(name == expected_name);
	}

	// Binding past the parameter count must fail.
	status = duckdb_bind_boolean(stmt, parameter_index, 1);
	REQUIRE(status == DuckDBSuccess);
	status = duckdb_bind_boolean(stmt, parameter_index + 1, 1);
	REQUIRE(status == DuckDBError);

	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 1);
	duckdb_destroy_result(&res);

	// Clear the bindings, don't rebind the parameter index
	status = duckdb_clear_bindings(stmt);
	REQUIRE(status == DuckDBSuccess);
	status = duckdb_bind_boolean(stmt, parameter_index, 1);
	REQUIRE(status == DuckDBSuccess);
	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 1);
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&stmt);
}
TEST_CASE("Maintain prepared statement types", "[capi]") {
	// Multiplying a SMALLINT constant with a BIGINT parameter must produce the
	// full-width result instead of truncating to the narrow constant type.
	CAPITester tester;
	duckdb_result res;
	duckdb_prepared_statement stmt = nullptr;
	duckdb_state status;

	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));

	status = duckdb_prepare(tester.connection, "select cast(111 as short) * $1", &stmt);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(stmt != nullptr);

	status = duckdb_bind_int64(stmt, 1, 1665);
	REQUIRE(status == DuckDBSuccess);

	status = duckdb_execute_prepared(stmt, &res);
	REQUIRE(status == DuckDBSuccess);
	// 111 * 1665 = 184815, which does not fit in a SMALLINT.
	REQUIRE(duckdb_value_int64(&res, 0, 0) == 184815);
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&stmt);
}
TEST_CASE("Prepared streaming result", "[capi]") {
	// Covers duckdb_execute_prepared_streaming: the materialized fallback, a true
	// streaming result, and streaming over extracted statements.
	CAPITester tester;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	SECTION("non streaming result") {
		REQUIRE(tester.Query("CREATE TABLE t2 (i INTEGER, j INTEGER);"));
		duckdb_prepared_statement stmt;
		REQUIRE(duckdb_prepare(tester.connection,
		                       "INSERT INTO t2 SELECT 2 AS i, 3 AS j RETURNING *, i * j AS i_times_j",
		                       &stmt) == DuckDBSuccess);
		duckdb_result res;
		// INSERT ... RETURNING does not stream: the result is materialized.
		REQUIRE(duckdb_execute_prepared_streaming(stmt, &res) == DuckDBSuccess);
		REQUIRE(!duckdb_result_is_streaming(res));
		duckdb_destroy_result(&res);
		duckdb_destroy_prepare(&stmt);
	}
	SECTION("streaming result") {
		duckdb_prepared_statement stmt;
		REQUIRE(duckdb_prepare(tester.connection, "FROM RANGE(0, 10)", &stmt) == DuckDBSuccess);
		duckdb_result res;
		REQUIRE(duckdb_execute_prepared_streaming(stmt, &res) == DuckDBSuccess);
		REQUIRE(duckdb_result_is_streaming(res));
		// Drain the stream chunk by chunk and verify the consecutive range values.
		duckdb_data_chunk chunk;
		idx_t index = 0;
		while (true) {
			chunk = duckdb_stream_fetch_chunk(res);
			if (!chunk) {
				break;
			}
			auto chunk_size = duckdb_data_chunk_get_size(chunk);
			REQUIRE(chunk_size > 0);
			auto vec = duckdb_data_chunk_get_vector(chunk, 0);
			auto column_type = duckdb_vector_get_column_type(vec);
			REQUIRE(duckdb_get_type_id(column_type) == DUCKDB_TYPE_BIGINT);
			duckdb_destroy_logical_type(&column_type);
			auto data = reinterpret_cast<int64_t *>(duckdb_vector_get_data(vec));
			for (idx_t i = 0; i < chunk_size; i++) {
				REQUIRE(data[i] == int64_t(index + i));
			}
			index += chunk_size;
			duckdb_destroy_data_chunk(&chunk);
		}
		// An exhausted stream keeps returning null chunks.
		REQUIRE(duckdb_stream_fetch_chunk(res) == nullptr);
		duckdb_destroy_result(&res);
		duckdb_destroy_prepare(&stmt);
	}
	SECTION("streaming extracted statements") {
		duckdb_extracted_statements stmts;
		auto n_statements = duckdb_extract_statements(tester.connection, "Select 1; Select 2;", &stmts);
		REQUIRE(n_statements == 2);
		// Each extracted statement can be prepared and streamed independently.
		for (idx_t i = 0; i < n_statements; i++) {
			duckdb_prepared_statement stmt;
			REQUIRE(duckdb_prepare_extracted_statement(tester.connection, stmts, i, &stmt) == DuckDBSuccess);
			duckdb_result res;
			REQUIRE(duckdb_execute_prepared_streaming(stmt, &res) == DuckDBSuccess);
			REQUIRE(duckdb_result_is_streaming(res));
			duckdb_data_chunk chunk;
			chunk = duckdb_stream_fetch_chunk(res);
			REQUIRE(chunk != nullptr);
			REQUIRE(duckdb_data_chunk_get_size(chunk) == 1);
			auto vec = duckdb_data_chunk_get_vector(chunk, 0);
			auto type = duckdb_vector_get_column_type(vec);
			REQUIRE(duckdb_get_type_id(type) == DUCKDB_TYPE_INTEGER);
			duckdb_destroy_logical_type(&type);
			auto data = (int32_t *)duckdb_vector_get_data(vec);
			// Statement i returns the single value i + 1.
			REQUIRE(data[0] == (int32_t)(i + 1));
			REQUIRE(duckdb_stream_fetch_chunk(res) == nullptr);
			duckdb_destroy_data_chunk(&chunk);
			duckdb_destroy_result(&res);
			duckdb_destroy_prepare(&stmt);
		}
		duckdb_destroy_extracted(&stmts);
	}
}
TEST_CASE("Test STRING LITERAL parameter type", "[capi]") {
	// Binding a varchar to an untyped parameter must surface the parameter type
	// as STRING_LITERAL.
	duckdb_database database;
	duckdb_connection connection;
	duckdb_prepared_statement prepared;

	auto state = duckdb_open("", &database);
	REQUIRE(state == DuckDBSuccess);
	state = duckdb_connect(database, &connection);
	REQUIRE(state == DuckDBSuccess);

	state = duckdb_prepare(connection, "SELECT ?", &prepared);
	REQUIRE(state == DuckDBSuccess);
	state = duckdb_bind_varchar(prepared, 1, "a");
	REQUIRE(state == DuckDBSuccess);
	REQUIRE(duckdb_param_type(prepared, 1) == DUCKDB_TYPE_STRING_LITERAL);

	duckdb_destroy_prepare(&prepared);
	duckdb_disconnect(&connection);
	duckdb_close(&database);
}

View File

@@ -0,0 +1,416 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
string BuildSettingsString(const duckdb::vector<string> &settings) {
	// Renders the settings as a single-quoted JSON object mapping each setting
	// name to "true", e.g. '{"CPU_TIME": "true", "EXTRA_INFO": "true"}'.
	string json = "'{";
	string separator;
	for (const auto &setting : settings) {
		json += separator + "\"" + setting + "\": \"true\"";
		separator = ", ";
	}
	json += "}'";
	return json;
}
void RetrieveMetrics(duckdb_profiling_info info, duckdb::map<string, double> &cumulative_counter,
                     duckdb::map<string, double> &cumulative_result, const idx_t depth) {
	// Reads all metrics of one profiling node (depth 0 = query node, deeper =
	// operator nodes), verifies which metric keys may appear at that depth, and
	// accumulates the numeric metrics into the caller-provided maps.
	auto map = duckdb_profiling_info_get_metrics(info);
	REQUIRE(map);
	auto count = duckdb_get_map_size(map);
	REQUIRE(count != 0);
	// Test index out of bounds for MAP value.
	if (depth == 0) {
		auto invalid_key = duckdb_get_map_key(map, 10000000);
		REQUIRE(!invalid_key);
		auto invalid_value = duckdb_get_map_value(map, 10000000);
		REQUIRE(!invalid_value);
	}
	for (idx_t i = 0; i < count; i++) {
		auto key = duckdb_get_map_key(map, i);
		REQUIRE(key);
		auto value = duckdb_get_map_value(map, i);
		REQUIRE(value);
		auto key_c_str = duckdb_get_varchar(key);
		auto value_c_str = duckdb_get_varchar(value);
		auto key_str = duckdb::string(key_c_str);
		auto value_str = duckdb::string(value_c_str);
		if (depth == 0) {
			// Operator-level metrics must not appear on the query (root) node.
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::OPERATOR_CARDINALITY));
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::OPERATOR_ROWS_SCANNED));
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::OPERATOR_TIMING));
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::OPERATOR_NAME));
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::OPERATOR_TYPE));
		} else {
			// Query-level metrics must not appear on operator nodes.
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::QUERY_NAME));
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::BLOCKED_THREAD_TIME));
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::LATENCY));
			REQUIRE(key_str != EnumUtil::ToString(MetricsType::ROWS_RETURNED));
		}
		if (key_str == EnumUtil::ToString(MetricsType::QUERY_NAME) ||
		    key_str == EnumUtil::ToString(MetricsType::OPERATOR_NAME) ||
		    key_str == EnumUtil::ToString(MetricsType::OPERATOR_TYPE) ||
		    key_str == EnumUtil::ToString(MetricsType::EXTRA_INFO)) {
			// Textual metrics only need to be non-empty.
			REQUIRE(!value_str.empty());
		} else {
			// Every other metric must parse as a number.
			double result = 0;
			try {
				result = std::stod(value_str);
			} catch (std::invalid_argument &e) {
				REQUIRE(false);
			}
			// Counters are summed across nodes; results are captured once (only
			// while the pre-initialized slot is still zero).
			if (cumulative_counter.find(key_str) != cumulative_counter.end()) {
				cumulative_counter[key_str] += result;
			}
			if (cumulative_result.find(key_str) != cumulative_result.end() && cumulative_result[key_str] == 0) {
				cumulative_result[key_str] = result;
			}
		}
		// Values and their varchar renderings are owned by the caller.
		duckdb_destroy_value(&key);
		duckdb_destroy_value(&value);
		duckdb_free(key_c_str);
		duckdb_free(value_c_str);
	}
	duckdb_destroy_value(&map);
}
void TraverseTree(duckdb_profiling_info profiling_info, duckdb::map<string, double> &cumulative_counter,
                  duckdb::map<string, double> &cumulative_result, const idx_t depth) {
	// Collect this node's metrics, then walk the profiling tree depth-first.
	RetrieveMetrics(profiling_info, cumulative_counter, cumulative_result, depth);

	const auto child_count = duckdb_profiling_info_get_child_count(profiling_info);
	if (depth == 0) {
		// The root (query) node must have at least one child node.
		REQUIRE(child_count != 0);
	}
	for (idx_t child_idx = 0; child_idx < child_count; child_idx++) {
		auto child_info = duckdb_profiling_info_get_child(profiling_info, child_idx);
		TraverseTree(child_info, cumulative_counter, cumulative_result, depth + 1);
	}
}
idx_t ConvertToInt(double value) {
	// Scale by 1000 and truncate, so floating-point metrics can be compared for
	// approximate equality as integers.
	return static_cast<idx_t>(value * 1000.0);
}
TEST_CASE("Test profiling with a single metric and get_value", "[capi]") {
	// With only CPU_TIME enabled, duckdb_profiling_info_get_value must return
	// null for a disabled metric, and the tree must still traverse cleanly.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA enable_profiling = 'no_output'"));

	// test only CPU_TIME profiling
	duckdb::vector<string> settings = {"CPU_TIME"};
	REQUIRE_NO_FAIL(tester.Query("PRAGMA custom_profiling_settings=" + BuildSettingsString(settings)));
	REQUIRE_NO_FAIL(tester.Query("SELECT 42"));

	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info != nullptr);

	// Retrieve a metric that is not enabled.
	REQUIRE(duckdb_profiling_info_get_value(info, "EXTRA_INFO") == nullptr);

	duckdb::map<string, double> cumulative_counter;
	duckdb::map<string, double> cumulative_result;
	TraverseTree(info, cumulative_counter, cumulative_result, 0);
	tester.Cleanup();
}
TEST_CASE("Test profiling with cumulative metrics", "[capi]") {
	// CPU_TIME must equal the sum of OPERATOR_TIMING over all operators, and
	// CUMULATIVE_CARDINALITY the sum of OPERATOR_CARDINALITY.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA enable_profiling = 'no_output'"));

	// test all profiling metrics
	duckdb::vector<string> settings = {"BLOCKED_THREAD_TIME", "CPU_TIME", "CUMULATIVE_CARDINALITY", "EXTRA_INFO",
	                                   "OPERATOR_CARDINALITY", "OPERATOR_TIMING"};
	REQUIRE_NO_FAIL(tester.Query("PRAGMA custom_profiling_settings=" + BuildSettingsString(settings)));
	REQUIRE_NO_FAIL(tester.Query("SELECT 42"));

	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info != nullptr);

	// Pre-initialized slots select which metrics get accumulated/captured.
	duckdb::map<string, double> cumulative_counter = {{"OPERATOR_TIMING", 0}, {"OPERATOR_CARDINALITY", 0}};
	duckdb::map<string, double> cumulative_result {
	    {"CPU_TIME", 0},
	    {"CUMULATIVE_CARDINALITY", 0},
	};
	TraverseTree(info, cumulative_counter, cumulative_result, 0);

	// Compare at truncated integer precision to avoid floating-point noise.
	REQUIRE(ConvertToInt(cumulative_result["CPU_TIME"]) == ConvertToInt(cumulative_counter["OPERATOR_TIMING"]));
	REQUIRE(ConvertToInt(cumulative_result["CUMULATIVE_CARDINALITY"]) ==
	        ConvertToInt(cumulative_counter["OPERATOR_CARDINALITY"]));
	tester.Cleanup();
}
TEST_CASE("Test profiling without profiling enabled", "[capi]") {
	// Without profiling enabled, the connection exposes no profiling tree.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));

	// Retrieve info without profiling enabled.
	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info == nullptr);
	tester.Cleanup();
}
TEST_CASE("Test profiling with detailed profiling mode enabled", "[capi]") {
	// Enables detailed profiling mode and checks that the resulting tree can
	// still be retrieved and traversed.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA enable_profiling = 'no_output'"));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA profiling_mode = 'detailed'"));
	REQUIRE_NO_FAIL(tester.Query("SELECT 42"));

	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info != nullptr);

	duckdb::map<string, double> cumulative_counter;
	duckdb::map<string, double> cumulative_result;
	TraverseTree(info, cumulative_counter, cumulative_result, 0);
	tester.Cleanup();
}
TEST_CASE("Test invalid use of profiling API", "[capi]") {
	// Verifies null-argument and invalid-argument handling of the profiling and
	// MAP value accessors.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA enable_profiling = 'no_output'"));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA profiling_mode = 'detailed'"));
	REQUIRE_NO_FAIL(tester.Query("SELECT 42"));
	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info != nullptr);

	// Incorrect usage tests.
	auto map = duckdb_profiling_info_get_metrics(nullptr);
	REQUIRE(map == nullptr);
	map = duckdb_profiling_info_get_metrics(info);

	// A null value or a non-MAP value must report a size of zero.
	auto dummy_value = duckdb_create_bool(true);
	auto count = duckdb_get_map_size(nullptr);
	REQUIRE(count == 0);
	count = duckdb_get_map_size(dummy_value);
	REQUIRE(count == 0);
	count = duckdb_get_map_size(map);

	for (idx_t i = 0; i < count; i++) {
		// Null maps, invalid indexes and non-MAP values all yield null keys/values.
		auto key = duckdb_get_map_key(nullptr, i);
		REQUIRE(key == nullptr);
		key = duckdb_get_map_key(map, DConstants::INVALID_INDEX);
		REQUIRE(key == nullptr);
		key = duckdb_get_map_key(dummy_value, i);
		REQUIRE(key == nullptr);
		auto value = duckdb_get_map_value(nullptr, i);
		REQUIRE(value == nullptr);
		value = duckdb_get_map_value(map, DConstants::INVALID_INDEX);
		REQUIRE(value == nullptr);
		value = duckdb_get_map_value(dummy_value, i);
		REQUIRE(value == nullptr);
		// One iteration covers all the invalid cases.
		break;
	}
	duckdb_destroy_value(&dummy_value);
	duckdb_destroy_value(&map);
	tester.Cleanup();
}
TEST_CASE("Test profiling after throwing an error", "[capi]") {
	// After a failed prepare, the connection must no longer expose the profiling
	// info of the previous (successful) query.
	CAPITester tester;
	auto main_db = TestCreatePath("profiling_error.db");
	REQUIRE(tester.OpenDatabase(main_db.c_str()));
	// Attach the same database file again; IF NOT EXISTS makes this idempotent.
	REQUIRE_NO_FAIL(tester.Query("ATTACH IF NOT EXISTS '" + main_db + "' (TYPE DUCKDB)"));
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE profiling_error.tbl AS SELECT range AS id FROM range(10)"));
	REQUIRE_NO_FAIL(tester.Query("SET enable_profiling = 'no_output'"));
	REQUIRE_NO_FAIL(tester.Query("SET profiling_mode = 'standard'"));

	CAPIPrepared prepared_q1;
	CAPIPending pending_q1;
	REQUIRE(prepared_q1.Prepare(tester, "SELECT * FROM profiling_error.tbl"));
	REQUIRE(pending_q1.Pending(prepared_q1));
	auto result = pending_q1.Execute();
	REQUIRE(result);
	REQUIRE(!result->HasError());

	// The successful query produced profiling info.
	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info != nullptr);

	// A failing prepare clears the profiling info of the connection.
	CAPIPrepared prepared_q2;
	REQUIRE(!prepared_q2.Prepare(tester, "SELECT * FROM profiling_error.does_not_exist"));
	info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info == nullptr);
	tester.Cleanup();
}
TEST_CASE("Test profiling with Extra Info enabled", "[capi]") {
	// Enables only the EXTRA_INFO metric and checks it is present (and non-empty,
	// via RetrieveMetrics' invariants) on a child node of the profiling tree.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA enable_profiling = 'no_output'"));
	duckdb::vector<string> settings = {"EXTRA_INFO"};
	REQUIRE_NO_FAIL(tester.Query("PRAGMA custom_profiling_settings=" + BuildSettingsString(settings)));
	REQUIRE_NO_FAIL(tester.Query("SELECT 1"));
	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info);
	// Retrieve the child node.
	auto child_info = duckdb_profiling_info_get_child(info, 0);
	REQUIRE(duckdb_profiling_info_get_child_count(child_info) != 0);
	auto map = duckdb_profiling_info_get_metrics(child_info);
	REQUIRE(map);
	auto count = duckdb_get_map_size(map);
	REQUIRE(count != 0);
	bool found_extra_info = false;
	for (idx_t i = 0; i < count; i++) {
		auto key = duckdb_get_map_key(map, i);
		REQUIRE(key);
		auto key_c_str = duckdb_get_varchar(key);
		auto key_str = duckdb::string(key_c_str);
		auto value = duckdb_get_map_value(map, i);
		REQUIRE(value);
		auto value_c_str = duckdb_get_varchar(value);
		auto value_str = duckdb::string(value_c_str);
		if (key_str == EnumUtil::ToString(MetricsType::EXTRA_INFO)) {
			// NOTE(review): string::find returns npos (non-zero, i.e. truthy) when the
			// substring is ABSENT and 0 (falsy) when it starts at index 0, so these two
			// REQUIREs are vacuous/inverted. They presumably should compare against
			// string::npos — confirm what EXTRA_INFO contains for this query before
			// changing them, since "SELECT 1" has no ORDER BY clause.
			REQUIRE(value_str.find("__order_by__"));
			REQUIRE(value_str.find("ASC"));
			found_extra_info = true;
		}
		if (key) {
			duckdb_destroy_value(&key);
			duckdb_free(key_c_str);
		}
		if (value) {
			duckdb_destroy_value(&value);
			duckdb_free(value_c_str);
		}
	}
	REQUIRE(found_extra_info);
	duckdb_destroy_value(&map);
	tester.Cleanup();
}
TEST_CASE("Test profiling with the appender", "[capi]") {
	// Flushing a query-appender must be profiled, and the profiled QUERY_NAME
	// must match the user-provided appender query.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE tbl (i INT PRIMARY KEY, value VARCHAR)"));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA enable_profiling = 'no_output'"));
	REQUIRE_NO_FAIL(tester.Query("SET profiling_coverage='ALL'"));

	duckdb_appender appender;
	string query = "INSERT INTO tbl FROM my_appended_data";
	duckdb_logical_type types[2];
	types[0] = duckdb_create_logical_type(DUCKDB_TYPE_INTEGER);
	types[1] = duckdb_create_logical_type(DUCKDB_TYPE_VARCHAR);
	auto status = duckdb_appender_create_query(tester.connection, query.c_str(), 2, types, "my_appended_data", nullptr,
	                                           &appender);
	duckdb_destroy_logical_type(&types[0]);
	duckdb_destroy_logical_type(&types[1]);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_appender_error(appender) == nullptr);

	// Append one row, then flush and close to run the profiled INSERT.
	REQUIRE(duckdb_appender_begin_row(appender) == DuckDBSuccess);
	REQUIRE(duckdb_append_int32(appender, 1) == DuckDBSuccess);
	REQUIRE(duckdb_append_varchar(appender, "hello world") == DuckDBSuccess);
	REQUIRE(duckdb_appender_end_row(appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_flush(appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_close(appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_destroy(&appender) == DuckDBSuccess);

	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info);

	// Check that the query name matches the appender query.
	auto query_name = duckdb_profiling_info_get_value(info, "QUERY_NAME");
	REQUIRE(query_name);
	auto query_name_c_str = duckdb_get_varchar(query_name);
	auto query_name_str = duckdb::string(query_name_c_str);
	REQUIRE(query_name_str == query);
	duckdb_destroy_value(&query_name);
	duckdb_free(query_name_c_str);

	duckdb::map<string, double> cumulative_counter;
	duckdb::map<string, double> cumulative_result;
	TraverseTree(info, cumulative_counter, cumulative_result, 0);
	tester.Cleanup();
}
TEST_CASE("Test profiling with the non-query appender", "[capi]") {
	// An appender created without an explicit query runs an internally generated
	// INSERT; the profiled QUERY_NAME must reflect that generated statement.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE_NO_FAIL(tester.Query("CREATE TABLE test (i INTEGER)"));
	REQUIRE_NO_FAIL(tester.Query("PRAGMA enable_profiling = 'no_output'"));
	REQUIRE_NO_FAIL(tester.Query("SET profiling_coverage='ALL'"));

	duckdb_appender appender;
	REQUIRE(duckdb_appender_create(tester.connection, nullptr, "test", &appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_error(appender) == nullptr);

	// Appending a row.
	REQUIRE(duckdb_appender_begin_row(appender) == DuckDBSuccess);
	REQUIRE(duckdb_append_int32(appender, 42) == DuckDBSuccess);

	// Finish and flush.
	REQUIRE(duckdb_appender_end_row(appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_flush(appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_close(appender) == DuckDBSuccess);
	REQUIRE(duckdb_appender_destroy(&appender) == DuckDBSuccess);

	auto info = duckdb_get_profiling_info(tester.connection);
	REQUIRE(info);

	// Check that the query name matches the appender query.
	auto query_name = duckdb_profiling_info_get_value(info, "QUERY_NAME");
	REQUIRE(query_name);
	auto query_name_c_str = duckdb_get_varchar(query_name);
	auto query_name_str = duckdb::string(query_name_c_str);
	auto query = "INSERT INTO main.test FROM __duckdb_internal_appended_data";
	REQUIRE(query_name_str == query);
	duckdb_destroy_value(&query_name);
	duckdb_free(query_name_c_str);

	duckdb::map<string, double> cumulative_counter;
	duckdb::map<string, double> cumulative_result;
	TraverseTree(info, cumulative_counter, cumulative_result, 0);
	tester.Cleanup();
}

View File

@@ -0,0 +1,81 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
// Extra data handed to the replacement-scan callback. Once registered via
// duckdb_add_replacement_scan, it is released through destroy_base_number.
struct MyBaseNumber {
	int number; // offset added to the numeric table name by number_scanner
};
void destroy_base_number(void *data) {
auto num = (MyBaseNumber *)data;
delete num;
}
void number_scanner(duckdb_replacement_scan_info info, const char *table_name, void *data) {
	// Replacement scan: rewrite scans of purely numeric table names into
	// range(name + base), where base comes from the registered MyBaseNumber.
	long long parsed;
	try {
		parsed = std::stoll(table_name);
	} catch (...) {
		// not a number — leave the table reference for DuckDB to resolve
		return;
	}
	auto base = static_cast<MyBaseNumber *>(data);
	duckdb_replacement_scan_set_function_name(info, "range");
	auto upper_bound = duckdb_create_int64(parsed + base->number);
	duckdb_replacement_scan_add_parameter(info, upper_bound);
	duckdb_destroy_value(&upper_bound);
}
TEST_CASE("Test replacement scans in C API", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	// Register a replacement scan that turns numeric table names into
	// range(name + base). Ownership of base_number passes to the database, which
	// frees it through destroy_base_number, so the raw new here does not leak.
	auto base_number = new MyBaseNumber();
	base_number->number = 3;
	duckdb_add_replacement_scan(tester.database, number_scanner, (void *)base_number, destroy_base_number);
	// 0-4
	result = tester.Query("SELECT * FROM \"2\"");
	REQUIRE(result->row_count() == 5);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 0);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 1);
	REQUIRE(result->Fetch<int64_t>(0, 2) == 2);
	REQUIRE(result->Fetch<int64_t>(0, 3) == 3);
	REQUIRE(result->Fetch<int64_t>(0, 4) == 4);
	// Mutating the callback state changes the produced range for later queries.
	base_number->number = 1;
	// 0-2
	result = tester.Query("SELECT * FROM \"2\"");
	REQUIRE(result->row_count() == 3);
	REQUIRE(result->Fetch<int64_t>(0, 0) == 0);
	REQUIRE(result->Fetch<int64_t>(0, 1) == 1);
	REQUIRE(result->Fetch<int64_t>(0, 2) == 2);
	// not a number
	REQUIRE_FAIL(tester.Query("SELECT * FROM nonexistant"));
}
void error_replacement_scan(duckdb_replacement_scan_info info, const char *table_name, void *data) {
	// First check that null arguments are tolerated, then report a real error.
	duckdb_replacement_scan_set_error(nullptr, nullptr);
	duckdb_replacement_scan_set_error(info, nullptr);
	duckdb_replacement_scan_set_error(info, "error in replacement scan");
}
TEST_CASE("Test error replacement scan", "[capi]") {
	// A replacement scan that sets an error must fail the triggering query.
	CAPITester tester;

	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	duckdb_add_replacement_scan(tester.database, error_replacement_scan, nullptr, nullptr);

	// error
	REQUIRE_FAIL(tester.Query("SELECT * FROM nonexistant"));
}

View File

@@ -0,0 +1,184 @@
#include "capi_tester.hpp"
#include "duckdb.h"
using namespace duckdb;
using namespace std;
TEST_CASE("Test streaming results in C API", "[capi]") {
	CAPITester tester;
	CAPIPrepared prepared;
	CAPIPending pending;
	duckdb::unique_ptr<CAPIResult> result;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(prepared.Prepare(tester, "SELECT i::UINT32 FROM range(1000000) tbl(i)"));
	REQUIRE(pending.PendingStreaming(prepared));
	// Drive the pending query one task at a time until the result is ready.
	while (true) {
		auto state = pending.ExecuteTask();
		REQUIRE(state != DUCKDB_PENDING_ERROR);
		if (state == DUCKDB_PENDING_RESULT_READY) {
			break;
		}
	}
	result = pending.Execute();
	REQUIRE(result);
	REQUIRE(!result->HasError());
	// Stream the chunks, checking ordering and that the total never exceeds the
	// number of generated rows.
	auto chunk = result->StreamChunk();
	idx_t value = duckdb::DConstants::INVALID_INDEX;
	idx_t result_count = 0;
	while (chunk) {
		auto old_value = value;
		auto vector = chunk->GetVector(0);
		uint32_t *data = (uint32_t *)duckdb_vector_get_data(vector);
		value = data[0];
		if (old_value != duckdb::DConstants::INVALID_INDEX) {
			// We select from a range, so we can expect every starting value of a new chunk to be higher than the last
			// one.
			REQUIRE(value > old_value);
		}
		REQUIRE(chunk->size() > 0);
		result_count += chunk->size();
		REQUIRE(result_count <= 1000000);
		chunk = result->StreamChunk();
	}
}
TEST_CASE("Test other methods on streaming results in C API", "[capi]") {
	CAPITester tester;
	CAPIPrepared prepared;
	CAPIPending pending;
	duckdb::unique_ptr<CAPIResult> result;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(prepared.Prepare(tester, "SELECT i::UINT32 FROM range(1000000) tbl(i)"));
	REQUIRE(pending.PendingStreaming(prepared));
	// Run tasks until the streaming result is ready.
	while (true) {
		auto state = pending.ExecuteTask();
		REQUIRE(state != DUCKDB_PENDING_ERROR);
		if (state == DUCKDB_PENDING_RESULT_READY) {
			break;
		}
	}
	// Once we've done this, the StreamQueryResult is made
	result = pending.Execute();
	REQUIRE(result);
	REQUIRE(!result->HasError());
	REQUIRE(result->IsStreaming());
	// interrogate the result with various methods
	// On a not-yet-materialized streaming result the chunk/row accessors report
	// zero/null, as asserted below.
	auto chunk_count = result->ChunkCount();
	REQUIRE(chunk_count == 0);
	auto column_count = result->ColumnCount();
	(void)column_count;
	auto column_name = result->ColumnName(0);
	(void)column_name;
	auto column_type = result->ColumnType(0);
	(void)column_type;
	auto error_message = result->ErrorMessage();
	REQUIRE(error_message == nullptr);
	auto fetched_chunk = result->FetchChunk(0);
	REQUIRE(fetched_chunk == nullptr);
	auto has_error = result->HasError();
	REQUIRE(has_error == false);
	auto row_count = result->row_count();
	REQUIRE(row_count == 0);
	auto rows_changed = result->rows_changed();
	REQUIRE(rows_changed == 0);
	// this succeeds because the result is materialized if a stream-result method hasn't been used yet
	auto column_data = result->ColumnData<uint32_t>(0);
	REQUIRE(column_data != nullptr);
	// this materializes the result
	auto is_null = result->IsNull(0, 0);
	REQUIRE(is_null == false);
}
TEST_CASE("Test streaming arrow results in C API", "[capi][arrow]") {
	// Streams a one-million-row result, converts each chunk to an ArrowArray and
	// verifies the total row count.
	CAPITester tester;
	CAPIPrepared prepared;
	CAPIPending pending;
	duckdb::unique_ptr<CAPIResult> result;

	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	REQUIRE(prepared.Prepare(tester, "SELECT i::UINT32 FROM range(1000000) tbl(i)"));
	REQUIRE(pending.PendingStreaming(prepared));

	// Run tasks until the streaming result is ready.
	while (true) {
		auto state = pending.ExecuteTask();
		REQUIRE(state != DUCKDB_PENDING_ERROR);
		if (state == DUCKDB_PENDING_RESULT_READY) {
			break;
		}
	}
	result = pending.Execute();
	REQUIRE(result);
	REQUIRE(!result->HasError());

	auto chunk = result->StreamChunk();
	// Check handle null out_array
	duckdb_result_arrow_array(result->InternalResult(), chunk->GetChunk(), nullptr);

	// int64_t matches ArrowArray::length and avoids narrowing on accumulation.
	int64_t nb_row = 0;
	while (chunk) {
		// Zero-initialized stack ArrowArray per chunk instead of new/delete.
		ArrowArray arrow_array_data {};
		ArrowArray *arrow_array = &arrow_array_data;
		duckdb_result_arrow_array(result->InternalResult(), chunk->GetChunk(), (duckdb_arrow_array *)&arrow_array);
		nb_row += arrow_array->length;
		chunk = result->StreamChunk();
		// Release the Arrow-owned buffers through the array's release callback.
		arrow_array->release(arrow_array);
	}
	REQUIRE(nb_row == 1000000);
}
TEST_CASE("Test query progress and interrupt in C API", "[capi]") {
	// Exercises duckdb_query_progress (before, during and without a query) and
	// duckdb_interrupt, including their null-connection handling.
	CAPITester tester;
	CAPIPrepared prepared;
	CAPIPending pending;
	duckdb::unique_ptr<CAPIResult> result;
	// test null handling
	REQUIRE(duckdb_query_progress(nullptr).percentage == -1.0);
	duckdb_interrupt(nullptr);
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	// Single-threaded execution keeps per-task progress deterministic here.
	REQUIRE_NO_FAIL(tester.Query("SET threads=1"));
	REQUIRE_NO_FAIL(tester.Query("create table tbl as select range a, mod(range,10) b from range(10000);"));
	REQUIRE_NO_FAIL(tester.Query("create table tbl_2 as select range a from range(10000);"));
	REQUIRE_NO_FAIL(tester.Query("set enable_progress_bar=true;"));
	REQUIRE_NO_FAIL(tester.Query("set enable_progress_bar_print=false;"));
	// test no progress before query
	REQUIRE(duckdb_query_progress(tester.connection).percentage == -1.0);
	// test zero progress with query
	REQUIRE(prepared.Prepare(tester, "select count(*) from tbl where a = (select min(a) from tbl_2)"));
	REQUIRE(pending.PendingStreaming(prepared));
	REQUIRE(duckdb_query_progress(tester.connection).percentage == 0.0);
	// test progress
	// Execute tasks one at a time until some progress is reported.
	while (duckdb_query_progress(tester.connection).percentage == 0.0) {
		auto state = pending.ExecuteTask();
		REQUIRE(state == DUCKDB_PENDING_RESULT_NOT_READY);
	}
	REQUIRE(duckdb_query_progress(tester.connection).percentage >= 0.0);
	// test interrupt
	// After an interrupt, continuing execution must eventually surface an error.
	duckdb_interrupt(tester.connection);
	while (true) {
		auto state = pending.ExecuteTask();
		REQUIRE(state != DUCKDB_PENDING_RESULT_READY);
		if (state == DUCKDB_PENDING_ERROR) {
			break;
		}
	}
}

View File

@@ -0,0 +1,127 @@
#include "capi_tester.hpp"
#include "duckdb.h"
using namespace duckdb;
using namespace std;
// Exercises the duckdb_table_description_* C API: creation failure modes
// (missing table, invalid catalog/schema), duckdb_column_has_default, and the
// column count/name/type getters, both for an in-memory table and for a table
// living in an attached database file.
TEST_CASE("Test the table description in the C API", "[capi]") {
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	duckdb_table_description table_description = nullptr;
	tester.Query("SET threads=1;");
	// Test a non-existent table.
	auto status = duckdb_table_description_create(tester.connection, nullptr, "test", &table_description);
	REQUIRE(status == DuckDBError);
	// destroying after a failed create must be safe
	duckdb_table_description_destroy(&table_description);
	status = duckdb_table_description_create_ext(tester.connection, "hello", "world", "test", &table_description);
	REQUIRE(status == DuckDBError);
	duckdb_table_description_destroy(&table_description);
	// Create an in-memory table and a table in an external file.
	tester.Query("CREATE TABLE test (i INTEGER, j INTEGER default 5)");
	auto test_dir = TestDirectoryPath();
	auto attach_query = "ATTACH '" + test_dir + "/ext_description.db'";
	tester.Query(attach_query);
	tester.Query("CREATE TABLE ext_description.test(my_column INTEGER)");
	// Test invalid catalog and schema.
	status =
	    duckdb_table_description_create_ext(tester.connection, "non-existent", nullptr, "test", &table_description);
	REQUIRE(status == DuckDBError);
	duckdb_table_description_destroy(&table_description);
	status = duckdb_table_description_create(tester.connection, "non-existent", "test", &table_description);
	REQUIRE(status == DuckDBError);
	duckdb_table_description_destroy(&table_description);
	// Valid description of the in-memory table (columns: i, and j with DEFAULT 5).
	status = duckdb_table_description_create(tester.connection, nullptr, "test", &table_description);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_table_description_error(table_description) == nullptr);
	bool has_default;
	SECTION("Passing nullptr to has_default") {
		REQUIRE(duckdb_column_has_default(table_description, 2, nullptr) == DuckDBError);
		REQUIRE(duckdb_column_has_default(nullptr, 2, &has_default) == DuckDBError);
	}
	SECTION("Out of range column for has_default") {
		REQUIRE(duckdb_column_has_default(table_description, 2, &has_default) == DuckDBError);
	}
	SECTION("In range column - not default") {
		// column 0 (i) has no DEFAULT clause
		REQUIRE(duckdb_column_has_default(table_description, 0, &has_default) == DuckDBSuccess);
		REQUIRE(has_default == false);
	}
	SECTION("In range column - default") {
		// column 1 (j) was declared with "default 5"
		REQUIRE(duckdb_column_has_default(table_description, 1, &has_default) == DuckDBSuccess);
		REQUIRE(has_default == true);
	}
	duckdb_table_description_destroy(&table_description);
	// Let's get information about the external table.
	status =
	    duckdb_table_description_create_ext(tester.connection, "ext_description", nullptr, "test", &table_description);
	REQUIRE(status == DuckDBSuccess);
	REQUIRE(duckdb_table_description_error(table_description) == nullptr);
	SECTION("Passing nullptr to get_column_count") {
		REQUIRE(duckdb_table_description_get_column_count(nullptr) == 0);
	}
	SECTION("Passing nullptr to get_name") {
		REQUIRE(duckdb_table_description_get_column_name(nullptr, 0) == nullptr);
	}
	SECTION("Passing nullptr to get_type") {
		REQUIRE(duckdb_table_description_get_column_type(nullptr, 0) == nullptr);
	}
	SECTION("Out of range column for get_name") {
		REQUIRE(duckdb_table_description_get_column_name(table_description, 1) == nullptr);
	}
	SECTION("Out of range column for get_type") {
		REQUIRE(duckdb_table_description_get_column_type(table_description, 1) == nullptr);
	}
	SECTION("get the column count") {
		auto column_count = duckdb_table_description_get_column_count(table_description);
		REQUIRE(column_count == 1);
	}
	SECTION("In range column - get the name") {
		// the returned name is owned by the caller and released via duckdb_free
		auto column_name = duckdb_table_description_get_column_name(table_description, 0);
		string expected = "my_column";
		REQUIRE(!expected.compare(column_name));
		duckdb_free(column_name);
	}
	SECTION("In range column - get the type") {
		auto column_type = duckdb_table_description_get_column_type(table_description, 0);
		auto type_id = duckdb_get_type_id(column_type);
		REQUIRE(type_id == DUCKDB_TYPE_INTEGER);
		duckdb_destroy_logical_type(&column_type);
	}
	duckdb_table_description_destroy(&table_description);
}
// Verifies that duckdb_get_table_names returns the fully-qualified, correctly
// quoted names of all tables referenced by a query.
TEST_CASE("Test getting the table names of a query in the C API", "[capi]") {
	// Two schemas (one with a dot in its identifier) and one table in each.
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));
	tester.Query("CREATE SCHEMA schema1");
	tester.Query("CREATE SCHEMA \"schema.2\"");
	tester.Query("CREATE TABLE schema1.\"table.1\"(i INT)");
	tester.Query("CREATE TABLE \"schema.2\".\"table.2\"(i INT)");

	// Ask for the qualified table names referenced by a two-table join.
	string query = "SELECT * FROM schema1.\"table.1\", \"schema.2\".\"table.2\"";
	auto name_list = duckdb_get_table_names(tester.connection, query.c_str(), true);
	auto name_count = duckdb_get_list_size(name_list);
	REQUIRE(name_count == 2);

	// Both names must come back qualified and quoted; their order is not pinned.
	duckdb::unordered_set<string> expected_names = {"schema1.\"table.1\"", "\"schema.2\".\"table.2\""};
	for (idx_t entry_idx = 0; entry_idx < name_count; entry_idx++) {
		auto entry = duckdb_get_list_child(name_list, entry_idx);
		auto entry_text = duckdb_get_varchar(entry);
		REQUIRE(expected_names.count(entry_text) == 1);
		duckdb_free(entry_text);
		duckdb_destroy_value(&entry);
	}
	duckdb_destroy_value(&name_list);
	tester.Cleanup();
}

View File

@@ -0,0 +1,64 @@
#include "capi_tester.hpp"
#include "duckdb.h"
using namespace duckdb;
using namespace std;
// Returns the absolute difference |left - right| of two 64-bit integers.
// The previous implementation called abs(left - right); plain abs() takes an
// int, so the int64_t difference could be silently truncated before the
// absolute value was taken. Comparing first avoids that truncation and does
// not depend on which abs() overloads are visible.
int64_t Difference(int64_t left, int64_t right) {
	return left > right ? left - right : right - left;
}
// Asserts that two duckdb_decimal values are identical in width, scale, and
// the full 128-bit value. The previous version only compared value.upper, so
// two decimals that differed in the lower 64 bits would pass as equal.
void CompareDuckDBDecimal(const duckdb_decimal &left, const duckdb_decimal &right) {
	REQUIRE(left.scale == right.scale);
	REQUIRE(left.width == right.width);
	REQUIRE(left.value.lower == right.value.lower);
	REQUIRE(left.value.upper == right.value.upper);
}
// Runs 'query' with '%s' substituted by 'type_cast' and verifies that fetching
// the single result cell as a duckdb_decimal yields the all-zero decimal:
// fetching any non-DECIMAL type as DECIMAL results in 0.
void TestFetchAsDecimal(CAPITester &tester, string query, string type_cast) {
	auto query_result = tester.Query(StringUtil::Format(query, type_cast));
	REQUIRE_NO_FAIL(*query_result);
	// the expected result: a fully zeroed decimal
	duckdb_decimal zero_decimal;
	zero_decimal.width = 0;
	zero_decimal.scale = 0;
	zero_decimal.value.upper = 0;
	zero_decimal.value.lower = 0;
	auto fetched = query_result->Fetch<duckdb_decimal>(0, 0);
	CompareDuckDBDecimal(zero_decimal, fetched);
}
// Casting from each non-DECIMAL source type must produce the zero decimal when
// fetched via the C API (see TestFetchAsDecimal above).
TEST_CASE("Test CAPI duckdb_decimal_as_properties", "[capi]") {
	CAPITester tester;
	// open the database in in-memory mode
	REQUIRE(tester.OpenDatabase(nullptr));
	// one query template per source type; '%s' receives the type name
	const char *cast_queries[] = {
	    "SELECT CAST(123.45678 AS %s)",  // DOUBLE
	    "SELECT CAST(123.45678 AS %s)",  // FLOAT
	    "SELECT CAST(123124 AS %s)",     // HUGEINT
	    "SELECT CAST(123124 AS %s)",     // UHUGEINT
	    "SELECT CAST(123124 AS %s)",     // BIGINT
	    "SELECT CAST(123124 AS %s)",     // UBIGINT
	    "SELECT CAST(123124 AS %s)",     // INTEGER
	    "SELECT CAST(123124 AS %s)",     // UINTEGER
	    "SELECT CAST(12312 AS %s)",      // SMALLINT
	    "SELECT CAST(12312 AS %s)",      // USMALLINT
	    "SELECT CAST(-123 AS %s)",       // TINYINT
	    "SELECT CAST(255 AS %s)",        // UTINYINT
	    "SELECT CAST(123124.2342 AS %s)" // VARCHAR
	};
	const char *cast_types[] = {"DOUBLE",  "FLOAT",    "HUGEINT",  "UHUGEINT",  "BIGINT",   "UBIGINT", "INTEGER",
	                            "UINTEGER", "SMALLINT", "USMALLINT", "TINYINT", "UTINYINT", "VARCHAR"};
	for (idx_t case_idx = 0; case_idx < 13; case_idx++) {
		TestFetchAsDecimal(tester, cast_queries[case_idx], cast_types[case_idx]);
	}
}

View File

@@ -0,0 +1,438 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
// The MAP getters must degrade gracefully on invalid input: a nullptr value or
// a value of a non-MAP type yields size 0 and null children.
TEST_CASE("Test MAP getters", "[capi]") {
	// a UBIGINT value serves as the "not a MAP" probe
	auto non_map_value = duckdb_create_uint64(42);
	REQUIRE(non_map_value);

	// duckdb_get_map_size on invalid inputs reports 0
	REQUIRE(duckdb_get_map_size(nullptr) == 0);
	REQUIRE(duckdb_get_map_size(non_map_value) == 0);

	// duckdb_get_map_key on invalid inputs yields a null value
	REQUIRE(!duckdb_get_map_key(nullptr, 0));
	REQUIRE(!duckdb_get_map_key(non_map_value, 0));

	// duckdb_get_map_value on invalid inputs yields a null value
	REQUIRE(!duckdb_get_map_value(nullptr, 0));
	REQUIRE(!duckdb_get_map_value(non_map_value, 0));

	duckdb_destroy_value(&non_map_value);
}
// duckdb_get_list_size/duckdb_get_list_child: size and element round-trip for a
// real LIST value, plus null results for nullptr input and out-of-range access.
TEST_CASE("Test LIST getters", "[capi]") {
	// Build a LIST<UBIGINT> value holding {42, 43}.
	duckdb_logical_type element_type = duckdb_create_logical_type(DUCKDB_TYPE_UBIGINT);
	duckdb_value elements[2];
	elements[0] = duckdb_create_uint64(42);
	elements[1] = duckdb_create_uint64(43);
	duckdb_value list_value = duckdb_create_list_value(element_type, elements, 2);
	// the list owns copies; release the inputs immediately
	duckdb_destroy_value(&elements[0]);
	duckdb_destroy_value(&elements[1]);
	duckdb_destroy_logical_type(&element_type);

	// size: nullptr reports 0, the real list reports 2
	REQUIRE(duckdb_get_list_size(nullptr) == 0);
	REQUIRE(duckdb_get_list_size(list_value) == 2);

	// child access on a nullptr list yields a null value
	auto child = duckdb_get_list_child(nullptr, 0);
	REQUIRE(!child);
	duckdb_destroy_value(&child);

	// in-range children round-trip their contents
	child = duckdb_get_list_child(list_value, 0);
	REQUIRE(child);
	REQUIRE(duckdb_get_uint64(child) == 42);
	duckdb_destroy_value(&child);
	child = duckdb_get_list_child(list_value, 1);
	REQUIRE(child);
	REQUIRE(duckdb_get_uint64(child) == 43);
	duckdb_destroy_value(&child);

	// out-of-range access yields a null value
	child = duckdb_get_list_child(list_value, 2);
	REQUIRE(!child);
	duckdb_destroy_value(&child);

	duckdb_destroy_value(&list_value);
}
// duckdb_create_enum_value/duckdb_get_enum_value: valid member indexes
// round-trip, a nullptr getter reports 0, out-of-range indexes yield no value.
TEST_CASE("Test ENUM getters", "[capi]") {
	// an ENUM with five members; indexes 0..4 are valid
	const char *members[5] = {"apple", "banana", "cherry", "orange", "elderberry"};
	duckdb_logical_type enum_type = duckdb_create_enum_type(members, 5);

	// getter on a nullptr value reports 0
	REQUIRE(duckdb_get_enum_value(nullptr) == 0);

	// valid member indexes round-trip
	duckdb_value enum_val = duckdb_create_enum_value(enum_type, 2);
	REQUIRE(enum_val);
	REQUIRE(duckdb_get_enum_value(enum_val) == 2);
	duckdb_destroy_value(&enum_val);

	enum_val = duckdb_create_enum_value(enum_type, 4);
	REQUIRE(enum_val);
	REQUIRE(duckdb_get_enum_value(enum_val) == 4);
	duckdb_destroy_value(&enum_val);

	// out-of-range member indexes cannot be turned into values
	enum_val = duckdb_create_enum_value(enum_type, 5);
	REQUIRE(!enum_val);
	enum_val = duckdb_create_enum_value(enum_type, 6);
	REQUIRE(!enum_val);
	duckdb_destroy_value(&enum_val);

	duckdb_destroy_logical_type(&enum_type);
}
// duckdb_get_struct_child: member round-trip for a real STRUCT value, plus null
// results for nullptr input and out-of-range member indexes.
TEST_CASE("Test STRUCT getters", "[capi]") {
	// Build a STRUCT(a UBIGINT, b BIGINT) value {a: 42, b: -42}.
	duckdb_logical_type member_types[2] = {duckdb_create_logical_type(DUCKDB_TYPE_UBIGINT),
	                                       duckdb_create_logical_type(DUCKDB_TYPE_BIGINT)};
	const char *member_names[2] = {"a", "b"};
	duckdb_logical_type struct_type = duckdb_create_struct_type(member_types, member_names, 2);
	duckdb_destroy_logical_type(&member_types[0]);
	duckdb_destroy_logical_type(&member_types[1]);
	duckdb_value member_values[2] = {duckdb_create_uint64(42), duckdb_create_int64(-42)};
	duckdb_value struct_val = duckdb_create_struct_value(struct_type, member_values);
	// the struct owns copies; release type and inputs immediately
	duckdb_destroy_logical_type(&struct_type);
	duckdb_destroy_value(&member_values[0]);
	duckdb_destroy_value(&member_values[1]);

	// child access on a nullptr struct yields a null value
	auto child = duckdb_get_struct_child(nullptr, 0);
	REQUIRE(!child);

	// in-range children round-trip their contents
	child = duckdb_get_struct_child(struct_val, 0);
	REQUIRE(child);
	REQUIRE(duckdb_get_uint64(child) == 42);
	duckdb_destroy_value(&child);
	child = duckdb_get_struct_child(struct_val, 1);
	REQUIRE(child);
	REQUIRE(duckdb_get_int64(child) == -42);
	duckdb_destroy_value(&child);

	// out-of-range access yields a null value
	child = duckdb_get_struct_child(struct_val, 2);
	REQUIRE(!child);

	duckdb_destroy_value(&struct_val);
}
// duckdb_is_null_value must be true exactly for values created via
// duckdb_create_null_value, and false for nullptr and for non-NULL values.
TEST_CASE("Test NULL value", "[capi]") {
	auto null_value = duckdb_create_null_value();
	REQUIRE(null_value);
	auto non_null_value = duckdb_create_uint64(42);

	REQUIRE(!duckdb_is_null_value(nullptr));
	REQUIRE(!duckdb_is_null_value(non_null_value));
	REQUIRE(duckdb_is_null_value(null_value));

	duckdb_destroy_value(&non_null_value);
	duckdb_destroy_value(&null_value);
}
// BIGNUM values must round-trip through duckdb_create_bignum/duckdb_get_bignum
// without altering the sign flag, the size, or any digit byte.
TEST_CASE("Test BIGNUM value", "[capi]") {
	// Round-trips one bignum and checks sign, size and bytes survive unchanged.
	auto verify_bignum_roundtrip = [](const duckdb_bignum &input) {
		auto value = duckdb_create_bignum(input);
		REQUIRE(duckdb_get_type_id(duckdb_get_value_type(value)) == DUCKDB_TYPE_BIGNUM);
		auto output = duckdb_get_bignum(value);
		REQUIRE(output.is_negative == input.is_negative);
		REQUIRE(output.size == input.size);
		REQUIRE_FALSE(memcmp(output.data, input.data, input.size));
		duckdb_free(output.data);
		duckdb_destroy_value(&value);
	};

	{ // zero
		uint8_t digits[] {0};
		verify_bignum_roundtrip(duckdb_bignum {digits, 1, false});
	}
	{ // negative one (magnitude 1, negative sign)
		uint8_t digits[] {1};
		verify_bignum_roundtrip(duckdb_bignum {digits, 1, true});
	}
	// The magnitude of the largest finite double, 2^1023 * (1 + (1 - 2^-52)) ==
	// 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368
	// as little-endian bytes: six 0x00 bytes, one 0x07 byte, then 121 0xff bytes.
	uint8_t max_double_digits[128];
	for (idx_t i = 0; i < 128; i++) {
		max_double_digits[i] = 0xff;
	}
	for (idx_t i = 0; i < 6; i++) {
		max_double_digits[i] = 0x00;
	}
	max_double_digits[6] = 0x07;
	// max bignum == max double
	verify_bignum_roundtrip(duckdb_bignum {max_double_digits, 128, false});
	// min bignum == min double: the same magnitude with the negative flag set
	verify_bignum_roundtrip(duckdb_bignum {max_double_digits, 128, true});
}
// DECIMAL values must round-trip through duckdb_create_decimal/duckdb_get_decimal
// and report the correct width, scale, and internal storage type for every
// supported width class.
TEST_CASE("Test DECIMAL value", "[capi]") {
	// Round-trips one decimal and checks the logical type reports the expected
	// physical storage type as well as the original width/scale/value.
	auto verify_decimal_roundtrip = [](uint8_t width, uint8_t scale, hugeint_t hugeint, duckdb_type internal_type) {
		duckdb_decimal input {width, scale, {hugeint.lower, hugeint.upper}};
		auto value = duckdb_create_decimal(input);
		auto type = duckdb_get_value_type(value);
		REQUIRE(duckdb_get_type_id(type) == DUCKDB_TYPE_DECIMAL);
		REQUIRE(duckdb_decimal_width(type) == input.width);
		REQUIRE(duckdb_decimal_scale(type) == input.scale);
		REQUIRE(duckdb_decimal_internal_type(type) == internal_type);
		auto output = duckdb_get_decimal(value);
		REQUIRE(output.width == input.width);
		REQUIRE(output.scale == input.scale);
		REQUIRE(output.value.lower == input.value.lower);
		REQUIRE(output.value.upper == input.value.upper);
		duckdb_destroy_value(&value);
	};

	// The extreme positive/negative value for each width class, exercising every
	// internal storage type: SMALLINT, INTEGER, BIGINT and HUGEINT.
	verify_decimal_roundtrip(4, 1, Hugeint::POWERS_OF_TEN[4] - hugeint_t(1), DUCKDB_TYPE_SMALLINT);
	verify_decimal_roundtrip(4, 1, -(Hugeint::POWERS_OF_TEN[4] - hugeint_t(1)), DUCKDB_TYPE_SMALLINT);
	verify_decimal_roundtrip(9, 4, Hugeint::POWERS_OF_TEN[9] - hugeint_t(1), DUCKDB_TYPE_INTEGER);
	verify_decimal_roundtrip(9, 4, -(Hugeint::POWERS_OF_TEN[9] - hugeint_t(1)), DUCKDB_TYPE_INTEGER);
	verify_decimal_roundtrip(18, 6, Hugeint::POWERS_OF_TEN[18] - hugeint_t(1), DUCKDB_TYPE_BIGINT);
	verify_decimal_roundtrip(18, 8, -(Hugeint::POWERS_OF_TEN[18] - hugeint_t(1)), DUCKDB_TYPE_BIGINT);
	verify_decimal_roundtrip(38, 10, Hugeint::POWERS_OF_TEN[38] - hugeint_t(1), DUCKDB_TYPE_HUGEINT);
	verify_decimal_roundtrip(38, 10, -(Hugeint::POWERS_OF_TEN[38] - hugeint_t(1)), DUCKDB_TYPE_HUGEINT);
}
// BIT values must round-trip through duckdb_create_bit/duckdb_get_bit with the
// size and every byte preserved.
TEST_CASE("Test BIT value", "[capi]") {
	// Round-trips one bit string and checks size and bytes survive unchanged.
	auto verify_bit_roundtrip = [](const duckdb_bit &input) {
		auto value = duckdb_create_bit(input);
		REQUIRE(duckdb_get_type_id(duckdb_get_value_type(value)) == DUCKDB_TYPE_BIT);
		auto output = duckdb_get_bit(value);
		REQUIRE(output.size == input.size);
		REQUIRE_FALSE(memcmp(output.data, input.data, input.size));
		duckdb_free(output.data);
		duckdb_destroy_value(&value);
	};
	{
		uint8_t bytes[] {5, 0xf9, 0x56}; // 0b11111001 0b01010110
		verify_bit_roundtrip(duckdb_bit {bytes, 3});
	}
	{
		uint8_t bytes[] {0, 0x00};
		verify_bit_roundtrip(duckdb_bit {bytes, 2});
	}
}
// UUIDs must round-trip through duckdb_create_uuid/duckdb_get_uuid without
// altering either 64-bit half, including boundary bit patterns.
TEST_CASE("Test UUID value", "[capi]") {
	const duckdb_uhugeint uuid_inputs[] {
	    {0x0000000000000000, 0x0000000000000000}, {0x0000000000000001, 0x0000000000000000},
	    {0xffffffffffffffff, 0xffffffffffffffff}, {0xfffffffffffffffe, 0xffffffffffffffff},
	    {0xffffffffffffffff, 0x8fffffffffffffff}, {0x0000000000000000, 0x7000000000000000},
	};
	for (auto &uuid_input : uuid_inputs) {
		auto uuid_value = duckdb_create_uuid(uuid_input);
		REQUIRE(duckdb_get_type_id(duckdb_get_value_type(uuid_value)) == DUCKDB_TYPE_UUID);
		auto uuid_output = duckdb_get_uuid(uuid_value);
		REQUIRE(uuid_output.lower == uuid_input.lower);
		REQUIRE(uuid_output.upper == uuid_input.upper);
		duckdb_destroy_value(&uuid_value);
	}
}
// Rendering a UBIGINT value via duckdb_value_to_string yields its decimal text.
TEST_CASE("Test SQL string conversion", "[capi]") {
	auto uint_val = duckdb_create_uint64(42);
	auto rendered = duckdb_value_to_string(uint_val);
	REQUIRE(string(rendered) == "42");
	// the rendered text is owned by the caller
	duckdb_free(rendered);
	duckdb_destroy_value(&uint_val);
}

View File

@@ -0,0 +1,276 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
// Materializes a duckdb_string_t as a std::string, handling both the inlined
// short-string representation and the pointer-based representation.
string get_string_from_duckdb_string_t(duckdb_string_t *input) {
	auto length = duckdb_string_t_length(*input);
	if (duckdb_string_is_inlined(*input)) {
		return string(input->value.inlined.inlined, length);
	}
	return string(input->value.pointer.ptr, length);
}
// Builds a 6-element BIGINT vector with contents {10, 20, NULL, 40, 50, 60};
// used as the source vector by the copy-selection tests below.
duckdb_vector create_src_vector_for_copy_selection_test(duckdb_logical_type type) {
	auto vector = duckdb_create_vector(type, 6);
	duckdb_vector_ensure_validity_writable(vector);
	auto validity = duckdb_vector_get_validity(vector);
	auto entries = (int64_t *)duckdb_vector_get_data(vector);
	entries[0] = 10;
	entries[1] = 20;
	// row 2 stays NULL: all validity bits set except bit 2
	validity[0] = ~0x04;
	entries[3] = 40;
	entries[4] = 50;
	entries[5] = 60;
	return vector;
}
// Builds a selection vector that picks source rows in the order 5, 3, 2, 0.
duckdb_selection_vector create_selection_vector_for_copy_selection_test() {
	auto sel = duckdb_create_selection_vector(4);
	sel_t *entries = duckdb_selection_vector_get_data_ptr(sel);
	entries[0] = 5;
	entries[1] = 3;
	entries[2] = 2;
	entries[3] = 0;
	return sel;
}
// Exercises duckdb_vector_copy_sel with the fixtures above: source data is
// {10, 20, NULL, 40, 50, 60} and the selection picks rows 5, 3, 2, 0.
// Validity is checked bit-by-bit on the first mask entry (bit i == row i).
TEST_CASE("Test duckdb_vector_copy_sel", "[capi]") {
	duckdb_logical_type type = duckdb_create_logical_type(DUCKDB_TYPE_BIGINT);
	SECTION("Test basic selection copy") {
		auto src_vector = create_src_vector_for_copy_selection_test(type);
		auto sel_vector = create_selection_vector_for_copy_selection_test();
		auto dst_vector = duckdb_create_vector(type, 4);
		auto dst_data = (int64_t *)duckdb_vector_get_data(dst_vector);
		duckdb_vector_ensure_validity_writable(dst_vector);
		auto dst_validity = duckdb_vector_get_validity(dst_vector);
		// Copy 4 elements from the start of the selection vector to the start of the destination.
		duckdb_vector_copy_sel(src_vector, dst_vector, sel_vector, 4, 0, 0);
		// Verify the copied data: should be {60, 40, NULL, 10}
		REQUIRE(dst_data[0] == 60);
		REQUIRE((dst_validity[0] & 0x01) == 0x01);
		REQUIRE(dst_data[1] == 40);
		REQUIRE((dst_validity[0] & 0x02) == 0x02);
		// Check that the NULL was copied correctly
		REQUIRE((~dst_validity[0] & 0x04) == 0x04);
		REQUIRE(dst_data[3] == 10);
		REQUIRE((dst_validity[0] & 0x08) == 0x08);
		duckdb_destroy_vector(&src_vector);
		duckdb_destroy_vector(&dst_vector);
		duckdb_destroy_selection_vector(sel_vector);
	}
	SECTION("Test copy with source and destination offsets") {
		auto src_vector = create_src_vector_for_copy_selection_test(type);
		auto sel_vector = create_selection_vector_for_copy_selection_test();
		// Create a destination vector pre-filled with some data.
		auto dst_vector = duckdb_create_vector(type, 6);
		auto dst_data = (int64_t *)duckdb_vector_get_data(dst_vector);
		duckdb_vector_ensure_validity_writable(dst_vector);
		for (int i = 0; i < 6; i++) {
			dst_data[i] = 999;
		}
		// Copy 2 elements, starting from offset 1 in `sel` (`{3, 2}`).
		// Copy them into `dst_vector` starting at offset 2.
		// NOTE(review): the third argument (3) appears to be the exclusive end of
		// the selection range rather than a count — confirm against duckdb.h.
		duckdb_vector_copy_sel(src_vector, dst_vector, sel_vector, 3, 1, 2);
		// Verify destination: should be {999, 999, 40, NULL, 999, 999}
		auto dst_validity = duckdb_vector_get_validity(dst_vector);
		// Unchanged elements
		REQUIRE(dst_data[0] == 999);
		REQUIRE(dst_data[1] == 999);
		REQUIRE(dst_data[4] == 999);
		REQUIRE(dst_data[5] == 999);
		// Copied elements
		REQUIRE(dst_data[2] == 40);
		REQUIRE((dst_validity[0] & 0x04) == 0x04);
		REQUIRE((~dst_validity[0] & 0x08) == 0x08); // The NULL value from src[2]
		duckdb_destroy_vector(&src_vector);
		duckdb_destroy_vector(&dst_vector);
		duckdb_destroy_selection_vector(sel_vector);
	}
	SECTION("Test copy with zero count") {
		auto src_vector = create_src_vector_for_copy_selection_test(type);
		auto sel_vector = create_selection_vector_for_copy_selection_test();
		auto dst_vector = duckdb_create_vector(type, 4);
		auto dst_data = (int64_t *)duckdb_vector_get_data(dst_vector);
		for (int i = 0; i < 4; i++) {
			dst_data[i] = 123; // Pre-fill
		}
		// copy 0 elements: the destination must stay untouched
		duckdb_vector_copy_sel(src_vector, dst_vector, sel_vector, 0, 0, 0);
		for (int i = 0; i < 4; i++) {
			REQUIRE(dst_data[i] == 123);
		}
		duckdb_destroy_vector(&src_vector);
		duckdb_destroy_vector(&dst_vector);
		duckdb_destroy_selection_vector(sel_vector);
	}
	duckdb_destroy_logical_type(&type);
}
// Copies every column of 'src' into the corresponding column of 'dst' via
// duckdb_vector_copy_sel, using an identity selection vector (0, 1, ...,
// size-1), then gives 'dst' the same cardinality as 'src'.
void copy_data_chunk_using_vector_copy_sel(duckdb_data_chunk src, duckdb_data_chunk dst) {
	const idx_t row_count = duckdb_data_chunk_get_size(src);
	// identity selection: entry i selects row i
	auto identity_sel = duckdb_create_selection_vector(row_count);
	sel_t *sel_entries = duckdb_selection_vector_get_data_ptr(identity_sel);
	for (sel_t row = 0; row < sel_t(row_count); row++) {
		sel_entries[row] = row;
	}
	const idx_t column_count = duckdb_data_chunk_get_column_count(src);
	for (idx_t col = 0; col < column_count; col++) {
		duckdb_vector_copy_sel(duckdb_data_chunk_get_vector(src, col), duckdb_data_chunk_get_vector(dst, col),
		                       identity_sel, row_count, 0, 0);
	}
	duckdb_data_chunk_set_size(dst, row_count);
	duckdb_destroy_selection_vector(identity_sel);
}
// Builds a 2-row chunk (INTEGER, VARCHAR), copies it with
// copy_data_chunk_using_vector_copy_sel, and verifies that both the fixed-size
// column and the string column (including validity) arrive intact.
TEST_CASE("Test copying data_chunk by using duckdb_vector_copy_sel", "[capi]") {
	CAPITester tester;
	duckdb::unique_ptr<CAPIResult> result;
	REQUIRE(tester.OpenDatabase(nullptr));
	SECTION("Test basic data chunk copy") {
		duckdb_logical_type types[] = {duckdb_create_logical_type(DUCKDB_TYPE_INTEGER),
		                               duckdb_create_logical_type(DUCKDB_TYPE_VARCHAR)};
		auto src_chunk = duckdb_create_data_chunk(types, 2);
		auto dst_chunk = duckdb_create_data_chunk(types, 2);
		// populate the INTEGER column with {42, 99}
		int32_t *int_data =
		    reinterpret_cast<int32_t *>(duckdb_vector_get_data(duckdb_data_chunk_get_vector(src_chunk, 0)));
		int_data[0] = 42;
		int_data[1] = 99;
		// populate the VARCHAR column with {"hello", "world"}
		auto varchar_vector = duckdb_data_chunk_get_vector(src_chunk, 1);
		duckdb_vector_assign_string_element(varchar_vector, 0, "hello");
		duckdb_vector_assign_string_element(varchar_vector, 1, "world");
		duckdb_data_chunk_set_size(src_chunk, 2);
		copy_data_chunk_using_vector_copy_sel(src_chunk, dst_chunk);
		REQUIRE(duckdb_data_chunk_get_size(dst_chunk) == 2);
		REQUIRE(duckdb_data_chunk_get_column_count(dst_chunk) == 2);
		// the INTEGER column must be copied verbatim
		int32_t *dst_int_data = (int32_t *)duckdb_vector_get_data(duckdb_data_chunk_get_vector(dst_chunk, 0));
		CHECK(dst_int_data[0] == 42);
		CHECK(dst_int_data[1] == 99);
		// the VARCHAR column must be copied with valid rows and identical text
		auto dst_vector = duckdb_data_chunk_get_vector(dst_chunk, 1);
		auto validity = duckdb_vector_get_validity(dst_vector);
		auto string_data = (duckdb_string_t *)duckdb_vector_get_data(dst_vector);
		CHECK(duckdb_validity_row_is_valid(validity, 0));
		CHECK(duckdb_validity_row_is_valid(validity, 1));
		CHECK(get_string_from_duckdb_string_t(&string_data[0]).compare("hello") == 0);
		CHECK(get_string_from_duckdb_string_t(&string_data[1]).compare("world") == 0);
		duckdb_destroy_data_chunk(&src_chunk);
		duckdb_destroy_data_chunk(&dst_chunk);
		for (size_t i = 0; i < 2; i++) {
			duckdb_destroy_logical_type(&types[i]);
		}
	}
}
// Makes each column i of 'dst' a zero-copy reference to column ref_indices[i]
// of 'src' via duckdb_vector_reference_vector, then gives 'dst' the same
// cardinality as 'src'.
void reference_data_chunk_using_vector_reference_vector(duckdb_data_chunk src, duckdb_data_chunk dst,
                                                        const idx_t *ref_indices, idx_t ref_len) {
	duckdb_data_chunk_reset(dst);
	for (idx_t dst_col = 0; dst_col < ref_len; dst_col++) {
		auto referenced = duckdb_data_chunk_get_vector(src, ref_indices[dst_col]);
		duckdb_vector_reference_vector(duckdb_data_chunk_get_vector(dst, dst_col), referenced);
	}
	duckdb_data_chunk_set_size(dst, duckdb_data_chunk_get_size(src));
}
// Verifies that duckdb_vector_reference_vector creates true zero-copy references:
// the destination vectors share data pointers with the source, pick up the source
// column types, and observe later mutations of the source data.
TEST_CASE("Test referencing data chunks by using duckdb_vector_reference_vector", "[capi]") {
	CAPITester tester;
	REQUIRE(tester.OpenDatabase(nullptr));

	// Build a source chunk with three columns: INTEGER, DOUBLE, BIGINT.
	duckdb_logical_type src_types[] = {duckdb_create_logical_type(DUCKDB_TYPE_INTEGER),
	                                   duckdb_create_logical_type(DUCKDB_TYPE_DOUBLE),
	                                   duckdb_create_logical_type(DUCKDB_TYPE_BIGINT)};
	auto src_chunk = duckdb_create_data_chunk(src_types, 3);
	auto src_int_vector = duckdb_data_chunk_get_vector(src_chunk, 0);
	auto src_double_vector = duckdb_data_chunk_get_vector(src_chunk, 1);
	auto src_bigint_vector = duckdb_data_chunk_get_vector(src_chunk, 2);
	auto src_int_data = (int32_t *)duckdb_vector_get_data(src_int_vector);
	auto src_double_data = (double *)duckdb_vector_get_data(src_double_vector);
	auto src_bigint_data = (int64_t *)duckdb_vector_get_data(src_bigint_vector);
	src_int_data[0] = 42;
	src_int_data[1] = 99;
	src_double_data[0] = 0.5;
	src_double_data[1] = 1.5;
	src_bigint_data[0] = 1000;
	src_bigint_data[1] = 2000;
	duckdb_data_chunk_set_size(src_chunk, 2);

	// The destination chunk is created with its own types; referencing columns 2 and 0
	// of the source (in that order) should overwrite them with the source types.
	duckdb_logical_type dst_types[] = {duckdb_create_logical_type(DUCKDB_TYPE_BIGINT),
	                                   duckdb_create_logical_type(DUCKDB_TYPE_INTEGER)};
	auto dst_chunk = duckdb_create_data_chunk(dst_types, 2);
	idx_t ref_indices[] = {2, 0};
	reference_data_chunk_using_vector_reference_vector(src_chunk, dst_chunk, ref_indices, 2);
	REQUIRE(duckdb_data_chunk_get_column_count(dst_chunk) == 2);
	REQUIRE(duckdb_data_chunk_get_size(dst_chunk) == 2);

	// The referenced vectors must carry the source column types.
	auto dst_type_0 = duckdb_vector_get_column_type(duckdb_data_chunk_get_vector(dst_chunk, 0));
	auto dst_type_1 = duckdb_vector_get_column_type(duckdb_data_chunk_get_vector(dst_chunk, 1));
	REQUIRE(duckdb_get_type_id(dst_type_0) == DUCKDB_TYPE_BIGINT);
	REQUIRE(duckdb_get_type_id(dst_type_1) == DUCKDB_TYPE_INTEGER);
	duckdb_destroy_logical_type(&dst_type_0);
	duckdb_destroy_logical_type(&dst_type_1);

	// Verify that the data pointers are the same (zero-copy reference, not a copy).
	auto dst_bigint_vector = duckdb_data_chunk_get_vector(dst_chunk, 0);
	auto dst_int_vector = duckdb_data_chunk_get_vector(dst_chunk, 1);
	REQUIRE(duckdb_vector_get_data(dst_bigint_vector) == duckdb_vector_get_data(src_bigint_vector));
	REQUIRE(duckdb_vector_get_data(dst_int_vector) == duckdb_vector_get_data(src_int_vector));

	// Mutating the source must be visible through the referencing vector.
	src_bigint_data[0] = 9999;
	auto dst_bigint_data = (int64_t *)duckdb_vector_get_data(dst_bigint_vector);
	REQUIRE(dst_bigint_data[0] == 9999);

	// Destroy the referencing chunk before the source; then release all logical types.
	duckdb_destroy_data_chunk(&dst_chunk);
	for (size_t i = 0; i < 2; i++) {
		duckdb_destroy_logical_type(&dst_types[i]);
	}
	duckdb_destroy_data_chunk(&src_chunk);
	for (size_t i = 0; i < 3; i++) {
		duckdb_destroy_logical_type(&src_types[i]);
	}
}

View File

@@ -0,0 +1,187 @@
#include "capi_tester.hpp"
using namespace duckdb;
using namespace std;
// Smoke-tests for the C API usage examples published on the DuckDB website.
// Each SECTION mirrors one documentation snippet verbatim, so the code style here
// intentionally follows the website (e.g. "// handle error" placeholders).
TEST_CASE("Test C API examples from the website", "[capi]") {
	// NOTE: if any of these break and need to be changed, the website also needs to be updated!
	SECTION("connect") {
		duckdb_database db;
		duckdb_connection con;
		// a NULL path opens a transient in-memory database
		if (duckdb_open(NULL, &db) == DuckDBError) {
			// handle error
		}
		if (duckdb_connect(db, &con) == DuckDBError) {
			// handle error
		}
		// run queries...
		// cleanup: disconnect before closing the database
		duckdb_disconnect(&con);
		duckdb_close(&db);
	}
	SECTION("config") {
		duckdb_database db;
		duckdb_config config;
		// create the configuration object
		if (duckdb_create_config(&config) == DuckDBError) {
			REQUIRE(1 == 0);
		}
		// set some configuration options
		duckdb_set_config(config, "access_mode", "READ_WRITE");
		duckdb_set_config(config, "threads", "8");
		duckdb_set_config(config, "max_memory", "8GB");
		duckdb_set_config(config, "default_order", "DESC");
		// open the database using the configuration
		if (duckdb_open_ext(NULL, &db, config, NULL) == DuckDBError) {
			REQUIRE(1 == 0);
		}
		// cleanup the configuration object (safe once the database is open)
		duckdb_destroy_config(&config);
		// run queries...
		// cleanup
		duckdb_close(&db);
	}
	SECTION("query") {
		duckdb_database db;
		duckdb_connection con;
		duckdb_state state;
		duckdb_result result;
		duckdb_open(NULL, &db);
		duckdb_connect(db, &con);
		// create a table
		state = duckdb_query(con, "CREATE TABLE integers(i INTEGER, j INTEGER);", NULL);
		if (state == DuckDBError) {
			REQUIRE(1 == 0);
		}
		// insert three rows into the table
		state = duckdb_query(con, "INSERT INTO integers VALUES (3, 4), (5, 6), (7, NULL);", NULL);
		if (state == DuckDBError) {
			REQUIRE(1 == 0);
		}
		// query rows again
		state = duckdb_query(con, "SELECT * FROM integers", &result);
		if (state == DuckDBError) {
			REQUIRE(1 == 0);
		}
		// handle the result: iterate row-by-row, fetching each cell as a string
		idx_t row_count = duckdb_row_count(&result);
		idx_t column_count = duckdb_column_count(&result);
		for (idx_t row = 0; row < row_count; row++) {
			for (idx_t col = 0; col < column_count; col++) {
				// if (col > 0) printf(",");
				// duckdb_value_varchar allocates; we must duckdb_free it below
				auto str_val = duckdb_value_varchar(&result, col, row);
				// printf("%s", str_val);
				REQUIRE(1 == 1);
				duckdb_free(str_val);
			}
			// printf("\n");
		}
		// columnar access to the materialized result; the nullmask marks NULL cells
		// NOTE(review): duckdb_column_data/duckdb_nullmask_data mirror the website example;
		// they belong to the older materialized-result API
		int32_t *i_data = (int32_t *)duckdb_column_data(&result, 0);
		int32_t *j_data = (int32_t *)duckdb_column_data(&result, 1);
		bool *i_mask = duckdb_nullmask_data(&result, 0);
		bool *j_mask = duckdb_nullmask_data(&result, 1);
		for (idx_t row = 0; row < row_count; row++) {
			if (i_mask[row]) {
				// printf("NULL");
			} else {
				REQUIRE(i_data[row] > 0);
				// printf("%d", i_data[row]);
			}
			// printf(",");
			if (j_mask[row]) {
				// printf("NULL");
			} else {
				REQUIRE(j_data[row] > 0);
				// printf("%d", j_data[row]);
			}
			// printf("\n");
		}
		// destroy the result after we are done with it
		duckdb_destroy_result(&result);
		duckdb_disconnect(&con);
		duckdb_close(&db);
	}
	SECTION("prepared") {
		duckdb_database db;
		duckdb_connection con;
		duckdb_open(NULL, &db);
		duckdb_connect(db, &con);
		duckdb_query(con, "CREATE TABLE integers(i INTEGER, j INTEGER)", NULL);
		duckdb_prepared_statement stmt;
		duckdb_result result;
		if (duckdb_prepare(con, "INSERT INTO integers VALUES ($1, $2)", &stmt) == DuckDBError) {
			REQUIRE(1 == 0);
		}
		duckdb_bind_int32(stmt, 1, 42); // the parameter index starts counting at 1!
		duckdb_bind_int32(stmt, 2, 43);
		// NULL as second parameter means no result set is requested
		duckdb_execute_prepared(stmt, NULL);
		duckdb_destroy_prepare(&stmt);
		// we can also query result sets using prepared statements
		if (duckdb_prepare(con, "SELECT * FROM integers WHERE i = ?", &stmt) == DuckDBError) {
			REQUIRE(1 == 0);
		}
		duckdb_bind_int32(stmt, 1, 42);
		duckdb_execute_prepared(stmt, &result);
		// do something with result
		// clean up
		duckdb_destroy_result(&result);
		duckdb_destroy_prepare(&stmt);
		duckdb_disconnect(&con);
		duckdb_close(&db);
	}
	SECTION("appender") {
		duckdb_database db;
		duckdb_connection con;
		duckdb_open(NULL, &db);
		duckdb_connect(db, &con);
		duckdb_query(con, "CREATE TABLE people(id INTEGER, name VARCHAR)", NULL);
		duckdb_appender appender;
		// NULL schema means the default schema
		if (duckdb_appender_create(con, NULL, "people", &appender) == DuckDBError) {
			REQUIRE(1 == 0);
		}
		// append one value per column, then end the row
		duckdb_append_int32(appender, 1);
		duckdb_append_varchar(appender, "Mark");
		duckdb_appender_end_row(appender);
		duckdb_append_int32(appender, 2);
		duckdb_append_varchar(appender, "Hannes");
		duckdb_appender_end_row(appender);
		// destroying the appender flushes the pending rows
		duckdb_appender_destroy(&appender);
		duckdb_result result;
		duckdb_query(con, "SELECT * FROM people", &result);
		REQUIRE(duckdb_value_int32(&result, 0, 0) == 1);
		REQUIRE(duckdb_value_int32(&result, 0, 1) == 2);
		REQUIRE(string(duckdb_value_varchar_internal(&result, 1, 0)) == "Mark");
		REQUIRE(string(duckdb_value_varchar_internal(&result, 1, 1)) == "Hannes");
		// error conditions: we cannot fetch an internal varchar from a non-VARCHAR
		// column, nor from a NULL result — both return nullptr instead of crashing
		REQUIRE(duckdb_value_varchar_internal(&result, 0, 0) == nullptr);
		REQUIRE(duckdb_value_varchar_internal(nullptr, 0, 0) == nullptr);
		duckdb_destroy_result(&result);
		duckdb_disconnect(&con);
		duckdb_close(&db);
	}
}

View File

@@ -0,0 +1,35 @@
#include "catch.hpp"
#include "duckdb.h"
using namespace std;
// Smoke test: open a single in-memory database, connect, and tear everything down.
TEST_CASE("Simple In-Memory DB Start Up and Shutdown", "[simplestartup]") {
	duckdb_database db;
	duckdb_connection con;
	// a NULL path creates a transient in-memory database
	REQUIRE(duckdb_open(NULL, &db) == DuckDBSuccess);
	REQUIRE(duckdb_connect(db, &con) == DuckDBSuccess);
	// tear down in reverse order: connection first, then the database handle
	duckdb_disconnect(&con);
	duckdb_close(&db);
}
// Stress the open/connect lifecycle: 10 independent in-memory databases,
// each with 10 simultaneous connections, all opened before any are closed.
TEST_CASE("Multiple In-Memory DB Start Up and Shutdown", "[multiplestartup]") {
	constexpr size_t DB_COUNT = 10;
	constexpr size_t CONNECTIONS_PER_DB = 10;
	duckdb_database databases[DB_COUNT];
	duckdb_connection connections[DB_COUNT * CONNECTIONS_PER_DB];
	// open every database and all of its connections
	for (size_t db_idx = 0; db_idx < DB_COUNT; db_idx++) {
		REQUIRE(duckdb_open(NULL, &databases[db_idx]) == DuckDBSuccess);
		for (size_t con_idx = 0; con_idx < CONNECTIONS_PER_DB; con_idx++) {
			REQUIRE(duckdb_connect(databases[db_idx], &connections[db_idx * CONNECTIONS_PER_DB + con_idx]) ==
			        DuckDBSuccess);
		}
	}
	// disconnect each database's connections before closing that database
	for (size_t db_idx = 0; db_idx < DB_COUNT; db_idx++) {
		for (size_t con_idx = 0; con_idx < CONNECTIONS_PER_DB; con_idx++) {
			duckdb_disconnect(&connections[db_idx * CONNECTIONS_PER_DB + con_idx]);
		}
		duckdb_close(&databases[db_idx]);
	}
}

View File

@@ -0,0 +1,40 @@
#include "catch.hpp"
#define DUCKDB_API_NO_DEPRECATED
#include "duckdb.h"
using namespace std;
// we only use functions that are cool to use in the 1.0 API
// Exercises a full prepare/bind/execute/fetch round-trip using only the
// stable (non-deprecated) C API surface, compiled with DUCKDB_API_NO_DEPRECATED.
TEST_CASE("Test without deprecated or future moved functions", "[capi]") {
	duckdb_database db;
	duckdb_connection con;
	duckdb_prepared_statement prepared;
	duckdb_result res;
	REQUIRE(duckdb_open(NULL, &db) == DuckDBSuccess);
	REQUIRE(duckdb_connect(db, &con) == DuckDBSuccess);
	// prepare a parameterized query and bind the single parameter
	REQUIRE(duckdb_prepare(con, "SELECT ?::INTEGER AS a", &prepared) == DuckDBSuccess);
	REQUIRE(duckdb_bind_int32(prepared, 1, 42) == DuckDBSuccess);
	REQUIRE(duckdb_execute_prepared(prepared, &res) == DuckDBSuccess);
	// validate the result schema
	REQUIRE(duckdb_column_count(&res) == 1);
	REQUIRE(string(duckdb_column_name(&res, 0)) == "a");
	REQUIRE(duckdb_column_type(&res, 0) == DUCKDB_TYPE_INTEGER);
	// fetch the single chunk and read the bound value back out
	auto data_chunk = duckdb_fetch_chunk(res);
	REQUIRE(data_chunk);
	auto col_vector = duckdb_data_chunk_get_vector(data_chunk, 0);
	REQUIRE(col_vector);
	auto mask = duckdb_vector_get_validity(col_vector);
	REQUIRE(duckdb_validity_row_is_valid(mask, 0));
	auto int_data = (int *)duckdb_vector_get_data(col_vector);
	REQUIRE(int_data);
	REQUIRE(int_data[0] == 42);
	// release everything in reverse order of acquisition
	duckdb_destroy_data_chunk(&data_chunk);
	duckdb_destroy_result(&res);
	duckdb_destroy_prepare(&prepared);
	duckdb_disconnect(&con);
	duckdb_close(&db);
}

View File

@@ -0,0 +1 @@
-- Generate TPC-H benchmark data at scale factor 0.01 (requires the tpch extension to be loaded).
CALL dbgen(sf=0.01);

View File

@@ -0,0 +1,798 @@
# Tests that fail under plan serialization.
# A listed test means either:
# (a) the test does not translate nicely to the serialization test (e.g. it involves dropping + recreating a table)
# (b) the test exercises a new feature that is not available in the old version
test/sql/aggregate/aggregates/approx_top_k.test
test/sql/aggregate/aggregates/arg_min_max_n.test
test/sql/aggregate/aggregates/arg_min_max_n_tpch.test
test/sql/aggregate/aggregates/binning.test
test/sql/aggregate/aggregates/histogram_table_function.test
test/sql/aggregate/aggregates/test_aggregate_types_scalar.test
test/sql/aggregate/aggregates/test_any_value.test
test/sql/aggregate/aggregates/test_arg_min_max.test
test/sql/aggregate/aggregates/test_arg_min_max_null.test
test/sql/aggregate/aggregates/test_avg.test
test/sql/aggregate/aggregates/test_binned_histogram.test
test/sql/aggregate/aggregates/test_bit_and.test
test/sql/aggregate/aggregates/test_bit_or.test
test/sql/aggregate/aggregates/test_bit_xor.test
test/sql/aggregate/aggregates/test_bitstring_agg.test
test/sql/aggregate/aggregates/test_count_all_types.test
test/sql/aggregate/aggregates/test_last.test
test/sql/aggregate/aggregates/test_product.test
test/sql/aggregate/aggregates/test_regression.test
test/sql/aggregate/aggregates/test_state_export.test
test/sql/aggregate/distinct/distinct_on_order_by.test
test/sql/aggregate/distinct/grouped/string_agg.test
test/sql/aggregate/distinct/test_distinct.test
test/sql/aggregate/group/group_by_limits.test
test/sql/aggregate/group/test_group_by.test
test/sql/aggregate/qualify/test_qualify_view.test
test/sql/aggregate/qualify/test_qualify_view_no_view_dependencies.test
test/sql/alter/add_col/test_add_col_transactions.test
test/sql/alter/alter_col/test_not_null_in_tran.test
test/sql/alter/alter_col/test_not_null_multi_tran.test
test/sql/alter/alter_type/test_alter_type_transactions.test
test/sql/alter/drop_col/test_drop_col_transactions.test
test/sql/alter/rename_col/test_rename_col.test
test/sql/alter/rename_col/test_rename_col_rollback.test
test/sql/alter/rename_col/test_rename_col_transactions.test
test/sql/alter/rename_table/test_rename_table.test
test/sql/alter/rename_table/test_rename_table_chain_commit.test
test/sql/alter/rename_table/test_rename_table_collision.test
test/sql/alter/rename_table/test_rename_table_constraints.test
test/sql/alter/rename_table/test_rename_table_many_transactions.test
test/sql/alter/rename_table/test_rename_table_transactions.test
test/sql/alter/rename_view/test_rename_view.test
test/sql/alter/rename_view/test_rename_view_many_transactions.test
test/sql/alter/rename_view/test_rename_view_transactions.test
test/sql/attach/attach_all_types.test
test/sql/attach/attach_checkpoint_vacuum.test
test/sql/attach/attach_copy.test
test/sql/attach/attach_custom_block_size.test
test/sql/attach/attach_database_size.test
test/sql/attach/attach_dependencies.test
test/sql/attach/attach_did_you_mean.test
test/sql/attach/attach_different_alias.test
test/sql/attach/attach_duckdb_type.test
test/sql/attach/attach_enums.test
test/sql/attach/attach_export_import.test
test/sql/attach/attach_filepath_roundtrip.test
test/sql/attach/attach_foreign_key.test
test/sql/attach/attach_httpfs.test
test/sql/attach/attach_if_not_exists.test
test/sql/attach/attach_index.test
test/sql/attach/attach_issue_7660.test
test/sql/attach/attach_persistent.test
test/sql/attach/attach_pragma_storage_info.test
test/sql/attach/attach_read_only.test
test/sql/attach/attach_read_only_transaction.test
test/sql/attach/attach_reserved.test
test/sql/attach/attach_s3.test
test/sql/attach/attach_same_db.test
test/sql/attach/attach_sequence.test
test/sql/attach/attach_table_constraints.test
test/sql/attach/attach_table_info.test
test/sql/attach/attach_transactionality.test
test/sql/attach/attach_wal_alter.test
test/sql/attach/reattach_schema.test
test/sql/binder/column_value_alias_group.test
test/sql/binder/test_case_insensitive_binding.test
test/sql/binder/test_null_type_propagation.test
test/sql/cast/string_to_list_cast.test
test/sql/cast/string_to_struct_cast.test
test/sql/cast/test_boolean_cast.test
test/sql/catalog/case_insensitive_alter.test
test/sql/catalog/case_insensitive_binder.test
test/sql/catalog/case_insensitive_caps.test
test/sql/catalog/case_insensitive_using.test
test/sql/catalog/comment_on.test
test/sql/catalog/comment_on_extended.test
test/sql/catalog/comment_on_wal.test
test/sql/catalog/dependencies/add_column_to_table_referenced_by_view.test
test/sql/catalog/dependencies/test_alter_dependency_ownership.test
test/sql/catalog/dependencies/test_default_value_dependency.test
test/sql/catalog/dependencies/test_prepare_dependencies_transactions.test
test/sql/catalog/dependencies/test_schema_dependency.test
test/sql/catalog/drop_create_rollback.test
test/sql/catalog/function/query_function.test
test/sql/catalog/function/test_complex_macro.test
test/sql/catalog/function/test_macro_default_arg.test
test/sql/catalog/function/test_macro_default_arg_with_dependencies.test
test/sql/catalog/function/test_macro_issue_13104.test
test/sql/catalog/function/test_macro_overloads.test
test/sql/catalog/function/test_sequence_macro.test
test/sql/catalog/function/test_simple_macro.test
test/sql/catalog/function/test_table_macro.test
test/sql/catalog/function/test_table_macro_complex.test
test/sql/catalog/function/test_table_macro_groups.test
test/sql/catalog/sequence/sequence_cycle.test
test/sql/catalog/sequence/sequence_offset_increment.test
test/sql/catalog/sequence/sequence_overflow.test
test/sql/catalog/sequence/test_duckdb_sequences.test
test/sql/catalog/sequence/test_sequence.test
test/sql/catalog/test_schema.test
test/sql/catalog/test_schema_conflict.test
test/sql/catalog/test_set_schema.test
test/sql/catalog/test_set_search_path.test
test/sql/catalog/test_standard_schema.test
test/sql/catalog/test_temporary.test
test/sql/catalog/view/test_view.test
test/sql/catalog/view/test_view_alias.test
test/sql/catalog/view/test_view_schema_change.test
test/sql/catalog/view/test_view_schema_change_with_dependencies.test
test/sql/collate/collate_order_by_alias.test
test/sql/collate/collate_subquery.test
test/sql/collate/test_collate_list.test
test/sql/collate/test_icu_collate.test
test/sql/constraints/foreignkey/test_fk_export.test
test/sql/constraints/foreignkey/test_fk_multiple.test
test/sql/constraints/foreignkey/test_fk_self_referencing.test
test/sql/constraints/foreignkey/test_fk_temporary.test
test/sql/constraints/foreignkey/test_foreignkey.test
test/sql/constraints/primarykey/test_primary_key.test
test/sql/constraints/unique/test_unique.test
test/sql/constraints/unique/test_unique_multi_column.test
test/sql/constraints/unique/test_unique_string.test
test/sql/constraints/unique/test_unique_temp.test
test/sql/copy/csv/7702.test
test/sql/copy/csv/auto/test_auto_5378.test
test/sql/copy/csv/auto/test_auto_8860.test
test/sql/copy/csv/auto/test_auto_column_type_opt.test
test/sql/copy/csv/auto/test_auto_cranlogs.test
test/sql/copy/csv/auto/test_auto_greek_ncvoter.test
test/sql/copy/csv/auto/test_auto_greek_utf8.test
test/sql/copy/csv/auto/test_auto_imdb.test
test/sql/copy/csv/auto/test_auto_lineitem.test
test/sql/copy/csv/auto/test_auto_ontime.test
test/sql/copy/csv/auto/test_auto_web_page.test
test/sql/copy/csv/auto/test_csv_auto.test
test/sql/copy/csv/auto/test_date_format_bug_linux.test
test/sql/copy/csv/auto/test_fallback_all_varchar.test
test/sql/copy/csv/auto/test_header_completion.test
test/sql/copy/csv/auto/test_header_detection.test
test/sql/copy/csv/auto/test_normalize_names.test
test/sql/copy/csv/auto/test_sample_size.test
test/sql/copy/csv/auto/test_sniffer_blob.test
test/sql/copy/csv/auto/test_type_candidates.test
test/sql/copy/csv/auto/test_type_detection.test
test/sql/copy/csv/batched_write/batch_csv_write.test
test/sql/copy/csv/code_cov/buffer_manager_finalize.test
test/sql/copy/csv/code_cov/csv_disk_reload.test
test/sql/copy/csv/code_cov/csv_state_machine_invalid_utf.test
test/sql/copy/csv/column_names.test
test/sql/copy/csv/copy_disable_parallelism.test
test/sql/copy/csv/csv_dtypes.test
test/sql/copy/csv/csv_dtypes_union_by_name.test
test/sql/copy/csv/csv_error_message.test
test/sql/copy/csv/csv_external_access.test
test/sql/copy/csv/csv_hive.test
test/sql/copy/csv/csv_home_directory.test
test/sql/copy/csv/csv_limit_copy.test
test/sql/copy/csv/csv_names.test
test/sql/copy/csv/csv_null_padding.test
test/sql/copy/csv/csv_projection_pushdown.test
test/sql/copy/csv/csv_roundtrip_single_null.test
test/sql/copy/csv/csv_windows_mixed_separators.test
test/sql/copy/csv/glob/copy_csv_glob.test
test/sql/copy/csv/glob/copy_csv_glob_s3.test
test/sql/copy/csv/glob/read_csv_glob.test
test/sql/copy/csv/glob/read_csv_glob_crawl.test
test/sql/copy/csv/glob/read_csv_glob_s3.test
test/sql/copy/csv/overwrite/test_copy_overwrite.test
test/sql/copy/csv/parallel/csv_parallel_buffer_size.test
test/sql/copy/csv/parallel/csv_parallel_httpfs.test
test/sql/copy/csv/parallel/csv_parallel_null_option.test
test/sql/copy/csv/parallel/test_5438.test
test/sql/copy/csv/parallel/test_5566.test
test/sql/copy/csv/parallel/test_multiple_files.test
test/sql/copy/csv/parallel/test_parallel_csv.test
test/sql/copy/csv/read_csv_variable.test
test/sql/copy/csv/recursive_read_csv.test
test/sql/copy/csv/rejects/csv_incorrect_columns_amount_rejects.test
test/sql/copy/csv/rejects/csv_rejects_auto.test
test/sql/copy/csv/rejects/csv_rejects_flush_cast.test
test/sql/copy/csv/rejects/csv_rejects_flush_message.test
test/sql/copy/csv/rejects/csv_rejects_maximum_line.test
test/sql/copy/csv/rejects/csv_rejects_read.test
test/sql/copy/csv/rejects/csv_rejects_two_tables.test
test/sql/copy/csv/rejects/csv_unquoted_rejects.test
test/sql/copy/csv/rejects/test_invalid_parameters.test
test/sql/copy/csv/rejects/test_invalid_utf_rejects.test
test/sql/copy/csv/rejects/test_mixed.test
test/sql/copy/csv/rejects/test_multiple_errors_same_line.test
test/sql/copy/csv/struct_padding.test
test/sql/copy/csv/test_12596.test
test/sql/copy/csv/test_all_quotes.test
test/sql/copy/csv/test_allow_quoted_nulls_option.test
test/sql/copy/csv/test_bgzf_read.test
test/sql/copy/csv/test_big_header.test
test/sql/copy/csv/test_blob.test
test/sql/copy/csv/test_comment_midline.test
test/sql/copy/csv/test_comment_option.test
test/sql/copy/csv/test_compression_flag.test
test/sql/copy/csv/test_copy.test
test/sql/copy/csv/test_copy_default.test
test/sql/copy/csv/test_copy_gzip.test
test/sql/copy/csv/test_copy_many_empty_lines.test
test/sql/copy/csv/test_copy_null.test
test/sql/copy/csv/test_csv_duplicate_columns.test
test/sql/copy/csv/test_csv_httpfs_prepared.test
test/sql/copy/csv/test_csv_json.test
test/sql/copy/csv/test_csv_no_trailing_newline.test
test/sql/copy/csv/test_csv_projection_pushdown.test
test/sql/copy/csv/test_csv_projection_pushdown_glob.test
test/sql/copy/csv/test_csv_remote.test
test/sql/copy/csv/test_csv_timestamp_tz_icu.test
test/sql/copy/csv/test_date.test
test/sql/copy/csv/test_date_sniffer.test
test/sql/copy/csv/test_dateformat.test
test/sql/copy/csv/test_double_sniffer.test
test/sql/copy/csv/test_empty_quote.test
test/sql/copy/csv/test_escape_long_value.test
test/sql/copy/csv/test_export_force_quotes.test
test/sql/copy/csv/test_export_not_null.test
test/sql/copy/csv/test_force_not_null.test
test/sql/copy/csv/test_force_quote.test
test/sql/copy/csv/test_glob_reorder_lineitem.test
test/sql/copy/csv/test_glob_reorder_null.test
test/sql/copy/csv/test_greek_utf8.test
test/sql/copy/csv/test_ignore_errors.test
test/sql/copy/csv/test_ignore_errors_end_of_chunk.test
test/sql/copy/csv/test_ignore_mid_null_line.test
test/sql/copy/csv/test_imdb.test
test/sql/copy/csv/test_issue3562_assertion.test
test/sql/copy/csv/test_lineitem.test
test/sql/copy/csv/test_long_line.test
test/sql/copy/csv/test_many_columns.test
test/sql/copy/csv/test_mismatch_schemas.test
test/sql/copy/csv/test_missing_row.test
test/sql/copy/csv/test_mixed_line_endings.test
test/sql/copy/csv/test_ncvoter.test
test/sql/copy/csv/test_nfc.test
test/sql/copy/csv/test_nfc_suite.test
test/sql/copy/csv/test_non_unicode_header.test
test/sql/copy/csv/test_null_padding_projection.test
test/sql/copy/csv/test_ontime.test
test/sql/copy/csv/test_partition_compression.test
test/sql/copy/csv/test_quoted_newline.test
test/sql/copy/csv/test_read_csv.test
test/sql/copy/csv/test_replacement_scan_alias.test
test/sql/copy/csv/test_skip_bom.test
test/sql/copy/csv/test_sniff_csv.test
test/sql/copy/csv/test_sniff_csv_options.test
test/sql/copy/csv/test_sniffer_tab_delimiter.test
test/sql/copy/csv/test_time.test
test/sql/copy/csv/test_timestamp_offset.test
test/sql/copy/csv/test_timestamptz_12926.test
test/sql/copy/csv/test_union_by_name.test
test/sql/copy/csv/test_web_page.test
test/sql/copy/csv/test_windows_newline.test
test/sql/copy/csv/tsv_copy.test
test/sql/copy/csv/write_header_default.test
test/sql/copy/csv/zstd_crash.test
test/sql/copy/csv/zstd_fs.test
test/sql/copy/file_size_bytes.test
test/sql/copy/format_uuid.test
test/sql/copy/parquet/batched_write/batch_memory_usage.test
test/sql/copy/parquet/batched_write/batched_parquet_write.test
test/sql/copy/parquet/delta_byte_array_length_mismatch.test
test/sql/copy/parquet/delta_byte_array_multiple_pages.test
test/sql/copy/parquet/dictionary_compression_ratio_threshold.test
test/sql/copy/parquet/hive_partitioning_struct.test
test/sql/copy/parquet/hive_timestamps.test
test/sql/copy/parquet/infer_copy_format.test
test/sql/copy/parquet/kv_metadata.test
test/sql/copy/parquet/lineitem_arrow.test
test/sql/copy/parquet/multi_file_conversion_error.test
test/sql/copy/parquet/parquet_12621.test
test/sql/copy/parquet/parquet_13053_duplicate_column_names.test
test/sql/copy/parquet/parquet_3896.test
test/sql/copy/parquet/parquet_3989.test
test/sql/copy/parquet/parquet_5209.test
test/sql/copy/parquet/parquet_5968.test
test/sql/copy/parquet/parquet_6044.test
test/sql/copy/parquet/parquet_6933.test
test/sql/copy/parquet/parquet_copy_type_mismatch.test
test/sql/copy/parquet/parquet_encryption.test
test/sql/copy/parquet/parquet_encryption_httpfs.test
test/sql/copy/parquet/parquet_filename.test
test/sql/copy/parquet/parquet_glob.test
test/sql/copy/parquet/parquet_glob_s3.test
test/sql/copy/parquet/parquet_hive.test
test/sql/copy/parquet/parquet_hive2.test
test/sql/copy/parquet/parquet_hive_empty.test
test/sql/copy/parquet/parquet_hive_null.test
test/sql/copy/parquet/parquet_http_prefetch.test
test/sql/copy/parquet/parquet_metadata_cache.test
test/sql/copy/parquet/parquet_stats.test
test/sql/copy/parquet/parquet_write_repeated_lists.test
test/sql/copy/parquet/snowflake_lineitem.test
test/sql/copy/parquet/test_parquet_duplicate_columns.test
test/sql/copy/parquet/test_parquet_force_download.test
test/sql/copy/parquet/test_parquet_remote.test
test/sql/copy/parquet/test_parquet_remote_foreign_files.test
test/sql/copy/parquet/timetz_parquet.test
test/sql/copy/parquet/union_by_name_pushdown.test
test/sql/copy/parquet/writer/parquet_test_all_types.test
test/sql/copy/parquet/writer/parquet_write_booleans.test
test/sql/copy/parquet/writer/parquet_write_compression_level.test
test/sql/copy/parquet/writer/parquet_write_date.test
test/sql/copy/parquet/writer/parquet_write_decimals.test
test/sql/copy/parquet/writer/parquet_write_enums.test
test/sql/copy/parquet/writer/parquet_write_field_id.test
test/sql/copy/parquet/writer/parquet_write_home_directory.test
test/sql/copy/parquet/writer/parquet_write_hugeint.test
test/sql/copy/parquet/writer/parquet_write_interval.test
test/sql/copy/parquet/writer/parquet_write_issue_5779.test
test/sql/copy/parquet/writer/parquet_write_string_distinct.test
test/sql/copy/parquet/writer/parquet_write_strings.test
test/sql/copy/parquet/writer/parquet_write_timestamp.test
test/sql/copy/parquet/writer/parquet_write_uhugeint.test
test/sql/copy/parquet/writer/parquet_write_uuid.test
test/sql/copy/parquet/writer/row_group_size_bytes.test
test/sql/copy/parquet/writer/test_copy_overwrite_parquet.test
test/sql/copy/parquet/writer/test_parquet_write.test
test/sql/copy/parquet/writer/test_parquet_write_complex.test
test/sql/copy/parquet/writer/write_complex_nested.test
test/sql/copy/parquet/writer/write_list.test
test/sql/copy/parquet/writer/write_map.test
test/sql/copy/parquet/writer/write_stats_big_string.test
test/sql/copy/parquet/writer/write_stats_null_count.test
test/sql/copy/parquet/writer/write_struct.test
test/sql/copy/partitioned/hive_filter_pushdown.test
test/sql/copy/partitioned/hive_partition_append.test
test/sql/copy/partitioned/hive_partition_compression.test
test/sql/copy/partitioned/hive_partition_duplicate_name.test
test/sql/copy/partitioned/hive_partition_escape.test
test/sql/copy/partitioned/hive_partition_join_pushdown.test
test/sql/copy/partitioned/hive_partition_recursive_cte.test
test/sql/copy/partitioned/hive_partitioned_auto_detect.test
test/sql/copy/partitioned/hive_partitioned_write.test
test/sql/copy/partitioned/hive_partitioning_overwrite.test
test/sql/copy/partitioned/partition_issue_6304.test
test/sql/copy/partitioned/skip_partition_column_writes.test
test/sql/copy/per_thread_output.test
test/sql/copy/return_files.test
test/sql/copy/row_groups_per_file.test
test/sql/copy/s3/download_config.test
test/sql/copy/s3/fully_qualified_s3_url.test
test/sql/copy/s3/hive_partitioned_write_s3.test
test/sql/copy/s3/http_proxy.test
test/sql/copy/s3/http_secret.test
test/sql/copy/s3/metadata_cache.test
test/sql/copy/s3/s3_hive_partition.test
test/sql/copy/s3/s3_presigned_read.test
test/sql/copy/s3/starstar.test
test/sql/copy/s3/upload_small_file.test
test/sql/copy/s3/url_encode.test
test/sql/copy/tmp_file.test
test/sql/copy_database/copy_database_errors.test
test/sql/copy_database/copy_database_gen_col.test
test/sql/copy_database/copy_database_index.test
test/sql/create/create_table_compression.test
test/sql/cte/materialized/test_recursive_cte_tutorial_materialized.test
test/sql/cte/test_cte.test
test/sql/cte/test_cte_in_cte.test
test/sql/cte/test_recursive_cte_tutorial.test
test/sql/delete/test_segment_deletes.test
test/sql/detailed_profiler/test_detailed_profiler.test
test/sql/explain/test_explain.test
test/sql/explain/test_explain_analyze.test
test/sql/export/empty_export.test
test/sql/export/export_database.test
test/sql/export/export_external_access.test
test/sql/export/export_functions.test
test/sql/export/export_generated_columns.test
test/sql/export/export_macros.test
test/sql/export/export_quoted_enum.test
test/sql/export/export_quoted_structs.test
test/sql/export/export_quoted_union.test
test/sql/export/parquet/export_parquet_bit.test
test/sql/export/parquet/export_parquet_enum.test
test/sql/export/parquet/export_parquet_hugeint.test
test/sql/export/parquet/export_parquet_json.test
test/sql/export/parquet/export_parquet_list.test
test/sql/export/parquet/export_parquet_map.test
test/sql/export/parquet/export_parquet_struct.test
test/sql/export/parquet/export_parquet_union.test
test/sql/export/parquet_export.test
test/sql/extensions/version_is_valid_sqlite.test
test/sql/filter/test_struct_pushdown.test
test/sql/fts/issue_12330.test
test/sql/fts/test_fts_attach.test
test/sql/fts/test_issue_10254.test
test/sql/fts/test_issue_10281.test
test/sql/function/date/test_date_part.test
test/sql/function/generic/can_cast_implicitly.test
test/sql/function/generic/test_in.test
test/sql/function/generic/test_null_if.test
test/sql/function/interval/test_date_part.test
test/sql/function/list/aggregates/any_value.test
test/sql/function/list/aggregates/avg.test
test/sql/function/list/aggregates/bit_and.test
test/sql/function/list/aggregates/bit_or.test
test/sql/function/list/aggregates/bit_xor.test
test/sql/function/list/aggregates/first.test
test/sql/function/list/aggregates/last.test
test/sql/function/list/aggregates/max.test
test/sql/function/list/aggregates/min.test
test/sql/function/list/array_to_string_comma_default.test
test/sql/function/list/lambdas/list_comprehension.test
test/sql/function/list/lambdas/table_functions.test
test/sql/function/list/list_contains.test
test/sql/function/list/list_has_any_and_has_all.test
test/sql/function/list/list_inner_product.test
test/sql/function/list/list_position.test
test/sql/function/list/list_sort_having.test
test/sql/function/list/list_zip.test
test/sql/function/list/repeat_list.test
test/sql/function/numeric/test_fdiv_fmod.test
test/sql/function/numeric/test_random.test
test/sql/function/numeric/test_trigo.test
test/sql/function/operator/test_bitwise_ops.test
test/sql/function/operator/test_bitwise_ops_types.test
test/sql/function/operator/test_in_empty_table.test
test/sql/function/string/md5.test
test/sql/function/string/regex_search.test
test/sql/function/string/sha1.test
test/sql/function/string/sha256.test
test/sql/function/string/test_damerau_levenshtein.test
test/sql/function/string/test_format_extensions.test
test/sql/function/string/test_mismatches.test
test/sql/function/string/test_printf.test
test/sql/function/string/test_url_encode.test
test/sql/function/time/test_date_part.test
test/sql/function/timestamp/age.test
test/sql/function/timestamp/test_date_part.test
test/sql/function/timestamp/test_icu_age.test
test/sql/function/timestamp/test_now.test
test/sql/function/timetz/test_date_part.test
test/sql/function/uuid/test_uuid.test
test/sql/generated_columns/virtual/rename.test
test/sql/httpfs/internal_issue_2490.test
test/sql/index/art/create_drop/test_art_create_index_delete.test
test/sql/index/art/create_drop/test_art_create_many_duplicates.test
test/sql/index/art/create_drop/test_art_many_versions.test
test/sql/index/art/insert_update_delete/test_art_update_other_column.test
test/sql/index/art/issues/test_art_issue_6603.test
test/sql/index/art/issues/test_art_issue_7349.test
test/sql/index/art/nodes/test_art_leaf.test
test/sql/index/art/nodes/test_art_node_16.test
test/sql/index/art/nodes/test_art_node_256.test
test/sql/index/art/nodes/test_art_node_4.test
test/sql/index/art/nodes/test_art_node_48.test
test/sql/index/art/scan/test_art_adaptive_scan.test
test/sql/index/art/scan/test_art_many_matches.test
test/sql/index/art/scan/test_art_negative_range_scan.test
test/sql/index/art/scan/test_art_range_scan.test
test/sql/index/art/scan/test_art_scan_thresholds.test
test/sql/index/art/storage/test_art_auto_checkpoint.test
test/sql/index/art/storage/test_art_duckdb_versions.test
test/sql/index/art/storage/test_art_import.test
test/sql/index/art/storage/test_art_import_export.test
test/sql/index/art/types/test_art_varchar.test
test/sql/index/art/vacuum/test_art_vacuum_rollback.test
test/sql/insert/unaligned_interleaved_appends.test
test/sql/join/iejoin/iejoin_issue_6861.test
test/sql/join/iejoin/iejoin_issue_7278.test
test/sql/join/iejoin/merge_join_switch.test
test/sql/join/iejoin/predicate_expressions.test
test/sql/join/iejoin/test_iejoin.test
test/sql/join/iejoin/test_iejoin_east_west.test
test/sql/join/iejoin/test_iejoin_events.test
test/sql/join/iejoin/test_iejoin_null_keys.test
test/sql/join/iejoin/test_iejoin_overlaps.test
test/sql/join/inner/empty_tinyint_column.test
test/sql/join/inner/equality_join_limits.test
test/sql/join/inner/test_using_chain.test
test/sql/join/natural/natural_join.test
test/sql/join/semianti/antijoin.test
test/sql/join/semianti/right_anti.test
test/sql/join/semianti/right_semi.test
test/sql/join/semianti/semijoin.test
test/sql/json/issues/internal_issue2732.test
test/sql/json/issues/issue12188.test
test/sql/json/issues/issue12861.test
test/sql/json/issues/issue13212.test
test/sql/json/issues/read_json_memory_usage.test
test/sql/json/scalar/test_json_exists.test
test/sql/json/scalar/test_json_path.test
test/sql/json/scalar/test_json_pretty.test
test/sql/json/scalar/test_json_value.test
test/sql/json/table/json_multi_file_reader.test
test/sql/json/table/read_json.test
test/sql/json/table/read_json_objects.test
test/sql/json/test_json_empty_object.test
test/sql/json/test_json_export.test
test/sql/json/test_json_serialize_sql.test
test/sql/limit/test_preserve_insertion_order.test
test/sql/optimizer/expression/test_nop_arithmetic.test
test/sql/overflow/table_overflow.test
test/sql/parallelism/intraquery/parallel_sample.test
test/sql/parallelism/intraquery/test_parallel_nested_aggregates.test
test/sql/parallelism/intraquery/test_persistent_parallelism.test
test/sql/parallelism/intraquery/test_simple_parallelism.test
test/sql/parallelism/intraquery/test_verify_parallelism.test
test/sql/parser/test_columns_unpacked.test
test/sql/parser/test_value_functions.test
test/sql/pg_catalog/pg_privilege.test
test/sql/pivot/optional_pivots.test
test/sql/pivot/pivot_6390.test
test/sql/pivot/pivot_bigquery.test
test/sql/pivot/pivot_databricks.test
test/sql/pivot/pivot_empty.test
test/sql/pivot/pivot_example.test
test/sql/pivot/pivot_expressions.test
test/sql/pivot/pivot_in_boolean.test
test/sql/pivot/pivot_in_subquery.test
test/sql/pivot/pivot_struct_aggregate.test
test/sql/pivot/pivot_subquery.test
test/sql/pivot/top_level_pivot_syntax.test
test/sql/pragma/pragma_database_size_readonly.test
test/sql/pragma/test_custom_optimizer_profiling.test
test/sql/pragma/test_custom_profiling_settings.test
test/sql/pragma/test_metadata_info.test
test/sql/pragma/test_pragma_database_list.test
test/sql/pragma/test_pragma_database_size.test
test/sql/pragma/test_pragma_functions.test
test/sql/pragma/test_pragma_parsing.test
test/sql/pragma/test_pragma_version.test
test/sql/pragma/test_query_log.test
test/sql/pragma/test_show_tables.test
test/sql/pragma/test_storage_info.test
test/sql/pragma/test_table_info.test
test/sql/prepared/prepare_copy.test
test/sql/prepared/prepare_maintain_types.test
test/sql/prepared/prepared_null_binding.test
test/sql/prepared/test_prepare_delete.test
test/sql/prepared/test_prepare_delete_update.test
test/sql/projection/test_row_id_expression.test
test/sql/returning/returning_update.test
test/sql/secrets/create_secret_binding.test
test/sql/secrets/create_secret_defaults.test
test/sql/secrets/create_secret_hffs.test
test/sql/secrets/create_secret_minio.test
test/sql/secrets/create_secret_name_conflicts.test
test/sql/secrets/create_secret_non_writable_persistent_dir.test
test/sql/secrets/create_secret_overwriting.test
test/sql/secrets/create_secret_persistence.test
test/sql/secrets/create_secret_persistence_error_handling.test
test/sql/secrets/create_secret_r2.test
test/sql/secrets/create_secret_r2_serialization.test
test/sql/secrets/create_secret_s3_serialization.test
test/sql/secrets/create_secret_scope_matching.test
test/sql/secrets/create_secret_settings.test
test/sql/secrets/create_secret_storage_backends.test
test/sql/secrets/create_secret_transactional.test
test/sql/select/test_multi_column_reference.test
test/sql/setops/test_union_all_by_name.test
test/sql/settings/test_disabled_file_system_httpfs.test
test/sql/settings/test_disabled_file_systems.test
test/sql/show_select/test_describe_quoted.test
test/sql/show_select/test_summarize_quoted.test
test/sql/storage/bc/test_view_v092.test
test/sql/storage/buffer_manager_temp_dir.test
test/sql/storage/catalog/test_macro_storage.test
test/sql/storage/catalog/test_sequence_uncommitted_transaction.test
test/sql/storage/catalog/test_store_default_sequence.test
test/sql/storage/catalog/test_store_rename_column.test
test/sql/storage/catalog/test_store_rename_table.test
test/sql/storage/catalog/test_store_rename_view.test
test/sql/storage/catalog/test_store_sequences.test
test/sql/storage/catalog/test_store_temporary.test
test/sql/storage/catalog/test_table_macro_storage.test
test/sql/storage/catalog/test_view_explicit_aliases.test
test/sql/storage/catalog/test_view_storage.test
test/sql/storage/catalog/test_view_storage_no_view_dependencies.test
test/sql/storage/commit_abort.test
test/sql/storage/compact_block_size/block_size_with_rollback.test
test/sql/storage/compact_block_size/compact_block_size.test
test/sql/storage/compact_block_size/compact_vector_size.test
test/sql/storage/compact_block_size/create_table_compression.test
test/sql/storage/compact_block_size/default_block_size.test
test/sql/storage/compact_block_size/ensure_bitpacking.test
test/sql/storage/compact_block_size/ensure_no_bitpacking.test
test/sql/storage/compact_block_size/insertion_order_odd_batches.test
test/sql/storage/compact_block_size/mixed_block_sizes.test
test/sql/storage/compression/alp/alp_inf_null_nan.test
test/sql/storage/compression/alp/alp_nulls.test
test/sql/storage/compression/alp/alp_nulls_simple.test
test/sql/storage/compression/alprd/alprd_inf_null_nan.test
test/sql/storage/compression/alprd/alprd_nulls.test
test/sql/storage/compression/alprd/alprd_nulls_simple.test
test/sql/storage/compression/bitpacking/bitpacking_hugeint.test
test/sql/storage/compression/bitpacking/bitpacking_size_calculation.test
test/sql/storage/compression/bitpacking/bitpacking_uhugeint.test
test/sql/storage/multiple_clients_checkpoint_pending_updates.test
test/sql/storage/shutdown_running_transaction.test
test/sql/storage/shutdown_running_transaction_updates.test
test/sql/storage/temp_directory/max_swap_space_error.test
test/sql/storage/temp_directory/max_swap_space_explicit.test
test/sql/storage/temp_directory/max_swap_space_inmemory.test
test/sql/storage/temp_directory/max_swap_space_persistent.test
test/sql/storage/unicode_filename.test
test/sql/storage/wal/test_wal_bc.test
test/sql/storage/wal/wal_lazy_creation.test
test/sql/storage/wal/wal_sequence_uncommitted_transaction.test
test/sql/storage/wal/wal_storage_types.test
test/sql/storage/wal/wal_store_default_sequence.test
test/sql/storage/wal/wal_store_rename_column.test
test/sql/storage/wal/wal_store_rename_table.test
test/sql/storage/wal/wal_store_rename_view.test
test/sql/storage/wal/wal_store_sequences.test
test/sql/storage/wal/wal_store_temporary.test
test/sql/storage/wal/wal_view_explicit_aliases.test
test/sql/storage/wal/wal_view_explicit_aliases_no_view_dependencies.test
test/sql/storage/wal/wal_view_storage.test
test/sql/storage/wal/wal_view_storage_no_view_dependencies.test
test/sql/subquery/any_all/subquery_in.test
test/sql/subquery/lateral/lateral_binding_views.test
test/sql/subquery/lateral/test_lateral_join.test
test/sql/subquery/scalar/test_correlated_subquery.test
test/sql/subquery/scalar/test_correlated_subquery_cte.test
test/sql/subquery/scalar/test_issue_6136.test
test/sql/subquery/scalar/test_scalar_subquery.test
test/sql/subquery/scalar/test_scalar_subquery_cte.test
test/sql/subquery/scalar/test_uncorrelated_scalar_subquery.test
test/sql/table_function/duckdb_constraints_fk.test
test/sql/table_function/duckdb_databases.test
test/sql/table_function/information_schema.test
test/sql/table_function/range_function_lateral.test
test/sql/table_function/range_timestamp.test
test/sql/table_function/sqlite_master_connections.test
test/sql/tpcds/dsdgen_readonly.test
test/sql/tpch/dbgen_readonly.test
test/sql/transactions/aborted_transaction_commit.test
test/sql/transactions/test_basic_transactions.test
test/sql/transactions/test_from_update_conflict.test
test/sql/transactions/test_index_abort.test
test/sql/transactions/test_index_rollback_flushed_data.test
test/sql/transactions/test_index_transaction_local.test
test/sql/transactions/test_interleaved_versions.test
test/sql/transactions/test_multi_transaction_append.test
test/sql/transactions/test_read_only_transactions.test
test/sql/transactions/test_transactional_sequences.test
test/sql/transactions/transaction_insert_delete_chunks.test
test/sql/transactions/transaction_insert_mixed_deletes.test
test/sql/types/alias/test_alias_table.test
test/sql/types/bit/test_bit.test
test/sql/types/enum/test_enum_schema.test
test/sql/types/enum/test_enum_table.test
test/sql/types/enum/test_enum_to_numbers.test
test/sql/types/interval/interval_try_cast.test
test/sql/types/list/unnest_complex_types.test
test/sql/types/list/unnest_table_function.test
test/sql/types/list/unnest_types.test
test/sql/types/map/map_const_and_col_combination.test
test/sql/types/nested/array/array_aggregate.test
test/sql/types/nested/array/array_fuzzer_failures.test
test/sql/types/nested/array/array_roundtrip_csv.test
test/sql/types/nested/array/array_roundtrip_json.test
test/sql/types/nested/array/array_roundtrip_parquet.test
test/sql/types/nested/array/array_simple.test
test/sql/types/nested/array/array_tupleformat.test
test/sql/types/nested/list/test_list_extract.test
test/sql/types/nested/map/map_from_entries/column_null_entry.test
test/sql/types/nested/map/test_map_cardinality.test
test/sql/types/nested/map/test_map_concat.test
test/sql/types/nested/map/test_map_contains.test
test/sql/types/nested/map/test_map_subscript.test
test/sql/types/nested/map/test_map_subscript_composite.test
test/sql/types/nested/map/test_map_vector_types.test
test/sql/types/nested/struct/struct_aggregates_types.test
test/sql/types/null/test_null.test
test/sql/types/struct/struct_cast.test
test/sql/types/struct/unnest_struct_mix.test
test/sql/types/time/test_time.test
test/sql/types/union/struct_to_json_union.test
test/sql/types/union/union_cast.test
test/sql/types/union/union_join.test
test/sql/types/bignum/test_big_bignum.test
test/sql/types/bignum/test_double_bignum.test
test/sql/types/bignum/test_int_bignum_conversion.test
test/sql/types/bignum/test_varchar_bignum_conversion.test
test/sql/types/bignum/test_varchar_bignum_unhappy.test
test/sql/types/bignum/test_bignum_boundaries.test
test/sql/types/bignum/test_bignum_comparisons.test
test/sql/types/bignum/test_bignum_double.test
test/sql/types/bignum/test_bignum_hugeint.test
test/sql/types/bignum/test_bignum_implicit_cast.test
test/sql/update/string_update_transaction_local_7348.test
test/sql/update/test_cascading_updates.test
test/sql/update/test_update_delete_same_tuple.test
test/sql/update/test_update_issue_3170.test
test/sql/update/test_update_many_updaters.test
test/sql/update/test_update_many_updaters_nulls.test
test/sql/upsert/postgres/planner_preprocessing.test
test/sql/upsert/upsert_conflict_in_different_chunk.test
test/sql/upsert/upsert_distinct_bug.test
test/sql/upsert/upsert_lambda.test
test/sql/upsert/upsert_local_no_tuples.test
test/sql/variables/test_variables.test
test/sql/window/test_empty_frames.test
test/sql/window/test_thread_count.test
test/sql/window/test_tpcds_q49.test
test/sql/window/test_window_bool.test
test/sql/window/test_window_order_collate.test
test/sql/window/test_window_range.test
test/sql/window/test_window_tpcds.test
test/sql/pg_catalog/system_functions.test
test/sql/copy/csv/test_soccer_kaggle.test
test/sql/copy/csv/test_headers_12089.test
test/sql/order/test_order_range_mapping.test
test/sql/order/test_order_by_exceptions.test
test/sql/order/test_order_by.test
test/sql/order/test_limit.test
test/sql/table_function/duckdb_constraints.test
test/sql/table_function/duckdb_constraints_issue12863.test
test/sql/table_function/information_schema_fkey_constraint_names.test
# infinite loop bug in older version
test/sql/cte/test_recursive_cte_union.test
test/sql/cte/test_recursive_cte_union_all.test
# collation fixes
test/sql/collate/collate_ordered_aggregate.test
# timetz ordering fixes
test/sql/types/time/test_time_tz.test
test/sql/function/operator/test_date_arithmetic.test
# histogram/list_distinct type fix
test/sql/function/list/list_distinct.test
test/sql/function/list/aggregates/histogram.test
test/sql/aggregate/aggregates/test_mode.test
test/sql/aggregate/aggregates/test_histogram.test
test/sql/aggregate/aggregates/test_approx_quantile.test
# least/greatest type fix
test/sql/function/generic/test_least_greatest.test
test/sql/function/generic/least_greatest_enum.test
test/sql/types/uhugeint/test_uhugeint_functions.test
# null type changes
test/sql/types/list/list_concat_null.test
test/sql/function/string/test_concat_function.test
test/sql/function/string/test_concat_binding.test
test/sql/function/string/test_concat.test
# lateral join plan fix
test/sql/subquery/lateral/lateral_grouping_sets.test
# median/quantile type fixes
test/sql/types/nested/array/array_summarize.test
test/sql/show_select/test_summarize.test
# information schema fixes
test/sql/table_function/information_schema_issue12867.test
# test_all_types changes
test/sql/json/scalar/json_nested_casts.test
test/sql/function/string/hex.test
test/sql/function/list/lambdas/lambdas_and_group_by.test
test/sql/subquery/exists/test_exists_union_by_name.test
# extension load mismatch
test/sql/function/timestamp/test_try_strptime.test
test/sql/function/timestamp/test_strptime.test
test/sql/function/timestamp/test_icu_strftime.test
test/sql/extensions/version_is_valid_httpfs.test
# quantile bug fixes
test/sql/aggregate/aggregates/test_quantile_disc_list.test
test/sql/aggregate/aggregates/test_quantile_disc.test
test/sql/aggregate/aggregates/test_median.test
# nanosecond fix
test/sql/function/timestamp/test_strftime_timestamp_ns.test
# concat type change
test/sql/types/blob/test_blob_function.test
# rowsort (FIXME: this can be addressed in the testing framework)
test/sql/function/string/regex_capture.test
# dollar quote parser fix
test/sql/parser/dollar_quotes_internal_issue2224.test

View File

@@ -0,0 +1,24 @@
SELECT l_returnflag, l_linestatus, sum(l_quantity) AS sum_qty, sum(l_extendedprice) AS sum_base_price, sum(l_extendedprice * (1 - l_discount)) AS sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge, avg(l_quantity) AS avg_qty, avg(l_extendedprice) AS avg_price, avg(l_discount) AS avg_disc, count(*) AS count_order FROM lineitem WHERE l_shipdate <= CAST('1998-09-02' AS date) GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus;
SELECT s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment FROM part, supplier, partsupp, nation, region WHERE p_partkey = ps_partkey AND s_suppkey = ps_suppkey AND p_size = 15 AND p_type LIKE '%BRASS' AND s_nationkey = n_nationkey AND n_regionkey = r_regionkey AND r_name = 'EUROPE' AND ps_supplycost = ( SELECT min(ps_supplycost) FROM partsupp, supplier, nation, region WHERE p_partkey = ps_partkey AND s_suppkey = ps_suppkey AND s_nationkey = n_nationkey AND n_regionkey = r_regionkey AND r_name = 'EUROPE') ORDER BY s_acctbal DESC, n_name, s_name, p_partkey LIMIT 100;
SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) AS revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < CAST('1995-03-15' AS date) AND l_shipdate > CAST('1995-03-15' AS date) GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate LIMIT 10;
SELECT o_orderpriority, count(*) AS order_count FROM orders WHERE o_orderdate >= CAST('1993-07-01' AS date) AND o_orderdate < CAST('1993-10-01' AS date) AND EXISTS ( SELECT * FROM lineitem WHERE l_orderkey = o_orderkey AND l_commitdate < l_receiptdate) GROUP BY o_orderpriority ORDER BY o_orderpriority;
SELECT n_name, sum(l_extendedprice * (1 - l_discount)) AS revenue FROM customer, orders, lineitem, supplier, nation, region WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND l_suppkey = s_suppkey AND c_nationkey = s_nationkey AND s_nationkey = n_nationkey AND n_regionkey = r_regionkey AND r_name = 'ASIA' AND o_orderdate >= CAST('1994-01-01' AS date) AND o_orderdate < CAST('1995-01-01' AS date) GROUP BY n_name ORDER BY revenue DESC;
SELECT sum(l_extendedprice * l_discount) AS revenue FROM lineitem WHERE l_shipdate >= CAST('1994-01-01' AS date) AND l_shipdate < CAST('1995-01-01' AS date) AND l_discount BETWEEN 0.05 AND 0.07 AND l_quantity < 24;
SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT n1.n_name AS supp_nation, n2.n_name AS cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier, lineitem, orders, customer, nation n1, nation n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ((n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')) AND l_shipdate BETWEEN CAST('1995-01-01' AS date) AND CAST('1996-12-31' AS date)) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year;
SELECT o_year, sum( CASE WHEN nation = 'BRAZIL' THEN volume ELSE 0 END) / sum(volume) AS mkt_share FROM ( SELECT extract(year FROM o_orderdate) AS o_year, l_extendedprice * (1 - l_discount) AS volume, n2.n_name AS nation FROM part, supplier, lineitem, orders, customer, nation n1, nation n2, region WHERE p_partkey = l_partkey AND s_suppkey = l_suppkey AND l_orderkey = o_orderkey AND o_custkey = c_custkey AND c_nationkey = n1.n_nationkey AND n1.n_regionkey = r_regionkey AND r_name = 'AMERICA' AND s_nationkey = n2.n_nationkey AND o_orderdate BETWEEN CAST('1995-01-01' AS date) AND CAST('1996-12-31' AS date) AND p_type = 'ECONOMY ANODIZED STEEL') AS all_nations GROUP BY o_year ORDER BY o_year;
SELECT nation, o_year, sum(amount) AS sum_profit FROM ( SELECT n_name AS nation, extract(year FROM o_orderdate) AS o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity AS amount FROM part, supplier, lineitem, partsupp, orders, nation WHERE s_suppkey = l_suppkey AND ps_suppkey = l_suppkey AND ps_partkey = l_partkey AND p_partkey = l_partkey AND o_orderkey = l_orderkey AND s_nationkey = n_nationkey AND p_name LIKE '%green%') AS profit GROUP BY nation, o_year ORDER BY nation, o_year DESC;
SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) AS revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer, orders, lineitem, nation WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= CAST('1993-10-01' AS date) AND o_orderdate < CAST('1994-01-01' AS date) AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20;
SELECT ps_partkey, sum(ps_supplycost * ps_availqty) AS value FROM partsupp, supplier, nation WHERE ps_suppkey = s_suppkey AND s_nationkey = n_nationkey AND n_name = 'GERMANY' GROUP BY ps_partkey HAVING sum(ps_supplycost * ps_availqty) > ( SELECT sum(ps_supplycost * ps_availqty) * 0.0001000000 FROM partsupp, supplier, nation WHERE ps_suppkey = s_suppkey AND s_nationkey = n_nationkey AND n_name = 'GERMANY') ORDER BY value DESC;
SELECT l_shipmode, sum( CASE WHEN o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' THEN 1 ELSE 0 END) AS high_line_count, sum( CASE WHEN o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' THEN 1 ELSE 0 END) AS low_line_count FROM orders, lineitem WHERE o_orderkey = l_orderkey AND l_shipmode IN ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= CAST('1994-01-01' AS date) AND l_receiptdate < CAST('1995-01-01' AS date) GROUP BY l_shipmode ORDER BY l_shipmode;
SELECT c_count, count(*) AS custdist FROM ( SELECT c_custkey, count(o_orderkey) FROM customer LEFT OUTER JOIN orders ON c_custkey = o_custkey AND o_comment NOT LIKE '%special%requests%' GROUP BY c_custkey) AS c_orders (c_custkey, c_count) GROUP BY c_count ORDER BY custdist DESC, c_count DESC;
SELECT 100.00 * sum( CASE WHEN p_type LIKE 'PROMO%' THEN l_extendedprice * (1 - l_discount) ELSE 0 END) / sum(l_extendedprice * (1 - l_discount)) AS promo_revenue FROM lineitem, part WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < CAST('1995-10-01' AS date);
SELECT s_suppkey, s_name, s_address, s_phone, total_revenue FROM supplier, ( SELECT l_suppkey AS supplier_no, sum(l_extendedprice * (1 - l_discount)) AS total_revenue FROM lineitem WHERE l_shipdate >= CAST('1996-01-01' AS date) AND l_shipdate < CAST('1996-04-01' AS date) GROUP BY supplier_no) revenue0 WHERE s_suppkey = supplier_no AND total_revenue = ( SELECT max(total_revenue) FROM ( SELECT l_suppkey AS supplier_no, sum(l_extendedprice * (1 - l_discount)) AS total_revenue FROM lineitem WHERE l_shipdate >= CAST('1996-01-01' AS date) AND l_shipdate < CAST('1996-04-01' AS date) GROUP BY supplier_no) revenue1) ORDER BY s_suppkey;
SELECT p_brand, p_type, p_size, count(DISTINCT ps_suppkey) AS supplier_cnt FROM partsupp, part WHERE p_partkey = ps_partkey AND p_brand <> 'Brand#45' AND p_type NOT LIKE 'MEDIUM POLISHED%' AND p_size IN (49, 14, 23, 45, 19, 3, 36, 9) AND ps_suppkey NOT IN ( SELECT s_suppkey FROM supplier WHERE s_comment LIKE '%Customer%Complaints%') GROUP BY p_brand, p_type, p_size ORDER BY supplier_cnt DESC, p_brand, p_type, p_size;
SELECT sum(l_extendedprice) / 7.0 AS avg_yearly FROM lineitem, part WHERE p_partkey = l_partkey AND p_brand = 'Brand#23' AND p_container = 'MED BOX' AND l_quantity < ( SELECT 0.2 * avg(l_quantity) FROM lineitem WHERE l_partkey = p_partkey);
SELECT c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) FROM customer, orders, lineitem WHERE o_orderkey IN ( SELECT l_orderkey FROM lineitem GROUP BY l_orderkey HAVING sum(l_quantity) > 300) AND c_custkey = o_custkey AND o_orderkey = l_orderkey GROUP BY c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice ORDER BY o_totalprice DESC, o_orderdate LIMIT 100;
SELECT sum(l_extendedprice * (1 - l_discount)) AS revenue FROM lineitem, part WHERE (p_partkey = l_partkey AND p_brand = 'Brand#12' AND p_container IN ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') AND l_quantity >= 1 AND l_quantity <= 1 + 10 AND p_size BETWEEN 1 AND 5 AND l_shipmode IN ('AIR', 'AIR REG') AND l_shipinstruct = 'DELIVER IN PERSON') OR (p_partkey = l_partkey AND p_brand = 'Brand#23' AND p_container IN ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') AND l_quantity >= 10 AND l_quantity <= 10 + 10 AND p_size BETWEEN 1 AND 10 AND l_shipmode IN ('AIR', 'AIR REG') AND l_shipinstruct = 'DELIVER IN PERSON') OR (p_partkey = l_partkey AND p_brand = 'Brand#34' AND p_container IN ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') AND l_quantity >= 20 AND l_quantity <= 20 + 10 AND p_size BETWEEN 1 AND 15 AND l_shipmode IN ('AIR', 'AIR REG') AND l_shipinstruct = 'DELIVER IN PERSON');
SELECT s_name, s_address FROM supplier, nation WHERE s_suppkey IN ( SELECT ps_suppkey FROM partsupp WHERE ps_partkey IN ( SELECT p_partkey FROM part WHERE p_name LIKE 'forest%') AND ps_availqty > ( SELECT 0.5 * sum(l_quantity) FROM lineitem WHERE l_partkey = ps_partkey AND l_suppkey = ps_suppkey AND l_shipdate >= CAST('1994-01-01' AS date) AND l_shipdate < CAST('1995-01-01' AS date))) AND s_nationkey = n_nationkey AND n_name = 'CANADA' ORDER BY s_name;
SELECT s_name, count(*) AS numwait FROM supplier, lineitem l1, orders, nation WHERE s_suppkey = l1.l_suppkey AND o_orderkey = l1.l_orderkey AND o_orderstatus = 'F' AND l1.l_receiptdate > l1.l_commitdate AND EXISTS ( SELECT * FROM lineitem l2 WHERE l2.l_orderkey = l1.l_orderkey AND l2.l_suppkey <> l1.l_suppkey) AND NOT EXISTS ( SELECT * FROM lineitem l3 WHERE l3.l_orderkey = l1.l_orderkey AND l3.l_suppkey <> l1.l_suppkey AND l3.l_receiptdate > l3.l_commitdate) AND s_nationkey = n_nationkey AND n_name = 'SAUDI ARABIA' GROUP BY s_name ORDER BY numwait DESC, s_name LIMIT 100;
SELECT * FROM range(10);
SELECT * FROM range(0, 100);
SELECT * FROM generate_series(0, 10) t(i);

Binary file not shown.

View File

@@ -0,0 +1,133 @@
#include "catch.hpp"
#include "duckdb/common/serializer/buffered_file_reader.hpp"
#include "duckdb/common/serializer/buffered_file_writer.hpp"
#include "duckdb/parser/parser.hpp"
#include "duckdb/planner/planner.hpp"
#include "duckdb/parser/statement/logical_plan_statement.hpp"
#include "duckdb/common/serializer/binary_serializer.hpp"
#include "duckdb/common/serializer/binary_deserializer.hpp"
#include "test_helpers.hpp"
#include "tpch_extension.hpp"
#include <fstream>
using namespace duckdb;
using namespace std;
string get_full_file_name(const string &file_name) {
	// Build a sibling path of this source file: take the compile-time path of
	// this .cpp file and swap its basename for the requested file name.
	const string this_file("test_plan_serialization_bwc.cpp");
	string full_path(__FILE__);
	const auto basename_pos = full_path.rfind(this_file);
	full_path.replace(basename_pos, this_file.length(), file_name);
	return full_path;
}
void load_db(Connection &con) {
	// Replay the database setup script one statement per line; every line is
	// required to succeed.
	std::ifstream setup_script(get_full_file_name("db_load.sql"));
	for (string statement; std::getline(setup_script, statement);) {
		REQUIRE_NO_FAIL(con.Query(statement));
	}
}
void test_deserialization(const string &file_location);
const char *PERSISTENT_FILE_NAME = "serialized_plans.binary";
TEST_CASE("Generate serialized plans file", "[.][serialization]") {
	// Serializes the logical plan of every query in queries.sql (one query per
	// line) to a binary file, then verifies the file round-trips correctly.
	string file_location;
	if (std::getenv("GEN_PLAN_STORAGE") != nullptr) {
		// there is no way in catch2 to only run a test if explicitly requested. Hidden tests will
		// run when "*" is used - which we do to run slow tests. To avoid re-generating the bin file
		// we require an env variable to be explicitly set.
		//
		// set `GEN_PLAN_STORAGE` as an environment variable to generate the serialized file
		file_location = get_full_file_name(PERSISTENT_FILE_NAME);
	} else {
		// default: write to a throwaway path so the checked-in file is untouched
		file_location = TestCreatePath("serialized_plans.new.binary");
	}
	DuckDB db;
	Connection con(db);
	load_db(con);
	BufferedFileWriter target(db.GetFileSystem(), file_location);
	std::ifstream queries(get_full_file_name("queries.sql"));
	string query;
	while (std::getline(queries, query)) {
		con.BeginTransaction();
		// parse + plan the query, then append its serialized plan to the file
		Parser p;
		p.ParseQuery(query);
		Planner planner(*con.context);
		planner.CreatePlan(std::move(p.statements[0]));
		auto plan = std::move(planner.plan);
		BinarySerializer serializer(target);
		serializer.Begin();
		plan->Serialize(serializer);
		serializer.End();
		// planning only; nothing was executed, so roll the transaction back
		con.Rollback();
	}
	target.Sync();
	// sanity check: the freshly written file must deserialize and execute
	test_deserialization(file_location);
}
TEST_CASE("Test deserialized plans from file", "[.][serialization]") {
	// Backwards-compatibility check: plans serialized into the checked-in
	// binary file must still deserialize and execute correctly.
	test_deserialization(get_full_file_name(PERSISTENT_FILE_NAME));
}
void test_deserialization(const string &file_location) {
	// Reads one serialized plan per query from `file_location`, executes it,
	// and compares the result against planning/executing the query text with
	// the current build. The queries file and the binary file must be aligned
	// line-for-line / plan-for-plan.
	DuckDB db;
	Connection con(db);
	load_db(con);
	BufferedFileReader file_source(db.GetFileSystem(), file_location.c_str());
	std::ifstream queries(get_full_file_name("queries.sql"));
	string query;
	while (std::getline(queries, query)) {
		INFO("evaluating " << query)
		con.BeginTransaction();
		// read the next serialized plan from the binary file
		BinaryDeserializer deserializer(file_source);
		deserializer.Set<ClientContext &>(*con.context);
		deserializer.Begin();
		auto deserialized_plan = LogicalOperator::Deserialize(deserializer);
		deserializer.End();
		deserialized_plan->ResolveOperatorTypes();
		// execute the deserialized plan directly
		auto deserialized_results =
		    con.context->Query(make_uniq<LogicalPlanStatement>(std::move(deserialized_plan)), false);
		REQUIRE_NO_FAIL(*deserialized_results);
		// NOTE(review): expected_plan is built/resolved only as a planning
		// sanity check; the reference result itself comes from con.Query below
		Parser p;
		p.ParseQuery(query);
		Planner planner(*con.context);
		planner.CreatePlan(std::move(p.statements[0]));
		auto expected_plan = std::move(planner.plan);
		expected_plan->ResolveOperatorTypes();
		auto expected_results = con.Query(query);
		REQUIRE_NO_FAIL(*expected_results);
		if (deserialized_results->names.size() == expected_results->names.size()) {
			// ignore names
			deserialized_results->names = expected_results->names;
		}
		if (!deserialized_results->Equals(*expected_results)) {
			// print both result sets before failing to ease debugging
			fprintf(stderr, "-----------------------------------\n");
			fprintf(stderr, "Deserialized result does not match!\n");
			fprintf(stderr, "-----------------------------------\n");
			fprintf(stderr, "Query: %s\n", query.c_str());
			fprintf(stderr, "-------------Deserialized----------\n");
			deserialized_results->Print();
			fprintf(stderr, "---------------Expected------------\n");
			expected_results->Print();
			fprintf(stderr, "-----------------------------------\n");
			FAIL("Deserialized result does not match");
		}
		con.Rollback();
	}
}

757
external/duckdb/test/api/test_api.cpp vendored Normal file
View File

@@ -0,0 +1,757 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/parser/parser.hpp"
#include "duckdb/planner/logical_operator.hpp"
#include "duckdb/main/connection_manager.hpp"
#include "duckdb/parser/statement/select_statement.hpp"
#include "duckdb/parser/query_node/select_node.hpp"
#include <chrono>
#include <thread>
using namespace duckdb;
using namespace std;
TEST_CASE("Test comment in CPP API", "[api]") {
	// Sending a statement that consists solely of a SQL comment must not crash.
	DuckDB database(nullptr);
	Connection connection(database);
	connection.EnableQueryVerification();
	connection.SendQuery("--ups");
	// reaching this point means no crash occurred
	REQUIRE(1);
}
TEST_CASE("Test StarExpression replace_list parameter", "[api]") {
	// A prepared-statement parameter inside a star REPLACE list must be
	// detected by HasParameter() on the select-list expression.
	DuckDB database(nullptr);
	Connection connection(database);
	auto statements = connection.ExtractStatements("select * replace(i * $n as i) from range(1, 10) t(i)");
	auto &select_statement = statements[0]->Cast<SelectStatement>();
	auto &node = select_statement.node->Cast<SelectNode>();
	REQUIRE(node.select_list[0]->HasParameter());
}
TEST_CASE("Test using connection after database is gone", "[api]") {
	// Connections keep the underlying database instance alive, so resetting
	// the DuckDB handle must not invalidate an existing connection.
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	// check that the connection works
	auto result = conn->Query("SELECT 42");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	// destroy the database
	db.reset();
	// try to use the connection
	// it still works: the database remains until all connections are destroyed
	REQUIRE_NO_FAIL(conn->Query("SELECT 42"));
	// now try it with an open transaction
	db = make_uniq<DuckDB>(nullptr);
	conn = make_uniq<Connection>(*db);
	REQUIRE_NO_FAIL(conn->Query("BEGIN TRANSACTION"));
	result = conn->Query("SELECT 42");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	db.reset();
	// querying inside the still-open transaction must also keep working
	REQUIRE_NO_FAIL(conn->Query("SELECT 42"));
}
TEST_CASE("Test destroying connections with open transactions", "[api]") {
	// A connection destroyed mid-transaction must roll back its uncommitted
	// changes, leaving the table name free for later connections.
	auto database = make_uniq<DuckDB>(nullptr);
	{
		Connection scoped_con(*database);
		scoped_con.Query("BEGIN TRANSACTION");
		scoped_con.Query("CREATE TABLE test(i INTEGER);");
	}
	auto fresh_con = make_uniq<Connection>(*database);
	REQUIRE_NO_FAIL(fresh_con->Query("CREATE TABLE test(i INTEGER)"));
}
static void long_running_query(Connection *conn, bool *correct) {
	// Background worker: runs a huge cross product that the main thread is
	// expected to interrupt; the query FAILING is the success condition.
	*correct = true;
	auto query_result =
	    conn->Query("SELECT i1.i FROM integers i1, integers i2, integers i3, integers i4, integers i5, "
	                "integers i6, integers i7, integers i8, integers i9, integers i10,"
	                "integers i11, integers i12, integers i13");
	// the query should fail
	*correct = query_result->HasError();
}
TEST_CASE("Test closing database during long running query", "[api]") {
	// Interrupting a running query and then destroying the database handle
	// must cleanly abort the query and leave the connection usable.
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	// create the database
	REQUIRE_NO_FAIL(conn->Query("CREATE TABLE integers(i INTEGER)"));
	REQUIRE_NO_FAIL(conn->Query("INSERT INTO integers FROM range(10000)"));
	conn->DisableProfiling();
	// perform a long running query in the background (many cross products)
	bool correct = true;
	auto background_thread = thread(long_running_query, conn.get(), &correct);
	// wait a little bit
	std::this_thread::sleep_for(std::chrono::milliseconds(100));
	// destroy the database
	conn->Interrupt();
	db.reset();
	// wait for the thread
	background_thread.join();
	// the worker records whether the interrupted query failed as expected
	REQUIRE(correct);
	// try to use the connection
	REQUIRE_NO_FAIL(conn->Query("SELECT 42"));
}
// Results (both materialized and streaming) must outlive the database and
// connection they came from without crashing on destruction or consumption.
TEST_CASE("Test closing result after database is gone", "[api]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	// check that the connection works
	auto result = conn->Query("SELECT 42");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	// destroy the database
	db.reset();
	conn.reset();
	result.reset();
	// now the streaming result
	db = make_uniq<DuckDB>(nullptr);
	conn = make_uniq<Connection>(*db);
	// check that the connection works
	auto streaming_result = conn->SendQuery("SELECT 42");
	// destroy the database
	db.reset();
	conn.reset();
	// the streaming result can still be consumed after db + connection are gone
	REQUIRE(CHECK_COLUMN(streaming_result, 0, {42}));
	streaming_result.reset();
}
TEST_CASE("Test closing database with open prepared statements", "[api]") {
	auto database = make_uniq<DuckDB>(nullptr);
	auto con = make_uniq<Connection>(*database);
	auto create_stmt = con->Prepare("CREATE TABLE a (i INTEGER)");
	REQUIRE_NO_FAIL(create_stmt->Execute());
	auto insert_stmt = con->Prepare("INSERT INTO a VALUES (42)");
	REQUIRE_NO_FAIL(insert_stmt->Execute());
	// drop the database and the connection while the prepared statements live on
	database.reset();
	con.reset();
	// the prepared statements are still valid
	// the database is only destroyed when the prepared statements are destroyed
	REQUIRE_NO_FAIL(insert_stmt->Execute());
	insert_stmt.reset();
	create_stmt.reset();
}
// Worker for the parallel-usage test: repeatedly runs the same SELECT on a
// shared connection and records in correct[threadnr] whether every iteration
// returned the expected rows.
static void parallel_query(Connection *conn, bool *correct, size_t threadnr) {
	correct[threadnr] = true;
	for (size_t i = 0; i < 100; i++) {
		auto result = conn->Query("SELECT * FROM integers ORDER BY i");
		if (!CHECK_COLUMN(result, 0, {1, 2, 3, Value()})) {
			correct[threadnr] = false;
		}
	}
}
// Regardless of how an in-memory database is specified (null pointer, empty
// string or ":memory:"), the default temporary directory must be ".tmp".
TEST_CASE("Test temp_directory defaults", "[api][.]") {
	const char *db_paths[] = {nullptr, "", ":memory:"};
	for (auto &path : db_paths) {
		auto db = make_uniq<DuckDB>(path);
		// NOTE(review): the connection is created but not otherwise used --
		// presumably to mirror normal usage; the check reads the config directly
		auto conn = make_uniq<Connection>(*db);
		REQUIRE(db->instance->config.options.temporary_directory == ".tmp");
	}
}
TEST_CASE("Test parallel usage of single client", "[api][.]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	REQUIRE_NO_FAIL(conn->Query("CREATE TABLE integers(i INTEGER)"));
	REQUIRE_NO_FAIL(conn->Query("INSERT INTO integers VALUES (1), (2), (3), (NULL)"));
	// hammer one and the same connection from 20 threads at once; every thread
	// must see consistent results (recorded per-thread in ok[])
	constexpr size_t num_threads = 20;
	bool ok[num_threads];
	thread workers[num_threads];
	for (size_t tid = 0; tid < num_threads; tid++) {
		workers[tid] = thread(parallel_query, conn.get(), ok, tid);
	}
	for (size_t tid = 0; tid < num_threads; tid++) {
		workers[tid].join();
		REQUIRE(ok[tid]);
	}
}
// Worker for the connection churn test: opens a fresh connection for every
// iteration, runs a query, and records success in correct[threadnr].
static void parallel_query_with_new_connection(DuckDB *db, bool *correct, size_t threadnr) {
	correct[threadnr] = true;
	for (size_t i = 0; i < 100; i++) {
		auto conn = make_uniq<Connection>(*db);
		auto result = conn->Query("SELECT * FROM integers ORDER BY i");
		if (!CHECK_COLUMN(result, 0, {1, 2, 3, Value()})) {
			correct[threadnr] = false;
		}
	}
}
// Creating and destroying connections concurrently (20 background threads) while
// the main connection keeps querying must not corrupt results or crash.
TEST_CASE("Test making and dropping connections in parallel to a single database", "[api][.]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	REQUIRE_NO_FAIL(conn->Query("CREATE TABLE integers(i INTEGER)"));
	REQUIRE_NO_FAIL(conn->Query("INSERT INTO integers VALUES (1), (2), (3), (NULL)"));
	bool correct[20];
	thread threads[20];
	for (size_t i = 0; i < 20; i++) {
		threads[i] = thread(parallel_query_with_new_connection, db.get(), correct, i);
	}
	// query from the main connection while the workers churn connections
	for (size_t i = 0; i < 100; i++) {
		auto result = conn->Query("SELECT * FROM integers ORDER BY i");
		REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3, Value()}));
	}
	for (size_t i = 0; i < 20; i++) {
		threads[i].join();
		REQUIRE(correct[i]);
	}
	auto result = conn->Query("SELECT * FROM integers ORDER BY i");
	REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3, Value()}));
}
// A query string with multiple statements yields a linked list of results
// chained through QueryResult::next, for both Query() and SendQuery().
TEST_CASE("Test multiple result sets", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	// toggling verification back and forth must be harmless
	con.EnableQueryVerification();
	con.DisableQueryVerification();
	con.EnableQueryVerification();
	con.ForceParallelism();
	result = con.Query("SELECT 42; SELECT 84");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	// advance to the second result in the chain
	result = std::move(result->next);
	REQUIRE(CHECK_COLUMN(result, 0, {84}));
	REQUIRE(!result->next);
	// also with stream api
	result = con.SendQuery("SELECT 42; SELECT 84");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	result = std::move(result->next);
	REQUIRE(CHECK_COLUMN(result, 0, {84}));
	REQUIRE(!result->next);
}
// Error behavior of streaming results: a newer query on the same connection
// invalidates a pending stream, bind errors materialize immediately, and
// runtime errors surface during Fetch()/Materialize().
TEST_CASE("Test streaming API errors", "[api]") {
	duckdb::unique_ptr<QueryResult> result, result2;
	DuckDB db(nullptr);
	Connection con(db);
	// multiple streaming result
	result = con.SendQuery("SELECT 42;");
	result2 = con.SendQuery("SELECT 42;");
	// "result" is invalidated
	REQUIRE_THROWS(CHECK_COLUMN(result, 0, {42}));
	// "result2" we can read
	REQUIRE(CHECK_COLUMN(result2, 0, {42}));
	// streaming result followed by non-streaming result
	result = con.SendQuery("SELECT 42;");
	result2 = con.Query("SELECT 42;");
	// "result" is invalidated
	REQUIRE_THROWS(CHECK_COLUMN(result, 0, {42}));
	// "result2" we can read
	REQUIRE(CHECK_COLUMN(result2, 0, {42}));
	// error in binding: SendQuery falls back to a materialized (error) result
	result = con.SendQuery("SELECT * FROM nonexistanttable");
	REQUIRE(!result->ToString().empty());
	REQUIRE(result->type == QueryResultType::MATERIALIZED_RESULT);
	REQUIRE_FAIL(result);
	// error in stream that only happens after fetching
	// (the cast of 'hello' to INT fails only once that row is produced)
	result = con.SendQuery(
	    "SELECT x::INT FROM (SELECT x::VARCHAR x FROM range(10) tbl(x) UNION ALL SELECT 'hello' x) tbl(x);");
	while (!result->HasError()) {
		auto chunk = result->Fetch();
		if (!chunk || chunk->size() == 0) {
			break;
		}
	}
	REQUIRE(!result->ToString().empty());
	REQUIRE_FAIL(result);
	// same query but call Materialize
	result = con.SendQuery(
	    "SELECT x::INT FROM (SELECT x::VARCHAR x FROM range(10) tbl(x) UNION ALL SELECT 'hello' x) tbl(x);");
	REQUIRE(!result->ToString().empty());
	REQUIRE(result->type == QueryResultType::STREAM_RESULT);
	result = ((StreamQueryResult &)*result).Materialize();
	REQUIRE_FAIL(result);
	// same query but call materialize after fetching
	result = con.SendQuery(
	    "SELECT x::INT FROM (SELECT x::VARCHAR x FROM range(10) tbl(x) UNION ALL SELECT 'hello' x) tbl(x);");
	while (!result->HasError()) {
		auto chunk = result->Fetch();
		if (!chunk || chunk->size() == 0) {
			break;
		}
	}
	REQUIRE(!result->ToString().empty());
	REQUIRE(result->type == QueryResultType::STREAM_RESULT);
	result = ((StreamQueryResult &)*result).Materialize();
	REQUIRE_FAIL(result);
}
// Basic Fetch() semantics: fetching from an errored result throws, empty results
// are fine, and a newer SendQuery overrides any still-pending stream.
TEST_CASE("Test fetch API", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	duckdb::unique_ptr<QueryResult> result;
	// fetch from an error
	result = con.Query("SELECT 'hello'::INT");
	REQUIRE_THROWS(result->Fetch());
	result = con.SendQuery("CREATE TABLE test (a INTEGER);");
	result = con.Query("select a from test where 1 <> 1");
	REQUIRE(CHECK_COLUMN(result, 0, {}));
	result = con.SendQuery("INSERT INTO test VALUES (42)");
	result = con.SendQuery("SELECT a from test");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	auto materialized_result = con.Query("select a from test");
	REQUIRE(CHECK_COLUMN(materialized_result, 0, {42}));
	// override fetch result: only the most recent stream stays readable
	result = con.SendQuery("SELECT a from test");
	result = con.SendQuery("SELECT a from test");
	result = con.SendQuery("SELECT a from test");
	result = con.SendQuery("SELECT a from test");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
}
// A partially-consumed streaming result must still be fetchable and cleanly
// destructible after the connection (and then the database) is dropped.
TEST_CASE("Test fetch API not to completion", "[api]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	// remove connection with active stream result
	auto result = conn->SendQuery("SELECT 42");
	// close the connection
	conn.reset();
	// now try to fetch a chunk, this should not return a nullptr
	auto chunk = result->Fetch();
	REQUIRE(chunk);
	// Only if we would call Fetch again would we Close the QueryResult
	// this is testing that it can get cleaned up without this.
	db.reset();
}
// Robustness of streaming fetches across connection/database teardown, stream
// overriding, and materialization of a still-open stream.
TEST_CASE("Test fetch API robustness", "[api]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	// remove connection with active stream result
	auto result = conn->SendQuery("SELECT 42");
	// close the connection
	conn.reset();
	// now try to fetch a chunk, this should not return a nullptr
	auto chunk = result->Fetch();
	REQUIRE(chunk);
	// now close the entire database
	conn = make_uniq<Connection>(*db);
	result = conn->SendQuery("SELECT 42");
	db.reset();
	// fetch should not fail
	chunk = result->Fetch();
	REQUIRE(chunk);
	// new queries on the connection should not fail either
	REQUIRE_NO_FAIL(conn->SendQuery("SELECT 42"));
	// override fetch result
	db = make_uniq<DuckDB>(nullptr);
	conn = make_uniq<Connection>(*db);
	auto result1 = conn->SendQuery("SELECT 42");
	auto result2 = conn->SendQuery("SELECT 84");
	REQUIRE_NO_FAIL(*result1);
	REQUIRE_NO_FAIL(*result2);
	// result1 should be closed now
	REQUIRE_THROWS(result1->Fetch());
	// result2 should work
	REQUIRE(result2->Fetch());
	// test materialize
	result1 = conn->SendQuery("SELECT 42");
	REQUIRE(result1->type == QueryResultType::STREAM_RESULT);
	auto materialized = ((StreamQueryResult &)*result1).Materialize();
	result2 = conn->SendQuery("SELECT 84");
	// we can read materialized still, even after opening a new result
	REQUIRE(CHECK_COLUMN(materialized, 0, {42}));
	REQUIRE(CHECK_COLUMN(result2, 0, {84}));
}
// Drains a result of "SELECT CAST(a AS INTEGER) FROM test ORDER BY a" over the
// 2500-row test table and checks it: values 1..5, each repeated 500 times.
static void VerifyStreamResult(duckdb::unique_ptr<QueryResult> result) {
	REQUIRE(result->types[0] == LogicalType::INTEGER);
	const size_t total_rows = 500 * 5;
	size_t row_idx = 0;
	int expected = 0;
	for (auto chunk = result->Fetch(); chunk && chunk->size() > 0; chunk = result->Fetch()) {
		auto values = FlatVector::GetData<int>(chunk->data[0]);
		for (size_t i = 0; i < chunk->size(); i++, row_idx++) {
			// the expected value increases by one every 500 rows
			if (row_idx % 500 == 0) {
				expected++;
			}
			REQUIRE(values[i] == expected);
		}
	}
	REQUIRE(row_idx == total_rows);
}
// Streams a multi-chunk table through the Fetch() API, both from a streaming
// and a materialized result, and finally verifies all three result sets of a
// multi-statement stream query.
TEST_CASE("Test fetch API with big results", "[api][.]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	// create table that consists of multiple chunks
	REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE test(a INTEGER)"));
	for (size_t i = 0; i < 500; i++) {
		REQUIRE_NO_FAIL(con.Query("INSERT INTO test VALUES (1); INSERT INTO test VALUES (2); INSERT INTO test VALUES "
		                          "(3); INSERT INTO test VALUES (4); INSERT INTO test VALUES (5);"));
	}
	REQUIRE_NO_FAIL(con.Query("COMMIT"));
	// stream the results using the Fetch() API
	auto result = con.SendQuery("SELECT CAST(a AS INTEGER) FROM test ORDER BY a");
	VerifyStreamResult(std::move(result));
	// we can also stream a materialized result
	auto materialized = con.Query("SELECT CAST(a AS INTEGER) FROM test ORDER BY a");
	VerifyStreamResult(std::move(materialized));
	// return multiple results using the stream API
	result = con.SendQuery("SELECT CAST(a AS INTEGER) FROM test ORDER BY a; SELECT CAST(a AS INTEGER) FROM test ORDER "
	                       "BY a; SELECT CAST(a AS INTEGER) FROM test ORDER BY a;");
	// BUGFIX: the previous loop moved `nextnext` twice (once into
	// VerifyStreamResult, then into `next`), which is a use-after-move: the loop
	// terminated after one iteration and the second result set was never
	// verified. Detach the chain first, then verify each result exactly once.
	auto next = std::move(result->next);
	VerifyStreamResult(std::move(result));
	while (next) {
		auto following = std::move(next->next);
		VerifyStreamResult(std::move(next));
		next = std::move(following);
	}
}
// Regression test: interrupting ExecutePushInternal while a CachingOperatorState
// holds buffered tuples must still flush and return all rows.
TEST_CASE("Test TryFlushCachingOperators interrupted ExecutePushInternal", "[api][.]") {
	DuckDB db;
	Connection con(db);
	con.Query("create table tbl as select 100000 a from range(2) t(a);");
	con.Query("pragma threads=1");
	// Use PhysicalCrossProduct with a very low amount of produced tuples, this caches the result in the
	// CachingOperatorState This gets flushed with FinalExecute in PipelineExecutor::TryFlushCachingOperator
	auto pending_query = con.PendingQuery("select unnest(range(a.a)) from tbl a, tbl b;");
	// Through `unnest(range(a.a.))` this FinalExecute multiple chunks, more than the ExecutionBudget can handle with
	// PROCESS_PARTIAL
	pending_query->ExecuteTask();
	// query the connection as normal after
	auto res = pending_query->Execute();
	REQUIRE(!res->HasError());
	auto &materialized_res = res->Cast<MaterializedQueryResult>();
	// 2 x 2 cross product rows, each unnesting to 100000 values 0..99999
	idx_t initial_tuples = 2 * 2;
	REQUIRE(materialized_res.RowCount() == initial_tuples * 100000);
	for (idx_t i = 0; i < initial_tuples; i++) {
		for (idx_t j = 0; j < 100000; j++) {
			auto value = static_cast<idx_t>(materialized_res.GetValue<int64_t>(0, (i * 100000) + j));
			REQUIRE(value == j);
		}
	}
}
TEST_CASE("Test streaming query during stack unwinding", "[api]") {
	DuckDB db;
	Connection con(db);
	try {
		// leave the streaming result unconsumed while an exception unwinds the
		// stack; its destructor must clean up without problems
		auto streaming = con.SendQuery("SELECT * FROM range(1000000)");
		throw std::runtime_error("hello");
	} catch (...) {
		// swallowed on purpose: the test passes if unwinding does not crash
	}
}
// Prepared statements create catalog dependencies; dropping the preparing client
// while other clients hold open transactions must keep the catalog consistent.
TEST_CASE("Test prepare dependencies with multiple connections", "[catalog]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	auto con = make_uniq<Connection>(db);
	auto con2 = make_uniq<Connection>(db);
	auto con3 = make_uniq<Connection>(db);
	// simple prepare: begin transaction before the second client calls PREPARE
	REQUIRE_NO_FAIL(con->Query("CREATE TABLE integers(i INTEGER)"));
	// open a transaction in con2, this forces the prepared statement to be kept around until this transaction is closed
	REQUIRE_NO_FAIL(con2->Query("BEGIN TRANSACTION"));
	// we prepare a statement in con
	REQUIRE_NO_FAIL(con->Query("PREPARE s1 AS SELECT * FROM integers"));
	// now we drop con while the second client still has an active transaction
	con.reset();
	// now commit the transaction in the second client
	REQUIRE_NO_FAIL(con2->Query("COMMIT"));
	con = make_uniq<Connection>(db);
	// three transactions
	// open a transaction in con2, this forces the prepared statement to be kept around until this transaction is closed
	REQUIRE_NO_FAIL(con2->Query("BEGIN TRANSACTION"));
	// create a prepare, this creates a dependency from s1 -> integers
	REQUIRE_NO_FAIL(con->Query("PREPARE s1 AS SELECT * FROM integers"));
	// drop the client
	con.reset();
	// now begin a transaction in con3
	REQUIRE_NO_FAIL(con3->Query("BEGIN TRANSACTION"));
	// drop the table integers with cascade, this should drop s1 as well
	REQUIRE_NO_FAIL(con3->Query("DROP TABLE integers CASCADE"));
	REQUIRE_NO_FAIL(con2->Query("COMMIT"));
	REQUIRE_NO_FAIL(con3->Query("COMMIT"));
}
// Smoke-tests assorted Connection entry points: ExtractPlan, Append via
// TableInfo, transaction control and the autocommit flag.
TEST_CASE("Test connection API", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	// extract a plan node
	REQUIRE_NOTHROW(con.ExtractPlan("SELECT 42"));
	// can only extract one statement at a time
	REQUIRE_THROWS(con.ExtractPlan("SELECT 42; SELECT 84"));
	// append to a table (an empty chunk is a valid no-op append)
	con.Query("CREATE TABLE integers(i integer);");
	auto table_info = con.TableInfo("integers");
	DataChunk chunk;
	REQUIRE_NOTHROW(con.Append(*table_info, chunk));
	// no transaction active
	REQUIRE_THROWS(con.Commit());
	REQUIRE_THROWS(con.Rollback());
	// cannot start a transaction within a transaction
	REQUIRE_NOTHROW(con.BeginTransaction());
	REQUIRE_THROWS(con.BeginTransaction());
	// the autocommit flag round-trips
	con.SetAutoCommit(false);
	REQUIRE(!con.IsAutoCommit());
	con.SetAutoCommit(true);
	REQUIRE(con.IsAutoCommit());
}
TEST_CASE("Test parser tokenize", "[api]") {
	// tokenizing a query string (including a trailing comment) must not throw
	Parser sql_parser;
	REQUIRE_NOTHROW(sql_parser.Tokenize("SELECT * FROM table WHERE i+1=3 AND j='hello'; --tokenize example query"));
}
TEST_CASE("Test opening an invalid database file", "[api]") {
	duckdb::unique_ptr<DuckDB> db;
	// opening a parquet file as a database must fail with a proper exception
	// whose message mentions DuckDB
	auto try_open = [&db](const char *path) {
		bool opened = false;
		try {
			db = make_uniq<DuckDB>(path);
			opened = true;
		} catch (std::exception &ex) {
			REQUIRE(StringUtil::Contains(ex.what(), "DuckDB"));
		}
		REQUIRE(!opened);
	};
	try_open("duckdb:data/parquet-testing/blob.parquet");
	try_open("duckdb:data/parquet-testing/h2oai/h2oai_group_small.parquet");
}
TEST_CASE("Test large number of connections to a single database", "[api]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto context = make_uniq<ClientContext>((*db).instance);
	auto &connection_manager = ConnectionManager::Get(*context);
	duckdb::vector<duckdb::unique_ptr<Connection>> connections;
	const size_t created_connections = 5000;
	const size_t remaining_connections = 500;
	const size_t to_remove = created_connections - remaining_connections;
	// open a large number of connections; the manager must track each of them
	for (size_t i = 0; i < created_connections; i++) {
		connections.push_back(make_uniq<Connection>(*db));
	}
	REQUIRE(connection_manager.GetConnectionCount() == created_connections);
	// dropping connections must unregister them from the manager
	for (size_t i = 0; i < to_remove; i++) {
		connections.erase(connections.begin());
	}
	REQUIRE(connection_manager.GetConnectionCount() == remaining_connections);
}
// Regression test for issue #4583: an INSERT failing with an invalid BLOB
// literal must yield a regular error result, not terminate the process.
TEST_CASE("Issue #4583: Catch Insert/Update/Delete errors", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	duckdb::unique_ptr<QueryResult> result;
	con.EnableQueryVerification();
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE t0 (c0 int);"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO t0 VALUES (1);"));
	result = con.SendQuery(
	    "INSERT INTO t0(VALUES('\\x15\\x00\\x00\\x00\\x00@\\x01\\x0A\\x27:!\\x0A\\x00\\x00x12e\"\\x00'::BLOB));");
	//! Should not terminate the process
	REQUIRE_FAIL(result);
	// the table is unchanged after the failed insert
	result = con.SendQuery("SELECT MIN(c0) FROM t0;");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
}
// Regression test for issue #14130: calling ToString on an extracted INSERT
// statement must not corrupt state needed by a later Prepare of the same statement.
TEST_CASE("Issue #14130: InsertStatement::ToString causes InternalException later on", "[api][.]") {
	auto db = DuckDB(nullptr);
	auto conn = Connection(db);
	conn.Query("CREATE TABLE foo(a int, b varchar, c int)");
	auto query = "INSERT INTO Foo values (1, 'qwerty', 42)";
	auto stmts = conn.ExtractStatements(query);
	auto &stmt = stmts[0];
	// Issue was here: calling ToString destroyed the 'alias' of the ValuesList
	stmt->ToString();
	// Which caused an 'InternalException: expected non-empty binding_name' here
	auto prepared_stmt = conn.Prepare(std::move(stmt));
	REQUIRE(!prepared_stmt->HasError());
	REQUIRE_NO_FAIL(prepared_stmt->Execute());
}
// Regression test for issue #6284: streaming (pull-based) execution of a plan
// containing CachingPhysicalOperators must return the full, correct row count.
TEST_CASE("Issue #6284: CachingPhysicalOperator in pull causes issues", "[api][.]") {
	DBConfig config;
	config.options.maximum_threads = 8;
	DuckDB db(nullptr, &config);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("select setseed(0.1); CREATE TABLE T0 AS SELECT DISTINCT (RANDOM()*9999999)::BIGINT "
	                          "record_nb, 0.0 x_0, 1.0 y_0 FROM range(1000000) tbl"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE T1 AS SELECT record_nb, 0.0 x_1, 1.0 y_1 FROM T0"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE T2 AS SELECT record_nb, 0.0 x_2, 1.0 y_2 FROM T0"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE T3 AS SELECT record_nb, 0.0 x_3, 1.0 y_3 FROM T0"));
	auto result = con.SendQuery(R"(
		SELECT T0.record_nb,
			T1.x_1 x_1,
			T1.y_1 y_1,
			T2.x_2 x_2,
			T2.y_2 y_2,
			T3.x_3 x_3,
			T3.y_3 y_3
		FROM T0
		INNER JOIN T1 on T0.record_nb = T1.record_nb
		INNER JOIN T2 on T0.record_nb = T2.record_nb
		INNER JOIN T3 on T0.record_nb = T3.record_nb
		)");
	// drain the stream and count all rows
	idx_t count = 0;
	while (true) {
		auto chunk = result->Fetch();
		if (!chunk) {
			break;
		}
		if (chunk->size() == 0) {
			break;
		}
		count += chunk->size();
	}
	// expected distinct-row count for seed 0.1 (see setseed above)
	REQUIRE(951382 == count);
}
TEST_CASE("Fuzzer 50 - Alter table heap-use-after-free", "[api]") {
	// FIXME: not fixed yet
	// the early return deliberately disables the body below until the
	// underlying heap-use-after-free is resolved
	return;
	DuckDB db(nullptr);
	Connection con(db);
	con.SendQuery("CREATE TABLE t0(c0 INT);");
	con.SendQuery("ALTER TABLE t0 ADD c1 TIMESTAMP_SEC;");
}
// With enable_external_access=false, attaching additional database files must
// be rejected.
TEST_CASE("Test loading database with enable_external_access set to false", "[api]") {
	DBConfig config;
	config.options.enable_external_access = false;
	auto path = TestCreatePath("external_access_test");
	DuckDB db(path, &config);
	Connection con(db);
	REQUIRE_FAIL(con.Query("ATTACH 'mydb.db' AS external_access_test"));
}
// RETURNING clauses must work through all C++ entry points: plain Query,
// Query with bound parameters, and prepared statements.
TEST_CASE("Test insert returning in CPP API", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.Query("CREATE TABLE test(val VARCHAR);");
	con.Query("INSERT INTO test(val) VALUES ('query_1')");
	auto res = con.Query("INSERT INTO test(val) VALUES ('query_2') returning *");
	REQUIRE(CHECK_COLUMN(res, 0, {"query_2"}));
	con.Query("INSERT INTO test(val) VALUES (?);", "query_arg_1");
	auto returning_args = con.Query("INSERT INTO test(val) VALUES (?) RETURNING *;", "query_arg_2");
	REQUIRE(CHECK_COLUMN(returning_args, 0, {"query_arg_2"}));
	con.Prepare("INSERT INTO test(val) VALUES (?);")->Execute("prepared_arg_1");
	auto prepared_returning_args =
	    con.Prepare("INSERT INTO test(val) VALUES (?) returning *;")->Execute("prepared_arg_2");
	REQUIRE(CHECK_COLUMN(prepared_returning_args, 0, {"prepared_arg_2"}));
	// make sure all inserts actually inserted
	auto result = con.Query("SELECT * from test;");
	REQUIRE(CHECK_COLUMN(result, 0,
	                     {"query_1", "query_2", "query_arg_1", "query_arg_2", "prepared_arg_1", "prepared_arg_2"}));
}
// The plan extracted for EXECUTE must keep its result types after the
// optimizer has run over it.
TEST_CASE("Test a logical execute still has types after an optimization pass", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.Query("PREPARE test AS SELECT 42::INTEGER;");
	const auto query_plan = con.ExtractPlan("EXECUTE test");
	REQUIRE((query_plan->type == LogicalOperatorType::LOGICAL_EXECUTE));
	REQUIRE((query_plan->types.size() == 1));
	REQUIRE((query_plan->types[0].id() == LogicalTypeId::INTEGER));
}
// ToString of INSERT/UPDATE/DELETE statements must preserve aliases on
// RETURNING-clause expressions.
TEST_CASE("Test SqlStatement::ToString for UPDATE, INSERT, DELETE statements with alias of RETURNING clause", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	std::string sql;
	con.Query("CREATE TABLE test(id INT);");
	// INSERT round-trips with normalized column-list/VALUES formatting
	sql = "INSERT INTO test (id) VALUES (1) RETURNING id AS inserted";
	auto stmts = con.ExtractStatements(sql);
	REQUIRE(stmts[0]->ToString() == "INSERT INTO test (id ) (VALUES (1)) RETURNING id AS inserted");
	// UPDATE and DELETE round-trip verbatim
	sql = "UPDATE test SET id = 1 RETURNING id AS updated";
	stmts = con.ExtractStatements(sql);
	REQUIRE(stmts[0]->ToString() == sql);
	sql = "DELETE FROM test WHERE (id = 1) RETURNING id AS deleted";
	stmts = con.ExtractStatements(sql);
	REQUIRE(stmts[0]->ToString() == sql);
}

View File

@@ -0,0 +1,290 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/main/appender.hpp"
#include "duckdb/common/types/hugeint.hpp"
using namespace duckdb;
using namespace std;
template <class SRC>
void TestAppendingSingleDecimalValue(SRC value, Value expected_result, uint8_t width, uint8_t scale) {
auto db = make_uniq<DuckDB>(nullptr);
auto conn = make_uniq<Connection>(*db);
duckdb::unique_ptr<Appender> appender;
duckdb::unique_ptr<QueryResult> result;
REQUIRE_NO_FAIL(conn->Query(StringUtil::Format("CREATE TABLE decimals(i DECIMAL(%d,%d))", width, scale)));
appender = make_uniq<Appender>(*conn, "decimals");
appender->BeginRow();
appender->Append<SRC>(value);
appender->EndRow();
appender->Flush();
result = conn->Query("SELECT * FROM decimals");
REQUIRE(CHECK_COLUMN(result, 0, {expected_result}));
}
// Appends decimals from various source types (integers, strings, floats) and
// widths, including cases where precision loss is expected.
TEST_CASE("Test appending to a decimal column", "[api]") {
	TestAppendingSingleDecimalValue<int32_t>(1, Value::DECIMAL(1000, 4, 3), 4, 3);
	TestAppendingSingleDecimalValue<int16_t>(-9999, Value::DECIMAL(-9999, 4, 0), 4, 0);
	TestAppendingSingleDecimalValue<int16_t>(9999, Value::DECIMAL(9999, 4, 0), 4, 0);
	TestAppendingSingleDecimalValue<int32_t>(99999999, Value::DECIMAL(99999999, 8, 0), 8, 0);
	TestAppendingSingleDecimalValue<const char *>("1.234", Value::DECIMAL(1234, 4, 3), 4, 3);
	TestAppendingSingleDecimalValue<const char *>("123.4", Value::DECIMAL(1234, 4, 1), 4, 1);
	// wide decimals: the reference value needs a hugeint
	hugeint_t hugeint_value;
	bool result;
	result = Hugeint::TryConvert<const char *>("3245234123123", hugeint_value);
	REQUIRE(result);
	TestAppendingSingleDecimalValue<const char *>("3245234.123123", Value::DECIMAL(hugeint_value, 19, 6), 19, 6);
	int64_t bigint_reference_value = 3245234123123;
	TestAppendingSingleDecimalValue<const char *>("3245234.123123", Value::DECIMAL(bigint_reference_value, 13, 6), 13,
	                                              6);
	// Precision loss
	TestAppendingSingleDecimalValue<float>(12.3124324f, Value::DECIMAL(123124320, 9, 7), 9, 7);
	// Precision loss
	result = Hugeint::TryConvert<const char *>("12345234234312432287744000", hugeint_value);
	REQUIRE(result);
	TestAppendingSingleDecimalValue<double>(12345234234.31243244234324, Value::DECIMAL(hugeint_value, 26, 15), 26, 15);
}
// An Appender whose connection has been destroyed must throw on Flush instead
// of crashing.
TEST_CASE("Test using appender after connection is gone", "[api]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	duckdb::unique_ptr<Appender> appender;
	duckdb::unique_ptr<QueryResult> result;
	// create an appender for a non-existing table fails
	REQUIRE_THROWS(make_uniq<Appender>(*conn, "integers"));
	// now create the table and create the appender
	REQUIRE_NO_FAIL(conn->Query("CREATE TABLE integers(i INTEGER)"));
	appender = make_uniq<Appender>(*conn, "integers");
	// we can use the appender
	appender->BeginRow();
	appender->Append<int32_t>(1);
	appender->EndRow();
	appender->Flush();
	result = conn->Query("SELECT * FROM integers");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	// removing the connection invalidates the appender
	conn.reset();
	// buffering rows still works, only the flush fails
	appender->BeginRow();
	appender->Append<int32_t>(2);
	appender->EndRow();
	// the connection is gone
	REQUIRE_THROWS(appender->Flush());
}
// Exercises all six destruction orders of database / connection / appender;
// none of them may crash or leak.
TEST_CASE("Test appender and connection destruction order", "[api]") {
	for (idx_t i = 0; i < 6; i++) {
		auto db = make_uniq<DuckDB>(nullptr);
		auto con = make_uniq<Connection>(*db);
		REQUIRE_NO_FAIL(con->Query("CREATE TABLE integers(i INTEGER)"));
		auto appender = make_uniq<Appender>(*con, "integers");
		switch (i) {
		case 0:
			// db - con - appender
			db.reset();
			con.reset();
			appender.reset();
			break;
		case 1:
			// db - appender - con
			db.reset();
			appender.reset();
			con.reset();
			break;
		case 2:
			// con - db - appender
			con.reset();
			db.reset();
			appender.reset();
			break;
		case 3:
			// con - appender - db
			con.reset();
			appender.reset();
			db.reset();
			break;
		case 4:
			// appender - con - db
			appender.reset();
			con.reset();
			db.reset();
			break;
		default:
			// appender - db - con
			appender.reset();
			db.reset();
			con.reset();
			break;
		}
	}
}
TEST_CASE("Test using appender after table is dropped", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	// create the table
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER)"));
	Appender appender(con, "integers");
	// the first row goes through while the table still exists
	appender.AppendRow(1);
	appender.Flush();
	// dropping the table invalidates the appender: buffered rows can no longer
	// be flushed
	REQUIRE_NO_FAIL(con.Query("DROP TABLE integers"));
	appender.AppendRow(1);
	REQUIRE_THROWS(appender.Flush());
}
// Replacing the target table with one of a different schema must make a
// pre-existing appender fail on Flush.
TEST_CASE("Test using appender after table is altered", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	// create the table
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER)"));
	// now create the appender
	Appender appender(con, "integers");
	// appending works initially
	appender.BeginRow();
	appender.Append<int32_t>(1);
	appender.EndRow();
	// now create a new table with the same name but with different types
	REQUIRE_NO_FAIL(con.Query("DROP TABLE integers"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i VARCHAR, j INTEGER)"));
	// now appending fails
	appender.BeginRow();
	appender.Append<int32_t>(1);
	appender.EndRow();
	REQUIRE_THROWS(appender.Flush());
}
// Appender flushes participate in the surrounding transaction: a ROLLBACK
// undoes them, and the appender remains usable in auto-commit mode afterwards.
TEST_CASE("Test appenders and transactions", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	duckdb::unique_ptr<QueryResult> result;
	// create the table
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER)"));
	// now create the appender
	Appender appender(con, "integers");
	// rollback an append
	REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
	appender.BeginRow();
	appender.Append<int32_t>(1);
	appender.EndRow();
	appender.Flush();
	result = con.Query("SELECT * FROM integers");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	REQUIRE_NO_FAIL(con.Query("ROLLBACK"));
	// the rolled-back append is gone
	result = con.Query("SELECT * FROM integers");
	REQUIRE(CHECK_COLUMN(result, 0, {}));
	// we can still use the appender in auto commit mode
	appender.BeginRow();
	appender.Append<int32_t>(1);
	appender.EndRow();
	appender.Flush();
	result = con.Query("SELECT * FROM integers");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
}
// Two appenders on different tables can flush within the same transaction, and
// a ROLLBACK undoes both.
TEST_CASE("Test using multiple appenders", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	duckdb::unique_ptr<QueryResult> result;
	// create the table
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE t1(i INTEGER)"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE t2(i VARCHAR, j DATE)"));
	// now create the appender
	Appender a1(con, "t1");
	Appender a2(con, "t2");
	// begin appending from both
	REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
	a1.BeginRow();
	a1.Append<int32_t>(1);
	a1.EndRow();
	a1.Flush();
	a2.BeginRow();
	a2.Append<const char *>("hello");
	a2.Append<Value>(Value::DATE(1992, 1, 1));
	a2.EndRow();
	a2.Flush();
	// both flushes are visible inside the transaction
	result = con.Query("SELECT * FROM t1");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = con.Query("SELECT * FROM t2");
	REQUIRE(CHECK_COLUMN(result, 0, {"hello"}));
	REQUIRE(CHECK_COLUMN(result, 1, {Value::DATE(1992, 1, 1)}));
	// ... and rolled back together
	REQUIRE_NO_FAIL(con.Query("ROLLBACK"));
	result = con.Query("SELECT * FROM t1");
	REQUIRE(CHECK_COLUMN(result, 0, {}));
}
TEST_CASE("Test usage of appender interleaved with connection usage", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	// create the table
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE t1(i INTEGER)"));
	Appender appender(con, "t1");
	// append + flush, then immediately query through the same connection:
	// the flushed row must be visible
	appender.AppendRow(1);
	appender.Flush();
	auto after_first = con.Query("SELECT * FROM t1");
	REQUIRE(CHECK_COLUMN(after_first, 0, {1}));
	// a second round of appending still works after the query
	appender.AppendRow(2);
	appender.Flush();
	auto after_second = con.Query("SELECT * FROM t1");
	REQUIRE(CHECK_COLUMN(after_second, 0, {1, 2}));
}
// An Appender::Close (explicit or via destructor during stack unwinding) that
// fails because the table changed must throw / be swallowed safely, not crash.
TEST_CASE("Test appender during stack unwinding", "[api]") {
	// test appender exception
	DuckDB db;
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER)"));
	{
		Appender appender(con, "integers");
		appender.AppendRow(1);
		// closing the appender throws an exception, because we changed the table's columns
		REQUIRE_NO_FAIL(con.Query("ALTER TABLE integers ADD COLUMN j VARCHAR"));
		REQUIRE_THROWS(appender.Close());
	}
	REQUIRE_NO_FAIL(con.Query("DROP TABLE integers"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER)"));
	try {
		// now we do the same, but we trigger the destructor of the appender during stack unwinding
		Appender appender(con, "integers");
		appender.AppendRow(1);
		REQUIRE_NO_FAIL(con.Query("ALTER TABLE integers ADD COLUMN j VARCHAR"));
		{ throw std::runtime_error("Hello"); }
	} catch (...) {
	}
}

View File

@@ -0,0 +1,83 @@
#include "test_helpers.hpp"
#include "duckdb/common/types/bignum.hpp"
#include "catch.hpp"
using namespace duckdb;
using namespace std;
// Checks the serialized bignum layout produced by Bignum::FromByteArray:
// a 3-byte header followed by the digit bytes. As asserted below, positive
// values use header bytes {0x80, 0x00, size} and the digits verbatim; negative
// values use the ones-complement form {0x7f, 0xff, ~size} with complemented
// digit bytes.
TEST_CASE("Test Bignum::FromByteArray", "[api]") {
	{ // 0
		uint8_t data[] {0};
		idx_t size = 1;
		bool is_negative = false;
		auto str = Bignum::FromByteArray(data, size, is_negative);
		REQUIRE(str.size() == size + 3);
		REQUIRE(uint8_t(str[0]) == 0x80);
		REQUIRE(uint8_t(str[1]) == 0x00);
		REQUIRE(uint8_t(str[2]) == uint8_t(size));
		for (idx_t i = 0; i < size; i++) {
			REQUIRE(uint8_t(str[3 + i]) == data[i]);
		}
	}
	{ // -1
		uint8_t data[] {1};
		idx_t size = 1;
		bool is_negative = true;
		auto str = Bignum::FromByteArray(data, size, is_negative);
		REQUIRE(str.size() == size + 3);
		REQUIRE(uint8_t(str[0]) == 0x7f);
		REQUIRE(uint8_t(str[1]) == 0xff);
		REQUIRE(uint8_t(str[2]) == uint8_t(~size));
		for (idx_t i = 0; i < size; i++) {
			REQUIRE(uint8_t(str[3 + i]) == uint8_t(~data[i]));
		}
	}
	{ // max bignum == max double == 2^1023 * (1 + (1 - 2^-52)) == 2^1024 - 2^971 ==
	  // 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368
		uint8_t data[] {
		    // little endian
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		};
		idx_t size = 128;
		bool is_negative = false;
		auto str = Bignum::FromByteArray(data, size, is_negative);
		REQUIRE(str.size() == size + 3);
		REQUIRE(uint8_t(str[0]) == 0x80);
		REQUIRE(uint8_t(str[1]) == 0x00);
		REQUIRE(uint8_t(str[2]) == uint8_t(size));
		for (idx_t i = 0; i < size; i++) {
			REQUIRE(uint8_t(str[3 + i]) == data[i]);
		}
	}
	{ // min bignum == min double == -(2^1023 * (1 + (1 - 2^-52))) == -(2^1024 - 2^971) ==
	  // -179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368
		uint8_t data[] {
		    // little endian (absolute value)
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		};
		idx_t size = 128;
		bool is_negative = true;
		auto str = Bignum::FromByteArray(data, size, is_negative);
		REQUIRE(str.size() == size + 3);
		REQUIRE(uint8_t(str[0]) == 0x7f);
		REQUIRE(uint8_t(str[1]) == 0xff);
		REQUIRE(uint8_t(str[2]) == uint8_t(~size));
		for (idx_t i = 0; i < size; i++) {
			REQUIRE(uint8_t(str[3 + i]) == uint8_t(~data[i]));
		}
	}
}

117
external/duckdb/test/api/test_config.cpp vendored Normal file
View File

@@ -0,0 +1,117 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include <set>
#include <map>
using namespace duckdb;
using namespace std;
TEST_CASE("Test DB config configuration", "[api]") {
	DBConfig config;

	// values that should be accepted for a handful of well-known options
	map<string, duckdb::vector<string>> valid_values;
	valid_values["access_mode"] = {"automatic", "read_only", "read_write"};
	valid_values["default_order"] = {"asc", "desc"};
	valid_values["default_null_order"] = {"nulls_first", "nulls_last"};
	valid_values["enable_external_access"] = {"true", "false"};
	valid_values["max_memory"] = {"-1", "16GB"};
	valid_values["threads"] = {"1", "4"};

	// looking up an option that does not exist yields a null pointer
	REQUIRE(config.GetOptionByName("unknownoption") == nullptr);

	// every advertised option must be retrievable by name
	auto options = config.GetOptions();
	for (auto &option : options) {
		auto looked_up = config.GetOptionByName(option.name);
		REQUIRE(looked_up);

		auto entry = valid_values.find(option.name);
		if (entry == valid_values.end()) {
			continue;
		}
		// all listed values are accepted ...
		for (auto &accepted : entry->second) {
			Value val(accepted);
			REQUIRE_NOTHROW(config.SetOption(option, val));
		}
		// ... while a bogus value must be rejected
		Value invalid_val("___this_is_probably_invalid");
		REQUIRE_THROWS(config.SetOption(option, invalid_val));
	}
}
TEST_CASE("Test allowed options", "[api]") {
	// Each SECTION picks one option that may not be changed/set before the
	// database has been started; constructing a DBConfig from it is expected
	// to be rejected with an InvalidInput error.
	case_insensitive_map_t<Value> config_dict;
	string option;
	SECTION("allowed_directories") {
		config_dict.emplace("allowed_directories", Value::LIST({Value("test")}));
		option = "allowed_directories";
	}
	SECTION("allowed_paths") {
		config_dict.emplace("allowed_paths", Value::LIST({Value("test")}));
		option = "allowed_paths";
	}
	SECTION("enable_logging") {
		config_dict.emplace("enable_logging", Value::BOOLEAN(false));
		option = "enable_logging";
	}
	SECTION("disabled_filesystems") {
		config_dict.emplace("disabled_filesystems", Value::BOOLEAN(false));
		option = "disabled_filesystems";
	}
	SECTION("logging_mode") {
		config_dict.emplace("logging_mode", Value::BOOLEAN(false));
		option = "logging_mode";
	}
	SECTION("logging_storage") {
		config_dict.emplace("logging_storage", Value::BOOLEAN(false));
		option = "logging_storage";
	}
	SECTION("logging_level") {
		config_dict.emplace("logging_level", Value::BOOLEAN(false));
		option = "logging_level";
	}
	SECTION("enabled_log_types") {
		config_dict.emplace("enabled_log_types", Value::BOOLEAN(false));
		option = "enabled_log_types";
	}
	SECTION("disabled_log_types") {
		config_dict.emplace("disabled_log_types", Value::BOOLEAN(false));
		option = "disabled_log_types";
	}
	// NOTE(review): if the constructor does NOT throw, this test passes
	// vacuously - consider recording whether an exception occurred and
	// asserting it afterwards. TODO confirm all options above must throw.
	try {
		DBConfig config(config_dict, false);
	} catch (std::exception &ex) {
		ErrorData error_data(ex);
		// the rejection must be an invalid-input error with the canonical message
		REQUIRE(error_data.Type() == ExceptionType::INVALID_INPUT);
		REQUIRE(error_data.RawMessage() ==
		        StringUtil::Format("Cannot change/set %s before the database is started", option));
	}
}
TEST_CASE("Test user_agent", "[api]") {
	{
		// without any configuration the reported API defaults to "cpp"
		DuckDB db(nullptr);
		Connection con(db);
		auto result = con.Query("PRAGMA user_agent");
		REQUIRE_THAT(result->GetValue(0, 0).ToString(), Catch::Matchers::Matches("duckdb/.*(.*) cpp"));
	}
	{
		// when duckdb_api is set multiple times, the last value wins
		DBConfig config;
		config.SetOptionByName("duckdb_api", "capi");
		config.SetOptionByName("duckdb_api", "go");
		DuckDB db("", &config);
		Connection con(db);
		auto result = con.Query("PRAGMA user_agent");
		REQUIRE_THAT(result->GetValue(0, 0).ToString(), Catch::Matchers::Matches("duckdb/.*(.*) go"));
	}
}

View File

@@ -0,0 +1,56 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/main/appender.hpp"
#include "duckdb/common/atomic.hpp"
using namespace duckdb;
using namespace std;
// Private allocator state: points at an external atomic byte counter that the
// allocation callbacks below keep up to date.
struct MyAllocateData : public PrivateAllocatorData {
	explicit MyAllocateData(atomic<idx_t> *counter) : memory_counter(counter) {
	}

	// total number of currently allocated bytes (owned by the test)
	atomic<idx_t> *memory_counter;
};
// Allocation callback: adds the allocated size to the shared byte counter and
// returns a freshly malloc'ed block.
data_ptr_t my_allocate_function(PrivateAllocatorData *private_data, idx_t size) {
	// named cast instead of the old C-style cast; the allocator always hands
	// us the MyAllocateData instance we registered
	auto my_allocate_data = static_cast<MyAllocateData *>(private_data);
	*my_allocate_data->memory_counter += size;
	return data_ptr_cast(malloc(size));
}
// Free callback: subtracts the freed size from the shared byte counter and
// releases the block.
void my_free_function(PrivateAllocatorData *private_data, data_ptr_t pointer, idx_t size) {
	// named cast instead of the old C-style cast
	auto my_allocate_data = static_cast<MyAllocateData *>(private_data);
	*my_allocate_data->memory_counter -= size;
	free(pointer);
}
// Reallocation callback: replaces the old size with the new size in the shared
// byte counter, then delegates to realloc.
data_ptr_t my_reallocate_function(PrivateAllocatorData *private_data, data_ptr_t pointer, idx_t old_size, idx_t size) {
	// named cast instead of the old C-style cast
	auto my_allocate_data = static_cast<MyAllocateData *>(private_data);
	*my_allocate_data->memory_counter -= old_size;
	*my_allocate_data->memory_counter += size;
	return data_ptr_cast(realloc(pointer, size));
}
TEST_CASE("Test using a custom allocator", "[api][.]") {
	// byte counter shared with the allocator callbacks above
	atomic<idx_t> memory_counter {0};
	REQUIRE(memory_counter.load() == 0);

	// install the counting allocator into the database
	DBConfig config;
	config.allocator = make_uniq<Allocator>(my_allocate_function, my_free_function, my_reallocate_function,
	                                        make_uniq<MyAllocateData>(&memory_counter));
	DuckDB db(nullptr, &config);
	Connection con(db);

	REQUIRE_NO_FAIL(con.Query("CREATE TABLE tbl AS SELECT * FROM range(1000000)"));
	// creating the table must have gone through our allocator
	REQUIRE(memory_counter.load() > 0);

	auto usage_with_table = memory_counter.load();
	REQUIRE_NO_FAIL(con.Query("DROP TABLE tbl"));
	// dropping the table releases memory, so usage must have decreased
	REQUIRE(memory_counter.load() < usage_with_table);
}

48
external/duckdb/test/api/test_dbdir.cpp vendored Normal file
View File

@@ -0,0 +1,48 @@
#include "catch.hpp"
#include "duckdb/common/file_system.hpp"
#include "duckdb/storage/storage_manager.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
// Open an in-memory database via the given path argument (":memory:" or "")
// and verify that only the temporary spill directory - never a database
// directory - is created on disk.
static void test_in_memory_initialization(string dbdir) {
	duckdb::unique_ptr<FileSystem> fs = FileSystem::CreateLocal();
	duckdb::unique_ptr<DuckDB> db;
	duckdb::unique_ptr<Connection> con;
	string in_memory_tmp = ".tmp";

	// start from a clean slate
	DeleteDatabase(dbdir);
	fs->RemoveDirectory(in_memory_tmp);

	// opening with this argument yields an in-memory database
	REQUIRE_NOTHROW(db = make_uniq<DuckDB>(dbdir));
	REQUIRE_NOTHROW(con = make_uniq<Connection>(*db));

	// create a table larger than the memory limit, forcing data to spill into
	// the temporary directory
	REQUIRE_NO_FAIL(con->Query("PRAGMA memory_limit='2MB'"));
	REQUIRE_NO_FAIL(con->Query("CREATE TABLE integers AS SELECT * FROM range(1000000)"));

	// .tmp must be created for the spilled data ...
	REQUIRE(fs->DirectoryExists(in_memory_tmp));
	// ... but no database directory may appear in in-memory mode
	REQUIRE(!fs->DirectoryExists(dbdir));

	// tear down the connection and database, then remove everything we created
	con.reset();
	db.reset();
	DeleteDatabase(dbdir);
	fs->RemoveDirectory(in_memory_tmp);
}
TEST_CASE("Test in-memory database initialization argument \":memory:\"", "[api][.]") {
	// the explicit ":memory:" path must open an in-memory database
	test_in_memory_initialization(":memory:");
}
TEST_CASE("Test in-memory database initialization argument \"\"", "[api][.]") {
	// an empty path must behave the same as ":memory:"
	test_in_memory_initialization("");
}

View File

@@ -0,0 +1,31 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include <iostream>
#include <map>
#include <set>
using namespace duckdb;
using namespace std;
TEST_CASE("Test autoload of extension settings", "[api]") {
	DBConfig config;
	// "timezone" is provided by an extension, so at this point it is still an
	// unrecognized option
	config.SetOptionByName("timezone", "America/Los_Angeles");
	config.options.allow_unsigned_extensions = true;
	config.options.autoload_known_extensions = true;
	// this test only runs when a local extension repository is available
	auto env_var = std::getenv("LOCAL_EXTENSION_REPO");
	if (!env_var) {
		return;
	}
	config.options.autoinstall_extension_repo = std::string(env_var);
	REQUIRE(config.options.unrecognized_options.count("timezone"));
	// Create a connection; starting the database autoloads the extension and
	// applies the pending setting
	duckdb::unique_ptr<DuckDB> db;
	REQUIRE_NOTHROW(db = make_uniq<DuckDB>(nullptr, &config));
	Connection con(*db);
	auto res = con.Query("select current_setting('timezone')");
	REQUIRE(CHECK_COLUMN(res, 0, {Value("America/Los_Angeles")}));
}

View File

@@ -0,0 +1,271 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/parser/parser.hpp"
#include "duckdb/planner/logical_operator.hpp"
#include <chrono>
#include <thread>
using namespace duckdb;
using namespace std;
TEST_CASE("Test GetTableNames", "[api]") {
	// Verifies Connection::GetTableNames, which extracts the set of base table
	// names referenced by a query (views expanded, aliases and CTEs resolved).
	DuckDB db(nullptr);
	Connection con(db);

	unordered_set<string> table_names;

	// standard
	table_names = con.GetTableNames("SELECT * FROM my_table");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("my_table"));

	// fetch a specific column
	table_names = con.GetTableNames("SELECT col_a FROM my_table");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("my_table"));

	// multiple tables
	table_names = con.GetTableNames("SELECT * FROM my_table1, my_table2, my_table3");
	REQUIRE(table_names.size() == 3);
	REQUIRE(table_names.count("my_table1"));
	REQUIRE(table_names.count("my_table2"));
	REQUIRE(table_names.count("my_table3"));

	// same table is mentioned multiple times
	table_names = con.GetTableNames("SELECT col_a FROM my_table, my_table m2, my_table m3");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("my_table"));

	// cte
	table_names = con.GetTableNames("WITH cte AS (SELECT * FROM my_table) SELECT * FROM cte");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("my_table"));

	// subqueries
	table_names = con.GetTableNames("SELECT * FROM (SELECT * FROM (SELECT * FROM my_table) bla) bla3");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("my_table"));

	// join
	table_names = con.GetTableNames("SELECT col_a FROM my_table JOIN my_table2 ON (my_table.col_b=my_table2.col_d)");
	REQUIRE(table_names.size() == 2);
	REQUIRE(table_names.count("my_table"));
	REQUIRE(table_names.count("my_table2"));

	// scalar subquery
	table_names = con.GetTableNames("SELECT (SELECT COUNT(*) FROM my_table)");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("my_table"));

	// set operations
	table_names =
	    con.GetTableNames("SELECT * FROM my_table UNION ALL SELECT * FROM my_table2 INTERSECT SELECT * FROM my_table3");
	REQUIRE(table_names.size() == 3);
	REQUIRE(table_names.count("my_table"));
	REQUIRE(table_names.count("my_table2"));
	REQUIRE(table_names.count("my_table3"));

	// window functions
	table_names = con.GetTableNames("SELECT row_number() OVER (ORDER BY (SELECT i+j FROM my_table2)) FROM my_table");
	REQUIRE(table_names.size() == 2);
	REQUIRE(table_names.count("my_table"));
	REQUIRE(table_names.count("my_table2"));

	// views are expanded
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE my_table_for_view(i INT)"));
	REQUIRE_NO_FAIL(con.Query("CREATE VIEW v1 AS SELECT * FROM my_table_for_view"));
	table_names = con.GetTableNames("SELECT col_a FROM v1");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("my_table_for_view"));

	// * exclude
	table_names = con.GetTableNames("select * exclude (x) from df");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("df"));

	// * replace
	table_names = con.GetTableNames("select * replace (42 as x) from df");
	REQUIRE(table_names.size() == 1);
	REQUIRE(table_names.count("df"));

	// qualified with schema.table and catalog.schema.table
	// (second parameter requests fully qualified names)
	string query = "SELECT * FROM schema1.table1, catalog2.schema2.table2";
	table_names = con.GetTableNames(query, true);
	REQUIRE(table_names.size() == 2);
	REQUIRE(table_names.count("schema1.table1"));
	REQUIRE(table_names.count("catalog2.schema2.table2"));

	// qualified and escaped
	query = "SELECT * FROM schema1.table1, catalog3.\"schema.2\".\"table.2\"";
	table_names = con.GetTableNames(query, true);
	REQUIRE(table_names.size() == 2);
	REQUIRE(table_names.count("schema1.table1"));
	REQUIRE(table_names.count("catalog3.\"schema.2\".\"table.2\""));

	// With alias
	query = "SELECT * FROM schema1.table1 alias1, catalog3.\"schema.2\".\"table.2\" alias2";
	table_names = con.GetTableNames(query, true); // qualified
	REQUIRE(table_names.size() == 2);
	REQUIRE(table_names.count("schema1.table1 AS alias1"));
	REQUIRE(table_names.count("catalog3.\"schema.2\".\"table.2\" AS alias2"));
	table_names = con.GetTableNames(query); // default
	REQUIRE(table_names.size() == 2);
	REQUIRE(table_names.count("table1"));
	REQUIRE(table_names.count("table.2"));

	// generate_series: a table function is not a base table, so no names
	table_names = con.GetTableNames("with series_generator as (select * from generate_series(TIMESTAMP '2001-04-10', "
	                                "TIMESTAMP '2001-04-11', INTERVAL 1 HOUR)) select * from series_generator");
	REQUIRE(table_names.empty());

	if (!db.ExtensionIsLoaded("tpch")) {
		return;
	}
	// TPCH
	// run all TPC-H queries twice
	// one WITHOUT the tables in the catalog
	// once WITH the tables in the catalog
	// (dbgen at the end of the first iteration creates the tables)
	for (idx_t i = 0; i < 2; i++) {
		table_names = con.GetTableNames("PRAGMA tpch(1)");
		REQUIRE(table_names.size() == 1);
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(2)");
		REQUIRE(table_names.size() == 5);
		REQUIRE(table_names.count("part"));
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("partsupp"));
		REQUIRE(table_names.count("nation"));
		REQUIRE(table_names.count("region"));

		table_names = con.GetTableNames("PRAGMA tpch(3)");
		REQUIRE(table_names.size() == 3);
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(4)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(5)");
		REQUIRE(table_names.size() == 6);
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("nation"));
		REQUIRE(table_names.count("region"));

		table_names = con.GetTableNames("PRAGMA tpch(6)");
		REQUIRE(table_names.size() == 1);
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(7)");
		REQUIRE(table_names.size() == 5);
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("nation"));

		table_names = con.GetTableNames("PRAGMA tpch(8)");
		REQUIRE(table_names.size() == 7);
		REQUIRE(table_names.count("part"));
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("nation"));
		REQUIRE(table_names.count("region"));

		table_names = con.GetTableNames("PRAGMA tpch(9)");
		REQUIRE(table_names.size() == 6);
		REQUIRE(table_names.count("part"));
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("partsupp"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("nation"));

		table_names = con.GetTableNames("PRAGMA tpch(10)");
		REQUIRE(table_names.size() == 4);
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("nation"));

		table_names = con.GetTableNames("PRAGMA tpch(11)");
		REQUIRE(table_names.size() == 3);
		REQUIRE(table_names.count("partsupp"));
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("nation"));

		table_names = con.GetTableNames("PRAGMA tpch(12)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(13)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("orders"));

		table_names = con.GetTableNames("PRAGMA tpch(14)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("part"));
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(15)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(16)");
		REQUIRE(table_names.size() == 3);
		REQUIRE(table_names.count("partsupp"));
		REQUIRE(table_names.count("part"));
		REQUIRE(table_names.count("supplier"));

		table_names = con.GetTableNames("PRAGMA tpch(17)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("part"));

		table_names = con.GetTableNames("PRAGMA tpch(18)");
		REQUIRE(table_names.size() == 3);
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(19)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("part"));

		table_names = con.GetTableNames("PRAGMA tpch(20)");
		REQUIRE(table_names.size() == 5);
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("nation"));
		REQUIRE(table_names.count("partsupp"));
		REQUIRE(table_names.count("part"));
		REQUIRE(table_names.count("lineitem"));

		table_names = con.GetTableNames("PRAGMA tpch(21)");
		REQUIRE(table_names.size() == 4);
		REQUIRE(table_names.count("supplier"));
		REQUIRE(table_names.count("lineitem"));
		REQUIRE(table_names.count("orders"));
		REQUIRE(table_names.count("nation"));

		table_names = con.GetTableNames("PRAGMA tpch(22)");
		REQUIRE(table_names.size() == 2);
		REQUIRE(table_names.count("customer"));
		REQUIRE(table_names.count("orders"));

		// create the (empty) TPC-H tables for the second iteration
		REQUIRE_NO_FAIL(con.Query("CALL dbgen(sf=0)"));
	}
}

View File

@@ -0,0 +1,57 @@
#include "catch.hpp"
#include "duckdb/common/file_system.hpp"
#include "duckdb/storage/storage_manager.hpp"
#include "duckdb/common/insertion_order_preserving_map.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
TEST_CASE("Test Insertion Order Preserving Map: duplicate insert", "[api][.]") {
	InsertionOrderPreservingMap<int> map;
	// inserting an existing key ("b") must not create a second entry
	map.insert("a", 1);
	map.insert("b", 2);
	map.insert("c", 3);
	map.insert("b", 4);

	// remove "c" through an iterator obtained from find
	auto it = map.find(string("c"));
	map.erase(it);

	// only "a" and "b" should remain
	int remaining = 0;
	for (auto entry = map.begin(); entry != map.end(); entry++) {
		remaining++;
	}
	REQUIRE(remaining == 2);
}
TEST_CASE("Test Insertion Order Preserving Map: double erase", "[api][.]") {
	InsertionOrderPreservingMap<idx_t> map;

	// helper: (re-)insert the standard keys; the duplicate "b" is a no-op
	auto fill = [&map]() {
		map.insert("a", 1);
		map.insert("b", 2);
		map.insert("c", 3);
		map.insert("b", 4);
	};
	// helper: count entries by iterating the map
	auto count_entries = [&map]() {
		int total = 0;
		for (auto it = map.begin(); it != map.end(); it++) {
			total++;
		}
		return total;
	};

	// erase one key: "a" and "b" remain
	fill();
	map.erase(map.find("c"));
	REQUIRE(count_entries() == 2);

	// refill ("c" is re-inserted) and erase two keys: only "a" remains
	fill();
	map.erase(map.find("c"));
	map.erase(map.find("b"));
	REQUIRE(count_entries() == 1);
	// "b" must really be gone
	REQUIRE(map.find("b") == map.end());
}

View File

@@ -0,0 +1,207 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/main/db_instance_cache.hpp"
#include "duckdb/storage/storage_extension.hpp"
#include "duckdb/transaction/duck_transaction_manager.hpp"
#include "duckdb/catalog/duck_catalog.hpp"
#include <chrono>
#include <iostream>
#include <thread>
using namespace duckdb;
// Entry point for a background thread: opens (or re-uses) the cached database
// instance at *path and immediately releases it again. Any exception fails the
// test instead of escaping the thread.
static void background_thread_connect(DBInstanceCache *instance_cache, std::string *path) {
	try {
		DBConfig config;
		auto cached_db = instance_cache->GetOrCreateInstance(*path, config, true);
		cached_db.reset();
	} catch (std::exception &ex) {
		FAIL(ex.what());
	}
}
TEST_CASE("Test parallel connection and destruction of connections with database instance cache", "[api][.]") {
	DBInstanceCache instance_cache;
	// repeatedly race instance destruction on this thread against instance
	// creation on a background thread
	for (idx_t iteration = 0; iteration < 100; iteration++) {
		auto path = TestCreatePath("instance_cache_parallel.db");
		DBConfig config;
		auto shared_db = instance_cache.GetOrCreateInstance(path, config, true);
		std::thread background_thread(background_thread_connect, &instance_cache, &path);
		// drop our reference while the background thread is connecting
		shared_db.reset();
		background_thread.join();
		TestDeleteFile(path);
		// reaching this point without crashing is the actual assertion
		REQUIRE(1);
	}
}
// Storage extension whose attach callback sleeps for five seconds before
// producing a regular DuckCatalog - simulates a database that is slow to open.
struct DelayingStorageExtension : StorageExtension {
	DelayingStorageExtension() {
		attach = [](optional_ptr<StorageExtensionInfo>, ClientContext &, AttachedDatabase &db, const string &,
		            AttachInfo &info, AttachOptions &) -> unique_ptr<Catalog> {
			// artificial delay so other threads can observe an in-progress attach
			std::this_thread::sleep_for(std::chrono::seconds(5));
			return make_uniq_base<Catalog, DuckCatalog>(db);
		};
		// transactions are handled by the regular DuckDB transaction manager
		create_transaction_manager = [](optional_ptr<StorageExtensionInfo>, AttachedDatabase &db,
		                                Catalog &) -> unique_ptr<TransactionManager> {
			return make_uniq<DuckTransactionManager>(db);
		};
	}
};
TEST_CASE("Test db creation does not block instance cache", "[api]") {
	DBInstanceCache instance_cache;
	using namespace std::chrono;

	// Thread 1: the first creation of "delay::memory:" takes ~5 seconds
	// (DelayingStorageExtension); subsequent lookups of the same, now cached,
	// instance must be fast.
	auto second_creation_was_quick = false;
	shared_ptr<DuckDB> stick_around;
	std::thread t1 {[&instance_cache, &second_creation_was_quick, &stick_around]() {
		DBConfig db_config;
		db_config.storage_extensions["delay"] = make_uniq<DelayingStorageExtension>();
		stick_around = instance_cache.GetOrCreateInstance("delay::memory:", db_config, true);
		const auto start_time = steady_clock::now();
		for (idx_t i = 0; i < 10; i++) {
			db_config.storage_extensions["delay"] = make_uniq<DelayingStorageExtension>();
			instance_cache.GetOrCreateInstance("delay::memory:", db_config, true);
		}
		const auto end_time = steady_clock::now();
		second_creation_was_quick = duration_cast<seconds>(end_time - start_time).count() < 1;
	}};

	std::this_thread::sleep_for(seconds(2));

	// Thread 2: starts while t1's initial attach is still sleeping; it must
	// wait for the remaining ~3 seconds, but not longer.
	auto opening_slow_db_takes_remaining_time = false;
	std::thread t2 {[&instance_cache, &opening_slow_db_takes_remaining_time]() {
		DBConfig db_config;
		const auto start_time = steady_clock::now();
		instance_cache.GetOrCreateInstance("delay::memory:", db_config, true);
		const auto end_time = steady_clock::now();
		const auto duration = duration_cast<milliseconds>(end_time - start_time);
		opening_slow_db_takes_remaining_time = duration > seconds(2) && duration < seconds(4);
	}};

	// Thread 3: while the slow attach is in progress, creating unrelated
	// in-memory databases must not be blocked by it.
	auto no_delay_for_db_creation = true;
	std::thread t3 {[&instance_cache, &no_delay_for_db_creation]() {
		const auto start_time = steady_clock::now();
		DBConfig db_config;
		// BUG FIX: the condition was inverted ("start_time + seconds(3) <
		// steady_clock::now()"), which is false on entry, so the loop body
		// never executed and the check below passed vacuously. Loop for ~3
		// seconds instead.
		while (steady_clock::now() < start_time + seconds(3)) {
			auto db_start_time = steady_clock::now();
			instance_cache.GetOrCreateInstance(":memory:", db_config, false);
			no_delay_for_db_creation &= duration_cast<milliseconds>(steady_clock::now() - db_start_time).count() < 100;
		}
	}};

	t1.join();
	t2.join();
	t3.join();
	REQUIRE(second_creation_was_quick);
	REQUIRE(opening_slow_db_takes_remaining_time);
	REQUIRE(no_delay_for_db_creation);
}
TEST_CASE("Test attaching the same database path from different databases", "[api][.]") {
	DBInstanceCache instance_cache;
	auto test_path = TestCreatePath("instance_cache_reuse.db");
	DBConfig config;

	// two independent in-memory databases (do not share the cache entry)
	auto db1 = instance_cache.GetOrCreateInstance(":memory:", config, false);
	auto db2 = instance_cache.GetOrCreateInstance(":memory:", config, false);
	Connection con1(*db1);
	Connection con2(*db2);

	SECTION("Regular ATTACH conflict") {
		string attach_query = "ATTACH '" + test_path + "' AS db_ref";
		// only one database may hold the file in read-write mode
		REQUIRE_NO_FAIL(con1.Query(attach_query));
		REQUIRE_FAIL(con2.Query(attach_query));
		// releasing it in con1 frees the path for con2 ...
		REQUIRE_NO_FAIL(con1.Query("DETACH db_ref"));
		REQUIRE_NO_FAIL(con2.Query(attach_query));
		// ... which in turn locks out con1 again
		REQUIRE_FAIL(con1.Query(attach_query));
	}
	SECTION("ATTACH IF NOT EXISTS") {
		string attach_query = "ATTACH IF NOT EXISTS '" + test_path + "' AS db_ref";
		REQUIRE_NO_FAIL(con1.Query(attach_query));
		// IF NOT EXISTS does not bypass the cross-database conflict check
		REQUIRE_FAIL(con2.Query(attach_query));
	}
}
TEST_CASE("Test attaching the same database path from different databases in read-only mode", "[api][.]") {
	DBInstanceCache instance_cache;
	auto test_path = TestCreatePath("instance_cache_reuse_readonly.db");
	// create an empty database so it can later be attached read-only
	{
		DuckDB db(test_path);
		Connection con(db);
		REQUIRE_NO_FAIL(con.Query("CREATE TABLE IF NOT EXISTS integers AS FROM (VALUES (1), (2), (3)) t(i)"));
	}
	// three independent in-memory databases
	DBConfig config;
	auto db1 = instance_cache.GetOrCreateInstance(":memory:", config, false);
	auto db2 = instance_cache.GetOrCreateInstance(":memory:", config, false);
	auto db3 = instance_cache.GetOrCreateInstance(":memory:", config, false);
	Connection con1(*db1);
	Connection con2(*db2);
	Connection con3(*db3);
	SECTION("Regular ATTACH conflict") {
		string attach_query = "ATTACH '" + test_path + "' AS db_ref";
		string read_only_attach = attach_query + " (READ_ONLY)";
		REQUIRE_NO_FAIL(con1.Query(read_only_attach));
		// succeeds - we can attach the same database multiple times in read-only mode
		REQUIRE_NO_FAIL(con2.Query(read_only_attach));
		// fails - we cannot attach in read-write
		REQUIRE_FAIL(con3.Query(attach_query));
		// if we detach from con1, we still cannot attach in read-write in con3
		REQUIRE_NO_FAIL(con1.Query("DETACH db_ref"));
		REQUIRE_FAIL(con3.Query(attach_query));
		// but if we detach in con2, we can attach in read-write mode now
		REQUIRE_NO_FAIL(con2.Query("DETACH db_ref"));
		REQUIRE_NO_FAIL(con3.Query(attach_query));
		// and now we can no longer attach in read-only mode
		REQUIRE_FAIL(con1.Query(read_only_attach));
	}
	// FIX: this section was labeled "ATTACH IF EXISTS", but it exercises
	// ATTACH IF NOT EXISTS - renamed for consistency with the query below
	SECTION("ATTACH IF NOT EXISTS") {
		string attach_query = "ATTACH IF NOT EXISTS '" + test_path + "' AS db_ref";
		string read_only_attach = attach_query + " (READ_ONLY)";
		REQUIRE_NO_FAIL(con1.Query(read_only_attach));
		// succeeds - we can attach the same database multiple times in read-only mode
		REQUIRE_NO_FAIL(con2.Query(read_only_attach));
		// fails - we cannot attach in read-write
		REQUIRE_FAIL(con3.Query(attach_query));
		// if we detach from con1, we still cannot attach in read-write in con3
		REQUIRE_NO_FAIL(con1.Query("DETACH db_ref"));
		REQUIRE_FAIL(con3.Query(attach_query));
		// but if we detach in con2, we can attach in read-write mode now
		REQUIRE_NO_FAIL(con2.Query("DETACH db_ref"));
		REQUIRE_NO_FAIL(con3.Query(attach_query));
		// and now we can no longer attach in read-only mode
		REQUIRE_FAIL(con1.Query(read_only_attach));
	}
}

View File

@@ -0,0 +1,169 @@
#include "catch.hpp"
#include "duckdb/main/connection.hpp"
#include "duckdb/main/database.hpp"
#include "duckdb/main/extension/extension_loader.hpp"
#include "duckdb/main/extension_manager.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace Catch::Matchers;
// ClientContextState implementation that records the error (if any) handed to
// the query-end and transaction-rollback lifecycle hooks, so tests can assert
// which errors were observed.
struct TestClientContextState : ClientContextState {
	// error messages collected by QueryEnd
	vector<string> query_errors;
	// error messages collected by TransactionRollback
	vector<string> transaction_errors;

	TestClientContextState() = default;
	// registered state is shared via shared_ptr; copying would split the logs
	TestClientContextState(const TestClientContextState &) = delete;

	void QueryEnd(ClientContext &, optional_ptr<ErrorData> error) override {
		if (error && error->HasError()) {
			query_errors.push_back(error->Message());
		}
	}
	void TransactionRollback(MetaTransaction &transaction, ClientContext &context,
	                         optional_ptr<ErrorData> error) override {
		if (error && error->HasError()) {
			transaction_errors.push_back(error->Message());
		}
	}
};
// Create a TestClientContextState, register it on the connection's client
// context under the key "test_state", and return it for inspection.
shared_ptr<TestClientContextState> WithLifecycleState(const Connection &conn) {
	auto state = make_shared_ptr<TestClientContextState>();
	conn.context->registered_state->Insert("test_state", state);
	return state;
}
TEST_CASE("Test ClientContextState", "[api]") {
	// Verifies that the QueryEnd and TransactionRollback lifecycle hooks fire
	// with the expected errors across implicit transactions, explicit
	// commits/rollbacks, binding errors and runtime errors.
	DuckDB db(nullptr);
	Connection conn(db);
	conn.Query("CREATE TABLE my_table(i INT)");
	auto state = WithLifecycleState(conn);
	// table function that always throws during execution - used to trigger
	// runtime (non-binding) errors
	const TableFunction table_fun(
	    "raise_exception_tf", {},
	    [](ClientContext &, TableFunctionInput &, DataChunk &) {
		    throw std::runtime_error("This is a test exception.");
	    },
	    [](ClientContext &, TableFunctionBindInput &, vector<LogicalType> &return_types,
	       vector<string> &names) -> unique_ptr<FunctionData> {
		    return_types.push_back(LogicalType::VARCHAR);
		    names.push_back("message");
		    return nullptr;
	    });
	// register the function as if it came from an extension
	ExtensionInfo extension_info {};
	ExtensionActiveLoad load_info {*db.instance, extension_info, "test_extension"};
	ExtensionLoader loader {load_info};
	loader.RegisterFunction(table_fun);
	SECTION("No error, No explicit transaction") {
		REQUIRE_NO_FAIL(conn.Query("SELECT * FROM my_table"));
		REQUIRE(state->query_errors.empty());
		REQUIRE(state->transaction_errors.empty());
	}
	SECTION("Error, No explicit transaction") {
		// the implicit transaction rolls back, so both hooks observe the error
		REQUIRE_FAIL(conn.Query("SELECT * FROM this_table_does_not_exist"));
		REQUIRE((state->query_errors.size() == 1));
		REQUIRE_THAT(state->query_errors.at(0), Contains("Table with name this_table_does_not_exist does not exist!"));
		REQUIRE((state->transaction_errors.size() == 1));
		REQUIRE_THAT(state->transaction_errors.at(0),
		             Contains("Table with name this_table_does_not_exist does not exist!"));
	}
	SECTION("No error, Explicit commit") {
		conn.BeginTransaction();
		REQUIRE_NO_FAIL(conn.Query("SELECT * FROM my_table"));
		conn.Commit();
		REQUIRE(state->query_errors.empty());
		REQUIRE(state->transaction_errors.empty());
	}
	SECTION("No error, Explicit rollback") {
		// a deliberate rollback without an error does not invoke the error hooks
		conn.BeginTransaction();
		REQUIRE_NO_FAIL(conn.Query("SELECT * FROM my_table"));
		conn.Rollback();
		REQUIRE(state->query_errors.empty());
		REQUIRE(state->transaction_errors.empty());
	}
	SECTION("Binding error, Explicit rollback") {
		// These errors do not invalidate the transaction...
		conn.BeginTransaction();
		REQUIRE_FAIL(conn.Query("SELECT * FROM this_table_does_not_exist_1"));
		REQUIRE_FAIL(conn.Query("SELECT * FROM this_table_does_not_exist_2"));
		REQUIRE_NO_FAIL(conn.Query("SELECT * FROM my_table"));
		conn.Rollback();
		REQUIRE((state->query_errors.size() == 2));
		REQUIRE_THAT(state->query_errors.at(0),
		             Contains("Table with name this_table_does_not_exist_1 does not exist!"));
		REQUIRE_THAT(state->query_errors.at(1),
		             Contains("Table with name this_table_does_not_exist_2 does not exist!"));
		REQUIRE((state->transaction_errors.empty()));
	}
	SECTION("Binding error, Explicit commit") {
		// These errors do not invalidate the transaction...
		conn.BeginTransaction();
		REQUIRE_FAIL(conn.Query("SELECT * FROM this_table_does_not_exist_1"));
		REQUIRE_FAIL(conn.Query("SELECT * FROM this_table_does_not_exist_2"));
		REQUIRE_NO_FAIL(conn.Query("SELECT * FROM my_table"));
		conn.Commit();
		REQUIRE((state->query_errors.size() == 2));
		REQUIRE_THAT(state->query_errors.at(0),
		             Contains("Table with name this_table_does_not_exist_1 does not exist!"));
		REQUIRE_THAT(state->query_errors.at(1),
		             Contains("Table with name this_table_does_not_exist_2 does not exist!"));
		REQUIRE((state->transaction_errors.empty()));
	}
	SECTION("Runtime error, Explicit commit") {
		// a runtime error aborts the transaction; the later commit fails and
		// the CREATE TABLE is rolled back
		conn.BeginTransaction();
		REQUIRE_NO_FAIL(conn.Query("INSERT INTO my_table VALUES (1)"));
		REQUIRE_FAIL(conn.Query("SELECT * FROM raise_exception_tf()"));
		REQUIRE_FAIL(conn.Query("CREATE TABLE my_table2(i INT)"));
		conn.Commit();
		REQUIRE((state->query_errors.size() == 2));
		REQUIRE_THAT(state->query_errors.at(0), Contains("This is a test exception."));
		REQUIRE_THAT(state->query_errors.at(1), Contains("Current transaction is aborted"));
		REQUIRE((state->transaction_errors.size() == 1));
		REQUIRE_THAT(state->transaction_errors.at(0), Contains("Failed to commit"));
		REQUIRE_FAIL(conn.Query("SELECT * FROM my_table2"));
	}
	SECTION("Runtime error, No explicit transaction") {
		REQUIRE_FAIL(conn.Query("SELECT * FROM raise_exception_tf()"));
		REQUIRE((state->query_errors.size() == 1));
		REQUIRE_THAT(state->query_errors.at(0), Contains("This is a test exception."));
		REQUIRE((state->transaction_errors.size() == 1));
		REQUIRE_THAT(state->transaction_errors.at(0), Contains("This is a test exception."));
	}
	SECTION("Manually invalidated transaction, Explicit commit") {
		// invalidating the transaction makes the commit throw; the hook must
		// still observe a rollback error
		conn.BeginTransaction();
		REQUIRE_NO_FAIL(conn.Query("CREATE TABLE my_table2(i INT)"));
		auto &transaction = conn.context->ActiveTransaction();
		ValidChecker::Invalidate(transaction, "42");
		try {
			conn.Commit();
		} catch (...) {
			// Ignore
		}
		REQUIRE((state->transaction_errors.size() == 1));
		REQUIRE_FAIL(conn.Query("SELECT * FROM my_table2"));
	}
	SECTION("Manually invalidated transaction, Explicit rollback") {
		conn.BeginTransaction();
		REQUIRE_NO_FAIL(conn.Query("CREATE TABLE my_table2(i INT)"));
		auto &transaction = conn.context->ActiveTransaction();
		ValidChecker::Invalidate(transaction, "42");
		conn.Rollback();
		REQUIRE((state->transaction_errors.size() == 1));
		REQUIRE_FAIL(conn.Query("SELECT * FROM my_table2"));
	}
}
// ClientContextState

View File

@@ -0,0 +1,58 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/storage/object_cache.hpp"
using namespace duckdb;
using namespace std;
// Minimal ObjectCacheEntry used to exercise the cache in the tests below.
struct TestObject : public ObjectCacheEntry {
	int value;

	// explicit: a bare int should not silently convert into a cache entry
	explicit TestObject(int value) : value(value) {
	}

	// Runtime type tag; must agree with the static ObjectType() that the
	// cache's templated Get/GetOrCreate helpers compare against.
	string GetObjectType() override {
		return ObjectType();
	}
	static string ObjectType() {
		return "TestObject";
	}
};
// Second cache-entry type with a distinct ObjectType(); used to verify that
// a typed lookup under a key holding a different entry type fails.
struct AnotherTestObject : public ObjectCacheEntry {
	int value;

	// explicit: prevent accidental implicit conversion from int
	explicit AnotherTestObject(int value) : value(value) {
	}

	// Runtime type tag; must agree with the static ObjectType() below.
	string GetObjectType() override {
		return ObjectType();
	}
	static string ObjectType() {
		return "AnotherTestObject";
	}
};
// Verifies basic ObjectCache operations: lookup miss, Put/Delete, typed
// get-or-create semantics, and the behavior on an entry-type mismatch.
TEST_CASE("Test ObjectCache", "[api]") {
	DuckDB db;
	Connection con(db);
	auto &context = *con.context;
	auto &cache = ObjectCache::GetObjectCache(context);
	// cache starts empty: lookup misses
	REQUIRE(cache.GetObject("test") == nullptr);
	cache.Put("test", make_shared_ptr<TestObject>(42));
	REQUIRE(cache.GetObject("test") != nullptr);
	cache.Delete("test");
	REQUIRE(cache.GetObject("test") == nullptr);
	// GetOrCreate inserts when the key is absent...
	REQUIRE(cache.GetOrCreate<TestObject>("test", 42) != nullptr);
	REQUIRE(cache.Get<TestObject>("test") != nullptr);
	// ...and returns the existing entry when present (value stays 42, not 1337)
	REQUIRE(cache.GetOrCreate<TestObject>("test", 1337)->value == 42);
	REQUIRE(cache.Get<TestObject>("test")->value == 42);
	// requesting a different entry type under the same key yields nullptr
	REQUIRE(cache.GetOrCreate<AnotherTestObject>("test", 13) == nullptr);
}

View File

@@ -0,0 +1,246 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include <thread>
#include "duckdb/common/string_util.hpp"
using namespace duckdb;
using namespace std;
// Exercises the PendingQuery API: materialized vs. streaming execution,
// incremental draining via ExecuteTask(), invalidation when a newer pending
// query is opened on the same connection, and error propagation both at bind
// time and during execution.
TEST_CASE("Test Pending Query API", "[api][.]") {
	DuckDB db;
	Connection con(db);
	SECTION("Materialized result") {
		auto pending_query = con.PendingQuery("SELECT SUM(i) FROM range(1000000) tbl(i)");
		REQUIRE(!pending_query->HasError());
		auto result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(499999500000)}));
		// cannot fetch twice from the same pending query
		REQUIRE_THROWS(pending_query->Execute());
		REQUIRE_THROWS(pending_query->Execute());
		// query the connection as normal after
		result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
	}
	SECTION("Streaming result") {
		// second argument = true requests a streaming result
		auto pending_query = con.PendingQuery("SELECT SUM(i) FROM range(1000000) tbl(i)", true);
		REQUIRE(!pending_query->HasError());
		auto result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(499999500000)}));
		// cannot fetch twice from the same pending query
		REQUIRE_THROWS(pending_query->Execute());
		REQUIRE_THROWS(pending_query->Execute());
		// query the connection as normal after
		result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
	}
	SECTION("Execute tasks") {
		auto pending_query = con.PendingQuery("SELECT SUM(i) FROM range(1000000) tbl(i)", true);
		// drain tasks one at a time until the result is ready
		while (pending_query->ExecuteTask() == PendingExecutionResult::RESULT_NOT_READY)
			;
		REQUIRE(!pending_query->HasError());
		auto result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(499999500000)}));
		// cannot fetch twice from the same pending query
		REQUIRE_THROWS(pending_query->Execute());
		// query the connection as normal after
		result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
	}
	SECTION("Create pending query while another pending query exists") {
		auto pending_query = con.PendingQuery("SELECT SUM(i) FROM range(1000000) tbl(i)");
		auto pending_query2 = con.PendingQuery("SELECT SUM(i) FROM range(1000000) tbl(i)", true);
		// first pending query is now closed
		REQUIRE_THROWS(pending_query->ExecuteTask());
		REQUIRE_THROWS(pending_query->Execute());
		// we can execute the second one
		auto result = pending_query2->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(499999500000)}));
		// query the connection as normal after
		result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
	}
	SECTION("Binding error in pending query") {
		// XXXSUM does not exist: the error surfaces at bind time
		auto pending_query = con.PendingQuery("SELECT XXXSUM(i) FROM range(1000000) tbl(i)");
		REQUIRE(pending_query->HasError());
		REQUIRE_THROWS(pending_query->ExecuteTask());
		REQUIRE_THROWS(pending_query->Execute());
		// query the connection as normal after
		auto result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
	}
	SECTION("Runtime error in pending query (materialized)") {
		// this succeeds initially
		auto pending_query =
		    con.PendingQuery("SELECT concat(SUM(i)::varchar, 'hello')::INT FROM range(1000000) tbl(i)");
		REQUIRE(!pending_query->HasError());
		// we only encounter the failure later on as we are executing the query
		auto result = pending_query->Execute();
		REQUIRE_FAIL(result);
		// query the connection as normal after
		result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
	}
	SECTION("Runtime error in pending query (streaming)") {
		// this succeeds initially
		auto pending_query =
		    con.PendingQuery("SELECT concat(SUM(i)::varchar, 'hello')::INT FROM range(1000000) tbl(i)", true);
		REQUIRE(!pending_query->HasError());
		auto result = pending_query->Execute();
		REQUIRE(result->HasError());
		// query the connection as normal after
		result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
	}
	SECTION("Pending results errors as JSON") {
		// with errors_as_json enabled the error text carries the error type tag
		con.Query("SET errors_as_json = true;");
		auto pending_query = con.PendingQuery("SELCT 32;");
		REQUIRE(pending_query->HasError());
		REQUIRE(duckdb::StringUtil::Contains(pending_query->GetError(), "SYNTAX_ERROR"));
	}
}
// Worker body for the parallel pending-query test: repeatedly opens and
// executes a pending query on the shared connection. Racing threads may
// invalidate each other's pending queries; those failures throw and are
// tolerated — only an incorrect *result* marks the thread as failed.
static void parallel_pending_query(Connection *conn, bool *correct, size_t threadnr) {
	correct[threadnr] = true;
	size_t iteration = 0;
	while (iteration < 100) {
		iteration++;
		auto pending = conn->PendingQuery("SELECT * FROM integers ORDER BY i");
		try {
			// Execute() throws if another thread opened a pending query first
			auto res = pending->Execute();
			if (!CHECK_COLUMN(res, 0, {1, 2, 3, Value()})) {
				correct[threadnr] = false;
			}
		} catch (...) {
			// expected race with other threads: ignore and retry
		}
	}
}
// Spawns 20 threads sharing one connection, all racing on the pending-query
// API; each thread tolerates invalidation exceptions but must never observe
// an incorrect result (see parallel_pending_query above).
TEST_CASE("Test parallel usage of pending query API", "[api][.]") {
	auto db = make_uniq<DuckDB>(nullptr);
	auto conn = make_uniq<Connection>(*db);
	REQUIRE_NO_FAIL(conn->Query("CREATE TABLE integers(i INTEGER)"));
	REQUIRE_NO_FAIL(conn->Query("INSERT INTO integers VALUES (1), (2), (3), (NULL)"));
	bool correct[20];
	thread threads[20];
	for (size_t i = 0; i < 20; i++) {
		threads[i] = thread(parallel_pending_query, conn.get(), correct, i);
	}
	for (size_t i = 0; i < 20; i++) {
		threads[i].join();
		REQUIRE(correct[i]);
	}
}
// Combines prepared statements with the pending-query API: re-executing the
// same prepared statement, prepare-time vs. execution-time errors, and
// invalidation rules when multiple prepared statements interleave.
TEST_CASE("Test Pending Query Prepared Statements API", "[api][.]") {
	DuckDB db;
	Connection con(db);
	SECTION("Standard prepared") {
		auto prepare = con.Prepare("SELECT SUM(i) FROM range(1000000) tbl(i) WHERE i>=$1");
		REQUIRE(!prepare->HasError());
		auto pending_query = prepare->PendingQuery(0);
		REQUIRE(!pending_query->HasError());
		auto result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(499999500000)}));
		// cannot fetch twice from the same pending query
		REQUIRE_THROWS(pending_query->Execute());
		REQUIRE_THROWS(pending_query->Execute());
		// we can use the prepared query again, however
		pending_query = prepare->PendingQuery(500000);
		REQUIRE(!pending_query->HasError());
		result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(374999750000)}));
		// cannot fetch twice from the same pending query
		REQUIRE_THROWS(pending_query->Execute());
		REQUIRE_THROWS(pending_query->Execute());
	}
	SECTION("Error during prepare") {
		// X is an unknown column: preparation itself fails
		auto prepare = con.Prepare("SELECT SUM(i+X) FROM range(1000000) tbl(i) WHERE i>=$1");
		REQUIRE(prepare->HasError());
		REQUIRE_FAIL(prepare->PendingQuery(0));
	}
	SECTION("Error during execution") {
		duckdb::vector<Value> parameters;
		auto prepared = con.Prepare("SELECT concat(SUM(i)::varchar, CASE WHEN SUM(i) IS NULL THEN 0 ELSE 'hello' "
		                            "END)::INT FROM range(1000000) tbl(i) WHERE i>$1");
		// this succeeds initially
		parameters = {Value::INTEGER(0)};
		auto pending_query = prepared->PendingQuery(parameters, true);
		REQUIRE(!pending_query->HasError());
		// still succeeds...
		auto result = pending_query->Execute();
		REQUIRE(result->HasError());
		// query the connection as normal after
		result = con.Query("SELECT 42");
		REQUIRE(CHECK_COLUMN(result, 0, {42}));
		// if we change the parameter this works
		parameters = {Value::INTEGER(2000000)};
		pending_query = prepared->PendingQuery(parameters, true);
		result = pending_query->Execute();
		REQUIRE(!result->HasError());
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(0)}));
	}
	SECTION("Multiple prepared statements") {
		auto prepare1 = con.Prepare("SELECT SUM(i) FROM range(1000000) tbl(i) WHERE i>=$1");
		auto prepare2 = con.Prepare("SELECT SUM(i) FROM range(1000000) tbl(i) WHERE i<=$1");
		REQUIRE(!prepare1->HasError());
		REQUIRE(!prepare2->HasError());
		// we can execute from both prepared statements individually
		auto pending_query = prepare1->PendingQuery(500000);
		REQUIRE(!pending_query->HasError());
		auto result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(374999750000)}));
		pending_query = prepare2->PendingQuery(500000);
		REQUIRE(!pending_query->HasError());
		result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(125000250000)}));
		// we can overwrite pending queries all day long
		for (idx_t i = 0; i < 10; i++) {
			pending_query = prepare1->PendingQuery(500000);
			pending_query = prepare2->PendingQuery(500000);
		}
		result = pending_query->Execute();
		REQUIRE(CHECK_COLUMN(result, 0, {Value::BIGINT(125000250000)}));
		// however, we can't mix and match...
		pending_query = prepare1->PendingQuery(500000);
		auto pending_query2 = prepare2->PendingQuery(500000);
		// this result is no longer open
		REQUIRE_THROWS(pending_query->Execute());
	}
}

View File

@@ -0,0 +1,135 @@
#include "catch.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
// Creates table "a" with three TINYINT rows: 11, 12, 13.
static void CreateSimpleTable(Connection &con) {
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE a (i TINYINT)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO a VALUES (11), (12), (13)"));
}
// Mutates table "a": adds row 14 and removes row 12. After this, table "a"
// contains 11, 13, 14 (see CheckSimpleQueryAfterModification).
static void ModifySimpleTable(Connection &con) {
	REQUIRE_NO_FAIL(con.Query("INSERT INTO a VALUES (14)"));
	REQUIRE_NO_FAIL(con.Query("DELETE FROM a where i=12"));
}
// Sanity check: the query text extracts to exactly one statement, and the
// prepared COUNT(*) with parameter 12 returns a single row containing 1.
static void CheckSimpleQuery(Connection &con) {
	auto extracted = con.ExtractStatements("SELECT COUNT(*) FROM a WHERE i=12");
	REQUIRE(extracted.size() == 1);
	duckdb::vector<duckdb::Value> params = {Value(12)};
	auto pending = con.PendingQuery("SELECT COUNT(*) FROM a WHERE i=?", params, true);
	if (pending->HasError()) {
		// surface the error text to ease debugging on failure
		printf("%s\n", pending->GetError().c_str());
	}
	REQUIRE(!pending->HasError());
	auto res = pending->Execute();
	REQUIRE(CHECK_COLUMN(res, 0, {1}));
}
// Binding against the non-existent table "b" must surface a CATALOG error
// already on the pending result (i.e. at bind time, not execution time).
static void CheckCatalogErrorQuery(Connection &con) {
	duckdb::vector<Value> params = {Value(12)};
	auto pending = con.PendingQuery("SELECT COUNT(*) FROM b WHERE i=?", params, true);
	const bool is_catalog_error = pending->HasError() && pending->GetErrorType() == ExceptionType::CATALOG;
	REQUIRE(is_catalog_error);
}
// Binds a VARCHAR parameter that cannot be cast to TINYINT; binding itself
// succeeds, but execution must fail with a CONVERSION error.
static void CheckConversionErrorQuery(Connection &con) {
	duckdb::vector<Value> params = {Value("fawakaaniffoo")};
	auto pending = con.PendingQuery("SELECT COUNT(*) FROM a WHERE i=?", params, true);
	REQUIRE(!pending->HasError());
	auto res = pending->Execute();
	const bool is_conversion_error = res->HasError() && res->GetErrorType() == ExceptionType::CONVERSION;
	REQUIRE(is_conversion_error);
}
// After ModifySimpleTable ran, row 14 must be visible exactly once.
static void CheckSimpleQueryAfterModification(Connection &con) {
	duckdb::vector<Value> params = {Value(14)};
	auto pending = con.PendingQuery("SELECT COUNT(*) FROM a WHERE i=?", params, true);
	REQUIRE(!pending->HasError());
	auto res = pending->Execute();
	REQUIRE(CHECK_COLUMN(res, 0, {1}));
}
// Happy path: a parameterized pending query works and can be repeated.
TEST_CASE("Pending Query with Parameters", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	CreateSimpleTable(con);
	// run twice to confirm the connection stays usable
	CheckSimpleQuery(con);
	CheckSimpleQuery(con);
}
// A catalog (bind-time) error in a parameterized pending query must not
// poison the connection.
TEST_CASE("Pending Query with Parameters Catalog Error", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	CreateSimpleTable(con);
	CheckCatalogErrorQuery(con);
	// Verify things are still sane
	CheckSimpleQuery(con);
}
// A conversion (execution-time) error in a parameterized pending query must
// not poison the connection either.
TEST_CASE("Pending Query with Parameters Type Conversion Error", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	CreateSimpleTable(con);
	CheckConversionErrorQuery(con);
	// Verify things are still sane
	CheckSimpleQuery(con);
}
// Snapshot-isolation check: a pending-query-driven transaction on con1 must
// not see modifications committed by con2 after con1's transaction started,
// until con1 itself commits.
TEST_CASE("Pending Query with Parameters with transactions", "[api]") {
	DuckDB db(nullptr);
	Connection con1(db);
	Connection con2(db);
	duckdb::vector<Value> empty_values = {};
	con1.EnableQueryVerification();
	CreateSimpleTable(con1);
	// CheckConversionErrorQuery(con1);
	// Begin a transaction in the PrepareAndExecute
	auto pending_result1 = con1.PendingQuery("BEGIN TRANSACTION", empty_values, true);
	if (pending_result1->HasError()) {
		printf("%s\n", pending_result1->GetError().c_str());
	}
	REQUIRE(!pending_result1->HasError());
	auto result1 = pending_result1->Execute();
	REQUIRE(!result1->HasError());
	CheckSimpleQuery(con1);
	// Modify table on other connection, leaving transaction open
	con2.BeginTransaction();
	ModifySimpleTable(con2);
	CheckSimpleQueryAfterModification(con2);
	// con1 sees nothing: both transactions are open
	CheckSimpleQuery(con1);
	con2.Commit();
	// con1 still sees nothing: its transaction was started before con2's
	CheckSimpleQuery(con1);
	// con 1 commits
	auto pending_result2 = con1.PendingQuery("COMMIT", empty_values, true);
	auto result2 = pending_result2->Execute();
	REQUIRE(!result2->HasError());
	// now con1 should see changes from con2
	CheckSimpleQueryAfterModification(con1);
	CheckSimpleQueryAfterModification(con2);
}

View File

@@ -0,0 +1,156 @@
#include "catch.hpp"
#include "duckdb/execution/physical_plan_generator.hpp"
#include "duckdb/optimizer/optimizer.hpp"
#include "duckdb/parallel/thread_context.hpp"
#include "duckdb/planner/planner.hpp"
#include "test_helpers.hpp"
#include "duckdb/parser/parser.hpp"
#include <map>
#include <set>
using namespace duckdb;
using namespace std;
static void test_helper(string sql, duckdb::vector<string> fixtures = duckdb::vector<string>()) {
DuckDB db;
Connection con(db);
for (const auto &fixture : fixtures) {
con.SendQuery(fixture);
}
Parser p;
p.ParseQuery(sql);
for (auto &statement : p.statements) {
con.context->transaction.BeginTransaction();
// Should that be the default "ToString"?
string statement_sql(statement->query.c_str() + statement->stmt_location, statement->stmt_length);
Planner planner(*con.context);
planner.CreatePlan(std::move(statement));
auto plan = std::move(planner.plan);
Optimizer optimizer(*planner.binder, *con.context);
plan = optimizer.Optimize(std::move(plan));
// LogicalOperator's copy utilizes its serialize and deserialize methods
auto new_plan = plan->Copy(*con.context);
auto optimized_plan = optimizer.Optimize(std::move(new_plan));
con.context->transaction.Commit();
}
}
static void test_helper_multi_db(string sql, duckdb::vector<string> fixtures = duckdb::vector<string>()) {
DuckDB db;
Connection con(db);
REQUIRE_NO_FAIL(con.Query("ATTACH DATABASE ':memory:' AS new_db;"));
for (const auto &fixture : fixtures) {
con.SendQuery(fixture);
}
Parser p;
p.ParseQuery(sql);
for (auto &statement : p.statements) {
con.context->transaction.BeginTransaction();
// Should that be the default "ToString"?
string statement_sql(statement->query.c_str() + statement->stmt_location, statement->stmt_length);
Planner planner(*con.context);
planner.CreatePlan(std::move(statement));
auto plan = std::move(planner.plan);
Optimizer optimizer(*planner.binder, *con.context);
plan = optimizer.Optimize(std::move(plan));
// LogicalOperator's copy utilizes its serialize and deserialize methods
auto new_plan = plan->Copy(*con.context);
auto optimized_plan = optimizer.Optimize(std::move(new_plan));
con.context->transaction.Commit();
}
}
// SET produces a LogicalSet node; verify it survives the plan copy round-trip.
TEST_CASE("Test logical_set", "[serialization]") {
	test_helper("SET memory_limit='10GB'");
}
// SHOW wraps a query in a describe/show plan; verify it round-trips.
TEST_CASE("Test logical_show", "[serialization]") {
	test_helper("SHOW SELECT 42");
}
// EXPLAIN produces a LogicalExplain node; verify it round-trips.
TEST_CASE("Test logical_explain", "[serialization]") {
	test_helper("EXPLAIN SELECT 42");
}
// The always-false filter optimizes to a LogicalEmptyResult; verify it round-trips.
TEST_CASE("Test logical_empty_result", "[serialization]") {
	test_helper("SELECT * FROM (SELECT 42) WHERE 1>2");
}
// CREATE TABLE plan node round-trip.
TEST_CASE("Test create_table", "[serialization]") {
	test_helper("CREATE TABLE tbl (foo INTEGER)");
}
// INSERT plan node round-trip; the fixture creates the target table first.
TEST_CASE("Test insert_into", "[serialization]") {
	test_helper("INSERT INTO tbl VALUES(1)", {"CREATE TABLE tbl (foo INTEGER)"});
}
// DELETE plan node round-trip; the fixture creates the target table first.
TEST_CASE("Test logical_delete", "[serialization]") {
	test_helper("DELETE FROM tbl", {"CREATE TABLE tbl (foo INTEGER)"});
}
// TODO: only select for now
// TEST_CASE("Test logical_create_index", "[serialization]") {
// test_helper("CREATE INDEX idx ON tbl (foo)", {"CREATE TABLE tbl (foo INTEGER)"});
//}
// TODO: only select for now
// TEST_CASE("Test logical_create_schema", "[serialization]") {
// test_helper("CREATE SCHEMA test");
//}
// TODO: only select for now
// TEST_CASE("Test logical_create_view", "[serialization]") {
// test_helper("CREATE VIEW test_view AS (SELECT 42)");
//}
// UPDATE plan node round-trip; the fixture creates the target table first.
TEST_CASE("Test logical_update", "[serialization]") {
	test_helper("UPDATE tbl SET foo=42", {"CREATE TABLE tbl (foo INTEGER)"});
}
// TODO(stephwang): revisit this later since it doesn't work yet
// TEST_CASE("Test logical_copy_to_file", "[serialization]") {
// test_helper("COPY tbl TO 'test_table.csv' ( DELIMITER '|', HEADER )", {"CREATE TABLE tbl (foo INTEGER)"});
//}
// TODO(stephwang): revisit this later since it doesn't work yet
// TEST_CASE("Test logical_prepare", "[serialization]") {
// test_helper("PREPARE v1 AS SELECT 42");
//}
// DROP is planned as a LogicalSimple node; verify it round-trips.
TEST_CASE("Test logical_simple with DROP", "[serialization]") {
	test_helper("DROP TABLE tbl", {"CREATE TABLE tbl (foo INTEGER)"});
}
// ALTER is planned as a LogicalSimple node; verify it round-trips.
TEST_CASE("Test logical_simple with ALTER", "[serialization]") {
	test_helper("ALTER TABLE tbl ADD COLUMN bar INTEGER", {"CREATE TABLE tbl (foo INTEGER)"});
}
// LOAD is planned as a LogicalSimple node; verify it round-trips.
TEST_CASE("Test logical_simple with LOAD", "[serialization]") {
	test_helper("LOAD foo");
}
// below test cases are oriented towards multi-databases
// Catalog-qualified CREATE TABLE against the attached "new_db" database.
TEST_CASE("Test create_table with catalog", "[serialization]") {
	test_helper_multi_db("CREATE TABLE new_db.main.tbl(i INTEGER);");
}
// Catalog-qualified INSERT against the attached "new_db" database.
TEST_CASE("Test logical_insert with catalog", "[serialization]") {
	test_helper_multi_db("INSERT INTO new_db.main.tbl VALUES(1)", {"CREATE TABLE new_db.main.tbl (foo INTEGER)"});
}
// Catalog-qualified UPDATE against the attached "new_db" database.
TEST_CASE("Test logical_update with catalog", "[serialization]") {
	test_helper_multi_db("UPDATE new_db.main.tbl SET foo=42", {"CREATE TABLE new_db.main.tbl (foo INTEGER)"});
}

View File

@@ -0,0 +1,542 @@
#include "catch.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
// Broad coverage of the prepared statement API: rejecting empty statements,
// auto-prepared con.Query(sql, params...) with integers and strings, multiple
// positional parameters, all integer widths, and an explicit
// Prepare()/Execute() object that can be executed repeatedly.
TEST_CASE("Test prepared statements API", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	// prepare no statements
	REQUIRE_FAIL(con.Prepare(""));
	// PrepareAndExecute with no values
	duckdb::vector<Value> values;
	REQUIRE_FAIL(con.PendingQuery("", values, false));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE a (i TINYINT)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO a VALUES (11), (12), (13)"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE strings(s VARCHAR)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO strings VALUES (NULL), ('test')"));
	// query using a prepared statement
	// integer:
	result = con.Query("SELECT COUNT(*) FROM a WHERE i=$1", 12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	// strings:
	result = con.Query("SELECT COUNT(*) FROM strings WHERE s=$1", "test");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	// multiple parameters
	result = con.Query("SELECT COUNT(*) FROM a WHERE i>$1 AND i<$2", 10, 13);
	REQUIRE(CHECK_COLUMN(result, 0, {2}));
	// test various integer types
	result = con.Query("SELECT COUNT(*) FROM a WHERE i=$1", (int8_t)12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = con.Query("SELECT COUNT(*) FROM a WHERE i=$1", (int16_t)12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = con.Query("SELECT COUNT(*) FROM a WHERE i=$1", (int32_t)12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = con.Query("SELECT COUNT(*) FROM a WHERE i=$1", (int64_t)12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	// create a prepared statement and use it to query
	auto prepare = con.Prepare("SELECT COUNT(*) FROM a WHERE i=$1");
	result = prepare->Execute(12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = prepare->Execute(13);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	// exactly one distinct parameter ($1) was bound
	REQUIRE(prepare->named_param_map.size() == 1);
}
// Parameter types can be deduced from context at prepare time (1 + $1 forces
// a numeric type); a bare untyped parameter outside a prepared statement fails.
TEST_CASE("Test type resolution of function with parameter expressions", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	duckdb::unique_ptr<QueryResult> result;
	con.EnableQueryVerification();
	// can deduce type of prepared parameter here
	auto prepared = con.Prepare("select 1 + $1");
	REQUIRE(!prepared->error.HasError());
	result = prepared->Execute(1);
	REQUIRE(CHECK_COLUMN(result, 0, {2}));
	// no prepared statement
	REQUIRE_FAIL(con.SendQuery("SELECT ?"));
}
// A prepared statement does not pin its referenced table: another connection
// may drop the table, after which execution of the prepared statement fails.
TEST_CASE("Test prepared statements and dependencies", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db), con2(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE a(i TINYINT)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO a VALUES (11), (12), (13)"));
	// query using a prepared statement in con1
	result = con.Query("SELECT COUNT(*) FROM a WHERE i=$1", 12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	// now delete the table in con2
	REQUIRE_NO_FAIL(con2.Query("DROP TABLE a"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE a(i TINYINT)"));
	// keep a prepared statement around
	auto prepare = con.Prepare("SELECT COUNT(*) FROM a WHERE i=$1");
	// we can drop the table
	REQUIRE_NO_FAIL(con2.Query("DROP TABLE a"));
	// now the prepared statement fails when executing
	REQUIRE_FAIL(prepare->Execute(11));
}
// A live prepared statement does not prevent DROP TABLE from another
// connection; subsequent executions of the prepared statement then fail.
TEST_CASE("Dropping connection with prepared statement resets dependencies", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	auto con = make_uniq<Connection>(db);
	Connection con2(db);
	REQUIRE_NO_FAIL(con->Query("CREATE TABLE a(i TINYINT)"));
	REQUIRE_NO_FAIL(con->Query("INSERT INTO a VALUES (11), (12), (13)"));
	auto prepared = con->Prepare("SELECT COUNT(*) FROM a WHERE i=$1");
	result = prepared->Execute(12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	// we can drop the table
	REQUIRE_NO_FAIL(con2.Query("DROP TABLE a"));
	// after the table is dropped, the prepared statement no longer succeeds when run
	REQUIRE_FAIL(prepared->Execute(12));
	REQUIRE_FAIL(prepared->Execute(12));
}
// Altering a column's type after preparing triggers a rebind on the next
// Execute(); the prepared statement keeps working with the new schema.
TEST_CASE("Alter table and prepared statements", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	auto con = make_uniq<Connection>(db);
	Connection con2(db);
	REQUIRE_NO_FAIL(con->Query("CREATE TABLE a(i TINYINT)"));
	REQUIRE_NO_FAIL(con->Query("INSERT INTO a VALUES (11), (12), (13)"));
	auto prepared = con->Prepare("SELECT * FROM a WHERE i=$1");
	result = prepared->Execute(12);
	REQUIRE(CHECK_COLUMN(result, 0, {12}));
	// metadata exposed by the prepared statement matches the bound plan
	REQUIRE(prepared->ColumnCount() == 1);
	REQUIRE(prepared->GetStatementType() == StatementType::SELECT_STATEMENT);
	REQUIRE(prepared->GetTypes()[0].id() == LogicalTypeId::TINYINT);
	REQUIRE(prepared->GetNames()[0] == "i");
	// we can alter the type of the column
	REQUIRE_NO_FAIL(con2.Query("ALTER TABLE a ALTER i TYPE BIGINT USING i"));
	// after the table is altered, the return types change, but the rebind is still successful
	result = prepared->Execute(12);
	REQUIRE(CHECK_COLUMN(result, 0, {12}));
}
// Lifetime checks: a prepared statement keeps its connection (and the
// connection keeps the database) alive, so destruction order between
// DuckDB, Connection and PreparedStatement does not matter.
TEST_CASE("Test destructors of prepared statements", "[api]") {
	duckdb::unique_ptr<DuckDB> db;
	duckdb::unique_ptr<Connection> con;
	duckdb::unique_ptr<PreparedStatement> prepare;
	duckdb::unique_ptr<QueryResult> result;
	// test destruction of connection
	db = make_uniq<DuckDB>(nullptr);
	con = make_uniq<Connection>(*db);
	// create a prepared statement
	prepare = con->Prepare("SELECT $1::INTEGER+$2::INTEGER");
	// we can execute it
	result = prepare->Execute(3, 5);
	REQUIRE(CHECK_COLUMN(result, 0, {8}));
	// now destroy the connection
	con.reset();
	// we can still use the prepared statement: the connection is alive until the prepared statement is dropped
	REQUIRE_NO_FAIL(prepare->Execute(3, 5));
	// destroying the prepared statement is fine
	prepare.reset();
	// test destruction of db
	// create a connection and prepared statement again
	con = make_uniq<Connection>(*db);
	prepare = con->Prepare("SELECT $1::INTEGER+$2::INTEGER");
	// we can execute it
	result = prepare->Execute(3, 5);
	REQUIRE(CHECK_COLUMN(result, 0, {8}));
	// destroy the db
	db.reset();
	// we can still use the prepared statement
	REQUIRE_NO_FAIL(prepare->Execute(3, 5));
	// and the connection
	REQUIRE_NO_FAIL(con->Query("SELECT 42"));
	// we can also prepare new statements
	prepare = con->Prepare("SELECT $1::INTEGER+$2::INTEGER");
	REQUIRE(!prepare->HasError());
}
// Misuse cases: argument-count mismatches, parse errors in the prepared SQL,
// and attempting to prepare multiple statements at once — all must fail
// cleanly without corrupting the connection.
TEST_CASE("Test incorrect usage of prepared statements API", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE a (i TINYINT)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO a VALUES (11), (12), (13)"));
	// this fails if there is a mismatch between number of arguments in prepare and in variadic
	// too few:
	REQUIRE_FAIL(con.Query("SELECT COUNT(*) FROM a WHERE i=$1 AND i>$2", 11));
	// too many:
	REQUIRE_FAIL(con.Query("SELECT COUNT(*) FROM a WHERE i=$1 AND i>$2", 11, 13, 17));
	// prepare an SQL string with a parse error
	auto prepare = con.Prepare("SELEC COUNT(*) FROM a WHERE i=$1");
	// we cannot execute this prepared statement
	REQUIRE(prepare->HasError());
	REQUIRE_FAIL(prepare->Execute(12));
	// cannot prepare multiple statements at once
	prepare = con.Prepare("SELECT COUNT(*) FROM a WHERE i=$1; SELECT 42+$2;");
	REQUIRE(prepare->HasError());
	REQUIRE_FAIL(prepare->Execute(12));
	// also not in the Query syntax
	REQUIRE_FAIL(con.Query("SELECT COUNT(*) FROM a WHERE i=$1; SELECT 42+$2", 11));
}
// Several prepared statements can be open simultaneously on one connection
// and executed independently.
TEST_CASE("Test multiple prepared statements", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE a (i TINYINT)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO a VALUES (11), (12), (13)"));
	// test that we can have multiple open prepared statements at a time
	auto prepare = con.Prepare("SELECT COUNT(*) FROM a WHERE i=$1");
	auto prepare2 = con.Prepare("SELECT COUNT(*) FROM a WHERE i>$1");
	result = prepare->Execute(12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = prepare2->Execute(11);
	REQUIRE(CHECK_COLUMN(result, 0, {2}));
}
// Prepared statements created inside a transaction are invalidated when that
// transaction rolls back (the table they reference disappears with it).
TEST_CASE("Test prepared statements and transactions", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	// create prepared statements in a transaction
	REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE a (i TINYINT)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO a VALUES (11), (12), (13)"));
	auto prepare = con.Prepare("SELECT COUNT(*) FROM a WHERE i=$1");
	auto prepare2 = con.Prepare("SELECT COUNT(*) FROM a WHERE i>$1");
	result = prepare->Execute(12);
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = prepare2->Execute(11);
	REQUIRE(CHECK_COLUMN(result, 0, {2}));
	// now if we rollback our prepared statements are invalidated
	REQUIRE_NO_FAIL(con.Query("ROLLBACK"));
	REQUIRE_FAIL(prepare->Execute(12));
	REQUIRE_FAIL(prepare2->Execute(11));
}
// named_param_map reflects the number of distinct parameters, whether they
// use positional ($1, $2) or auto-numbered (?) syntax.
TEST_CASE("Test prepared statement parameter counting", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	// no parameters at all
	auto p0 = con.Prepare("SELECT 42");
	REQUIRE(!p0->HasError());
	REQUIRE(p0->named_param_map.empty());
	auto p1 = con.Prepare("SELECT $1::int");
	REQUIRE(!p1->HasError());
	REQUIRE(p1->named_param_map.size() == 1);
	// ? syntax counts the same as $1
	p1 = con.Prepare("SELECT ?::int");
	REQUIRE(!p1->HasError());
	REQUIRE(p1->named_param_map.size() == 1);
	auto p2 = con.Prepare("SELECT $1::int");
	REQUIRE(!p2->HasError());
	REQUIRE(p2->named_param_map.size() == 1);
	auto p3 = con.Prepare("SELECT ?::int, ?::string");
	REQUIRE(!p3->HasError());
	REQUIRE(p3->named_param_map.size() == 2);
	auto p4 = con.Prepare("SELECT $1::int, $2::string");
	REQUIRE(!p4->HasError());
	REQUIRE(p4->named_param_map.size() == 2);
}
// ANALYZE and VACUUM run (as no-ops) both directly and through the prepared
// statement path.
TEST_CASE("Test ANALYZE", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	// ANALYZE runs without errors, note that ANALYZE is actually just ignored
	REQUIRE_NO_FAIL(con.Query("ANALYZE"));
	REQUIRE_NO_FAIL(con.Query("VACUUM"));
	auto prep = con.Prepare("ANALYZE");
	REQUIRE(!prep->HasError());
	auto res = prep->Execute();
	REQUIRE(!res->HasError());
	prep = con.Prepare("VACUUM");
	REQUIRE(!prep->HasError());
	res = prep->Execute();
	REQUIRE(!res->HasError());
}
// Parameters bind correctly against all four DECIMAL physical widths
// (INT16/INT32/INT64/INT128 backing for widths 4, 9, 18 and 38).
TEST_CASE("Test DECIMAL with PreparedStatement", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	auto ps = con.Prepare("SELECT $1::DECIMAL(4,1), $2::DECIMAL(9,1), $3::DECIMAL(18,3), $4::DECIMAL(38,8)");
	result = ps->Execute(1.1, 100.1, 1401.123, "12481204981084098124.12398");
	REQUIRE(CHECK_COLUMN(result, 0, {1.1}));
	REQUIRE(CHECK_COLUMN(result, 1, {100.1}));
	REQUIRE(CHECK_COLUMN(result, 2, {1401.123}));
	REQUIRE(CHECK_COLUMN(result, 3, {12481204981084098124.12398}));
}
// Round-trips a BLOB containing (almost) every byte value through a prepared
// INSERT and verifies its stored length and content.
TEST_CASE("Test BLOB with PreparedStatement", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	// Build a blob with every 8-bit value except five characters:
	// '\0', '\n', '\15' (CR), ',' and '\32' (Ctrl+Z) -> 256 - 5 = 251 bytes.
	uint8_t num_chars = 256 - 5;
	auto blob_chars = make_unsafe_uniq_array<char>(num_chars);
	idx_t buf_idx = 0;
	// iterate over all 256 byte values; the original loop stopped at 255,
	// which skipped one value and left the final buffer byte uninitialized
	for (idx_t i = 0; i < 256; ++i) {
		char ch = static_cast<char>(i);
		// skip chars: '\0', new line, shift in, comma, and crtl+Z
		if (ch == '\0' || ch == '\n' || ch == '\15' || ch == ',' || ch == '\32') {
			continue;
		}
		blob_chars[buf_idx] = ch;
		++buf_idx;
	}
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE blobs (b BYTEA);"));
	// Insert blob values through a PreparedStatement
	Value blob_val = Value::BLOB(const_data_ptr_cast(blob_chars.get()), num_chars);
	duckdb::unique_ptr<PreparedStatement> ps = con.Prepare("INSERT INTO blobs VALUES (?::BYTEA)");
	ps->Execute(blob_val);
	REQUIRE(!ps->HasError());
	ps.reset();
	// Testing if the bytes are stored correctly
	result = con.Query("SELECT OCTET_LENGTH(b) FROM blobs");
	REQUIRE(CHECK_COLUMN(result, 0, {num_chars}));
	result = con.Query("SELECT count(b) FROM blobs");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	result = con.Query("SELECT b FROM blobs");
	REQUIRE(CHECK_COLUMN(result, 0, {blob_val}));
	blob_chars.reset();
}
// DATE parameters work both through SQL-level PREPARE/EXECUTE and through
// the C++ Prepare()/Execute() API.
TEST_CASE("PREPARE for INSERT with dates", "[prepared]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	// prepared DATE insert
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE dates(d DATE)"));
	REQUIRE_NO_FAIL(con.Query("PREPARE s1 AS INSERT INTO dates VALUES ($1)"));
	REQUIRE_NO_FAIL(con.Query("EXECUTE s1 (DATE '1992-01-01')"));
	result = con.Query("SELECT * FROM dates");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::DATE(1992, 1, 1)}));
	// same through the C++ API
	REQUIRE_NO_FAIL(con.Query("DELETE FROM dates"));
	auto prepared = con.Prepare("INSERT INTO dates VALUES ($1)");
	REQUIRE_NO_FAIL(prepared->Execute(Value::DATE(1992, 1, 3)));
	result = con.Query("SELECT * FROM dates");
	REQUIRE(CHECK_COLUMN(result, 0, {Value::DATE(1992, 1, 3)}));
}
// Preparing a multi-statement string fails; ExtractStatements can be used to
// split the string and prepare each statement individually.
TEST_CASE("PREPARE multiple statements", "[prepared]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	string query = "SELECT $1::INTEGER; SELECT $1::INTEGER;";
	// cannot prepare multiple statements like this
	auto prepared = con.Prepare(query);
	REQUIRE(prepared->HasError());
	// we can use ExtractStatements to execute the individual statements though
	auto statements = con.ExtractStatements(query);
	for (auto &statement : statements) {
		// slice the original text using the statement's location/length
		string stmt = query.substr(statement->stmt_location, statement->stmt_length);
		prepared = con.Prepare(stmt);
		REQUIRE(!prepared->HasError());
		result = prepared->Execute(1);
		REQUIRE(CHECK_COLUMN(result, 0, {1}));
	}
}
// Prepares the given query, asserts that preparation succeeded, and runs the
// prepared statement without parameters.
static duckdb::unique_ptr<QueryResult> TestExecutePrepared(Connection &con, string query) {
	auto statement = con.Prepare(query);
	REQUIRE(!statement->HasError());
	return statement->Execute();
}
// Verifies that every statement class - transactions, SELECT, DDL, PRAGMA,
// EXPLAIN, COPY, DML and even PREPARE/EXECUTE themselves - can be run through
// the prepared statement API (via the TestExecutePrepared helper above).
TEST_CASE("Prepare all types of statements", "[prepared]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	auto &fs = db.GetFileSystem();
	// target path for the COPY statement below; start from a clean state
	string csv_path = TestCreatePath("prepared_files");
	if (fs.DirectoryExists(csv_path)) {
		fs.RemoveDirectory(csv_path);
	}
	// TRANSACTION
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "BEGIN TRANSACTION"));
	// SELECT
	result = TestExecutePrepared(con, "SELECT 42");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	// CREATE_SCHEMA
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "CREATE SCHEMA test"));
	// CREATE_TABLE
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "CREATE TABLE test.a(i INTEGER)"));
	// CREATE_TABLE
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "CREATE TABLE b(i INTEGER)"));
	// CREATE_INDEX
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "CREATE INDEX i_index ON test.a(i)"));
	// CREATE_VIEW
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "CREATE VIEW v1 AS SELECT * FROM test.a WHERE i=2"));
	// CREATE_SEQUENCE
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "CREATE SEQUENCE seq"));
	// PRAGMA
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "PRAGMA table_info('b')"));
	// EXPLAIN
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "EXPLAIN SELECT 42"));
	// COPY
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "COPY test.a TO '" + csv_path + "'"));
	// INSERT
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "INSERT INTO test.a VALUES (1), (2), (3)"));
	// UPDATE
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "UPDATE test.a SET i=i+1"));
	// DELETE
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "DELETE FROM test.a WHERE i<4"));
	// PREPARE
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "PREPARE p1 AS SELECT * FROM test.a"));
	// EXECUTE
	result = TestExecutePrepared(con, "EXECUTE p1");
	REQUIRE(CHECK_COLUMN(result, 0, {4}));
	// DROP: tear everything down again (CASCADE drops the dependent index/view)
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "DROP SEQUENCE seq"));
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "DROP VIEW v1"));
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "DROP TABLE test.a CASCADE"));
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "DROP SCHEMA test CASCADE"));
	// TRANSACTION
	REQUIRE_NO_FAIL(TestExecutePrepared(con, "COMMIT"));
}
// The same '?' parameter can be bound with values of different types across
// executions of the same (auto-parameterized or explicitly prepared) query.
TEST_CASE("Test ambiguous prepared statement parameter types", "[api]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	// auto-parameterized queries with an integer, then a string
	result = con.Query("SELECT ?", 42);
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	result = con.Query("SELECT ?", "hello");
	REQUIRE(CHECK_COLUMN(result, 0, {"hello"}));
	// one explicitly prepared statement, executed with both types
	auto prepared = con.Prepare("SELECT ?");
	result = prepared->Execute(42);
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	result = prepared->Execute("hello");
	REQUIRE(CHECK_COLUMN(result, 0, {"hello"}));
}
// SET can be prepared with a parameter; parameter count and the validity of
// the supplied value are only checked at execution time.
TEST_CASE("Test prepared statements with SET", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	// create a prepared statement and use it to change a setting
	auto prepare = con.Prepare("SET default_null_order=$1");
	REQUIRE(prepare->success);
	// too many parameters
	REQUIRE_FAIL(prepare->Execute("xxx", "yyy"));
	// too few parameters
	REQUIRE_FAIL(prepare->Execute());
	// unsupported setting
	REQUIRE_FAIL(prepare->Execute("unsupported_mode"));
	// this works
	REQUIRE_NO_FAIL(prepare->Execute("NULLS FIRST"));
}
// A prepared statement must transparently rebind when the catalog changes
// between preparation and execution.
TEST_CASE("Test prepared statements that require rebind", "[api]") {
	DuckDB db(nullptr);
	Connection con1(db);
	con1.EnableQueryVerification();
	auto drop_stmt = con1.Prepare("DROP TABLE IF EXISTS t1");
	// replace the table from a second connection, invalidating the binding
	Connection con2(db);
	REQUIRE_NO_FAIL(con2.Query("CREATE OR REPLACE TABLE t1 (c1 varchar)"));
	// execution of the stale statement must still succeed
	REQUIRE_NO_FAIL(drop_stmt->Execute());
}
//! Minimal ClientContextState that opts in to rebinding: prepared statements
//! may be rebound instead of failing while this state is registered.
class TestExtensionState : public ClientContextState {
public:
	bool CanRequestRebind() override {
		return true;
	}
};
// Registering a state that can request rebinds must not break preparing and
// executing the different statement types.
TEST_CASE("Test prepared statements with extension that can request a rebind", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE OR REPLACE TABLE t1 (c1 INTEGER)"));
	// regression test, see https://github.com/duckdb/duckdb/pull/11096
	con.context->registered_state->Insert("test_extension", make_shared_ptr<TestExtensionState>());
	// SelectStatement
	REQUIRE_NO_FAIL(con.Prepare("SELECT ?")->Execute(42));
	// InsertStatement
	REQUIRE_NO_FAIL(con.Prepare("INSERT INTO t1 VALUES(?)")->Execute(42));
	// UpdateStatement
	REQUIRE_NO_FAIL(con.Prepare("UPDATE t1 SET c1 = ?")->Execute(43));
	// SetVariableStatement
	REQUIRE_NO_FAIL(con.Prepare("SET VARIABLE test_var = ?")->Execute(42));
}

View File

@@ -0,0 +1,294 @@
#ifndef DUCKDB_NO_THREADS
#include "catch.hpp"
#include "duckdb/common/progress_bar/progress_bar.hpp"
#include "duckdb/main/client_context.hpp"
#include "test_helpers.hpp"
#include <duckdb/execution/executor.hpp>
#include <future>
#include <thread>
using namespace duckdb;
using namespace std;
//! Polls ClientContext::GetQueryProgress() from a background thread while a
//! query runs and records violations of the progress invariants:
//! - the reported percentage never exceeds 100
//! - rows processed never exceed the total rows to process
//! - when polling stops, rows processed equals the total
class TestProgressBar {
	//! Captures the first failing check as a callback so it can be replayed
	//! (and REQUIREd) on the main thread after the checker thread is joined.
	class TestFailure {
		using failure_callback = std::function<void()>;
	public:
		TestFailure() : callback(nullptr) {
		}
	public:
		bool IsSet() {
			return callback != nullptr;
		}
		//! Stores the failure; only the first recorded error is kept.
		void SetError(failure_callback failure) {
			if (!callback) {
				callback = failure;
			}
		}
		//! Replays the recorded failure (must only be called when IsSet()).
		void ThrowError() {
			D_ASSERT(IsSet());
			callback();
		}
	private:
		failure_callback callback;
	};
public:
	explicit TestProgressBar(ClientContext *context) : context(context) {
	}
	//! Context of the connection whose progress is observed (not owned)
	ClientContext *context;
	//! Signals the checker thread to terminate
	atomic<bool> stop;
	std::thread check_thread;
	TestFailure error;
	//! Body of the checker thread: samples the query progress every 10ms.
	void CheckProgressThread() {
		double prev_percentage = -1;
		uint64_t total_cardinality = 0;
		uint64_t cur_rows_read = 0;
		while (!stop) {
			std::this_thread::sleep_for(std::chrono::milliseconds(10));
			auto query_progress = context->GetQueryProgress();
			double new_percentage = query_progress.GetPercentage();
			// NOTE(review): prev_percentage is never updated inside this loop, so it
			// stays at -1 and this monotonicity check can never fire - confirm whether
			// "prev_percentage = new_percentage;" was intended after this check
			if (new_percentage < prev_percentage && new_percentage != -1) {
				error.SetError([new_percentage, prev_percentage]() { REQUIRE(new_percentage >= prev_percentage); });
			}
			if (new_percentage > 100) {
				error.SetError([new_percentage]() { REQUIRE(new_percentage <= 100); });
			}
			cur_rows_read = query_progress.GetRowsProcesseed();
			total_cardinality = query_progress.GetTotalRowsToProcess();
			if (cur_rows_read > total_cardinality) {
				error.SetError([cur_rows_read, total_cardinality]() { REQUIRE(cur_rows_read <= total_cardinality); });
			}
		}
		// once polling stops, all rows should have been processed
		if (cur_rows_read != total_cardinality) {
			if (std::getenv("FORCE_ASYNC_SINK_SOURCE") != nullptr) {
				// presumably the final progress update may not be observed under
				// forced async sinks/sources - intentionally skipped here
				return;
			}
			error.SetError([cur_rows_read, total_cardinality]() { REQUIRE(cur_rows_read == total_cardinality); });
		}
	}
	//! Launches the checker thread; call right before issuing the query.
	void Start() {
		stop = false;
		check_thread = std::thread(&TestProgressBar::CheckProgressThread, this);
	}
	//! Stops and joins the checker thread, then replays any recorded failure.
	void End() {
		stop = true;
		check_thread.join();
		if (error.IsSet()) {
			error.ThrowError();
			// This should never be reached, ThrowError() should contain a failing REQUIRE statement
			REQUIRE(false);
		}
	}
};
// Runs a battery of small queries (aggregation, join, subqueries, streaming)
// while TestProgressBar polls the query progress from a background thread;
// first single-threaded, then again with 2 threads and verify_parallelism.
TEST_CASE("Test Progress Bar Fast", "[progress-bar]") {
	DuckDB db(nullptr);
	Connection con(db);
	// GetQueryProgress must be callable even when no query is running
	REQUIRE_NOTHROW(con.context->GetQueryProgress());
	TestProgressBar test_progress(con.context.get());
	REQUIRE_NOTHROW(con.context->GetQueryProgress());
	REQUIRE_NO_FAIL(con.Query("create table tbl as select range a, mod(range,10) b from range(10000);"));
	REQUIRE_NO_FAIL(con.Query("create table tbl_2 as select range a from range(10000);"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA progress_bar_time=10"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA disable_print_progress_bar"));
	// Simple Aggregation
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl"));
	test_progress.End();
	// Simple Join
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)"));
	test_progress.End();
	// Subquery
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(a) from tbl_2)"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(b) from tbl)"));
	test_progress.End();
	// Stream result
	test_progress.Start();
	auto result = con.SendQuery("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)");
	test_progress.End();
	REQUIRE_NO_FAIL(*result);
	// Test Multiple threads: repeat the same battery with parallel execution
	REQUIRE_NO_FAIL(con.Query("PRAGMA threads=2"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA verify_parallelism"));
	// Simple Aggregation
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl"));
	test_progress.End();
	// Simple Join
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)"));
	test_progress.End();
	// Subquery
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(a) from tbl_2)"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(b) from tbl)"));
	test_progress.End();
	// Stream result
	test_progress.Start();
	result = con.SendQuery("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)");
	test_progress.End();
	REQUIRE_NO_FAIL(*result);
}
// Same battery as "Test Progress Bar Fast" but against 10M-row tables and with
// 4 threads; the "[.]" tag hides this slow test unless explicitly requested.
TEST_CASE("Test Progress Bar", "[progress-bar][.]") {
	DuckDB db(nullptr);
	Connection con(db);
	TestProgressBar test_progress(con.context.get());
	REQUIRE_NO_FAIL(con.Query("create table tbl as select range a, mod(range,10) b from range(10000000);"));
	REQUIRE_NO_FAIL(con.Query("create table tbl_2 as select range a from range(10000000);"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA progress_bar_time=10"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA disable_print_progress_bar"));
	// Simple Aggregation
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl"));
	test_progress.End();
	// Simple Join
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)"));
	test_progress.End();
	// Subquery
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(a) from tbl_2)"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(b) from tbl)"));
	test_progress.End();
	// Stream result
	test_progress.Start();
	auto result = con.SendQuery("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)");
	test_progress.End();
	REQUIRE_NO_FAIL(*result);
	// Test Multiple threads: repeat the same battery with parallel execution
	REQUIRE_NO_FAIL(con.Query("PRAGMA threads=4"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA verify_parallelism"));
	// Simple Aggregation
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl"));
	test_progress.End();
	// Simple Join
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)"));
	test_progress.End();
	// Subquery
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(a) from tbl_2)"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("select count(*) from tbl where a = (select min(b) from tbl)"));
	test_progress.End();
	// Stream result
	test_progress.Start();
	result = con.SendQuery("select count(*) from tbl inner join tbl_2 on (tbl.a = tbl_2.a)");
	test_progress.End();
	REQUIRE_NO_FAIL(*result);
}
// Checks progress reporting for CSV reads: CTAS, INSERT ... SELECT and COPY
// from a CSV file, single-threaded first, then with 4 threads and
// verify_parallelism. The "[.]" tag hides this slow test by default.
TEST_CASE("Test Progress Bar CSV", "[progress-bar][.]") {
	DuckDB db(nullptr);
	Connection con(db);
	TestProgressBar test_progress(con.context.get());
	REQUIRE_NO_FAIL(con.Query("PRAGMA progress_bar_time=1"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA disable_print_progress_bar"));
	// Create Tables From CSVs
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/test/test.csv')"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(
	    con.Query("CREATE TABLE test_2 AS SELECT * FROM read_csv('data/csv/test/test.csv', columns=STRUCT_PACK(a "
	              ":= 'INTEGER', b := 'INTEGER', c := 'VARCHAR'), sep=',', auto_detect='false')"));
	test_progress.End();
	// Insert into existing tables
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("INSERT INTO test SELECT * FROM read_csv_auto('data/csv/test/test.csv')"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(
	    con.Query("INSERT INTO test SELECT * FROM read_csv('data/csv/test/test.csv', columns=STRUCT_PACK(a := "
	              "'INTEGER', b := 'INTEGER', c := 'VARCHAR'), sep=',', auto_detect='false')"));
	test_progress.End();
	// copy from
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("COPY test FROM 'data/csv/test/test.csv'"));
	test_progress.End();
	// Repeat but in parallel
	REQUIRE_NO_FAIL(con.Query("DROP TABLE test"));
	REQUIRE_NO_FAIL(con.Query("DROP TABLE test_2"));
	// Test Multiple threads
	REQUIRE_NO_FAIL(con.Query("PRAGMA threads=4"));
	REQUIRE_NO_FAIL(con.Query("PRAGMA verify_parallelism"));
	// Create Tables From CSVs
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE test AS SELECT * FROM read_csv_auto ('data/csv/test/test.csv')"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(
	    con.Query("CREATE TABLE test_2 AS SELECT * FROM read_csv('data/csv/test/test.csv', columns=STRUCT_PACK(a "
	              ":= 'INTEGER', b := 'INTEGER', c := 'VARCHAR'), sep=',', auto_detect='false')"));
	test_progress.End();
	// Insert into existing tables
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("INSERT INTO test SELECT * FROM read_csv_auto('data/csv/test/test.csv')"));
	test_progress.End();
	test_progress.Start();
	REQUIRE_NO_FAIL(
	    con.Query("INSERT INTO test SELECT * FROM read_csv('data/csv/test/test.csv', columns=STRUCT_PACK(a := "
	              "'INTEGER', b := 'INTEGER', c := 'VARCHAR'), sep=',', auto_detect='false')"));
	test_progress.End();
	// copy from
	test_progress.Start();
	REQUIRE_NO_FAIL(con.Query("COPY test FROM 'data/csv/test/test.csv'"));
	test_progress.End();
}
#endif

View File

@@ -0,0 +1,27 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include <iostream>
using namespace duckdb;
using namespace std;
// Checks that profiling information can be retrieved in both the default and
// the JSON output format after running a query.
TEST_CASE("Test query profiler", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	string output;
	con.EnableQueryVerification();
	con.EnableProfiling();
	// don't pollute the console with profiler info.
	con.context->config.emit_profiler_output = false;
	REQUIRE_NO_FAIL(con.Query("SELECT * FROM (SELECT 42) tbl1, (SELECT 33) tbl2"));
	// default output format
	output = con.GetProfilingInformation();
	REQUIRE(!output.empty());
	// JSON output format
	output = con.GetProfilingInformation(ProfilerPrintFormat::JSON);
	REQUIRE(!output.empty());
}

View File

@@ -0,0 +1,129 @@
#include "catch.hpp"
#include "duckdb/common/file_system.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
// Exercises AccessMode::READ_ONLY: queries, EXPLAIN, prepared statements and
// temporary objects must work, while any write to persistent storage (DDL,
// DML) must fail. Also checks that a read-only database cannot be created
// in-memory or over a non-existent file.
TEST_CASE("Test connection using a read only database", "[readonly]") {
	auto dbdir = TestCreatePath("read_only_test");
	duckdb::unique_ptr<DuckDB> db, db2;
	duckdb::unique_ptr<Connection> con;
	// make sure the database does not exist
	DeleteDatabase(dbdir);
	DBConfig readonly_config;
	readonly_config.options.use_temporary_directory = false;
	readonly_config.options.access_mode = AccessMode::READ_ONLY;
	// cannot create read-only memory database
	REQUIRE_THROWS(db = make_uniq<DuckDB>(nullptr, &readonly_config));
	// cannot create a read-only database in a new directory
	REQUIRE_THROWS(db = make_uniq<DuckDB>(dbdir, &readonly_config));
	// create the database file and initialize it with data
	db = make_uniq<DuckDB>(dbdir);
	con = make_uniq<Connection>(*db);
	REQUIRE_NO_FAIL(con->Query("CREATE TABLE integers(i INTEGER)"));
	REQUIRE_NO_FAIL(con->Query("INSERT INTO integers VALUES (1), (2), (3), (4), (5)"));
	con.reset();
	db.reset();
	// now connect in read-only mode
	REQUIRE_NOTHROW(db = make_uniq<DuckDB>(dbdir, &readonly_config));
	con = make_uniq<Connection>(*db);
	// we can query the database
	auto result = con->Query("SELECT * FROM integers ORDER BY i");
	REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3, 4, 5}));
	// however, we can't perform DDL statements
	REQUIRE_FAIL(con->Query("CREATE TABLE integers2(i INTEGER)"));
	REQUIRE_FAIL(con->Query("ALTER TABLE integers RENAME COLUMN i TO k"));
	REQUIRE_FAIL(con->Query("DROP TABLE integers"));
	REQUIRE_FAIL(con->Query("CREATE SEQUENCE seq"));
	REQUIRE_FAIL(con->Query("CREATE VIEW v1 AS SELECT * FROM integers"));
	// neither can we insert/update/delete data
	REQUIRE_FAIL(con->Query("INSERT INTO integers VALUES (3)"));
	REQUIRE_FAIL(con->Query("UPDATE integers SET i=5"));
	REQUIRE_FAIL(con->Query("DELETE FROM integers"));
	// we can run explain queries
	REQUIRE_NO_FAIL(con->Query("EXPLAIN SELECT * FROM integers"));
	// and run prepared statements
	REQUIRE_NO_FAIL(con->Query("PREPARE v1 AS SELECT * FROM integers"));
	REQUIRE_NO_FAIL(con->Query("EXECUTE v1"));
	REQUIRE_NO_FAIL(con->Query("DEALLOCATE v1"));
	// we can also prepare a DDL/update statement
	REQUIRE_NO_FAIL(con->Query("PREPARE v1 AS INSERT INTO integers VALUES ($1)"));
	// however, executing it fails then!
	REQUIRE_FAIL(con->Query("EXECUTE v1(3)"));
	// we can create, alter and query temporary tables however
	REQUIRE_NO_FAIL(con->Query("CREATE TEMPORARY TABLE integers2(i INTEGER)"));
	REQUIRE_NO_FAIL(con->Query("INSERT INTO integers2 VALUES (1), (2), (3), (4), (5)"));
	REQUIRE_NO_FAIL(con->Query("UPDATE integers2 SET i=i+1"));
	// DELETE reports the number of deleted rows
	result = con->Query("DELETE FROM integers2 WHERE i=3");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	REQUIRE_NO_FAIL(con->Query("ALTER TABLE integers2 RENAME COLUMN i TO k"));
	result = con->Query("SELECT k FROM integers2 ORDER BY 1");
	REQUIRE(CHECK_COLUMN(result, 0, {2, 4, 5, 6}));
	REQUIRE_NO_FAIL(con->Query("DROP TABLE integers2"));
	// also temporary views and sequences
	REQUIRE_NO_FAIL(con->Query("CREATE TEMPORARY SEQUENCE seq"));
	result = con->Query("SELECT nextval('seq')");
	REQUIRE(CHECK_COLUMN(result, 0, {1}));
	REQUIRE_NO_FAIL(con->Query("DROP SEQUENCE seq"));
	REQUIRE_NO_FAIL(con->Query("CREATE TEMPORARY VIEW v1 AS SELECT 42"));
	result = con->Query("SELECT * FROM v1");
	REQUIRE(CHECK_COLUMN(result, 0, {42}));
	REQUIRE_NO_FAIL(con->Query("DROP VIEW v1"));
	con.reset();
	db.reset();
	// FIXME: these tests currently don't work as we don't do any locking of the database directory
	// this should be fixed with the new storage
	// we can connect multiple read only databases to the same dbdir
	// REQUIRE_NOTHROW(db = make_uniq<DuckDB>(dbdir, true));
	// REQUIRE_NOTHROW(db2 = make_uniq<DuckDB>(dbdir, true));
	// db.reset();
	// db2.reset();
	// // however, if there is read-only database, we can't connect a read-write database
	// REQUIRE_NOTHROW(db = make_uniq<DuckDB>(dbdir, true));
	// REQUIRE_THROWS(db2 = make_uniq<DuckDB>(dbdir));
	// db.reset();
	// db2.reset();
	// // if we add a read-write database first, we can't add a reading database afterwards either
	// REQUIRE_NOTHROW(db = make_uniq<DuckDB>(dbdir));
	// REQUIRE_THROWS(db2 = make_uniq<DuckDB>(dbdir, true));
	// db.reset();
	// db2.reset();
	DeleteDatabase(dbdir);
}
// CreateView on a table-function relation must succeed even when the database
// is opened read-only.
TEST_CASE("Test view creation using a read only database", "[readonly]") {
	auto db_path = TestCreatePath("read_only_view_test");
	// start from a clean slate
	DeleteDatabase(db_path);
	DBConfig readonly_config;
	readonly_config.options.use_temporary_directory = false;
	readonly_config.options.access_mode = AccessMode::READ_ONLY;
	{
		// the database file must exist before it can be opened read-only
		auto db_rw = DuckDB(db_path);
	}
	auto db = make_uniq<DuckDB>(db_path, &readonly_config);
	auto con = make_uniq<Connection>(*db);
	REQUIRE_NOTHROW(con->TableFunction("duckdb_tables")->CreateView("boo", true, true));
	con.reset();
	db.reset();
	DeleteDatabase(db_path);
}

File diff suppressed because it is too large Load Diff

292
external/duckdb/test/api/test_reset.cpp vendored Normal file
View File

@@ -0,0 +1,292 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include <iostream>
#include <map>
#include <set>
using namespace duckdb;
using namespace std;
//! Pairs the value passed to SET with the value expected back from duckdb_settings().
struct OptionValuePair {
	OptionValuePair() {
	}
	//! Single-value form: the setting is expected to echo the input back verbatim.
	//! Intentionally implicit so value-map entries can list plain values.
	//! The value is copied into 'input' and then moved into 'output' (members are
	//! initialized in declaration order), avoiding one extra copy.
	OptionValuePair(Value val) : input(val), output(std::move(val)) {
	}
	OptionValuePair(Value input, Value output) : input(std::move(input)), output(std::move(output)) {
	}
	Value input;
	Value output;
};
//! A collection of input/expected-output pairs used to test a single configuration option.
struct OptionValueSet {
	OptionValueSet() {
	}
	//! A single value that is expected to round-trip unchanged
	OptionValueSet(Value val) {
		pairs.emplace_back(std::move(val));
	}
	//! A single input value with a differing expected output
	OptionValueSet(Value input, Value output) {
		pairs.emplace_back(std::move(input), std::move(output));
	}
	//! A list of string values, each expected to round-trip unchanged
	OptionValueSet(duckdb::vector<std::string> values) {
		for (auto &value : values) {
			pairs.emplace_back(value);
		}
	}
	OptionValueSet(duckdb::vector<OptionValuePair> pairs_p) : pairs(std::move(pairs_p)) {
	}
	duckdb::vector<OptionValuePair> pairs;
};
//! Forward declaration: REQUIREs that two values are equal, reporting the option name and line on failure.
void RequireValueEqual(const string &option, const Value &left, const Value &right, int line);
//! Convenience macro that injects the call site's line number into the failure message.
#define REQUIRE_VALUE_EQUAL(option, lhs, rhs) RequireValueEqual(option, lhs, rhs, __LINE__)
//! Returns the test value(s) to SET for the given configuration option.
//! Every option that is not excluded by OptionIsExcludedFromTest must either
//! appear in the value map below or have a boolean/integral type for which a
//! generic test value can be generated.
OptionValueSet GetValueForOption(const string &name, const LogicalType &type) {
	static unordered_map<string, OptionValueSet> value_map = {
	    {"threads", {Value::BIGINT(42), Value::BIGINT(42)}},
	    {"checkpoint_threshold", {"4.0 GiB"}},
	    {"debug_checkpoint_abort", {{"none", "before_truncate", "before_header", "after_free_list_write"}}},
	    {"default_collation", {"nocase"}},
	    {"default_order", {"DESC"}},
	    {"default_null_order", {"NULLS_FIRST"}},
	    {"disabled_compression_methods", {"RLE"}},
	    {"disabled_optimizers", {"extension"}},
	    {"debug_force_external", {Value(true)}},
	    {"old_implicit_casting", {Value(true)}},
	    {"prefer_range_joins", {Value(true)}},
	    {"allow_persistent_secrets", {Value(false)}},
	    {"secret_directory", {"/tmp/some/path"}},
	    {"default_secret_storage", {"custom_storage"}},
	    {"custom_extension_repository", {"duckdb.org/no-extensions-here", "duckdb.org/no-extensions-here"}},
	    {"autoinstall_extension_repository", {"duckdb.org/no-extensions-here", "duckdb.org/no-extensions-here"}},
	    {"lambda_syntax", {EnumUtil::ToString(LambdaSyntax::DISABLE_SINGLE_ARROW)}},
	    {"allow_parser_override_extension", {"fallback"}},
	    {"profiling_coverage", {EnumUtil::ToString(ProfilingCoverage::ALL)}},
#ifdef DUCKDB_EXTENSION_AUTOLOAD_DEFAULT
	    {"autoload_known_extensions", {!DUCKDB_EXTENSION_AUTOLOAD_DEFAULT}},
#else
	    {"autoload_known_extensions", {true}},
#endif
#ifdef DUCKDB_EXTENSION_AUTOINSTALL_DEFAULT
	    {"autoinstall_known_extensions", {!DUCKDB_EXTENSION_AUTOINSTALL_DEFAULT}},
#else
	    {"autoinstall_known_extensions", {true}},
#endif
	    {"enable_profiling", {"json"}},
	    {"explain_output", {{"all", "optimized_only", "physical_only"}}},
	    {"file_search_path", {"test"}},
	    {"force_compression", {"uncompressed", "Uncompressed"}},
	    {"home_directory", {"test"}},
	    {"allow_extensions_metadata_mismatch", {"true"}},
	    {"extension_directory", {"test"}},
	    {"max_expression_depth", {50}},
	    {"max_memory", {"4.0 GiB"}},
	    {"max_temp_directory_size", {"10.0 GiB"}},
	    {"merge_join_threshold", {73}},
	    {"nested_loop_join_threshold", {73}},
	    {"memory_limit", {"4.0 GiB"}},
	    {"storage_compatibility_version", {"v0.10.0"}},
	    {"ordered_aggregate_threshold", {Value::UBIGINT(idx_t(1) << 12)}},
	    {"null_order", {"NULLS_FIRST"}},
	    {"debug_verify_vector", {"dictionary_expression"}},
	    {"perfect_ht_threshold", {0}},
	    {"pivot_filter_threshold", {999}},
	    {"pivot_limit", {999}},
	    {"partitioned_write_flush_threshold", {123}},
	    {"preserve_identifier_case", {false}},
	    {"preserve_insertion_order", {false}},
	    {"profile_output", {"test"}},
	    {"profiling_mode", {"detailed"}},
	    {"disabled_log_types", {"blabla"}},
	    // NOTE: a duplicate "enabled_log_types" entry was removed here; duplicate
	    // keys in an unordered_map initializer list are silently ignored anyway
	    {"enabled_log_types", {"blabla"}},
	    {"enable_logging", {true}},
	    {"logging_mode", {"ENABLE_SELECTED"}},
	    {"logging_level", {"FATAL"}},
	    {"logging_storage", {"stdout"}},
	    {"enable_progress_bar_print", {false}},
	    {"scalar_subquery_error_on_multiple_rows", {false}},
	    {"ieee_floating_point_ops", {false}},
	    {"progress_bar_time", {0}},
	    {"temp_directory", {"tmp"}},
	    {"wal_autocheckpoint", {"4.0 GiB"}},
	    {"force_bitpacking_mode", {"constant"}},
	    {"enable_http_logging", {false}},
	    {"http_proxy", {"localhost:80"}},
	    {"http_proxy_username", {"john"}},
	    {"http_proxy_password", {"doe"}},
	    {"allocator_flush_threshold", {"4.0 GiB"}},
	    {"allocator_bulk_deallocation_flush_threshold", {"4.0 GiB"}},
	    {"arrow_output_version", {"1.5"}},
	    {"enable_external_file_cache", {false}},
	    {"experimental_metadata_reuse", {false}},
	    {"storage_block_prefetch", {"always_prefetch"}},
	    {"pin_threads", {"off"}}};
	// Every option that's not excluded has to be part of this map
	if (!value_map.count(name)) {
		// fall back to a generic value for boolean/integral settings
		switch (type.id()) {
		case LogicalTypeId::BOOLEAN:
			return OptionValueSet(Value::BOOLEAN(true));
		case LogicalTypeId::TINYINT:
		case LogicalTypeId::SMALLINT:
		case LogicalTypeId::INTEGER:
		case LogicalTypeId::BIGINT:
		case LogicalTypeId::UTINYINT:
		case LogicalTypeId::USMALLINT:
		case LogicalTypeId::UINTEGER:
		case LogicalTypeId::UBIGINT:
			return OptionValueSet(Value::Numeric(type, 42));
		default:
			break;
		}
		// intentionally failing REQUIRE: the option is missing a test value
		REQUIRE(name == "MISSING_FROM_MAP");
	}
	return value_map[name];
}
//! Returns true for options that cannot be round-tripped through SET/RESET in
//! this test, e.g. options that are locked while the database is running, are
//! aliases, or are covered by dedicated tests elsewhere.
//! (A duplicate "custom_user_agent" entry was removed; duplicates in an
//! unordered_set initializer list are silently ignored anyway.)
bool OptionIsExcludedFromTest(const std::string &name) {
	static const std::unordered_set<std::string> excluded_options = {
	    "access_mode",
	    "allowed_directories",
	    "allowed_paths",
	    "schema",
	    "search_path",
	    "debug_window_mode",
	    "experimental_parallel_csv",
	    "lock_configuration",            // cant change this while db is running
	    "disabled_filesystems",          // cant change this while db is running
	    "enable_external_access",        // cant change this while db is running
	    "allow_unsigned_extensions",     // cant change this while db is running
	    "allow_community_extensions",    // cant change this while db is running
	    "allow_unredacted_secrets",      // cant change this while db is running
	    "disable_database_invalidation", // cant change this while db is running
	    "temp_file_encryption",
	    "enable_object_cache",
	    "streaming_buffer_size",
	    "log_query_path",
	    "password",
	    "username",
	    "user",
	    "external_threads", // tested in test_threads.cpp
	    "profiling_output", // just an alias
	    "duckdb_api",
	    "custom_user_agent",
	    "custom_profiling_settings",
	    "default_block_size",
	    "index_scan_percentage",
	    "scheduler_process_partial",
	    "http_logging_output",
	    "enable_profiling",
	    "enable_progress_bar",
	    "enable_progress_bar_print",
	    "progress_bar_time",
	    "index_scan_max_count",
	    "profiling_mode"};
	return excluded_options.count(name) == 1;
}
//! Compares two values using IS NOT DISTINCT FROM semantics (NULLs compare equal).
bool ValueEqual(const Value &left, const Value &right) {
	return Value::NotDistinctFrom(left, right);
}
//! REQUIREs that two values are equal, printing the option name and the call
//! site's line number on mismatch.
void RequireValueEqual(const string &option_name, const Value &left, const Value &right, int line) {
	if (!ValueEqual(left, right)) {
		auto message = StringUtil::Format("\nLINE[%d] (Option:%s) | Expected left:'%s' and right:'%s' to be equal",
		                                  line, option_name, left.ToString(), right.ToString());
		cerr << message << endl;
		REQUIRE(false);
	}
}
//! Reads the current value of a setting from duckdb_settings() and casts it to
//! the setting's logical type.
Value GetValueForSetting(Connection &con, const string &name, const LogicalType &type) {
	auto query_result =
	    con.Query(StringUtil::Format("SELECT value FROM duckdb_settings() WHERE name = %s", SQLString(name)));
	string setting_value;
	for (auto &row : *query_result) {
		setting_value = row.GetValue<string>(0);
	}
	return Value(setting_value).CastAs(*con.context, type);
}
//! New options should be added to the value_map in GetValueForOption
//! Or added to the 'excluded_options' in OptionIsExcludedFromTest
// For every (non-excluded) setting: SET it to a known value, verify the value
// is reported back, RESET it and verify the original value is restored.
TEST_CASE("Test RESET statement for ClientConfig options", "[api]") {
	// Create a connection
	DBConfig config;
	config.options.load_extensions = false;
	DuckDB db(nullptr, &config);
	Connection con(db);
	con.Query("BEGIN TRANSACTION");
	con.Query("PRAGMA disable_profiling");
	struct ResetSettingOption {
		string name;
		Value value;
		LogicalType type;
	};
	// collect all settings (and their current values) that should be round-tripped
	duckdb::vector<ResetSettingOption> options;
	auto result = con.Query("SELECT name, value, input_type FROM duckdb_settings()");
	for (auto &row : *result) {
		ResetSettingOption option;
		option.name = row.GetValue<string>(0);
		option.type = DBConfig::ParseLogicalType(row.GetValue<string>(2));
		if (row.IsNull(1)) {
			option.value = Value(option.type);
		} else {
			Value str_val = Value(row.GetValue<string>(1));
			option.value = str_val.CastAs(*con.context, option.type);
		}
		if (OptionIsExcludedFromTest(option.name)) {
			continue;
		}
		options.push_back(std::move(option));
	}
	for (auto &option : options) {
		auto value_set = GetValueForOption(option.name, option.type);
		// verify that at least one value is different
		bool any_different = false;
		// renamed from "options" to avoid shadowing the outer vector being iterated
		string matching_values;
		for (auto &value_pair : value_set.pairs) {
			if (!ValueEqual(option.value, value_pair.output)) {
				any_different = true;
			} else {
				if (!matching_values.empty()) {
					matching_values += ", ";
				}
				matching_values += value_pair.output.ToString();
			}
		}
		if (!any_different) {
			auto error = StringUtil::Format(
			    "\n(Option:%s) | Expected original value '%s' and provided option '%s' to be different", option.name,
			    option.value.ToString(), matching_values);
			cerr << error << endl;
			REQUIRE(false);
		}
		auto original_value = GetValueForSetting(con, option.name, option.type);
		for (auto &value_pair : value_set.pairs) {
			// Get the new value for the option
			auto input = value_pair.input.CastAs(*con.context, option.type);
			// Set the new option
			REQUIRE_NO_FAIL(con.Query(StringUtil::Format("SET %s = %s", option.name, input.ToSQLString())));
			auto changed_value = GetValueForSetting(con, option.name, option.type);
			// Get the value of the option again
			REQUIRE_VALUE_EQUAL(option.name, changed_value, value_pair.output);
			// reset the option again
			REQUIRE_NO_FAIL(con.Query(StringUtil::Format("RESET %s", option.name)));
			auto reset_value = GetValueForSetting(con, option.name, option.type);
			// Get the reset value of the option
			REQUIRE_VALUE_EQUAL(option.name, reset_value, original_value);
		}
	}
}

View File

@@ -0,0 +1,225 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/common/types/date.hpp"
#include "duckdb/common/types/time.hpp"
#include "duckdb/common/types/timestamp.hpp"
using namespace duckdb;
using namespace std;
// Basic QueryResult API: Equals() between materialized results and ToString()
// on materialized, streaming and errored results.
TEST_CASE("Test results API", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	// two identical queries produce equal results
	auto first = con.Query("SELECT 42");
	auto second = con.Query("SELECT 42");
	REQUIRE(first->Equals(*second));
	// different outputs are not equal
	first = con.Query("SELECT 42");
	second = con.Query("SELECT 43");
	REQUIRE(!first->Equals(*second));
	// a streaming result can be rendered as a string
	auto stream_result = con.SendQuery("SELECT 42");
	auto rendered = stream_result->ToString();
	REQUIRE(!rendered.empty());
	// so can a materialized result
	first = con.Query("SELECT 42");
	rendered = first->ToString();
	REQUIRE(!rendered.empty());
	// and an errored result (note the intentional syntax error)
	first = con.Query("SELEC 42");
	rendered = first->ToString();
	REQUIRE(!rendered.empty());
}
// Row-wise iteration over a materialized result via the range-for interface.
TEST_CASE("Test iterating over results", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE data(i INTEGER, j VARCHAR)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO data VALUES (1, 'hello'), (2, 'test')"));
	duckdb::vector<int> expected_i = {1, 2};
	duckdb::vector<string> expected_j = {"hello", "test"};
	auto result = con.Query("SELECT * FROM data;");
	idx_t rows_seen = 0;
	for (auto &row : *result) {
		// row.row is the 0-based index of the current row
		REQUIRE(row.GetValue<int>(0) == expected_i[row.row]);
		REQUIRE(row.GetValue<string>(1) == expected_j[row.row]);
		rows_seen++;
	}
	REQUIRE(rows_seen == 2);
}
// GetValue<T> must convert each stored column type (INTEGER, VARCHAR, wide and
// narrow DECIMAL, HUGEINT, DOUBLE) to int, int64_t, double and string.
TEST_CASE("Test different result types", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(
	    con.Query("CREATE TABLE data(i INTEGER, j VARCHAR, k DECIMAL(38,1), l DECIMAL(18,3), m HUGEINT, n DOUBLE)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO data VALUES (23, '17.1', 94289, 9842, 4982412, 17.3)"));
	idx_t row_count = 0;
	auto result = con.Query("SELECT * FROM data;");
	for (auto &row : *result) {
		REQUIRE(row.GetValue<int>(0) == 23);
		REQUIRE(row.GetValue<int64_t>(0) == 23);
		REQUIRE(row.GetValue<double>(0) == 23);
		REQUIRE(row.GetValue<string>(0) == "23");
		// the VARCHAR '17.1' is truncated when fetched as an integer
		REQUIRE(row.GetValue<int>(1) == 17);
		REQUIRE(row.GetValue<int64_t>(1) == 17);
		REQUIRE(row.GetValue<double>(1) == 17.1);
		REQUIRE(row.GetValue<string>(1) == "17.1");
		REQUIRE(row.GetValue<int>(2) == 94289);
		REQUIRE(row.GetValue<int64_t>(2) == 94289);
		REQUIRE(row.GetValue<double>(2) == 94289);
		REQUIRE(row.GetValue<int>(3) == 9842);
		REQUIRE(row.GetValue<int64_t>(3) == 9842);
		REQUIRE(row.GetValue<double>(3) == 9842);
		REQUIRE(row.GetValue<int>(4) == 4982412);
		REQUIRE(row.GetValue<int64_t>(4) == 4982412);
		REQUIRE(row.GetValue<double>(4) == 4982412);
		REQUIRE(row.GetValue<string>(4) == "4982412");
		// the DOUBLE 17.3 is truncated when fetched as an integer
		REQUIRE(row.GetValue<int>(5) == 17);
		REQUIRE(row.GetValue<int64_t>(5) == 17);
		REQUIRE(row.GetValue<double>(5) == 17.3);
		row_count++;
	}
	REQUIRE(row_count == 1);
}
// DATE/TIME/TIMESTAMP values can be fetched as date_t/dtime_t/timestamp_t and
// decomposed with the Date/Time/Timestamp conversion helpers.
TEST_CASE("Test dates/times/timestamps", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE data(i DATE, j TIME, k TIMESTAMP)"));
	REQUIRE_NO_FAIL(
	    con.Query("INSERT INTO data VALUES (DATE '1992-01-01', TIME '13:00:17', TIMESTAMP '1993-01-01 14:00:17')"));
	idx_t row_count = 0;
	auto result = con.Query("SELECT * FROM data;");
	for (auto &row : *result) {
		int32_t year, month, day;
		int32_t hour, minute, second, millisecond;
		auto date = row.GetValue<date_t>(0);
		auto time = row.GetValue<dtime_t>(1);
		auto timestamp = row.GetValue<timestamp_t>(2);
		// decompose the DATE column
		Date::Convert(date, year, month, day);
		REQUIRE(year == 1992);
		REQUIRE(month == 1);
		REQUIRE(day == 1);
		// decompose the TIME column
		Time::Convert(time, hour, minute, second, millisecond);
		REQUIRE(hour == 13);
		REQUIRE(minute == 0);
		REQUIRE(second == 17);
		REQUIRE(millisecond == 0);
		// split the TIMESTAMP into its date and time parts and decompose those
		Timestamp::Convert(timestamp, date, time);
		Date::Convert(date, year, month, day);
		Time::Convert(time, hour, minute, second, millisecond);
		REQUIRE(year == 1993);
		REQUIRE(month == 1);
		REQUIRE(day == 1);
		REQUIRE(hour == 14);
		REQUIRE(minute == 0);
		REQUIRE(second == 17);
		REQUIRE(millisecond == 0);
		row_count++;
	}
	REQUIRE(row_count == 1);
}
// A streaming query whose failure only surfaces after the first vectors have
// been produced must still report an errored result to the caller.
TEST_CASE("Error in streaming result after initial query", "[api][.]") {
	DuckDB db(nullptr);
	Connection con(db);
	// populate a table with strings that cast cleanly to integers
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE strings(v VARCHAR)"));
	for (size_t row_idx = 0; row_idx < STANDARD_VECTOR_SIZE * 2 - 1; row_idx++) {
		REQUIRE_NO_FAIL(con.Query("INSERT INTO strings VALUES ('" + to_string(row_idx) + "')"));
	}
	// append a single non-numeric value that triggers a cast failure mid-stream
	REQUIRE_NO_FAIL(con.Query("INSERT INTO strings VALUES ('hello')"));
	// the streaming result must surface the cast failure
	auto streaming_result = con.SendQuery("SELECT CAST(v AS INTEGER) FROM strings");
	REQUIRE_FAIL(streaming_result);
}
// UUID input is case-insensitive and always rendered in canonical lowercase;
// malformed UUID strings must be rejected on insert.
TEST_CASE("Test UUID", "[api][uuid]") {
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE uuids (u uuid)"));
	// the same UUID inserted once uppercase and once lowercase
	REQUIRE_NO_FAIL(con.Query("INSERT INTO uuids VALUES ('A0EEBC99-9C0B-4EF8-BB6D-6BB9BD380A11'), "
	                          "('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11');"));
	// invalid inputs: empty string, too short, non-hex digit ('z')
	REQUIRE_FAIL(con.Query("INSERT INTO uuids VALUES ('');"));
	REQUIRE_FAIL(con.Query("INSERT INTO uuids VALUES ('a0eebc99');"));
	REQUIRE_FAIL(con.Query("INSERT INTO uuids VALUES ('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380z11');"));
	idx_t row_count = 0;
	auto result = con.Query("SELECT * FROM uuids");
	for (auto &row : *result) {
		// both rows render as the same canonical lowercase form
		auto uuid = row.GetValue<string>(0);
		REQUIRE(uuid == "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11");
		row_count++;
	}
	REQUIRE(row_count == 2);
}
// The auto-generated column name of an ordered aggregate must include the
// ORDER BY clause verbatim.
TEST_CASE("Test ARRAY_AGG with ORDER BY", "[api][array_agg]") {
	DuckDB db(nullptr);
	Connection con(db);
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE t2 (a INT, b INT, c INT)"));
	REQUIRE_NO_FAIL(con.Query("INSERT INTO t2 VALUES (1,1,1), (1,2,2), (2,1,3), (2,2,4)"));
	auto result = con.Query("select a, array_agg(c ORDER BY b) from t2 GROUP BY a");
	REQUIRE(!result->HasError());
	// the second result column is named after the full aggregate expression
	REQUIRE(result->names[1] == "array_agg(c ORDER BY b)");
}
// Regression test for issue #9417: a streaming self-join over the same parquet
// file must produce exactly the expected number of rows.
TEST_CASE("Issue #9417", "[api][.]") {
	DBConfig config;
	config.options.allow_unsigned_extensions = true;
	DuckDB db(TestCreatePath("issue_replication.db"), &config);
	Connection con(db);
	auto result = con.SendQuery("with max_period as ("
	                            " select max(reporting_date) as max_record\n"
	                            " from \"data/parquet-testing/issue9417.parquet\"\n"
	                            " )\n"
	                            " select\n"
	                            " *\n"
	                            " from \"data/parquet-testing/issue9417.parquet\" e\n"
	                            " inner join max_period\n"
	                            " on e.reporting_date = max_period.max_record\n"
	                            " where e.record_date between '2012-01-31' and '2023-06-30'");
	// drain the stream, checking the running row count never exceeds the target
	idx_t total_rows = 0;
	for (auto chunk = result->Fetch(); chunk; chunk = result->Fetch()) {
		REQUIRE(total_rows + chunk->size() <= 46);
		total_rows += chunk->size();
	}
	REQUIRE(total_rows == 46);
}

View File

@@ -0,0 +1,38 @@
#include "catch.hpp"
#include "test_helpers.hpp"
using namespace duckdb;
using namespace std;
// Connection::TableInfo returns column metadata for an existing table, nullptr
// for a missing one, and its visibility follows transaction isolation rules.
TEST_CASE("Test table info api", "[api]") {
	DuckDB db(nullptr);
	Connection con(db), con2(db);
	//! table is not found!
	auto info = con.TableInfo("test");
	REQUIRE(info.get() == nullptr);
	// after creating, the table can be found
	REQUIRE_NO_FAIL(con.Query("CREATE TABLE test(i INTEGER)"));
	info = con.TableInfo("test");
	REQUIRE(info.get() != nullptr);
	// the returned info reflects the table's name and column list
	REQUIRE(info->table == "test");
	REQUIRE(info->columns.size() == 1);
	REQUIRE(info->columns[0].Name() == "i");
	// table info is transaction sensitive
	REQUIRE_NO_FAIL(con.Query("BEGIN TRANSACTION"));
	// dropping the table in a transaction will result in the table being gone
	REQUIRE_NO_FAIL(con.Query("DROP TABLE test"));
	info = con.TableInfo("test");
	REQUIRE(info.get() == nullptr);
	// but not in a separate connection!
	info = con2.TableInfo("test");
	REQUIRE(info.get() != nullptr);
	// rolling back brings back the table info again
	REQUIRE_NO_FAIL(con.Query("ROLLBACK"));
	info = con.TableInfo("test");
	REQUIRE(info.get() != nullptr);
}

View File

@@ -0,0 +1,142 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/common/virtual_file_system.hpp"
#include <thread>
using namespace duckdb;
using namespace std;
// Replay the same statement a fixed number of times on the given connection;
// the results are intentionally discarded, only scheduling is exercised.
void run_query_multiple_times(duckdb::unique_ptr<string> query, duckdb::unique_ptr<Connection> con) {
	constexpr int repetitions = 10;
	for (int rep = 0; rep < repetitions; ++rep) {
		auto result = con->Query(*query);
		(void)result;
	}
}
// Repeatedly toggle the 'threads' setting to force the scheduler to relaunch
// its worker threads on subsequent queries.
void change_thread_counts(duckdb::DuckDB &db) {
	Connection con(db);
	constexpr int toggles = 10;
	for (int toggle = 0; toggle < toggles; ++toggle) {
		con.Query("SET threads=10");
		con.Query("SET threads=1");
	}
}
// NumberOfThreads acquired the same lock as RelaunchThreads
// NumberOfThreads is waiting for the lock
// RelaunchThreads is waiting on the thread to finish, while holding the lock
// Regression test: run aggregation queries (which call NumberOfThreads) from
// many threads while concurrently toggling the thread count (which triggers
// RelaunchThreads); under the old locking scheme this combination deadlocked.
TEST_CASE("Test deadlock issue between NumberOfThreads and RelaunchThreads", "[api]") {
	duckdb::DuckDB db(nullptr);
	int thread_count = 10;
	std::vector<std::thread> threads(thread_count);
	// This query will hit NumberOfThreads because it uses the RadixPartitionedHashtable
	for (int i = 0; i < thread_count; ++i) {
		auto query = make_uniq<string>(R"(
			WITH dataset AS (
				SELECT * FROM (VALUES
					(1, 'Alice'),
					(2, 'Bob'),
					(3, 'Alice'),
					(4, 'Carol')
				) AS t(id, name)
			)
			SELECT DISTINCT name FROM dataset;
		)");
		threads[i] = std::thread(run_query_multiple_times, std::move(query), make_uniq<Connection>(db));
	}
	// Fire off queries that change the thread count,
	// causing us to relaunch the worker threads on every subsequent query.
	change_thread_counts(db);
	for (int i = 0; i < thread_count; ++i) {
		threads[i].join();
	}
}
// Check the 'maximum_threads' database configuration option: the default, an
// explicit value, and the rejection of zero.
TEST_CASE("Test database maximum_threads argument", "[api]") {
	// default is number of hw threads
	// FIXME: not yet
	{
		DuckDB db(nullptr);
		auto file_system = make_uniq<VirtualFileSystem>();
		REQUIRE(db.NumberOfThreads() == DBConfig().GetSystemMaxThreads(*file_system));
	}
	// but we can set another value
	{
		DBConfig config;
		config.options.maximum_threads = 10;
		DuckDB db(nullptr, &config);
		REQUIRE(db.NumberOfThreads() == 10);
	}
	// zero is not allowed: constructing the database must throw
	{
		DBConfig config;
		config.options.maximum_threads = 0;
		DuckDB db;
		REQUIRE_THROWS(db = DuckDB(nullptr, &config));
	}
}
// Verify the interaction between the 'threads' and 'external_threads' settings:
// external_threads reserves part of the total thread budget for threads managed
// by the embedding application, so it must stay within [0, threads], and
// changing it never alters the total thread count reported by the database.
TEST_CASE("Test external threads", "[api]") {
	DuckDB db(nullptr);
	Connection con(db);
	auto &config = DBConfig::GetConfig(*db.instance);
	con.Query("SET threads=13");
	REQUIRE(config.options.maximum_threads == 13);
	REQUIRE(db.NumberOfThreads() == 13);
	// external_threads may equal the total thread count
	con.Query("SET external_threads=13");
	REQUIRE(config.options.external_threads == 13);
	REQUIRE(db.NumberOfThreads() == 13);
	// ... or be zero; the total thread count is unaffected either way
	con.Query("SET external_threads=0");
	REQUIRE(config.options.external_threads == 0);
	REQUIRE(db.NumberOfThreads() == 13);
	// negative values are rejected as out of range
	auto res = con.Query("SET external_threads=-1");
	REQUIRE(res->HasError());
	REQUIRE(StringUtil::Contains(res->GetError(), "out of range"));
	// values larger than 'threads' are rejected
	res = con.Query("SET external_threads=14");
	REQUIRE(res->HasError());
	REQUIRE(StringUtil::Contains(res->GetError(), "smaller"));
	con.Query("SET external_threads=5");
	REQUIRE(config.options.external_threads == 5);
	REQUIRE(db.NumberOfThreads() == 13);
	// RESET restores the built-in defaults for both settings
	con.Query("RESET external_threads");
	REQUIRE(config.options.external_threads == DBConfig().options.external_threads);
	REQUIRE(db.NumberOfThreads() == 13);
	con.Query("RESET threads");
	auto file_system = make_uniq<VirtualFileSystem>();
	REQUIRE(config.options.maximum_threads == DBConfig().GetSystemMaxThreads(*file_system));
	REQUIRE(db.NumberOfThreads() == DBConfig().GetSystemMaxThreads(*file_system));
}
#ifdef DUCKDB_NO_THREADS
// In single-threaded builds queries only make progress when explicitly
// executed: merely creating a pending query must not run any pipelines.
TEST_CASE("Test scheduling with no threads", "[api]") {
	DuckDB db(nullptr);
	Connection con1(db);
	Connection con2(db);
	const auto query_1 = con1.PendingQuery("SELECT 42");
	const auto query_2 = con2.PendingQuery("SELECT 42");
	// Get the completed pipelines. Because "executeTask" was never called, there should be no completed pipelines.
	auto query_1_pipelines = con1.context->GetExecutor().GetCompletedPipelines();
	REQUIRE((query_1_pipelines == 0));
	// Execute the second query
	REQUIRE_NO_FAIL(query_2->Execute());
	// And even after that, there should still be no completed pipelines for the first query.
	query_1_pipelines = con1.context->GetExecutor().GetCompletedPipelines();
	REQUIRE((query_1_pipelines == 0));
	// the first pending query can still be driven to completion afterwards
	REQUIRE_NO_FAIL(query_1->Execute());
}
#endif

View File

@@ -0,0 +1,93 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "tpch_extension.hpp"
#include <chrono>
#include <iostream>
#include "duckdb/common/string_util.hpp"
using namespace duckdb;
using namespace std;
// Build a selection of TPC-H queries (Q01, Q02, Q03, Q06, Q12) through the C++
// relation API at scale factor 0.01 and compare each result against the
// reference answers shipped with the tpch extension.
TEST_CASE("Test TPC-H SF0.01 with relations", "[tpch][.]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	double sf = 0.01;
	// skip entirely when the tpch extension is not linked into this build
	if (!db.ExtensionIsLoaded("tpch")) {
		return;
	}
	REQUIRE_NO_FAIL(con.Query("CALL dbgen(sf=" + to_string(sf) + ")"));
	// relation handles for all TPC-H base tables
	auto lineitem = con.Table("lineitem");
	auto part = con.Table("part");
	auto supplier = con.Table("supplier");
	auto partsupp = con.Table("partsupp");
	auto nation = con.Table("nation");
	auto region = con.Table("region");
	auto orders = con.Table("orders");
	auto customer = con.Table("customer");
	// Q01: pricing summary report (filter + aggregate + order)
	result =
	    lineitem->Filter("l_shipdate <= DATE '1998-09-02'")
	        ->Aggregate(
	            {"l_returnflag", "l_linestatus", "sum(l_quantity) AS sum_qty", "sum(l_extendedprice) AS sum_base_price",
	             "sum(l_extendedprice * (1 - l_discount)) AS sum_disc_price",
	             "sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge", "avg(l_quantity) AS avg_qty",
	             "avg(l_extendedprice) AS avg_price", "avg(l_discount) AS avg_disc", "count(*) AS count_order"})
	        ->Order("l_returnflag, l_linestatus")
	        ->Execute();
	COMPARE_CSV(result, TpchExtension::GetAnswer(sf, 1), true);
	// Q02: minimum cost supplier; the joined relation is registered as a view so
	// the correlated subquery in the filter can reference it by name
	auto partsupp_region = partsupp->Join(supplier, "s_suppkey=ps_suppkey")
	                           ->Join(nation, "s_nationkey=n_nationkey")
	                           ->Join(region, "n_regionkey=r_regionkey");
	partsupp_region->CreateView("partsupp_region");
	auto part_join = partsupp_region->Join(part, "p_partkey=ps_partkey");
	result =
	    part_join
	        ->Filter({"p_size=15", "p_type LIKE '%BRASS'", "r_name='EUROPE'",
	                  "ps_supplycost = ( SELECT min(ps_supplycost) FROM partsupp_region WHERE p_partkey = ps_partkey "
	                  "AND r_name = 'EUROPE')"})
	        ->Project({"s_acctbal", "s_name", "n_name", "p_partkey", "p_mfgr", "s_address", "s_phone", "s_comment"})
	        ->Order({"s_acctbal DESC", "n_name", "s_name", "p_partkey"})
	        ->Limit(100)
	        ->Execute();
	COMPARE_CSV(result, TpchExtension::GetAnswer(sf, 2), true);
	// Q03: shipping priority (three-way join + aggregate + top-10)
	auto cust_join = customer->Join(orders, "c_custkey=o_custkey")->Join(lineitem, "l_orderkey=o_orderkey");
	result =
	    cust_join
	        ->Filter({"c_mktsegment = 'BUILDING'", "o_orderdate < DATE '1995-03-15'", "l_shipdate > DATE '1995-03-15'"})
	        ->Aggregate(
	            {"l_orderkey", "sum(l_extendedprice * (1 - l_discount)) AS revenue", "o_orderdate", "o_shippriority"})
	        ->Order("revenue DESC, o_orderdate")
	        ->Limit(10)
	        ->Execute();
	COMPARE_CSV(result, TpchExtension::GetAnswer(sf, 3), true);
	// Q06: forecasting revenue change (filter + ungrouped aggregate)
	result = lineitem
	             ->Filter({"l_shipdate >= cast('1994-01-01' AS date)", "l_shipdate < cast('1995-01-01' AS date)",
	                       "l_discount BETWEEN 0.05 AND 0.07", "l_quantity < 24;"})
	             ->Aggregate("sum(l_extendedprice * l_discount) AS revenue")
	             ->Execute();
	COMPARE_CSV(result, TpchExtension::GetAnswer(sf, 6), true);
	// Q12: shipping modes and order priority (join + conditional aggregation)
	result =
	    lineitem->Join(orders, "l_orderkey=o_orderkey")
	        ->Filter({"l_shipmode IN ('MAIL', 'SHIP')", "l_commitdate < l_receiptdate", "l_shipdate < l_commitdate",
	                  "l_receiptdate >= cast('1994-01-01' AS date)", "l_receiptdate < cast('1995-01-01' AS date)"})
	        ->Aggregate({"l_shipmode",
	                     "sum(CASE WHEN o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' THEN 1 ELSE 0 END) "
	                     "AS high_line_count",
	                     "sum(CASE WHEN o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' THEN 1 ELSE 0 "
	                     "END) AS low_line_count"})
	        ->Order("l_shipmode")
	        ->Execute();
	COMPARE_CSV(result, TpchExtension::GetAnswer(sf, 12), true);
}

View File

@@ -0,0 +1,39 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "tpch_extension.hpp"
#include <chrono>
#include <iostream>
#include "duckdb/common/string_util.hpp"
using namespace duckdb;
using namespace std;
// Run all 22 TPC-H benchmark queries through the streaming API at SF 0.01 and
// compare each materialized result against the extension's reference answers.
TEST_CASE("Test TPC-H SF0.01 using streaming api", "[tpch][.]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	double sf = 0.01;
	// skip when the tpch extension is not linked into this build
	if (!db.ExtensionIsLoaded("tpch")) {
		return;
	}
	REQUIRE_NO_FAIL(con.Query("CALL dbgen(sf=" + to_string(sf) + ")"));
	for (idx_t tpch_num = 1; tpch_num <= 22; tpch_num++) {
		result = con.SendQuery("pragma tpch(" + to_string(tpch_num) + ");");
		duckdb::ColumnDataCollection collection(duckdb::Allocator::DefaultAllocator(), result->types);
		// drain the stream chunk by chunk into the collection
		for (auto chunk = result->Fetch(); chunk; chunk = result->Fetch()) {
			collection.Append(*chunk);
		}
		COMPARE_CSV_COLLECTION(collection, TpchExtension::GetAnswer(sf, tpch_num), true);
	}
}

44
external/duckdb/test/api/test_uuid.cpp vendored Normal file
View File

@@ -0,0 +1,44 @@
#include "test_helpers.hpp"
#include "duckdb/common/types/uuid.hpp"
#include "catch.hpp"
using namespace duckdb;
using namespace std;
// Round-trip UUIDs between the uhugeint_t representation and the canonical
// lowercase 8-4-4-4-12 string form, covering the 128-bit boundary values
// (zero, one, max, max-1, and both sides of the midpoint).
TEST_CASE("Test UUID API", "[api]") {
	// uhugeint -> string
	REQUIRE(UUID::ToString(UUID::FromUHugeint(uhugeint_t(0))) == "00000000-0000-0000-0000-000000000000");
	REQUIRE(UUID::ToString(UUID::FromUHugeint(uhugeint_t(1))) == "00000000-0000-0000-0000-000000000001");
	REQUIRE(UUID::ToString(UUID::FromUHugeint(NumericLimits<uhugeint_t>::Maximum())) ==
	        "ffffffff-ffff-ffff-ffff-ffffffffffff");
	REQUIRE(UUID::ToString(UUID::FromUHugeint(NumericLimits<uhugeint_t>::Maximum() - 1)) ==
	        "ffffffff-ffff-ffff-ffff-fffffffffffe");
	REQUIRE(UUID::ToString(UUID::FromUHugeint(NumericLimits<uhugeint_t>::Maximum() / 2)) ==
	        "7fffffff-ffff-ffff-ffff-ffffffffffff");
	REQUIRE(UUID::ToString(UUID::FromUHugeint((NumericLimits<uhugeint_t>::Maximum() / 2) + 1)) ==
	        "80000000-0000-0000-0000-000000000000");
	// string -> uhugeint: check the upper/lower 64-bit halves explicitly
	REQUIRE_THAT(UUID::ToUHugeint(UUID::FromString("00000000-0000-0000-0000-000000000000")),
	             Catch::Predicate<uhugeint_t>([&](const uhugeint_t &input) {
		             return input.upper == 0x0000000000000000 && input.lower == 0x0000000000000000;
	             }));
	REQUIRE_THAT(UUID::ToUHugeint(UUID::FromString("00000000-0000-0000-0000-000000000001")),
	             Catch::Predicate<uhugeint_t>([&](const uhugeint_t &input) {
		             return input.upper == 0x0000000000000000 && input.lower == 0x0000000000000001;
	             }));
	REQUIRE_THAT(UUID::ToUHugeint(UUID::FromString("ffffffff-ffff-ffff-ffff-ffffffffffff")),
	             Catch::Predicate<uhugeint_t>([&](const uhugeint_t &input) {
		             return input.upper == 0xffffffffffffffff && input.lower == 0xffffffffffffffff;
	             }));
	REQUIRE_THAT(UUID::ToUHugeint(UUID::FromString("ffffffff-ffff-ffff-ffff-fffffffffffe")),
	             Catch::Predicate<uhugeint_t>([&](const uhugeint_t &input) {
		             return input.upper == 0xffffffffffffffff && input.lower == 0xfffffffffffffffe;
	             }));
	REQUIRE_THAT(UUID::ToUHugeint(UUID::FromString("7fffffff-ffff-ffff-ffff-ffffffffffff")),
	             Catch::Predicate<uhugeint_t>([&](const uhugeint_t &input) {
		             return input.upper == 0x7fffffffffffffff && input.lower == 0xffffffffffffffff;
	             }));
	REQUIRE_THAT(UUID::ToUHugeint(UUID::FromString("80000000-0000-0000-0000-000000000000")),
	             Catch::Predicate<uhugeint_t>([&](const uhugeint_t &input) {
		             return input.upper == 0x8000000000000000 && input.lower == 0x0000000000000000;
	             }));
}

View File

@@ -0,0 +1,20 @@
#ifdef _WIN32
#include <windows.h>
#endif
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb.hpp"
using namespace duckdb;
using namespace std;
// Compile-time compatibility check: including windows.h before duckdb.hpp must
// not break the build; the queries below merely confirm the library still runs.
TEST_CASE("Test compatibility with windows.h", "[windows]") {
	DuckDB db(nullptr);
	Connection con(db);
	// This test solely exists to check if compilation is hindered by including windows.h
	// before including duckdb.hpp
	con.BeginTransaction();
	con.Query("select 42;");
	con.Commit();
}

View File

@@ -0,0 +1,53 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb.hpp"
#include "duckdb/main/db_instance_cache.hpp"
using namespace duckdb;
using namespace std;
// Open the database file at 'path' twice - once through the regular DuckDB
// constructor and once through the DBInstanceCache - and verify the 'test'
// table is readable both ways. When 'create_table' is set, the table is
// created and populated (range 0..9, so SUM(i) == 45) on the first open.
void TestConnectToDatabase(const string &path, bool create_table = false) {
	// connect to the database using the standard syntax
	{
		DuckDB db(path);
		Connection con(db);
		if (create_table) {
			REQUIRE_NO_FAIL(con.Query("CREATE TABLE test AS SELECT * FROM range(10) t(i)"));
		}
		auto result = con.Query("SELECT SUM(i) FROM test");
		REQUIRE(CHECK_COLUMN(result, 0, {45}));
	}
	// connect to the database using the db instance cache
	{
		DBInstanceCache cache;
		DBConfig config;
		auto db = cache.CreateInstance(path, config);
		Connection con(*db);
		auto result = con.Query("SELECT SUM(i) FROM test");
		REQUIRE(CHECK_COLUMN(result, 0, {45}));
	}
}
// Regression test for issue #6931: database files inside a directory whose name
// contains accented (non-ASCII) characters must be accessible via relative and
// absolute paths, notably on Windows.
TEST_CASE("Issue #6931 - test windows unicode path", "[windows]") {
	string dirname = "Moseguí_i_González";
	auto test_directory = TestDirectoryPath() + "/" + dirname;
	auto current_directory = TestGetCurrentDirectory();
	TestCreateDirectory(test_directory);
	TestChangeDirectory(test_directory);
	// relative path INSIDE folder with accents
	TestConnectToDatabase("test.db", true);
	TestChangeDirectory("..");
	// relative path TOWARDS folder with accents
	TestConnectToDatabase(dirname + "/" + "test.db");
	// absolute path with folder with accents
	TestConnectToDatabase(current_directory + "/" + test_directory + "/" + "test.db");
	// revert current working directory
	TestChangeDirectory(current_directory);
}

View File

@@ -0,0 +1,12 @@
add_library_unity(
test_api_udf_function
OBJECT
test_templated_scalar_udf.cpp
test_argumented_scalar_udf.cpp
test_templated_vec_udf.cpp
test_argumented_vec_udf.cpp
test_aggregate_udf.cpp)
set(ALL_OBJECT_FILES
${ALL_OBJECT_FILES} $<TARGET_OBJECTS:test_api_udf_function>
PARENT_SCOPE)

View File

@@ -0,0 +1,125 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/common/types/date.hpp"
#include "duckdb/common/types/time.hpp"
#include "duckdb/common/types/timestamp.hpp"
#include "udf_functions_to_test.hpp"
using namespace duckdb;
using namespace std;
// Exercise Connection::CreateAggregateFunction: templated unary and binary
// aggregate UDFs, registration with explicit argument types, rejection of
// mismatched registrations, and the fully generic callback-based API.
TEST_CASE("Aggregate UDFs", "[coverage][.]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();
	// fix: this section tests the unary 'avg' UDF; it previously duplicated the
	// binary section's description below
	SECTION("Testing a unary aggregate UDF using only template parameters") {
		// using DOUBLEs
		REQUIRE_NOTHROW(
		    con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<double>, double, double>("udf_avg_double"));
		con.Query("CREATE TABLE doubles (d DOUBLE)");
		con.Query("INSERT INTO doubles VALUES (1), (2), (3), (4), (5)");
		result = con.Query("SELECT udf_avg_double(d) FROM doubles");
		REQUIRE(CHECK_COLUMN(result, 0, {3.0}));
		// using INTEGERs
		REQUIRE_NOTHROW(con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<int>, int, int>("udf_avg_int"));
		con.Query("CREATE TABLE integers (i INTEGER)");
		con.Query("INSERT INTO integers VALUES (1), (2), (3), (4), (5)");
		result = con.Query("SELECT udf_avg_int(i) FROM integers");
		REQUIRE(CHECK_COLUMN(result, 0, {3}));
	}
	SECTION("Testing a binary aggregate UDF using only template parameters") {
		// using DOUBLEs; covar_pop of identical constants is 0, NULL in either
		// argument yields NULL
		con.CreateAggregateFunction<UDFCovarPopOperation, udf_covar_state_t, double, double, double>(
		    "udf_covar_pop_double");
		result = con.Query("SELECT udf_covar_pop_double(3,3), udf_covar_pop_double(NULL,3), "
		                   "udf_covar_pop_double(3,NULL), udf_covar_pop_double(NULL,NULL)");
		REQUIRE(CHECK_COLUMN(result, 0, {0}));
		REQUIRE(CHECK_COLUMN(result, 1, {Value()}));
		REQUIRE(CHECK_COLUMN(result, 2, {Value()}));
		REQUIRE(CHECK_COLUMN(result, 3, {Value()}));
		// using INTEGERs
		con.CreateAggregateFunction<UDFCovarPopOperation, udf_covar_state_t, int, int, int>("udf_covar_pop_int");
		result = con.Query("SELECT udf_covar_pop_int(3,3), udf_covar_pop_int(NULL,3), udf_covar_pop_int(3,NULL), "
		                   "udf_covar_pop_int(NULL,NULL)");
		REQUIRE(CHECK_COLUMN(result, 0, {0}));
		REQUIRE(CHECK_COLUMN(result, 1, {Value()}));
		REQUIRE(CHECK_COLUMN(result, 2, {Value()}));
		REQUIRE(CHECK_COLUMN(result, 3, {Value()}));
	}
	SECTION("Testing aggregate UDF with arguments") {
		// explicit LogicalType arguments matching the template parameters
		REQUIRE_NOTHROW(con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<int>, int, int>(
		    "udf_avg_int_args", LogicalType::INTEGER, LogicalType::INTEGER));
		con.Query("CREATE TABLE integers (i INTEGER)");
		con.Query("INSERT INTO integers VALUES (1), (2), (3), (4), (5)");
		result = con.Query("SELECT udf_avg_int_args(i) FROM integers");
		REQUIRE(CHECK_COLUMN(result, 0, {3}));
		// using TIMEs to test disambiguation
		REQUIRE_NOTHROW(con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<dtime_t>, dtime_t, dtime_t>(
		    "udf_avg_time_args", LogicalType::TIME, LogicalType::TIME));
		con.Query("CREATE TABLE times (t TIME)");
		con.Query("INSERT INTO times VALUES ('01:00:00'), ('01:00:00'), ('01:00:00'), ('01:00:00'), ('01:00:00')");
		result = con.Query("SELECT udf_avg_time_args(t) FROM times");
		REQUIRE(CHECK_COLUMN(result, 0, {"01:00:00"}));
		// using DOUBLEs and a binary UDF
		con.CreateAggregateFunction<UDFCovarPopOperation, udf_covar_state_t, double, double, double>(
		    "udf_covar_pop_double_args", LogicalType::DOUBLE, LogicalType::DOUBLE, LogicalType::DOUBLE);
		result = con.Query("SELECT udf_covar_pop_double_args(3,3), udf_covar_pop_double_args(NULL,3), "
		                   "udf_covar_pop_double_args(3,NULL), udf_covar_pop_double_args(NULL,NULL)");
		REQUIRE(CHECK_COLUMN(result, 0, {0}));
		REQUIRE(CHECK_COLUMN(result, 1, {Value()}));
		REQUIRE(CHECK_COLUMN(result, 2, {Value()}));
		REQUIRE(CHECK_COLUMN(result, 3, {Value()}));
	}
	SECTION("Testing aggregate UDF with WRONG arguments") {
		// registrations whose LogicalTypes disagree with the template types must throw
		// wrong return type
		REQUIRE_THROWS(con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<int>, double, int>(
		    "udf_avg_int_args", LogicalType::INTEGER, LogicalType::INTEGER));
		REQUIRE_THROWS(con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<int>, int, int>(
		    "udf_avg_int_args", LogicalType::DOUBLE, LogicalType::INTEGER));
		// wrong first argument
		REQUIRE_THROWS(con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<int>, int, double>(
		    "udf_avg_int_args", LogicalType::INTEGER, LogicalType::INTEGER));
		REQUIRE_THROWS(con.CreateAggregateFunction<UDFAverageFunction, udf_avg_state_t<int>, int, int>(
		    "udf_avg_int_args", LogicalType::INTEGER, LogicalType::DOUBLE));
		// wrong argument types on the binary covar_pop UDF
		REQUIRE_THROWS(con.CreateAggregateFunction<UDFCovarPopOperation, udf_covar_state_t, double, double, int>(
		    "udf_covar_pop_double_args", LogicalType::DOUBLE, LogicalType::DOUBLE, LogicalType::DOUBLE));
		REQUIRE_THROWS(con.CreateAggregateFunction<UDFCovarPopOperation, udf_covar_state_t, double, double, double>(
		    "udf_covar_pop_double_args", LogicalType::DOUBLE, LogicalType::DOUBLE, LogicalType::INTEGER));
	}
	SECTION("Testing the generic CreateAggregateFunction()") {
		// register via raw state-size/init/update/combine/finalize callbacks
		REQUIRE_NOTHROW(con.CreateAggregateFunction(
		    "udf_sum", {LogicalType::DOUBLE}, LogicalType::DOUBLE, &UDFSum::StateSize<UDFSum::sum_state_t>,
		    &UDFSum::Initialize<UDFSum::sum_state_t>, &UDFSum::Update<UDFSum::sum_state_t, double>,
		    &UDFSum::Combine<UDFSum::sum_state_t>, &UDFSum::Finalize<UDFSum::sum_state_t, double>,
		    &UDFSum::SimpleUpdate<UDFSum::sum_state_t, double>));
		REQUIRE_NO_FAIL(con.Query("SELECT udf_sum(1)"));
		result = con.Query("SELECT udf_sum(1)");
		REQUIRE(CHECK_COLUMN(result, 0, {1}));
		REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers(i INTEGER)"));
		REQUIRE_NO_FAIL(con.Query("INSERT INTO integers SELECT * FROM range(0, 1000, 1)"));
		// sum of 0..999 == 499500
		result = con.Query("SELECT udf_sum(i) FROM integers");
		REQUIRE(CHECK_COLUMN(result, 0, {499500}));
	}
}

View File

@@ -0,0 +1,293 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/common/types/date.hpp"
#include "duckdb/common/types/time.hpp"
#include "duckdb/common/types/timestamp.hpp"
#include "udf_functions_to_test.hpp"
using namespace duckdb;
using namespace std;
TEST_CASE("UDF functions with arguments", "[coverage][.]") {
duckdb::unique_ptr<QueryResult> result;
DuckDB db(nullptr);
Connection con(db);
con.EnableQueryVerification();
string func_name, table_name, col_type;
// The types supported by the argumented CreateScalarFunction
const duckdb::vector<LogicalTypeId> all_sql_types = {
LogicalTypeId::BOOLEAN, LogicalTypeId::TINYINT, LogicalTypeId::SMALLINT, LogicalTypeId::DATE,
LogicalTypeId::TIME, LogicalTypeId::INTEGER, LogicalTypeId::BIGINT, LogicalTypeId::TIMESTAMP,
LogicalTypeId::FLOAT, LogicalTypeId::DOUBLE, LogicalTypeId::DECIMAL, LogicalTypeId::VARCHAR};
// Creating the tables
for (LogicalType sql_type : all_sql_types) {
col_type = EnumUtil::ToString(sql_type.id());
table_name = StringUtil::Lower(col_type);
con.Query("CREATE TABLE " + table_name + " (a " + col_type + ", b " + col_type + ", c " + col_type + ")");
}
// Creating the UDF functions into the catalog
for (LogicalType sql_type : all_sql_types) {
func_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
switch (sql_type.id()) {
case LogicalTypeId::BOOLEAN: {
con.CreateScalarFunction<bool, bool>(func_name + "_1", {LogicalType::BOOLEAN}, LogicalType::BOOLEAN,
&udf_bool);
con.CreateScalarFunction<bool, bool, bool>(func_name + "_2", {LogicalType::BOOLEAN, LogicalType::BOOLEAN},
LogicalType::BOOLEAN, &udf_bool);
con.CreateScalarFunction<bool, bool, bool, bool>(
func_name + "_3", {LogicalType::BOOLEAN, LogicalType::BOOLEAN, LogicalType::BOOLEAN},
LogicalType::BOOLEAN, &udf_bool);
break;
}
case LogicalTypeId::TINYINT: {
con.CreateScalarFunction<int8_t, int8_t>(func_name + "_1", {LogicalType::TINYINT}, LogicalType::TINYINT,
&udf_int8);
con.CreateScalarFunction<int8_t, int8_t, int8_t>(
func_name + "_2", {LogicalType::TINYINT, LogicalType::TINYINT}, LogicalType::TINYINT, &udf_int8);
con.CreateScalarFunction<int8_t, int8_t, int8_t, int8_t>(
func_name + "_3", {LogicalType::TINYINT, LogicalType::TINYINT, LogicalType::TINYINT},
LogicalType::TINYINT, &udf_int8);
break;
}
case LogicalTypeId::SMALLINT: {
con.CreateScalarFunction<int16_t, int16_t>(func_name + "_1", {LogicalType::SMALLINT}, LogicalType::SMALLINT,
&udf_int16);
con.CreateScalarFunction<int16_t, int16_t, int16_t>(
func_name + "_2", {LogicalType::SMALLINT, LogicalType::SMALLINT}, LogicalType::SMALLINT, &udf_int16);
con.CreateScalarFunction<int16_t, int16_t, int16_t, int16_t>(
func_name + "_3", {LogicalType::SMALLINT, LogicalType::SMALLINT, LogicalType::SMALLINT},
LogicalType::SMALLINT, &udf_int16);
break;
}
case LogicalTypeId::DATE: {
con.CreateScalarFunction<date_t, date_t>(func_name + "_1", {LogicalType::DATE}, LogicalType::DATE,
&udf_date);
con.CreateScalarFunction<date_t, date_t, date_t>(func_name + "_2", {LogicalType::DATE, LogicalType::DATE},
LogicalType::DATE, &udf_date);
con.CreateScalarFunction<date_t, date_t, date_t, date_t>(
func_name + "_3", {LogicalType::DATE, LogicalType::DATE, LogicalType::DATE}, LogicalType::DATE,
&udf_date);
break;
}
case LogicalTypeId::TIME: {
con.CreateScalarFunction<dtime_t, dtime_t>(func_name + "_1", {LogicalType::TIME}, LogicalType::TIME,
&udf_time);
con.CreateScalarFunction<dtime_t, dtime_t, dtime_t>(
func_name + "_2", {LogicalType::TIME, LogicalType::TIME}, LogicalType::TIME, &udf_time);
con.CreateScalarFunction<dtime_t, dtime_t, dtime_t, dtime_t>(
func_name + "_3", {LogicalType::TIME, LogicalType::TIME, LogicalType::TIME}, LogicalType::TIME,
&udf_time);
break;
}
case LogicalTypeId::INTEGER: {
con.CreateScalarFunction<int32_t, int32_t>(func_name + "_1", {LogicalType::INTEGER}, LogicalType::INTEGER,
&udf_int);
con.CreateScalarFunction<int32_t, int32_t, int32_t>(
func_name + "_2", {LogicalType::INTEGER, LogicalType::INTEGER}, LogicalType::INTEGER, &udf_int);
con.CreateScalarFunction<int32_t, int32_t, int32_t, int32_t>(
func_name + "_3", {LogicalType::INTEGER, LogicalType::INTEGER, LogicalType::INTEGER},
LogicalType::INTEGER, &udf_int);
break;
}
case LogicalTypeId::BIGINT: {
con.CreateScalarFunction<int64_t, int64_t>(func_name + "_1", {LogicalType::BIGINT}, LogicalType::BIGINT,
&udf_int64);
con.CreateScalarFunction<int64_t, int64_t, int64_t>(
func_name + "_2", {LogicalType::BIGINT, LogicalType::BIGINT}, LogicalType::BIGINT, &udf_int64);
con.CreateScalarFunction<int64_t, int64_t, int64_t, int64_t>(
func_name + "_3", {LogicalType::BIGINT, LogicalType::BIGINT, LogicalType::BIGINT}, LogicalType::BIGINT,
&udf_int64);
break;
}
case LogicalTypeId::TIMESTAMP: {
con.CreateScalarFunction<timestamp_t, timestamp_t>(func_name + "_1", {LogicalType::TIMESTAMP},
LogicalType::TIMESTAMP, &udf_timestamp);
con.CreateScalarFunction<timestamp_t, timestamp_t, timestamp_t>(
func_name + "_2", {LogicalType::TIMESTAMP, LogicalType::TIMESTAMP}, LogicalType::TIMESTAMP,
&udf_timestamp);
con.CreateScalarFunction<timestamp_t, timestamp_t, timestamp_t, timestamp_t>(
func_name + "_3", {LogicalType::TIMESTAMP, LogicalType::TIMESTAMP, LogicalType::TIMESTAMP},
LogicalType::TIMESTAMP, &udf_timestamp);
break;
}
case LogicalTypeId::FLOAT: {
con.CreateScalarFunction<float, float>(func_name + "_1", {LogicalType::FLOAT}, LogicalType::FLOAT,
&udf_float);
con.CreateScalarFunction<float, float, float>(func_name + "_2", {LogicalType::FLOAT, LogicalType::FLOAT},
LogicalType::FLOAT, &udf_float);
con.CreateScalarFunction<float, float, float, float>(
func_name + "_3", {LogicalType::FLOAT, LogicalType::FLOAT, LogicalType::FLOAT}, LogicalType::FLOAT,
&udf_float);
break;
}
case LogicalTypeId::DOUBLE: {
con.CreateScalarFunction<double, double>(func_name + "_1", {LogicalType::DOUBLE}, LogicalType::DOUBLE,
&udf_double);
con.CreateScalarFunction<double, double, double>(
func_name + "_2", {LogicalType::DOUBLE, LogicalType::DOUBLE}, LogicalType::DOUBLE, &udf_double);
con.CreateScalarFunction<double, double, double, double>(
func_name + "_3", {LogicalType::DOUBLE, LogicalType::DOUBLE, LogicalType::DOUBLE}, LogicalType::DOUBLE,
&udf_double);
break;
}
case LogicalTypeId::VARCHAR: {
con.CreateScalarFunction<string_t, string_t>(func_name + "_1", {LogicalType::VARCHAR}, LogicalType::VARCHAR,
&udf_varchar);
con.CreateScalarFunction<string_t, string_t, string_t>(
func_name + "_2", {LogicalType::VARCHAR, LogicalType::VARCHAR}, LogicalType::VARCHAR, &udf_varchar);
con.CreateScalarFunction<string_t, string_t, string_t, string_t>(
func_name + "_3", {LogicalType::VARCHAR, LogicalType::VARCHAR, LogicalType::VARCHAR},
LogicalType::VARCHAR, &udf_varchar);
break;
}
default:
break;
}
}
SECTION("Testing UDF functions") {
// Inserting values
for (LogicalType sql_type : all_sql_types) {
table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
string query = "INSERT INTO " + table_name + " VALUES";
if (sql_type == LogicalType::BOOLEAN) {
con.Query(query + "(true, true, true), (true, true, false), (false, false, false);");
} else if (sql_type.IsNumeric()) {
con.Query(query + "(1, 10, 100),(2, 10, 100),(3, 10, 100);");
} else if (sql_type == LogicalType::VARCHAR) {
con.Query(query + "('a', 'b', 'c'),('a', 'b', 'c'),('a', 'b', 'c');");
} else if (sql_type == LogicalType::DATE) {
con.Query(query + "('2008-01-01', '2009-01-01', '2010-01-01')," +
"('2008-01-01', '2009-01-01', '2010-01-01')," + "('2008-01-01', '2009-01-01', '2010-01-01')");
} else if (sql_type == LogicalType::TIME) {
con.Query(query + "('01:00:00', '02:00:00', '03:00:00')," + "('04:00:00', '05:00:00', '06:00:00')," +
"('07:00:00', '08:00:00', '09:00:00')");
} else if (sql_type == LogicalType::TIMESTAMP) {
con.Query(query + "('2008-01-01 00:00:00', '2009-01-01 00:00:00', '2010-01-01 00:00:00')," +
"('2008-01-01 00:00:00', '2009-01-01 00:00:00', '2010-01-01 00:00:00')," +
"('2008-01-01 00:00:00', '2009-01-01 00:00:00', '2010-01-01 00:00:00')");
}
}
// Running the UDF functions and checking the results
for (LogicalType sql_type : all_sql_types) {
if (sql_type.id() == LogicalTypeId::DECIMAL) {
continue;
}
table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
func_name = table_name;
if (sql_type.IsNumeric()) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {10, 20, 30}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {111, 112, 113}));
} else if (sql_type == LogicalType::BOOLEAN) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {true, false, false}));
} else if (sql_type == LogicalType::VARCHAR) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"a", "a", "a"}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"b", "b", "b"}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"c", "c", "c"}));
} else if (sql_type == LogicalType::DATE) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"2008-01-01", "2008-01-01", "2008-01-01"}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"2009-01-01", "2009-01-01", "2009-01-01"}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"2010-01-01", "2010-01-01", "2010-01-01"}));
} else if (sql_type == LogicalType::TIME) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"01:00:00", "04:00:00", "07:00:00"}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"02:00:00", "05:00:00", "08:00:00"}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"03:00:00", "06:00:00", "09:00:00"}));
} else if (sql_type == LogicalType::TIMESTAMP) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"2008-01-01 00:00:00", "2008-01-01 00:00:00", "2008-01-01 00:00:00"}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"2009-01-01 00:00:00", "2009-01-01 00:00:00", "2009-01-01 00:00:00"}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"2010-01-01 00:00:00", "2010-01-01 00:00:00", "2010-01-01 00:00:00"}));
}
}
}
SECTION("Checking NULLs with UDF functions") {
for (LogicalType sql_type : all_sql_types) {
if (sql_type.id() == LogicalTypeId::DECIMAL) {
continue;
}
table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
func_name = table_name;
// Deleting old values
REQUIRE_NO_FAIL(con.Query("DELETE FROM " + table_name));
// Inserting NULLs
string query = "INSERT INTO " + table_name + " VALUES";
con.Query(query + "(NULL, NULL, NULL), (NULL, NULL, NULL), (NULL, NULL, NULL);");
// Testing NULLs
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
}
}
}

View File

@@ -0,0 +1,354 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "duckdb/common/types/date.hpp"
#include "duckdb/common/types/time.hpp"
#include "duckdb/common/types/timestamp.hpp"
#include "udf_functions_to_test.hpp"
using namespace duckdb;
using namespace std;
// Exercises the non-templated Connection::CreateVectorizedFunction() overloads, which
// take explicit argument/return LogicalTypes, for every supported SQL type.
// Covers: unary/binary/ternary UDFs (the udf_*_function helpers return their last
// argument, see udf_functions_to_test.hpp), NULL propagation, UDFs with many fixed
// arguments, and vararg UDFs fed with both constant values and table columns.
TEST_CASE("Vectorized UDF functions using arguments", "[coverage][.]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();

	string func_name, table_name, col_type;

	// The SQL types exercised by this test
	const duckdb::vector<LogicalTypeId> all_sql_types = {
	    LogicalTypeId::BOOLEAN, LogicalTypeId::TINYINT, LogicalTypeId::SMALLINT, LogicalTypeId::DATE,
	    LogicalTypeId::TIME,    LogicalTypeId::INTEGER, LogicalTypeId::BIGINT,   LogicalTypeId::TIMESTAMP,
	    LogicalTypeId::FLOAT,   LogicalTypeId::DOUBLE,  LogicalTypeId::VARCHAR};

	// Creating the tables: one three-column table per type, named after the lower-cased type name
	for (LogicalType sql_type : all_sql_types) {
		col_type = EnumUtil::ToString(sql_type.id());
		table_name = StringUtil::Lower(col_type);
		con.Query("CREATE TABLE " + table_name + " (a " + col_type + ", b " + col_type + ", c " + col_type + ")");
	}

	// Create the UDF functions into the catalog: for each type, <type>_1, <type>_2 and
	// <type>_3 take one, two and three arguments of that type respectively
	for (LogicalType sql_type : all_sql_types) {
		func_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
		switch (sql_type.id()) {
		case LogicalTypeId::BOOLEAN: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::BOOLEAN}, LogicalType::BOOLEAN,
			                             &udf_unary_function<bool>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::BOOLEAN, LogicalType::BOOLEAN},
			                             LogicalType::BOOLEAN, &udf_binary_function<bool>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::BOOLEAN, LogicalType::BOOLEAN, LogicalType::BOOLEAN},
			                             LogicalType::BOOLEAN, &udf_ternary_function<bool>);
			break;
		}
		case LogicalTypeId::TINYINT: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::TINYINT}, LogicalType::TINYINT,
			                             &udf_unary_function<int8_t>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::TINYINT, LogicalType::TINYINT},
			                             LogicalType::TINYINT, &udf_binary_function<int8_t>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::TINYINT, LogicalType::TINYINT, LogicalType::TINYINT},
			                             LogicalType::TINYINT, &udf_ternary_function<int8_t>);
			break;
		}
		case LogicalTypeId::SMALLINT: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::SMALLINT}, LogicalType::SMALLINT,
			                             &udf_unary_function<int16_t>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::SMALLINT, LogicalType::SMALLINT},
			                             LogicalType::SMALLINT, &udf_binary_function<int16_t>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::SMALLINT, LogicalType::SMALLINT, LogicalType::SMALLINT},
			                             LogicalType::SMALLINT, &udf_ternary_function<int16_t>);
			break;
		}
		case LogicalTypeId::DATE: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::DATE}, LogicalType::DATE,
			                             &udf_unary_function<date_t>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::DATE, LogicalType::DATE}, LogicalType::DATE,
			                             &udf_binary_function<date_t>);
			con.CreateVectorizedFunction(func_name + "_3", {LogicalType::DATE, LogicalType::DATE, LogicalType::DATE},
			                             LogicalType::DATE, &udf_ternary_function<date_t>);
			break;
		}
		case LogicalTypeId::TIME: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::TIME}, LogicalType::TIME,
			                             &udf_unary_function<dtime_t>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::TIME, LogicalType::TIME}, LogicalType::TIME,
			                             &udf_binary_function<dtime_t>);
			con.CreateVectorizedFunction(func_name + "_3", {LogicalType::TIME, LogicalType::TIME, LogicalType::TIME},
			                             LogicalType::TIME, &udf_ternary_function<dtime_t>);
			break;
		}
		case LogicalTypeId::INTEGER: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::INTEGER}, LogicalType::INTEGER,
			                             &udf_unary_function<int32_t>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::INTEGER, LogicalType::INTEGER},
			                             LogicalType::INTEGER, &udf_binary_function<int32_t>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::INTEGER, LogicalType::INTEGER, LogicalType::INTEGER},
			                             LogicalType::INTEGER, &udf_ternary_function<int32_t>);
			break;
		}
		case LogicalTypeId::BIGINT: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::BIGINT}, LogicalType::BIGINT,
			                             &udf_unary_function<int64_t>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::BIGINT, LogicalType::BIGINT},
			                             LogicalType::BIGINT, &udf_binary_function<int64_t>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::BIGINT, LogicalType::BIGINT, LogicalType::BIGINT},
			                             LogicalType::BIGINT, &udf_ternary_function<int64_t>);
			break;
		}
		case LogicalTypeId::TIMESTAMP: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::TIMESTAMP}, LogicalType::TIMESTAMP,
			                             &udf_unary_function<timestamp_t>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::TIMESTAMP, LogicalType::TIMESTAMP},
			                             LogicalType::TIMESTAMP, &udf_binary_function<timestamp_t>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::TIMESTAMP, LogicalType::TIMESTAMP, LogicalType::TIMESTAMP},
			                             LogicalType::TIMESTAMP, &udf_ternary_function<timestamp_t>);
			break;
		}
		case LogicalTypeId::FLOAT:
		case LogicalTypeId::DOUBLE: {
			// FLOAT and DOUBLE share the DOUBLE implementation
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::DOUBLE}, LogicalType::DOUBLE,
			                             &udf_unary_function<double>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::DOUBLE, LogicalType::DOUBLE},
			                             LogicalType::DOUBLE, &udf_binary_function<double>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::DOUBLE, LogicalType::DOUBLE, LogicalType::DOUBLE},
			                             LogicalType::DOUBLE, &udf_ternary_function<double>);
			break;
		}
		case LogicalTypeId::VARCHAR: {
			con.CreateVectorizedFunction(func_name + "_1", {LogicalType::VARCHAR}, LogicalType::VARCHAR,
			                             &udf_unary_function<char *>);
			con.CreateVectorizedFunction(func_name + "_2", {LogicalType::VARCHAR, LogicalType::VARCHAR},
			                             LogicalType::VARCHAR, &udf_binary_function<char *>);
			con.CreateVectorizedFunction(func_name + "_3",
			                             {LogicalType::VARCHAR, LogicalType::VARCHAR, LogicalType::VARCHAR},
			                             LogicalType::VARCHAR, &udf_ternary_function<char *>);
			break;
		}
		default:
			break;
		}
	}

	SECTION("Testing Vectorized UDF functions") {
		// Inserting values
		for (LogicalType sql_type : all_sql_types) {
			table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
			string query = "INSERT INTO " + table_name + " VALUES";
			if (sql_type == LogicalType::BOOLEAN) {
				con.Query(query + "(true, true, true), (true, true, false), (false, false, false);");
			} else if (sql_type.IsNumeric()) {
				con.Query(query + "(1, 10, 100),(2, 20, 100),(3, 30, 100);");
			} else if (sql_type == LogicalType::VARCHAR) {
				con.Query(query + "('a', 'b', 'c'),('a', 'b', 'c'),('a', 'b', 'c');");
			} else if (sql_type == LogicalType::DATE) {
				con.Query(query + "('2008-01-01', '2009-01-01', '2010-01-01')," +
				          "('2008-01-01', '2009-01-01', '2010-01-01')," + "('2008-01-01', '2009-01-01', '2010-01-01')");
			} else if (sql_type == LogicalType::TIME) {
				con.Query(query + "('01:00:00', '02:00:00', '03:00:00')," + "('04:00:00', '05:00:00', '06:00:00')," +
				          "('07:00:00', '08:00:00', '09:00:00')");
			} else if (sql_type == LogicalType::TIMESTAMP) {
				con.Query(query + "('2008-01-01 00:00:00', '2009-01-01 00:00:00', '2010-01-01 00:00:00')," +
				          "('2008-01-01 00:00:00', '2009-01-01 00:00:00', '2010-01-01 00:00:00')," +
				          "('2008-01-01 00:00:00', '2009-01-01 00:00:00', '2010-01-01 00:00:00')");
			}
		}
		// Running the UDF functions and checking the results: the udf_*_function
		// helpers return their last argument, so _1 yields column a, _2 column b
		// and _3 column c
		for (LogicalType sql_type : all_sql_types) {
			table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
			func_name = table_name;
			if (sql_type.IsNumeric()) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {10, 20, 30}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {100, 100, 100}));
			} else if (sql_type == LogicalType::BOOLEAN) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {true, false, false}));
			} else if (sql_type == LogicalType::VARCHAR) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"a", "a", "a"}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"b", "b", "b"}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"c", "c", "c"}));
			} else if (sql_type == LogicalType::DATE) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"2008-01-01", "2008-01-01", "2008-01-01"}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"2009-01-01", "2009-01-01", "2009-01-01"}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"2010-01-01", "2010-01-01", "2010-01-01"}));
			} else if (sql_type == LogicalType::TIME) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"01:00:00", "04:00:00", "07:00:00"}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"02:00:00", "05:00:00", "08:00:00"}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"03:00:00", "06:00:00", "09:00:00"}));
			} else if (sql_type == LogicalType::TIMESTAMP) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"2008-01-01 00:00:00", "2008-01-01 00:00:00", "2008-01-01 00:00:00"}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"2009-01-01 00:00:00", "2009-01-01 00:00:00", "2009-01-01 00:00:00"}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"2010-01-01 00:00:00", "2010-01-01 00:00:00", "2010-01-01 00:00:00"}));
			}
		}
	}

	SECTION("Checking NULLs with Vectorized UDF functions") {
		// NULL inputs must propagate to NULL outputs for every arity
		for (LogicalType sql_type : all_sql_types) {
			table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
			func_name = table_name;
			// Deleting old values
			REQUIRE_NO_FAIL(con.Query("DELETE FROM " + table_name));
			// Inserting NULLs
			string query = "INSERT INTO " + table_name + " VALUES";
			con.Query(query + "(NULL, NULL, NULL), (NULL, NULL, NULL), (NULL, NULL, NULL);");
			// Testing NULLs
			result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
			REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
			result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
			REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
			result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
			REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
		}
	}

	SECTION("Checking Vectorized UDF functions with several input columns") {
		// udf_several_constant_input<T, N> takes N arguments and returns the last one
		duckdb::vector<LogicalType> sql_args = {LogicalType::INTEGER, LogicalType::INTEGER, LogicalType::INTEGER,
		                                        LogicalType::INTEGER};
		// UDF with 4 input ints, return the last one
		con.CreateVectorizedFunction("udf_four_ints", sql_args, LogicalType::INTEGER,
		                             &udf_several_constant_input<int, 4>);
		result = con.Query("SELECT udf_four_ints(1, 2, 3, 4)");
		REQUIRE(CHECK_COLUMN(result, 0, {4}));
		// UDF with 5 input ints, return the last one
		sql_args.emplace_back(LogicalType::INTEGER);
		con.CreateVectorizedFunction("udf_five_ints", sql_args, LogicalType::INTEGER,
		                             &udf_several_constant_input<int, 5>);
		result = con.Query("SELECT udf_five_ints(1, 2, 3, 4, 5)");
		REQUIRE(CHECK_COLUMN(result, 0, {5}));
		// UDF with 10 input ints, return the last one
		for (idx_t i = 0; i < 5; ++i) {
			// add 5 more items
			sql_args.emplace_back(LogicalType::INTEGER);
		}
		con.CreateVectorizedFunction("udf_ten_ints", sql_args, LogicalType::INTEGER,
		                             &udf_several_constant_input<int, 10>);
		result = con.Query("SELECT udf_ten_ints(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)");
		REQUIRE(CHECK_COLUMN(result, 0, {10}));
	}

	SECTION("Checking Vectorized UDF functions with varargs and constant values") {
		// udf_max_constant<T> is registered with a vararg type and returns the
		// maximum of all (constant) inputs
		// Test udf_max with integer
		con.CreateVectorizedFunction("udf_const_max_int", {LogicalType::INTEGER}, LogicalType::INTEGER,
		                             &udf_max_constant<int>, LogicalType::INTEGER);
		result = con.Query("SELECT udf_const_max_int(1, 2, 3, 4, 999, 5, 6, 7)");
		REQUIRE(CHECK_COLUMN(result, 0, {999}));
		result = con.Query("SELECT udf_const_max_int(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)");
		REQUIRE(CHECK_COLUMN(result, 0, {10}));
		// Test udf_max with double
		con.CreateVectorizedFunction("udf_const_max_double", {LogicalType::DOUBLE}, LogicalType::DOUBLE,
		                             &udf_max_constant<double>, LogicalType::DOUBLE);
		result = con.Query("SELECT udf_const_max_double(1.0, 2.0, 3.0, 4.0, 999.0, 5.0, 6.0, 7.0)");
		REQUIRE(CHECK_COLUMN(result, 0, {999.0}));
		result = con.Query("SELECT udf_const_max_double(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)");
		REQUIRE(CHECK_COLUMN(result, 0, {10.0}));
	}

	SECTION("Checking Vectorized UDF functions with varargs and input columns") {
		// udf_max_flat<T> computes the row-wise maximum over a variable number of columns
		// Test udf_max with integer
		REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers (a INTEGER, b INTEGER, c INTEGER, d INTEGER)"));
		REQUIRE_NO_FAIL(con.Query("INSERT INTO integers VALUES(1, 2, 3, 4), (10, 20, 30, 40), (100, 200, 300, 400), "
		                          "(1000, 2000, 3000, 4000)"));
		con.CreateVectorizedFunction("udf_flat_max_int", {LogicalType::INTEGER}, LogicalType::INTEGER,
		                             &udf_max_flat<int>, LogicalType::INTEGER);
		result = con.Query("SELECT udf_flat_max_int(a, b, c, d) FROM integers");
		REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
		result = con.Query("SELECT udf_flat_max_int(d, c, b, a) FROM integers");
		REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
		result = con.Query("SELECT udf_flat_max_int(c, b) FROM integers");
		REQUIRE(CHECK_COLUMN(result, 0, {3, 30, 300, 3000}));
		// Test udf_max with double
		REQUIRE_NO_FAIL(con.Query("CREATE TABLE doubles (a DOUBLE, b DOUBLE, c DOUBLE, d DOUBLE)"));
		REQUIRE_NO_FAIL(con.Query("INSERT INTO doubles VALUES(1, 2, 3, 4), (10, 20, 30, 40), (100, 200, 300, 400), "
		                          "(1000, 2000, 3000, 4000)"));
		con.CreateVectorizedFunction("udf_flat_max_double", {LogicalType::DOUBLE}, LogicalType::DOUBLE,
		                             &udf_max_flat<double>, LogicalType::DOUBLE);
		result = con.Query("SELECT udf_flat_max_double(a, b, c, d) FROM doubles");
		REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
		result = con.Query("SELECT udf_flat_max_double(d, c, b, a) FROM doubles");
		REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
		result = con.Query("SELECT udf_flat_max_double(c, b) FROM doubles");
		REQUIRE(CHECK_COLUMN(result, 0, {3, 30, 300, 3000}));
	}
}

View File

@@ -0,0 +1,164 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "udf_functions_to_test.hpp"
using namespace duckdb;
using namespace std;
// Exercises the templated Connection::CreateScalarFunction<TR, TA...>() overloads
// (types deduced from the template arguments, no explicit LogicalTypes) for every
// supported primitive SQL type, with one-, two- and three-argument UDFs.
// The expected values follow the udf_* helper semantics defined in
// udf_functions_to_test.hpp.
TEST_CASE("UDF functions with template", "[coverage][.]") {
	duckdb::unique_ptr<QueryResult> result;
	DuckDB db(nullptr);
	Connection con(db);
	con.EnableQueryVerification();

	string func_name, table_name, col_type;

	// The types supported by the templated CreateScalarFunction
	const duckdb::vector<LogicalType> sql_templated_types = {
	    LogicalType::BOOLEAN, LogicalType::TINYINT, LogicalType::SMALLINT, LogicalType::INTEGER,
	    LogicalType::BIGINT,  LogicalType::FLOAT,   LogicalType::DOUBLE,   LogicalType::VARCHAR};

	// Creating the tables: one three-column table per type, named after the lower-cased type name
	for (LogicalType sql_type : sql_templated_types) {
		col_type = EnumUtil::ToString(sql_type.id());
		table_name = StringUtil::Lower(col_type);
		con.Query("CREATE TABLE " + table_name + " (a " + col_type + ", b " + col_type + ", c " + col_type + ")");
	}

	// Create the UDF functions into the catalog: for each type, <type>_1, <type>_2
	// and <type>_3 take one, two and three arguments respectively
	for (LogicalType sql_type : sql_templated_types) {
		func_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
		switch (sql_type.id()) {
		case LogicalTypeId::BOOLEAN: {
			con.CreateScalarFunction<bool, bool>(func_name + "_1", &udf_bool);
			con.CreateScalarFunction<bool, bool, bool>(func_name + "_2", &udf_bool);
			con.CreateScalarFunction<bool, bool, bool, bool>(func_name + "_3", &udf_bool);
			break;
		}
		case LogicalTypeId::TINYINT: {
			con.CreateScalarFunction<int8_t, int8_t>(func_name + "_1", &udf_int8);
			con.CreateScalarFunction<int8_t, int8_t, int8_t>(func_name + "_2", &udf_int8);
			con.CreateScalarFunction<int8_t, int8_t, int8_t, int8_t>(func_name + "_3", &udf_int8);
			break;
		}
		case LogicalTypeId::SMALLINT: {
			con.CreateScalarFunction<int16_t, int16_t>(func_name + "_1", &udf_int16);
			con.CreateScalarFunction<int16_t, int16_t, int16_t>(func_name + "_2", &udf_int16);
			con.CreateScalarFunction<int16_t, int16_t, int16_t, int16_t>(func_name + "_3", &udf_int16);
			break;
		}
		case LogicalTypeId::INTEGER: {
			con.CreateScalarFunction<int32_t, int32_t>(func_name + "_1", &udf_int);
			con.CreateScalarFunction<int32_t, int32_t, int32_t>(func_name + "_2", &udf_int);
			con.CreateScalarFunction<int32_t, int32_t, int32_t, int32_t>(func_name + "_3", &udf_int);
			break;
		}
		case LogicalTypeId::BIGINT: {
			con.CreateScalarFunction<int64_t, int64_t>(func_name + "_1", &udf_int64);
			con.CreateScalarFunction<int64_t, int64_t, int64_t>(func_name + "_2", &udf_int64);
			con.CreateScalarFunction<int64_t, int64_t, int64_t, int64_t>(func_name + "_3", &udf_int64);
			break;
		}
		case LogicalTypeId::FLOAT:
			// FIXME: there is an implicit cast to DOUBLE before calling the function: float_1(CAST[DOUBLE](a)),
			// because of that we cannot invoke such a function: float udf_float(float a);
			// {
			// 	con.CreateScalarFunction<float, float>(func_name + "_1", &FLOAT);
			// 	con.CreateScalarFunction<float, float, float>(func_name + "_2", &FLOAT);
			// 	con.CreateScalarFunction<float, float, float, float>(func_name + "_3", &FLOAT);
			// 	break;
			// }
			// FLOAT deliberately falls through to the DOUBLE registration below
		case LogicalTypeId::DOUBLE: {
			con.CreateScalarFunction<double, double>(func_name + "_1", &udf_double);
			con.CreateScalarFunction<double, double, double>(func_name + "_2", &udf_double);
			con.CreateScalarFunction<double, double, double, double>(func_name + "_3", &udf_double);
			break;
		}
		case LogicalTypeId::VARCHAR: {
			con.CreateScalarFunction<string_t, string_t>(func_name + "_1", &udf_varchar);
			con.CreateScalarFunction<string_t, string_t, string_t>(func_name + "_2", &udf_varchar);
			con.CreateScalarFunction<string_t, string_t, string_t, string_t>(func_name + "_3", &udf_varchar);
			break;
		}
		default:
			break;
		}
	}

	SECTION("Testing UDF functions") {
		// Inserting values
		for (LogicalType sql_type : sql_templated_types) {
			table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
			string query = "INSERT INTO " + table_name + " VALUES";
			if (sql_type == LogicalType::BOOLEAN) {
				con.Query(query + "(true, true, true), (true, true, false), (false, false, false);");
			} else if (sql_type.IsNumeric()) {
				con.Query(query + "(1, 10, 100),(2, 10, 100),(3, 10, 100);");
			} else if (sql_type == LogicalType::VARCHAR) {
				con.Query(query + "('a', 'b', 'c'),('a', 'b', 'c'),('a', 'b', 'c');");
			}
		}
		// Running the UDF functions and checking the results against the expected
		// values produced by the udf_* helpers
		for (LogicalType sql_type : sql_templated_types) {
			table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
			func_name = table_name;
			if (sql_type.IsNumeric()) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {10, 20, 30}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {111, 112, 113}));
			} else if (sql_type == LogicalType::BOOLEAN) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {true, false, false}));
			} else if (sql_type == LogicalType::VARCHAR) {
				result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"a", "a", "a"}));
				result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"b", "b", "b"}));
				result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
				REQUIRE(CHECK_COLUMN(result, 0, {"c", "c", "c"}));
			}
		}
	}

	SECTION("Checking NULLs with UDF functions") {
		// NULL inputs must propagate to NULL outputs for every arity
		for (LogicalType sql_type : sql_templated_types) {
			table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
			func_name = table_name;
			// Deleting old values
			REQUIRE_NO_FAIL(con.Query("DELETE FROM " + table_name));
			// Inserting NULLs
			string query = "INSERT INTO " + table_name + " VALUES";
			con.Query(query + "(NULL, NULL, NULL), (NULL, NULL, NULL), (NULL, NULL, NULL);");
			// Testing NULLs
			result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
			REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
			result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
			REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
			result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
			REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
		}
	}
}

View File

@@ -0,0 +1,231 @@
#include "catch.hpp"
#include "test_helpers.hpp"
#include "udf_functions_to_test.hpp"
using namespace duckdb;
using namespace std;
TEST_CASE("Vectorized UDF functions using templates", "[coverage][.]") {
duckdb::unique_ptr<QueryResult> result;
DuckDB db(nullptr);
Connection con(db);
con.EnableQueryVerification();
string func_name, table_name, col_type;
// The types supported by the templated CreateVectorizedFunction
const duckdb::vector<LogicalType> sql_templated_types = {
LogicalType::BOOLEAN, LogicalType::TINYINT, LogicalType::SMALLINT, LogicalType::INTEGER,
LogicalType::BIGINT, LogicalType::FLOAT, LogicalType::DOUBLE, LogicalType::VARCHAR};
// Creating the tables
for (LogicalType sql_type : sql_templated_types) {
col_type = EnumUtil::ToString(sql_type.id());
table_name = StringUtil::Lower(col_type);
con.Query("CREATE TABLE " + table_name + " (a " + col_type + ", b " + col_type + ", c " + col_type + ")");
}
// Create the UDF functions into the catalog
for (LogicalType sql_type : sql_templated_types) {
func_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
switch (sql_type.id()) {
case LogicalTypeId::BOOLEAN: {
con.CreateVectorizedFunction<bool, bool>(func_name + "_1", &udf_unary_function<bool>);
con.CreateVectorizedFunction<bool, bool, bool>(func_name + "_2", &udf_binary_function<bool>);
con.CreateVectorizedFunction<bool, bool, bool, bool>(func_name + "_3", &udf_ternary_function<bool>);
break;
}
case LogicalTypeId::TINYINT: {
con.CreateVectorizedFunction<int8_t, int8_t>(func_name + "_1", &udf_unary_function<int8_t>);
con.CreateVectorizedFunction<int8_t, int8_t, int8_t>(func_name + "_2", &udf_binary_function<int8_t>);
con.CreateVectorizedFunction<int8_t, int8_t, int8_t, int8_t>(func_name + "_3",
&udf_ternary_function<int8_t>);
break;
}
case LogicalTypeId::SMALLINT: {
con.CreateVectorizedFunction<int16_t, int16_t>(func_name + "_1", &udf_unary_function<int16_t>);
con.CreateVectorizedFunction<int16_t, int16_t, int16_t>(func_name + "_2", &udf_binary_function<int16_t>);
con.CreateVectorizedFunction<int16_t, int16_t, int16_t, int16_t>(func_name + "_3",
&udf_ternary_function<int16_t>);
break;
}
case LogicalTypeId::INTEGER: {
con.CreateVectorizedFunction<int, int>(func_name + "_1", &udf_unary_function<int>);
con.CreateVectorizedFunction<int, int, int>(func_name + "_2", &udf_binary_function<int>);
con.CreateVectorizedFunction<int, int, int, int>(func_name + "_3", &udf_ternary_function<int>);
break;
}
case LogicalTypeId::BIGINT: {
con.CreateVectorizedFunction<int64_t, int64_t>(func_name + "_1", &udf_unary_function<int64_t>);
con.CreateVectorizedFunction<int64_t, int64_t, int64_t>(func_name + "_2", &udf_binary_function<int64_t>);
con.CreateVectorizedFunction<int64_t, int64_t, int64_t, int64_t>(func_name + "_3",
&udf_ternary_function<int64_t>);
break;
}
case LogicalTypeId::FLOAT:
case LogicalTypeId::DOUBLE: {
con.CreateVectorizedFunction<double, double>(func_name + "_1", &udf_unary_function<double>);
con.CreateVectorizedFunction<double, double, double>(func_name + "_2", &udf_binary_function<double>);
con.CreateVectorizedFunction<double, double, double, double>(func_name + "_3",
&udf_ternary_function<double>);
break;
}
case LogicalTypeId::VARCHAR: {
con.CreateVectorizedFunction<string_t, string_t>(func_name + "_1", &udf_unary_function<char *>);
con.CreateVectorizedFunction<string_t, string_t, string_t>(func_name + "_2", &udf_binary_function<char *>);
con.CreateVectorizedFunction<string_t, string_t, string_t, string_t>(func_name + "_3",
&udf_ternary_function<char *>);
break;
}
default:
break;
}
}
SECTION("Testing Vectorized UDF functions") {
// Inserting values
for (LogicalType sql_type : sql_templated_types) {
table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
string query = "INSERT INTO " + table_name + " VALUES";
if (sql_type == LogicalType::BOOLEAN) {
con.Query(query + "(true, true, true), (true, true, false), (false, false, false);");
} else if (sql_type.IsNumeric()) {
con.Query(query + "(1, 10, 101),(2, 20, 102),(3, 30, 103);");
} else if (sql_type == LogicalType::VARCHAR) {
con.Query(query + "('a', 'b', 'c'),('a', 'b', 'c'),('a', 'b', 'c');");
}
}
// Running the UDF functions and checking the results
for (LogicalType sql_type : sql_templated_types) {
table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
func_name = table_name;
if (sql_type.IsNumeric()) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {1, 2, 3}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {10, 20, 30}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {101, 102, 103}));
} else if (sql_type == LogicalType::BOOLEAN) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {true, true, false}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {true, false, false}));
} else if (sql_type == LogicalType::VARCHAR) {
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"a", "a", "a"}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"b", "b", "b"}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {"c", "c", "c"}));
}
}
}
SECTION("Cheking NULLs with Vectorized UDF functions") {
for (LogicalType sql_type : sql_templated_types) {
table_name = StringUtil::Lower(EnumUtil::ToString(sql_type.id()));
func_name = table_name;
// Deleting old values
REQUIRE_NO_FAIL(con.Query("DELETE FROM " + table_name));
// Inserting NULLs
string query = "INSERT INTO " + table_name + " VALUES";
con.Query(query + "(NULL, NULL, NULL), (NULL, NULL, NULL), (NULL, NULL, NULL);");
// Testing NULLs
result = con.Query("SELECT " + func_name + "_1(a) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
result = con.Query("SELECT " + func_name + "_2(a, b) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
result = con.Query("SELECT " + func_name + "_3(a, b, c) FROM " + table_name);
REQUIRE(CHECK_COLUMN(result, 0, {Value(nullptr), Value(nullptr), Value(nullptr)}));
}
}
SECTION("Cheking Vectorized UDF functions with several input columns") {
// UDF with 4 input ints, return the last one
con.CreateVectorizedFunction<int, int, int, int, int>("udf_four_ints", &udf_several_constant_input<int, 4>);
result = con.Query("SELECT udf_four_ints(1, 2, 3, 4)");
REQUIRE(CHECK_COLUMN(result, 0, {4}));
// UDF with 5 input ints, return the last one
con.CreateVectorizedFunction<int, int, int, int, int, int>("udf_five_ints",
&udf_several_constant_input<int, 5>);
result = con.Query("SELECT udf_five_ints(1, 2, 3, 4, 5)");
REQUIRE(CHECK_COLUMN(result, 0, {5}));
// UDF with 10 input ints, return the last one
con.CreateVectorizedFunction<int, int, int, int, int, int, int, int, int, int, int>(
"udf_ten_ints", &udf_several_constant_input<int, 10>);
result = con.Query("SELECT udf_ten_ints(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)");
REQUIRE(CHECK_COLUMN(result, 0, {10}));
}
SECTION("Cheking Vectorized UDF functions with varargs and constant values") {
// Test udf_max with integer
con.CreateVectorizedFunction<int, int>("udf_const_max_int", &udf_max_constant<int>, LogicalType::INTEGER);
result = con.Query("SELECT udf_const_max_int(1, 2, 3, 4, 999, 5, 6, 7)");
REQUIRE(CHECK_COLUMN(result, 0, {999}));
result = con.Query("SELECT udf_const_max_int(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)");
REQUIRE(CHECK_COLUMN(result, 0, {10}));
// Test udf_max with double
con.CreateVectorizedFunction<double, double>("udf_const_max_double", &udf_max_constant<double>,
LogicalType::DOUBLE);
result = con.Query("SELECT udf_const_max_double(1.0, 2.0, 3.0, 4.0, 999.0, 5.0, 6.0, 7.0)");
REQUIRE(CHECK_COLUMN(result, 0, {999.0}));
result = con.Query("SELECT udf_const_max_double(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)");
REQUIRE(CHECK_COLUMN(result, 0, {10.0}));
}
SECTION("Cheking Vectorized UDF functions with varargs and input columns") {
// Test udf_max with integer
REQUIRE_NO_FAIL(con.Query("CREATE TABLE integers (a INTEGER, b INTEGER, c INTEGER, d INTEGER)"));
REQUIRE_NO_FAIL(con.Query("INSERT INTO integers VALUES(1, 2, 3, 4), (10, 20, 30, 40), (100, 200, 300, 400), "
"(1000, 2000, 3000, 4000)"));
con.CreateVectorizedFunction<int, int>("udf_flat_max_int", &udf_max_flat<int>, LogicalType::INTEGER);
result = con.Query("SELECT udf_flat_max_int(a, b, c, d) FROM integers");
REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
result = con.Query("SELECT udf_flat_max_int(d, c, b, a) FROM integers");
REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
result = con.Query("SELECT udf_flat_max_int(c, b) FROM integers");
REQUIRE(CHECK_COLUMN(result, 0, {3, 30, 300, 3000}));
// Test udf_max with double
REQUIRE_NO_FAIL(con.Query("CREATE TABLE doubles (a DOUBLE, b DOUBLE, c DOUBLE, d DOUBLE)"));
REQUIRE_NO_FAIL(con.Query("INSERT INTO doubles VALUES(1, 2, 3, 4), (10, 20, 30, 40), (100, 200, 300, 400), "
"(1000, 2000, 3000, 4000)"));
con.CreateVectorizedFunction<double, double>("udf_flat_max_double", &udf_max_flat<double>, LogicalType::DOUBLE);
result = con.Query("SELECT udf_flat_max_double(a, b, c, d) FROM doubles");
REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
result = con.Query("SELECT udf_flat_max_double(d, c, b, a) FROM doubles");
REQUIRE(CHECK_COLUMN(result, 0, {4, 40, 400, 4000}));
result = con.Query("SELECT udf_flat_max_double(c, b) FROM doubles");
REQUIRE(CHECK_COLUMN(result, 0, {3, 30, 300, 3000}));
}
}

View File

@@ -0,0 +1,602 @@
/*HEADER file with all UDF Functions to test*/
#pragma once
namespace duckdb {
// UDF Functions to test
// BOOLEAN test UDFs: identity, two-way conjunction, three-way conjunction.
inline bool udf_bool(bool value) {
	return value;
}
inline bool udf_bool(bool lhs, bool rhs) {
	return lhs && rhs;
}
inline bool udf_bool(bool first, bool second, bool third) {
	return first && second && third;
}
// TINYINT test UDFs: identity, product, three-way sum.
inline int8_t udf_int8(int8_t value) {
	return value;
}
inline int8_t udf_int8(int8_t lhs, int8_t rhs) {
	return static_cast<int8_t>(lhs * rhs);
}
inline int8_t udf_int8(int8_t first, int8_t second, int8_t third) {
	return static_cast<int8_t>(first + second + third);
}
// SMALLINT test UDFs: identity, product, three-way sum.
inline int16_t udf_int16(int16_t value) {
	return value;
}
inline int16_t udf_int16(int16_t lhs, int16_t rhs) {
	return static_cast<int16_t>(lhs * rhs);
}
inline int16_t udf_int16(int16_t first, int16_t second, int16_t third) {
	return static_cast<int16_t>(first + second + third);
}
// DATE test UDFs: each overload echoes its last argument.
inline date_t udf_date(date_t only) {
	return only;
}
inline date_t udf_date(date_t first, date_t last) {
	(void)first; // unused by design: the overload returns its last argument
	return last;
}
inline date_t udf_date(date_t first, date_t second, date_t last) {
	(void)first;
	(void)second;
	return last;
}
// TIME test UDFs: each overload echoes its last argument.
inline dtime_t udf_time(dtime_t only) {
	return only;
}
inline dtime_t udf_time(dtime_t first, dtime_t last) {
	(void)first; // unused by design: the overload returns its last argument
	return last;
}
inline dtime_t udf_time(dtime_t first, dtime_t second, dtime_t last) {
	(void)first;
	(void)second;
	return last;
}
// INTEGER test UDFs: identity, product, three-way sum.
inline int udf_int(int value) {
	return value;
}
inline int udf_int(int lhs, int rhs) {
	return lhs * rhs;
}
inline int udf_int(int first, int second, int third) {
	return first + second + third;
}
// BIGINT test UDFs: identity, product, three-way sum.
inline int64_t udf_int64(int64_t value) {
	return value;
}
inline int64_t udf_int64(int64_t lhs, int64_t rhs) {
	return lhs * rhs;
}
inline int64_t udf_int64(int64_t first, int64_t second, int64_t third) {
	return first + second + third;
}
// TIMESTAMP test UDFs: each overload echoes its last argument.
inline timestamp_t udf_timestamp(timestamp_t only) {
	return only;
}
inline timestamp_t udf_timestamp(timestamp_t first, timestamp_t last) {
	(void)first; // unused by design: the overload returns its last argument
	return last;
}
inline timestamp_t udf_timestamp(timestamp_t first, timestamp_t second, timestamp_t last) {
	(void)first;
	(void)second;
	return last;
}
// FLOAT test UDFs: identity, product, three-way sum.
inline float udf_float(float value) {
	return value;
}
inline float udf_float(float lhs, float rhs) {
	return lhs * rhs;
}
inline float udf_float(float first, float second, float third) {
	return first + second + third;
}
// DOUBLE test UDFs: identity, product, three-way sum.
inline double udf_double(double value) {
	return value;
}
inline double udf_double(double lhs, double rhs) {
	return lhs * rhs;
}
inline double udf_double(double first, double second, double third) {
	return first + second + third;
}
// DECIMAL test UDFs (computed in double): identity, product, three-way sum.
inline double udf_decimal(double value) {
	return value;
}
inline double udf_decimal(double lhs, double rhs) {
	return lhs * rhs;
}
inline double udf_decimal(double first, double second, double third) {
	return first + second + third;
}
// VARCHAR test UDFs: each overload echoes its last argument (string payload is not copied).
inline string_t udf_varchar(string_t only) {
	return only;
}
inline string_t udf_varchar(string_t first, string_t last) {
	(void)first; // unused by design: the overload returns its last argument
	return last;
}
inline string_t udf_varchar(string_t first, string_t second, string_t last) {
	(void)first;
	(void)second;
	return last;
}
// Vectorized UDF Functions -------------------------------------------------------------------
/*
* This vectorized function is an unary one that copies input values to the result vector
*/
/*
 * Unary vectorized UDF: copies the values of the single input vector into the result,
 * propagating the input's validity (NULL) mask. VARCHAR needs a deep copy into the
 * result's string heap; every other type is a plain element copy.
 */
template <typename TYPE>
static void udf_unary_function(DataChunk &input, ExpressionState &state, Vector &result) {
	input.Flatten();
	switch (GetTypeId<TYPE>()) {
	case PhysicalType::VARCHAR: {
		result.SetVectorType(VectorType::FLAT_VECTOR);
		auto result_data = FlatVector::GetData<string_t>(result);
		auto ldata = FlatVector::GetData<string_t>(input.data[0]);
		auto &validity = FlatVector::Validity(input.data[0]);
		// reuse the reference instead of calling Validity() a second time
		FlatVector::SetValidity(result, validity);
		for (idx_t i = 0; i < input.size(); i++) {
			if (!validity.RowIsValid(i)) {
				continue;
			}
			// strings must be materialized in the result vector's own heap
			auto input_length = ldata[i].GetSize();
			string_t target = StringVector::EmptyString(result, input_length);
			auto target_data = target.GetDataWriteable();
			memcpy(target_data, ldata[i].GetData(), input_length);
			target.Finalize();
			result_data[i] = target;
		}
		break;
	}
	default: {
		result.SetVectorType(VectorType::FLAT_VECTOR);
		auto result_data = FlatVector::GetData<TYPE>(result);
		auto ldata = FlatVector::GetData<TYPE>(input.data[0]);
		// take the mask by reference: the original copied the ValidityMask by value,
		// which is wasteful and inconsistent with the binary/ternary variants
		auto &mask = FlatVector::Validity(input.data[0]);
		FlatVector::SetValidity(result, mask);
		for (idx_t i = 0; i < input.size(); i++) {
			if (!mask.RowIsValid(i)) {
				continue;
			}
			result_data[i] = ldata[i];
		}
	}
	}
}
/*
* This vectorized function is a binary one that copies values from the second input vector to the result vector
*/
/*
 * Binary vectorized UDF: copies the values of the SECOND input vector into the result,
 * propagating the second input's validity (NULL) mask.
 */
template <typename TYPE>
static void udf_binary_function(DataChunk &input, ExpressionState &state, Vector &result) {
	input.Flatten();
	switch (GetTypeId<TYPE>()) {
	case PhysicalType::VARCHAR: {
		result.SetVectorType(VectorType::FLAT_VECTOR);
		auto result_data = FlatVector::GetData<string_t>(result);
		auto ldata = FlatVector::GetData<string_t>(input.data[1]);
		// BUGFIX: the skip-mask must come from input.data[1] — the vector whose values
		// are copied and whose mask is propagated to the result. It previously read
		// input.data[0], so a row NULL in column 0 but valid in column 1 was marked
		// valid in the result yet never written (uninitialized string_t).
		auto &validity = FlatVector::Validity(input.data[1]);
		FlatVector::SetValidity(result, validity);
		for (idx_t i = 0; i < input.size(); i++) {
			if (!validity.RowIsValid(i)) {
				continue;
			}
			// strings must be materialized in the result vector's own heap
			auto input_length = ldata[i].GetSize();
			string_t target = StringVector::EmptyString(result, input_length);
			auto target_data = target.GetDataWriteable();
			memcpy(target_data, ldata[i].GetData(), input_length);
			target.Finalize();
			result_data[i] = target;
		}
		break;
	}
	default: {
		result.SetVectorType(VectorType::FLAT_VECTOR);
		auto result_data = FlatVector::GetData<TYPE>(result);
		auto ldata = FlatVector::GetData<TYPE>(input.data[1]);
		auto &mask = FlatVector::Validity(input.data[1]);
		FlatVector::SetValidity(result, mask);
		for (idx_t i = 0; i < input.size(); i++) {
			if (!mask.RowIsValid(i)) {
				continue;
			}
			result_data[i] = ldata[i];
		}
	}
	}
}
/*
* This vectorized function is a ternary one that copies values from the third input vector to the result vector
*/
/*
 * Ternary vectorized UDF: copies the values of the THIRD input vector into the result,
 * propagating the third input's validity (NULL) mask.
 */
template <typename TYPE>
static void udf_ternary_function(DataChunk &input, ExpressionState &state, Vector &result) {
	input.Flatten();
	switch (GetTypeId<TYPE>()) {
	case PhysicalType::VARCHAR: {
		result.SetVectorType(VectorType::FLAT_VECTOR);
		auto result_data = FlatVector::GetData<string_t>(result);
		auto ldata = FlatVector::GetData<string_t>(input.data[2]);
		// BUGFIX: the skip-mask must come from input.data[2] — the vector whose values
		// are copied and whose mask is propagated to the result. It previously read
		// input.data[0], so a row NULL in column 0 but valid in column 2 was marked
		// valid in the result yet never written (uninitialized string_t).
		auto &validity = FlatVector::Validity(input.data[2]);
		FlatVector::SetValidity(result, validity);
		for (idx_t i = 0; i < input.size(); i++) {
			if (!validity.RowIsValid(i)) {
				continue;
			}
			// strings must be materialized in the result vector's own heap
			auto input_length = ldata[i].GetSize();
			string_t target = StringVector::EmptyString(result, input_length);
			auto target_data = target.GetDataWriteable();
			memcpy(target_data, ldata[i].GetData(), input_length);
			target.Finalize();
			result_data[i] = target;
		}
		break;
	}
	default: {
		result.SetVectorType(VectorType::FLAT_VECTOR);
		auto result_data = FlatVector::GetData<TYPE>(result);
		auto ldata = FlatVector::GetData<TYPE>(input.data[2]);
		auto &mask = FlatVector::Validity(input.data[2]);
		FlatVector::SetValidity(result, mask);
		for (idx_t i = 0; i < input.size(); i++) {
			if (!mask.RowIsValid(i)) {
				continue;
			}
			result_data[i] = ldata[i];
		}
	}
	}
}
/*
* Vectorized function with the number of input as a template parameter
*/
// Returns the last of NUM_INPUT constant inputs as a constant result vector.
// NOTE(review): for a constant vector only slot 0 is semantically meaningful; the loop
// writes input.size() slots — presumably safe because vector buffers are allocated at
// full width, but confirm against the Vector buffer allocation.
template <typename TYPE, int NUM_INPUT>
static void udf_several_constant_input(DataChunk &input, ExpressionState &state, Vector &result) {
	result.SetVectorType(VectorType::CONSTANT_VECTOR);
	auto result_data = ConstantVector::GetData<TYPE>(result);
	// read from the last input column (NUM_INPUT - 1)
	auto ldata = ConstantVector::GetData<TYPE>(input.data[NUM_INPUT - 1]);
	for (idx_t i = 0; i < input.size(); i++) {
		result_data[i] = ldata[i];
	}
}
/*
* Vectorized MAX function with varargs and constant inputs
*/
/*
 * Vectorized MAX over a variable number of constant (literal) arguments; produces a
 * constant result. Constant-NULL arguments are skipped.
 */
template <typename TYPE>
static void udf_max_constant(DataChunk &args, ExpressionState &state, Vector &result) {
	// BUGFIX: start from the lowest representable value of TYPE; initializing with 0
	// clamped the result at 0 whenever every argument was negative
	TYPE max = std::numeric_limits<TYPE>::lowest();
	result.SetVectorType(VectorType::CONSTANT_VECTOR);
	for (idx_t col_idx = 0; col_idx < args.ColumnCount(); col_idx++) {
		auto &input = args.data[col_idx];
		if (ConstantVector::IsNull(input)) {
			// constant null, skip
			continue;
		}
		auto input_data = ConstantVector::GetData<TYPE>(input);
		if (max < input_data[0]) {
			max = input_data[0];
		}
	}
	auto result_data = ConstantVector::GetData<TYPE>(result);
	result_data[0] = max;
}
/*
* Vectorized MAX function with varargs and input columns
*/
/*
 * Vectorized row-wise MAX over a variable number of flat input columns.
 */
template <typename TYPE>
static void udf_max_flat(DataChunk &args, ExpressionState &state, Vector &result) {
	args.Flatten();
	D_ASSERT(TypeIsNumeric(GetTypeId<TYPE>()));
	result.SetVectorType(VectorType::FLAT_VECTOR);
	auto result_data = FlatVector::GetData<TYPE>(result);
	// BUGFIX: initialize each result slot with the lowest value of TYPE. The previous
	// memset() filled *bytes*, not TYPE elements — memset truncates its value argument
	// to an unsigned char, which never produces the minimum of a multi-byte integer or
	// floating-point TYPE (it happened to zero-fill instead).
	for (idx_t i = 0; i < args.size(); ++i) {
		result_data[i] = std::numeric_limits<TYPE>::lowest();
	}
	for (idx_t col_idx = 0; col_idx < args.ColumnCount(); col_idx++) {
		auto &input = args.data[col_idx];
		D_ASSERT((GetTypeId<TYPE>()) == input.GetType().InternalType());
		auto input_data = FlatVector::GetData<TYPE>(input);
		// keep the running row-wise maximum across columns
		for (idx_t i = 0; i < args.size(); ++i) {
			if (result_data[i] < input_data[i]) {
				result_data[i] = input_data[i];
			}
		}
	}
}
// Aggregate UDF to test -------------------------------------------------------------------
// AVG function copied from "src/function/aggregate/algebraic/avg.cpp"
// Accumulator state for the UDF AVG aggregate: element count plus running sum.
template <class T>
struct udf_avg_state_t {
	uint64_t count; // number of accumulated inputs
	T sum;          // running total of the accumulated inputs
};
// AVG aggregate callbacks operating on udf_avg_state_t.
struct UDFAverageFunction {
	// Reset count and sum before accumulation starts.
	template <class STATE>
	static void Initialize(STATE &state) {
		state.count = 0;
		state.sum = 0;
	}
	// Fold one input value into the state.
	template <class INPUT_TYPE, class STATE, class OP>
	static void Operation(STATE &state, const INPUT_TYPE &input, AggregateUnaryInput &) {
		state.sum += input;
		state.count++;
	}
	// Fold a constant value repeated `count` times without iterating.
	template <class INPUT_TYPE, class STATE, class OP>
	static void ConstantOperation(STATE &state, const INPUT_TYPE &input, AggregateUnaryInput &, idx_t count) {
		state.count += count;
		state.sum += input * count;
	}
	// Merge a partial state into the target (used when combining partial aggregates).
	template <class STATE, class OP>
	static void Combine(const STATE &source, STATE &target, AggregateInputData &) {
		target.count += source.count;
		target.sum += source.sum;
	}
	// Emit sum / count; an empty state yields NULL.
	template <class T, class STATE>
	static void Finalize(STATE &state, T &target, AggregateFinalizeData &finalize_data) {
		if (state.count == 0) {
			finalize_data.ReturnNull();
		} else {
			target = state.sum / state.count;
		}
	}
	// NULL inputs are skipped rather than propagated.
	static bool IgnoreNull() {
		return true;
	}
};
// COVAR function copied from "src/function/aggregate/algebraic/covar.cpp"
//------------------ COVAR --------------------------------//
// Online covariance state: pair count, the two running means, and the running co-moment.
struct udf_covar_state_t {
	uint64_t count;   // number of (x, y) pairs seen
	double meanx;     // running mean of x
	double meany;     // running mean of y
	double co_moment; // running sum of dx * (y - updated meany)
};
// Single-pass online covariance accumulator (numerically stable running-mean form).
struct UDFCovarOperation {
	// Reset all running statistics.
	template <class STATE>
	static void Initialize(STATE &state) {
		state.count = 0;
		state.meanx = 0;
		state.meany = 0;
		state.co_moment = 0;
	}
	// Fold one (x, y) pair into the running means and co-moment.
	template <class A_TYPE, class B_TYPE, class STATE, class OP>
	static void Operation(STATE &state, const A_TYPE &x, const B_TYPE &y, AggregateBinaryInput &idata) {
		// update running mean and d^2
		const uint64_t n = ++(state.count);
		const double dx = (x - state.meanx);
		const double meanx = state.meanx + dx / n;
		const double dy = (y - state.meany);
		const double meany = state.meany + dy / n;
		// note: uses dx (old mean of x) together with the *updated* mean of y
		const double C = state.co_moment + dx * (y - meany);
		state.meanx = meanx;
		state.meany = meany;
		state.co_moment = C;
	}
	// Merge two partial states (pairwise combination of partial aggregates).
	template <class STATE, class OP>
	static void Combine(const STATE &source, STATE &target, AggregateInputData &) {
		if (target.count == 0) {
			// target is empty: adopt the source state wholesale
			target = source;
		} else if (source.count > 0) {
			const auto count = target.count + source.count;
			const auto meanx = (source.count * source.meanx + target.count * target.meanx) / count;
			const auto meany = (source.count * source.meany + target.count * target.meany) / count;
			// Schubert and Gertz SSDBM 2018, equation 21
			const auto deltax = target.meanx - source.meanx;
			const auto deltay = target.meany - source.meany;
			target.co_moment =
			    source.co_moment + target.co_moment + deltax * deltay * source.count * target.count / count;
			target.meanx = meanx;
			target.meany = meany;
			target.count = count;
		}
	}
	// NULL inputs are skipped rather than propagated.
	static bool IgnoreNull() {
		return true;
	}
};
// Population covariance: finalizes the shared covariance state as co-moment / count.
struct UDFCovarPopOperation : public UDFCovarOperation {
	template <class T, class STATE>
	static void Finalize(STATE &state, T &target, AggregateFinalizeData &finalize_data) {
		if (state.count > 0) {
			target = state.co_moment / state.count;
		} else {
			// no pairs were accumulated: the aggregate is NULL
			finalize_data.ReturnNull();
		}
	}
};
// UDFSum function based on "src/function/aggregate/distributive/sum.cpp"
//------------------ UDFSum --------------------------------//
// SUM aggregate implemented against the low-level aggregate callback API
// (state size / initialize / update / simple update / combine / finalize), based on
// "src/function/aggregate/distributive/sum.cpp". NULL inputs are ignored.
struct UDFSum {
	typedef struct {
		double value; // running sum
		bool isset;   // false until the first non-NULL value is accumulated
	} sum_state_t;
	// Size in bytes of one aggregate state.
	template <class STATE>
	static idx_t StateSize(const AggregateFunction &function) {
		return sizeof(STATE);
	}
	// Initialize a freshly allocated state to "empty".
	template <class STATE>
	static void Initialize(const AggregateFunction &function, data_ptr_t state) {
		((STATE *)state)->value = 0;
		((STATE *)state)->isset = false;
	}
	// Fold the value at row `idx` into the state.
	template <class INPUT_TYPE, class STATE>
	static void Operation(STATE *state, AggregateInputData &, const INPUT_TYPE *input, idx_t idx) {
		state->isset = true;
		state->value += input[idx];
	}
	// Fold a constant value repeated `count` times into the state.
	template <class INPUT_TYPE, class STATE>
	static void ConstantOperation(STATE *state, AggregateInputData &, const INPUT_TYPE *input, idx_t count) {
		state->isset = true;
		state->value += (INPUT_TYPE)input[0] * (INPUT_TYPE)count;
	}
	// Grouped update: row i folds into the state pointer sdata[i].
	template <class STATE_TYPE, class INPUT_TYPE>
	static void Update(Vector inputs[], AggregateInputData &aggr_input_data, idx_t input_count, Vector &states,
	                   idx_t count) {
		D_ASSERT(input_count == 1);
		if (inputs[0].GetVectorType() == VectorType::CONSTANT_VECTOR &&
		    states.GetVectorType() == VectorType::CONSTANT_VECTOR) {
			if (ConstantVector::IsNull(inputs[0])) {
				// constant NULL input in function that ignores NULL values
				return;
			}
			// regular constant: get first state
			auto idata = ConstantVector::GetData<INPUT_TYPE>(inputs[0]);
			auto sdata = ConstantVector::GetData<STATE_TYPE *>(states);
			UDFSum::ConstantOperation<INPUT_TYPE, STATE_TYPE>(*sdata, aggr_input_data, idata, count);
		} else {
			// BUGFIX: flatten `count` rows, not `input_count` (the number of input
			// vectors, always 1 here); flattening a single row leaves the remaining
			// rows unmaterialized for non-flat inputs. Matches SimpleUpdate below.
			inputs[0].Flatten(count);
			auto idata = FlatVector::GetData<INPUT_TYPE>(inputs[0]);
			auto sdata = FlatVector::GetData<STATE_TYPE *>(states);
			// take the mask by reference (the original copied the ValidityMask)
			auto &mask = FlatVector::Validity(inputs[0]);
			if (!mask.AllValid()) {
				// potential NULL values and NULL values are ignored
				for (idx_t i = 0; i < count; i++) {
					if (mask.RowIsValid(i)) {
						UDFSum::Operation<INPUT_TYPE, STATE_TYPE>(sdata[i], aggr_input_data, idata, i);
					}
				}
			} else {
				// quick path: no NULL values
				for (idx_t i = 0; i < count; i++) {
					UDFSum::Operation<INPUT_TYPE, STATE_TYPE>(sdata[i], aggr_input_data, idata, i);
				}
			}
		}
	}
	// Ungrouped update: every row folds into the single state `state`.
	template <class STATE_TYPE, class INPUT_TYPE>
	static void SimpleUpdate(Vector inputs[], AggregateInputData &aggr_input_data, idx_t input_count, data_ptr_t state,
	                         idx_t count) {
		D_ASSERT(input_count == 1);
		switch (inputs[0].GetVectorType()) {
		case VectorType::CONSTANT_VECTOR: {
			if (ConstantVector::IsNull(inputs[0])) {
				return;
			}
			auto idata = ConstantVector::GetData<INPUT_TYPE>(inputs[0]);
			UDFSum::ConstantOperation<INPUT_TYPE, STATE_TYPE>((STATE_TYPE *)state, aggr_input_data, idata, count);
			break;
		}
		default: {
			inputs[0].Flatten(count);
			auto idata = FlatVector::GetData<INPUT_TYPE>(inputs[0]);
			auto &mask = FlatVector::Validity(inputs[0]);
			if (!mask.AllValid()) {
				// potential NULL values and NULL values are ignored
				for (idx_t i = 0; i < count; i++) {
					if (mask.RowIsValid(i)) {
						UDFSum::Operation<INPUT_TYPE, STATE_TYPE>((STATE_TYPE *)state, aggr_input_data, idata, i);
					}
				}
			} else {
				// quick path: no NULL values
				for (idx_t i = 0; i < count; i++) {
					UDFSum::Operation<INPUT_TYPE, STATE_TYPE>((STATE_TYPE *)state, aggr_input_data, idata, i);
				}
			}
			break;
		}
		}
	}
	// Merge partial states entry by entry (source into target).
	template <class STATE_TYPE>
	static void Combine(Vector &source, Vector &target, AggregateInputData &, idx_t count) {
		D_ASSERT(source.GetType().id() == LogicalTypeId::POINTER && target.GetType().id() == LogicalTypeId::POINTER);
		auto sdata = FlatVector::GetData<const STATE_TYPE *>(source);
		auto tdata = FlatVector::GetData<STATE_TYPE *>(target);
		for (idx_t i = 0; i < count; i++) {
			if (!sdata[i]->isset) {
				// BUGFIX: empty source state — skip this entry only. The previous
				// `return` aborted the whole loop and silently dropped every
				// remaining partial state.
				continue;
			}
			if (!tdata[i]->isset) {
				// target is empty, use source value directly
				*tdata[i] = *sdata[i];
			} else {
				// both set: perform the sum
				tdata[i]->value += sdata[i]->value;
			}
		}
	}
	// Materialize final values, one per state, written at `offset` into `result`.
	template <class STATE_TYPE, class RESULT_TYPE>
	static void Finalize(Vector &states, AggregateInputData &, Vector &result, idx_t count, idx_t offset) {
		if (states.GetVectorType() == VectorType::CONSTANT_VECTOR) {
			result.SetVectorType(VectorType::CONSTANT_VECTOR);
			auto sdata = ConstantVector::GetData<STATE_TYPE *>(states);
			auto rdata = ConstantVector::GetData<RESULT_TYPE>(result);
			UDFSum::Finalize<RESULT_TYPE, STATE_TYPE>(result, *sdata, rdata, ConstantVector::Validity(result), 0);
		} else {
			D_ASSERT(states.GetVectorType() == VectorType::FLAT_VECTOR);
			result.SetVectorType(VectorType::FLAT_VECTOR);
			auto sdata = FlatVector::GetData<STATE_TYPE *>(states);
			auto rdata = FlatVector::GetData<RESULT_TYPE>(result);
			for (idx_t i = 0; i < count; i++) {
				UDFSum::Finalize<RESULT_TYPE, STATE_TYPE>(result, sdata[i], rdata, FlatVector::Validity(result),
				                                          i + offset);
			}
		}
	}
	// Scalar finalize: emit the sum, or NULL if no value was ever accumulated.
	template <class T, class STATE>
	static void Finalize(Vector &result, STATE *state, T *target, ValidityMask &mask, idx_t idx) {
		if (!state->isset) {
			mask.SetInvalid(idx);
		} else {
			target[idx] = state->value;
		}
	}
}; // end UDFSum
} // namespace duckdb