diff --git a/.github/workflows/arrow.yml b/.github/workflows/arrow.yml
index 48fdd9ba079..c3d2de82058 100644
--- a/.github/workflows/arrow.yml
+++ b/.github/workflows/arrow.yml
@@ -64,6 +64,8 @@ jobs:
           cargo run --example dynamic_types
           cargo run --example read_csv
           cargo run --example read_csv_infer_schema
+      - name: Run non-archery based integration-tests
+        run: cargo test -p arrow-integration-testing
 
   # test compilation features
   linux-features:
diff --git a/arrow-flight/src/arrow.flight.protocol.rs b/arrow-flight/src/arrow.flight.protocol.rs
index 2b085d6d1f6..72066af20e9 100644
--- a/arrow-flight/src/arrow.flight.protocol.rs
+++ b/arrow-flight/src/arrow.flight.protocol.rs
@@ -1,31 +1,31 @@
 // This file was automatically generated through the build.rs script, and should not be edited.
 
 ///
-/// The request that a client provides to a server on handshake. 
+/// The request that a client provides to a server on handshake.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct HandshakeRequest {
     ///
-    /// A defined protocol version 
+    /// A defined protocol version
     #[prost(uint64, tag="1")]
     pub protocol_version: u64,
     ///
-    /// Arbitrary auth/handshake info. 
+    /// Arbitrary auth/handshake info.
     #[prost(bytes="vec", tag="2")]
     pub payload: ::prost::alloc::vec::Vec<u8>,
 }
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct HandshakeResponse {
     ///
-    /// A defined protocol version 
+    /// A defined protocol version
     #[prost(uint64, tag="1")]
     pub protocol_version: u64,
     ///
-    /// Arbitrary auth/handshake info. 
+    /// Arbitrary auth/handshake info.
     #[prost(bytes="vec", tag="2")]
     pub payload: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// A message for doing simple auth. 
+/// A message for doing simple auth.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct BasicAuth {
     #[prost(string, tag="2")]
@@ -37,8 +37,8 @@ pub struct BasicAuth {
 pub struct Empty {
 }
 ///
-/// Describes an available action, including both the name used for execution
-/// along with a short description of the purpose of the action. 
+/// Describes an available action, including both the name used for execution
+/// along with a short description of the purpose of the action.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct ActionType {
     #[prost(string, tag="1")]
@@ -47,15 +47,15 @@ pub struct ActionType {
     pub description: ::prost::alloc::string::String,
 }
 ///
-/// A service specific expression that can be used to return a limited set
-/// of available Arrow Flight streams. 
+/// A service specific expression that can be used to return a limited set
+/// of available Arrow Flight streams.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Criteria {
     #[prost(bytes="vec", tag="1")]
     pub expression: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// An opaque action specific for the service. 
+/// An opaque action specific for the service.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Action {
     #[prost(string, tag="1")]
@@ -64,54 +64,54 @@ pub struct Action {
     pub body: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// An opaque result returned after executing an action. 
+/// An opaque result returned after executing an action.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct Result {
     #[prost(bytes="vec", tag="1")]
     pub body: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// Wrap the result of a getSchema call 
+/// Wrap the result of a getSchema call
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct SchemaResult {
-    /// schema of the dataset as described in Schema.fbs::Schema. 
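Since the generated-file hunks in this diff only strip trailing whitespace from doc comments, the message shapes themselves are unchanged. As a quick orientation before the diff continues, here is a minimal sketch of how a client might fill in the handshake messages defined above. It assumes the generated types are re-exported at the `arrow_flight` crate root, and the token payload is purely illustrative:

```rust
use arrow_flight::{HandshakeRequest, HandshakeResponse};

fn main() {
    // Arbitrary, opaque auth/handshake info; a token is assumed here.
    let request = HandshakeRequest {
        protocol_version: 0,
        payload: b"illustrative-token".to_vec(),
    };
    // A server would reply with the same shape, echoing the protocol version.
    let response = HandshakeResponse {
        protocol_version: request.protocol_version,
        payload: Vec::new(),
    };
    assert_eq!(request.protocol_version, response.protocol_version);
}
```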
#[prost(bytes="vec", tag="1")] pub schema: ::prost::alloc::vec::Vec, } /// -/// The name or tag for a Flight. May be used as a way to retrieve or generate -/// a flight or be used to expose a set of previously defined flights. +/// The name or tag for a Flight. May be used as a way to retrieve or generate +/// a flight or be used to expose a set of previously defined flights. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FlightDescriptor { #[prost(enumeration="flight_descriptor::DescriptorType", tag="1")] pub r#type: i32, /// - /// Opaque value used to express a command. Should only be defined when - /// type = CMD. + /// Opaque value used to express a command. Should only be defined when + /// type = CMD. #[prost(bytes="vec", tag="2")] pub cmd: ::prost::alloc::vec::Vec, /// - /// List of strings identifying a particular dataset. Should only be defined - /// when type = PATH. + /// List of strings identifying a particular dataset. Should only be defined + /// when type = PATH. #[prost(string, repeated, tag="3")] pub path: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Nested message and enum types in `FlightDescriptor`. pub mod flight_descriptor { /// - /// Describes what type of descriptor is defined. + /// Describes what type of descriptor is defined. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum DescriptorType { - /// Protobuf pattern, not used. + /// Protobuf pattern, not used. Unknown = 0, /// - /// A named path that identifies a dataset. A path is composed of a string - /// or list of strings describing a particular dataset. This is conceptually + /// A named path that identifies a dataset. A path is composed of a string + /// or list of strings describing a particular dataset. This is conceptually /// similar to a path inside a filesystem. Path = 1, /// - /// An opaque command to generate a dataset. + /// An opaque command to generate a dataset. Cmd = 2, } impl DescriptorType { @@ -129,86 +129,86 @@ pub mod flight_descriptor { } } /// -/// The access coordinates for retrieval of a dataset. With a FlightInfo, a -/// consumer is able to determine how to retrieve a dataset. +/// The access coordinates for retrieval of a dataset. With a FlightInfo, a +/// consumer is able to determine how to retrieve a dataset. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FlightInfo { - /// schema of the dataset as described in Schema.fbs::Schema. + /// schema of the dataset as described in Schema.fbs::Schema. #[prost(bytes="vec", tag="1")] pub schema: ::prost::alloc::vec::Vec, /// - /// The descriptor associated with this info. + /// The descriptor associated with this info. #[prost(message, optional, tag="2")] pub flight_descriptor: ::core::option::Option, /// - /// A list of endpoints associated with the flight. To consume the whole - /// flight, all endpoints must be consumed. + /// A list of endpoints associated with the flight. To consume the whole + /// flight, all endpoints must be consumed. #[prost(message, repeated, tag="3")] pub endpoint: ::prost::alloc::vec::Vec, - /// Set these to -1 if unknown. + /// Set these to -1 if unknown. #[prost(int64, tag="4")] pub total_records: i64, #[prost(int64, tag="5")] pub total_bytes: i64, } /// -/// A particular stream or split associated with a flight. +/// A particular stream or split associated with a flight. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FlightEndpoint { /// - /// Token used to retrieve this stream. 
+ /// Token used to retrieve this stream. #[prost(message, optional, tag="1")] pub ticket: ::core::option::Option, /// - /// A list of URIs where this ticket can be redeemed. If the list is - /// empty, the expectation is that the ticket can only be redeemed on the - /// current service where the ticket was generated. + /// A list of URIs where this ticket can be redeemed. If the list is + /// empty, the expectation is that the ticket can only be redeemed on the + /// current service where the ticket was generated. #[prost(message, repeated, tag="2")] pub location: ::prost::alloc::vec::Vec, } /// -/// A location where a Flight service will accept retrieval of a particular -/// stream given a ticket. +/// A location where a Flight service will accept retrieval of a particular +/// stream given a ticket. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Location { #[prost(string, tag="1")] pub uri: ::prost::alloc::string::String, } /// -/// An opaque identifier that the service can use to retrieve a particular -/// portion of a stream. +/// An opaque identifier that the service can use to retrieve a particular +/// portion of a stream. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Ticket { #[prost(bytes="vec", tag="1")] pub ticket: ::prost::alloc::vec::Vec, } /// -/// A batch of Arrow data as part of a stream of batches. +/// A batch of Arrow data as part of a stream of batches. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FlightData { /// - /// The descriptor of the data. This is only relevant when a client is - /// starting a new DoPut stream. + /// The descriptor of the data. This is only relevant when a client is + /// starting a new DoPut stream. #[prost(message, optional, tag="1")] pub flight_descriptor: ::core::option::Option, /// - /// Header for message data as described in Message.fbs::Message. + /// Header for message data as described in Message.fbs::Message. #[prost(bytes="vec", tag="2")] pub data_header: ::prost::alloc::vec::Vec, /// - /// Application-defined metadata. + /// Application-defined metadata. #[prost(bytes="vec", tag="3")] pub app_metadata: ::prost::alloc::vec::Vec, /// - /// The actual batch of Arrow data. Preferably handled with minimal-copies - /// coming last in the definition to help with sidecar patterns (it is - /// expected that some implementations will fetch this field off the wire - /// with specialized code to avoid extra memory copies). + /// The actual batch of Arrow data. Preferably handled with minimal-copies + /// coming last in the definition to help with sidecar patterns (it is + /// expected that some implementations will fetch this field off the wire + /// with specialized code to avoid extra memory copies). #[prost(bytes="vec", tag="1000")] pub data_body: ::prost::alloc::vec::Vec, } /// * -/// The response message associated with the submission of a DoPut. +/// The response message associated with the submission of a DoPut. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PutResult { #[prost(bytes="vec", tag="1")] diff --git a/arrow-flight/src/sql/arrow.flight.protocol.sql.rs b/arrow-flight/src/sql/arrow.flight.protocol.sql.rs index 77221dd1a48..b52aa467a61 100644 --- a/arrow-flight/src/sql/arrow.flight.protocol.sql.rs +++ b/arrow-flight/src/sql/arrow.flight.protocol.sql.rs @@ -1,13 +1,13 @@ // This file was automatically generated through the build.rs script, and should not be edited. /// -/// Represents a metadata request. 
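Tying the retrieval messages together: per the comments above, a FlightInfo lists endpoints, every endpoint must be consumed to read the whole flight, and an endpoint with an empty location list means its ticket is only redeemable on the issuing service. A sketch of walking that structure (the printing is illustrative):

```rust
use arrow_flight::FlightInfo;

// Walk a FlightInfo and report where each ticket can be redeemed.
fn plan_retrieval(info: &FlightInfo) {
    for endpoint in &info.endpoint {
        let ticket = match &endpoint.ticket {
            Some(ticket) => ticket,
            None => continue,
        };
        if endpoint.location.is_empty() {
            // Empty list: redeem on the service that issued the ticket.
            println!("redeem {}-byte ticket on the current service", ticket.ticket.len());
        } else {
            for location in &endpoint.location {
                println!("redeem ticket at {}", location.uri);
            }
        }
    }
}
```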
Used in the command member of FlightDescriptor -/// for the following RPC calls: +/// Represents a metadata request. Used in the command member of FlightDescriptor +/// for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the metadata request. /// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// info_name: uint32 not null, /// value: dense_union< /// string_value: utf8, @@ -16,185 +16,185 @@ /// int32_bitmask: int32, /// string_list: list /// int32_to_int32_list_map: map> -/// > -/// where there is one row per requested piece of metadata information. +/// > +/// where there is one row per requested piece of metadata information. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetSqlInfo { /// - /// Values are modelled after ODBC's SQLGetInfo() function. This information is intended to provide - /// Flight SQL clients with basic, SQL syntax and SQL functions related information. - /// More information types can be added in future releases. - /// E.g. more SQL syntax support types, scalar functions support, type conversion support etc. + /// Values are modelled after ODBC's SQLGetInfo() function. This information is intended to provide + /// Flight SQL clients with basic, SQL syntax and SQL functions related information. + /// More information types can be added in future releases. + /// E.g. more SQL syntax support types, scalar functions support, type conversion support etc. /// - /// Note that the set of metadata may expand. + /// Note that the set of metadata may expand. /// - /// Initially, Flight SQL will support the following information types: - /// - Server Information - Range [0-500) - /// - Syntax Information - Range [500-1000) - /// Range [0-10,000) is reserved for defaults (see SqlInfo enum for default options). - /// Custom options should start at 10,000. + /// Initially, Flight SQL will support the following information types: + /// - Server Information - Range [0-500) + /// - Syntax Information - Range [500-1000) + /// Range [0-10,000) is reserved for defaults (see SqlInfo enum for default options). + /// Custom options should start at 10,000. /// - /// If omitted, then all metadata will be retrieved. - /// Flight SQL Servers may choose to include additional metadata above and beyond the specified set, however they must - /// at least return the specified set. IDs ranging from 0 to 10,000 (exclusive) are reserved for future use. - /// If additional metadata is included, the metadata IDs should start from 10,000. + /// If omitted, then all metadata will be retrieved. + /// Flight SQL Servers may choose to include additional metadata above and beyond the specified set, however they must + /// at least return the specified set. IDs ranging from 0 to 10,000 (exclusive) are reserved for future use. + /// If additional metadata is included, the metadata IDs should start from 10,000. #[prost(uint32, repeated, tag="1")] pub info: ::prost::alloc::vec::Vec, } /// -/// Represents a request to retrieve the list of catalogs on a Flight SQL enabled backend. -/// The definition of a catalog depends on vendor/implementation. It is usually the database itself -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve the list of catalogs on a Flight SQL enabled backend. +/// The definition of a catalog depends on vendor/implementation. 
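A sketch of the two ways to use the CommandGetSqlInfo message above, assuming the Flight SQL types are exposed under `arrow_flight::sql` (behind the `flight-sql-experimental` feature):

```rust
use arrow_flight::sql::{CommandGetSqlInfo, SqlInfo};

fn main() {
    // An empty `info` list asks the server for all metadata.
    let everything = CommandGetSqlInfo { info: Vec::new() };
    // Listing ordinals narrows the request to specific pieces of metadata.
    let name_and_version = CommandGetSqlInfo {
        info: vec![
            SqlInfo::FlightSqlServerName as u32,
            SqlInfo::FlightSqlServerVersion as u32,
        ],
    };
    assert!(everything.info.is_empty() && name_and_version.info.len() == 2);
}
```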
It is usually the database itself +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. /// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// catalog_name: utf8 not null -/// > -/// The returned data should be ordered by catalog_name. +/// > +/// The returned data should be ordered by catalog_name. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetCatalogs { } /// -/// Represents a request to retrieve the list of database schemas on a Flight SQL enabled backend. -/// The definition of a database schema depends on vendor/implementation. It is usually a collection of tables. -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve the list of database schemas on a Flight SQL enabled backend. +/// The definition of a database schema depends on vendor/implementation. It is usually a collection of tables. +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. /// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// catalog_name: utf8, /// db_schema_name: utf8 not null -/// > -/// The returned data should be ordered by catalog_name, then db_schema_name. +/// > +/// The returned data should be ordered by catalog_name, then db_schema_name. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetDbSchemas { /// - /// Specifies the Catalog to search for the tables. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. + /// Specifies the Catalog to search for the tables. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. #[prost(string, optional, tag="1")] pub catalog: ::core::option::Option<::prost::alloc::string::String>, /// - /// Specifies a filter pattern for schemas to search for. - /// When no db_schema_filter_pattern is provided, the pattern will not be used to narrow the search. - /// In the pattern string, two special characters can be used to denote matching rules: + /// Specifies a filter pattern for schemas to search for. + /// When no db_schema_filter_pattern is provided, the pattern will not be used to narrow the search. + /// In the pattern string, two special characters can be used to denote matching rules: /// - "%" means to match any substring with 0 or more characters. /// - "_" means to match any one character. #[prost(string, optional, tag="2")] pub db_schema_filter_pattern: ::core::option::Option<::prost::alloc::string::String>, } /// -/// Represents a request to retrieve the list of tables, and optionally their schemas, on a Flight SQL enabled backend. -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve the list of tables, and optionally their schemas, on a Flight SQL enabled backend. +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. 
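A sketch of the filter semantics described for CommandGetDbSchemas above: "%" matches any run of characters, "_" matches exactly one, and `None` means "do not narrow the search". The concrete pattern is illustrative:

```rust
use arrow_flight::sql::CommandGetDbSchemas;

fn main() {
    let request = CommandGetDbSchemas {
        // Some("") would mean "schemas without a catalog"; None means any catalog.
        catalog: None,
        // Matches sales_2022, sales_eu, ... but not, say, presales.
        db_schema_filter_pattern: Some("sales_%".to_string()),
    };
    assert!(request.catalog.is_none());
}
```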
/// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// catalog_name: utf8, /// db_schema_name: utf8, /// table_name: utf8 not null, /// table_type: utf8 not null, /// \[optional\] table_schema: bytes not null (schema of the table as described in Schema.fbs::Schema, /// it is serialized as an IPC message.) -/// > -/// The returned data should be ordered by catalog_name, db_schema_name, table_name, then table_type, followed by table_schema if requested. +/// > +/// The returned data should be ordered by catalog_name, db_schema_name, table_name, then table_type, followed by table_schema if requested. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetTables { /// - /// Specifies the Catalog to search for the tables. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. + /// Specifies the Catalog to search for the tables. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. #[prost(string, optional, tag="1")] pub catalog: ::core::option::Option<::prost::alloc::string::String>, /// - /// Specifies a filter pattern for schemas to search for. - /// When no db_schema_filter_pattern is provided, all schemas matching other filters are searched. - /// In the pattern string, two special characters can be used to denote matching rules: + /// Specifies a filter pattern for schemas to search for. + /// When no db_schema_filter_pattern is provided, all schemas matching other filters are searched. + /// In the pattern string, two special characters can be used to denote matching rules: /// - "%" means to match any substring with 0 or more characters. /// - "_" means to match any one character. #[prost(string, optional, tag="2")] pub db_schema_filter_pattern: ::core::option::Option<::prost::alloc::string::String>, /// - /// Specifies a filter pattern for tables to search for. - /// When no table_name_filter_pattern is provided, all tables matching other filters are searched. - /// In the pattern string, two special characters can be used to denote matching rules: + /// Specifies a filter pattern for tables to search for. + /// When no table_name_filter_pattern is provided, all tables matching other filters are searched. + /// In the pattern string, two special characters can be used to denote matching rules: /// - "%" means to match any substring with 0 or more characters. /// - "_" means to match any one character. #[prost(string, optional, tag="3")] pub table_name_filter_pattern: ::core::option::Option<::prost::alloc::string::String>, /// - /// Specifies a filter of table types which must match. - /// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. - /// TABLE, VIEW, and SYSTEM TABLE are commonly supported. + /// Specifies a filter of table types which must match. + /// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. + /// TABLE, VIEW, and SYSTEM TABLE are commonly supported. #[prost(string, repeated, tag="4")] pub table_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Specifies if the Arrow schema should be returned for found tables. + /// Specifies if the Arrow schema should be returned for found tables. 
#[prost(bool, tag="5")] pub include_schema: bool, } /// -/// Represents a request to retrieve the list of table types on a Flight SQL enabled backend. -/// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. -/// TABLE, VIEW, and SYSTEM TABLE are commonly supported. -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve the list of table types on a Flight SQL enabled backend. +/// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables. +/// TABLE, VIEW, and SYSTEM TABLE are commonly supported. +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. /// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// table_type: utf8 not null -/// > -/// The returned data should be ordered by table_type. +/// > +/// The returned data should be ordered by table_type. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetTableTypes { } /// -/// Represents a request to retrieve the primary keys of a table on a Flight SQL enabled backend. -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve the primary keys of a table on a Flight SQL enabled backend. +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. /// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// catalog_name: utf8, /// db_schema_name: utf8, /// table_name: utf8 not null, /// column_name: utf8 not null, /// key_name: utf8, /// key_sequence: int not null -/// > -/// The returned data should be ordered by catalog_name, db_schema_name, table_name, key_name, then key_sequence. +/// > +/// The returned data should be ordered by catalog_name, db_schema_name, table_name, key_name, then key_sequence. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetPrimaryKeys { /// - /// Specifies the catalog to search for the table. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. + /// Specifies the catalog to search for the table. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. #[prost(string, optional, tag="1")] pub catalog: ::core::option::Option<::prost::alloc::string::String>, /// - /// Specifies the schema to search for the table. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. + /// Specifies the schema to search for the table. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. #[prost(string, optional, tag="2")] pub db_schema: ::core::option::Option<::prost::alloc::string::String>, - /// Specifies the table to get the primary keys for. + /// Specifies the table to get the primary keys for. 
#[prost(string, tag="3")] pub table: ::prost::alloc::string::String, } /// -/// Represents a request to retrieve a description of the foreign key columns that reference the given table's -/// primary key columns (the foreign keys exported by a table) of a table on a Flight SQL enabled backend. -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve a description of the foreign key columns that reference the given table's +/// primary key columns (the foreign keys exported by a table) of a table on a Flight SQL enabled backend. +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. /// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// pk_catalog_name: utf8, /// pk_db_schema_name: utf8, /// pk_table_name: utf8 not null, @@ -208,35 +208,35 @@ pub struct CommandGetPrimaryKeys { /// pk_key_name: utf8, /// update_rule: uint1 not null, /// delete_rule: uint1 not null -/// > -/// The returned data should be ordered by fk_catalog_name, fk_db_schema_name, fk_table_name, fk_key_name, then key_sequence. -/// update_rule and delete_rule returns a byte that is equivalent to actions declared on UpdateDeleteRules enum. +/// > +/// The returned data should be ordered by fk_catalog_name, fk_db_schema_name, fk_table_name, fk_key_name, then key_sequence. +/// update_rule and delete_rule returns a byte that is equivalent to actions declared on UpdateDeleteRules enum. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetExportedKeys { /// - /// Specifies the catalog to search for the foreign key table. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. + /// Specifies the catalog to search for the foreign key table. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. #[prost(string, optional, tag="1")] pub catalog: ::core::option::Option<::prost::alloc::string::String>, /// - /// Specifies the schema to search for the foreign key table. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. + /// Specifies the schema to search for the foreign key table. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. #[prost(string, optional, tag="2")] pub db_schema: ::core::option::Option<::prost::alloc::string::String>, - /// Specifies the foreign key table to get the foreign keys for. + /// Specifies the foreign key table to get the foreign keys for. #[prost(string, tag="3")] pub table: ::prost::alloc::string::String, } /// -/// Represents a request to retrieve the foreign keys of a table on a Flight SQL enabled backend. -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve the foreign keys of a table on a Flight SQL enabled backend. +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. 
/// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// pk_catalog_name: utf8, /// pk_db_schema_name: utf8, /// pk_table_name: utf8 not null, @@ -250,9 +250,9 @@ pub struct CommandGetExportedKeys { /// pk_key_name: utf8, /// update_rule: uint1 not null, /// delete_rule: uint1 not null -/// > -/// The returned data should be ordered by pk_catalog_name, pk_db_schema_name, pk_table_name, pk_key_name, then key_sequence. -/// update_rule and delete_rule returns a byte that is equivalent to actions: +/// > +/// The returned data should be ordered by pk_catalog_name, pk_db_schema_name, pk_table_name, pk_key_name, then key_sequence. +/// update_rule and delete_rule returns a byte that is equivalent to actions: /// - 0 = CASCADE /// - 1 = RESTRICT /// - 2 = SET NULL @@ -261,31 +261,31 @@ pub struct CommandGetExportedKeys { #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetImportedKeys { /// - /// Specifies the catalog to search for the primary key table. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. + /// Specifies the catalog to search for the primary key table. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. #[prost(string, optional, tag="1")] pub catalog: ::core::option::Option<::prost::alloc::string::String>, /// - /// Specifies the schema to search for the primary key table. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. + /// Specifies the schema to search for the primary key table. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. #[prost(string, optional, tag="2")] pub db_schema: ::core::option::Option<::prost::alloc::string::String>, - /// Specifies the primary key table to get the foreign keys for. + /// Specifies the primary key table to get the foreign keys for. #[prost(string, tag="3")] pub table: ::prost::alloc::string::String, } /// -/// Represents a request to retrieve a description of the foreign key columns in the given foreign key table that -/// reference the primary key or the columns representing a unique constraint of the parent table (could be the same -/// or a different table) on a Flight SQL enabled backend. -/// Used in the command member of FlightDescriptor for the following RPC calls: +/// Represents a request to retrieve a description of the foreign key columns in the given foreign key table that +/// reference the primary key or the columns representing a unique constraint of the parent table (could be the same +/// or a different table) on a Flight SQL enabled backend. +/// Used in the command member of FlightDescriptor for the following RPC calls: /// - GetSchema: return the Arrow schema of the query. /// - GetFlightInfo: execute the catalog metadata request. /// -/// The returned Arrow schema will be: -/// < +/// The returned Arrow schema will be: +/// < /// pk_catalog_name: utf8, /// pk_db_schema_name: utf8, /// pk_table_name: utf8 not null, @@ -299,9 +299,9 @@ pub struct CommandGetImportedKeys { /// pk_key_name: utf8, /// update_rule: uint1 not null, /// delete_rule: uint1 not null -/// > -/// The returned data should be ordered by pk_catalog_name, pk_db_schema_name, pk_table_name, pk_key_name, then key_sequence. 
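The update_rule and delete_rule bytes above map onto the UpdateDeleteRules actions; only values 0 through 2 are listed in this excerpt, so a decoding sketch keeps the remaining variants behind a catch-all:

```rust
/// Decode an `update_rule`/`delete_rule` byte per the mapping above.
fn rule_name(rule: u8) -> &'static str {
    match rule {
        0 => "CASCADE",
        1 => "RESTRICT",
        2 => "SET NULL",
        // Remaining UpdateDeleteRules variants are not listed in this excerpt.
        _ => "other",
    }
}

fn main() {
    assert_eq!(rule_name(0), "CASCADE");
}
```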
-/// update_rule and delete_rule returns a byte that is equivalent to actions: +/// > +/// The returned data should be ordered by pk_catalog_name, pk_db_schema_name, pk_table_name, pk_key_name, then key_sequence. +/// update_rule and delete_rule returns a byte that is equivalent to actions: /// - 0 = CASCADE /// - 1 = RESTRICT /// - 2 = SET NULL @@ -310,697 +310,697 @@ pub struct CommandGetImportedKeys { #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommandGetCrossReference { /// * - /// The catalog name where the parent table is. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. + /// The catalog name where the parent table is. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. #[prost(string, optional, tag="1")] pub pk_catalog: ::core::option::Option<::prost::alloc::string::String>, /// * - /// The Schema name where the parent table is. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. + /// The Schema name where the parent table is. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. #[prost(string, optional, tag="2")] pub pk_db_schema: ::core::option::Option<::prost::alloc::string::String>, /// * - /// The parent table name. It cannot be null. + /// The parent table name. It cannot be null. #[prost(string, tag="3")] pub pk_table: ::prost::alloc::string::String, /// * - /// The catalog name where the foreign table is. - /// An empty string retrieves those without a catalog. - /// If omitted the catalog name should not be used to narrow the search. + /// The catalog name where the foreign table is. + /// An empty string retrieves those without a catalog. + /// If omitted the catalog name should not be used to narrow the search. #[prost(string, optional, tag="4")] pub fk_catalog: ::core::option::Option<::prost::alloc::string::String>, /// * - /// The schema name where the foreign table is. - /// An empty string retrieves those without a schema. - /// If omitted the schema name should not be used to narrow the search. + /// The schema name where the foreign table is. + /// An empty string retrieves those without a schema. + /// If omitted the schema name should not be used to narrow the search. #[prost(string, optional, tag="5")] pub fk_db_schema: ::core::option::Option<::prost::alloc::string::String>, /// * - /// The foreign table name. It cannot be null. + /// The foreign table name. It cannot be null. #[prost(string, tag="6")] pub fk_table: ::prost::alloc::string::String, } -// SQL Execution Action Messages +// SQL Execution Action Messages /// -/// Request message for the "CreatePreparedStatement" action on a Flight SQL enabled backend. +/// Request message for the "CreatePreparedStatement" action on a Flight SQL enabled backend. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ActionCreatePreparedStatementRequest { - /// The valid SQL string to create a prepared statement for. + /// The valid SQL string to create a prepared statement for. #[prost(string, tag="1")] pub query: ::prost::alloc::string::String, } /// -/// Wrap the result of a "GetPreparedStatement" action. +/// Wrap the result of a "GetPreparedStatement" action. 
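The prepared-statement action messages above pair up into a simple lifecycle: create a statement from SQL text, keep the opaque handle the server returns, and close it when finished. A sketch of the two request messages, assuming the `arrow_flight::sql` re-exports; the query text and handle bytes are illustrative:

```rust
use arrow_flight::sql::{
    ActionClosePreparedStatementRequest, ActionCreatePreparedStatementRequest,
};

fn main() {
    // 1. Ask the server to prepare a statement.
    let create = ActionCreatePreparedStatementRequest {
        query: "SELECT id FROM flights WHERE dest = ?".to_string(),
    };
    // 2. The server answers with ActionCreatePreparedStatementResult; its
    //    prepared_statement_handle is opaque. Pretend we received one.
    let handle: Vec<u8> = b"server-issued-handle".to_vec();
    // 3. Release server resources when done (or rely on a server timeout).
    let close = ActionClosePreparedStatementRequest {
        prepared_statement_handle: handle,
    };
    assert!(!close.prepared_statement_handle.is_empty());
    let _ = create;
}
```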
 ///
-/// The resultant PreparedStatement can be closed either:
-/// - Manually, through the "ClosePreparedStatement" action;
-/// - Automatically, by a server timeout. 
+/// The resultant PreparedStatement can be closed either:
+/// - Manually, through the "ClosePreparedStatement" action;
+/// - Automatically, by a server timeout.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct ActionCreatePreparedStatementResult {
-    /// Opaque handle for the prepared statement on the server. 
+    /// Opaque handle for the prepared statement on the server.
     #[prost(bytes="vec", tag="1")]
     pub prepared_statement_handle: ::prost::alloc::vec::Vec<u8>,
-    /// If a result set generating query was provided, dataset_schema contains the
-    /// schema of the dataset as described in Schema.fbs::Schema, it is serialized as an IPC message. 
+    /// If a result set generating query was provided, dataset_schema contains the
+    /// schema of the dataset as described in Schema.fbs::Schema, it is serialized as an IPC message.
     #[prost(bytes="vec", tag="2")]
     pub dataset_schema: ::prost::alloc::vec::Vec<u8>,
-    /// If the query provided contained parameters, parameter_schema contains the
-    /// schema of the expected parameters as described in Schema.fbs::Schema, it is serialized as an IPC message. 
+    /// If the query provided contained parameters, parameter_schema contains the
+    /// schema of the expected parameters as described in Schema.fbs::Schema, it is serialized as an IPC message.
     #[prost(bytes="vec", tag="3")]
     pub parameter_schema: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// Request message for the "ClosePreparedStatement" action on a Flight SQL enabled backend.
-/// Closes server resources associated with the prepared statement handle. 
+/// Request message for the "ClosePreparedStatement" action on a Flight SQL enabled backend.
+/// Closes server resources associated with the prepared statement handle.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct ActionClosePreparedStatementRequest {
-    /// Opaque handle for the prepared statement on the server. 
+    /// Opaque handle for the prepared statement on the server.
     #[prost(bytes="vec", tag="1")]
     pub prepared_statement_handle: ::prost::alloc::vec::Vec<u8>,
 }
-// SQL Execution Messages. 
+// SQL Execution Messages.
 ///
-/// Represents a SQL query. Used in the command member of FlightDescriptor
-/// for the following RPC calls: 
+/// Represents a SQL query. Used in the command member of FlightDescriptor
+/// for the following RPC calls:
 /// - GetSchema: return the Arrow schema of the query.
 /// - GetFlightInfo: execute the query.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct CommandStatementQuery {
-    /// The SQL syntax. 
+    /// The SQL syntax.
     #[prost(string, tag="1")]
     pub query: ::prost::alloc::string::String,
 }
 /// *
-/// Represents a ticket resulting from GetFlightInfo with a CommandStatementQuery.
-/// This should be used only once and treated as an opaque value, that is, clients should not attempt to parse this. 
+/// Represents a ticket resulting from GetFlightInfo with a CommandStatementQuery.
+/// This should be used only once and treated as an opaque value, that is, clients should not attempt to parse this.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct TicketStatementQuery {
-    /// Unique identifier for the instance of the statement to execute. 
+    /// Unique identifier for the instance of the statement to execute.
     #[prost(bytes="vec", tag="1")]
     pub statement_handle: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// Represents an instance of executing a prepared statement. Used in the command member of FlightDescriptor for
-/// the following RPC calls: 
+/// Represents an instance of executing a prepared statement. Used in the command member of FlightDescriptor for
+/// the following RPC calls:
 /// - DoPut: bind parameter values. All of the bound parameter sets will be executed as a single atomic execution.
 /// - GetFlightInfo: execute the prepared statement instance.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct CommandPreparedStatementQuery {
-    /// Opaque handle for the prepared statement on the server. 
+    /// Opaque handle for the prepared statement on the server.
     #[prost(bytes="vec", tag="1")]
     pub prepared_statement_handle: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// Represents a SQL update query. Used in the command member of FlightDescriptor
-/// for the RPC call DoPut to cause the server to execute the included SQL update. 
+/// Represents a SQL update query. Used in the command member of FlightDescriptor
+/// for the RPC call DoPut to cause the server to execute the included SQL update.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct CommandStatementUpdate {
-    /// The SQL syntax. 
+    /// The SQL syntax.
     #[prost(string, tag="1")]
     pub query: ::prost::alloc::string::String,
 }
 ///
-/// Represents a SQL update query. Used in the command member of FlightDescriptor
-/// for the RPC call DoPut to cause the server to execute the included
-/// prepared statement handle as an update. 
+/// Represents a SQL update query. Used in the command member of FlightDescriptor
+/// for the RPC call DoPut to cause the server to execute the included
+/// prepared statement handle as an update.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct CommandPreparedStatementUpdate {
-    /// Opaque handle for the prepared statement on the server. 
+    /// Opaque handle for the prepared statement on the server.
     #[prost(bytes="vec", tag="1")]
     pub prepared_statement_handle: ::prost::alloc::vec::Vec<u8>,
 }
 ///
-/// Returned from the RPC call DoPut when a CommandStatementUpdate or
-/// CommandPreparedStatementUpdate was in the request, containing
-/// results from the update. 
+/// Returned from the RPC call DoPut when a CommandStatementUpdate or
+/// CommandPreparedStatementUpdate was in the request, containing
+/// results from the update.
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct DoPutUpdateResult {
-    /// The number of records updated. A return value of -1 represents
-    /// an unknown updated record count. 
+    /// The number of records updated. A return value of -1 represents
+    /// an unknown updated record count.
     #[prost(int64, tag="1")]
     pub record_count: i64,
 }
-/// Options for CommandGetSqlInfo. 
+/// Options for CommandGetSqlInfo.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
 #[repr(i32)]
 pub enum SqlInfo {
-    // Server Information [0-500): Provides basic information about the Flight SQL Server. 
+    // Server Information [0-500): Provides basic information about the Flight SQL Server.
-    /// Retrieves a UTF-8 string with the name of the Flight SQL Server. 
+    /// Retrieves a UTF-8 string with the name of the Flight SQL Server.
     FlightSqlServerName = 0,
-    /// Retrieves a UTF-8 string with the native version of the Flight SQL Server. 
+    /// Retrieves a UTF-8 string with the native version of the Flight SQL Server.
     FlightSqlServerVersion = 1,
-    /// Retrieves a UTF-8 string with the Arrow format version of the Flight SQL Server. 
+    /// Retrieves a UTF-8 string with the Arrow format version of the Flight SQL Server.
     FlightSqlServerArrowVersion = 2,
-    /// 
-    /// Retrieves a boolean value indicating whether the Flight SQL Server is read only. 
+    ///
+    /// Retrieves a boolean value indicating whether the Flight SQL Server is read only.
     ///
-    /// Returns: 
-    /// - false: if read-write 
-    /// - true: if read only 
+    /// Returns:
+    /// - false: if read-write
+    /// - true: if read only
     FlightSqlServerReadOnly = 3,
-    // SQL Syntax Information [500-1000): provides information about SQL syntax supported by the Flight SQL Server. 
+    // SQL Syntax Information [500-1000): provides information about SQL syntax supported by the Flight SQL Server.
     ///
-    /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of catalogs. 
+    /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of catalogs.
     ///
-    /// Returns: 
-    /// - false: if it doesn't support CREATE and DROP of catalogs. 
-    /// - true: if it supports CREATE and DROP of catalogs. 
+    /// Returns:
+    /// - false: if it doesn't support CREATE and DROP of catalogs.
+    /// - true: if it supports CREATE and DROP of catalogs.
     SqlDdlCatalog = 500,
     ///
-    /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of schemas. 
+    /// Retrieves a boolean value indicating whether the Flight SQL Server supports CREATE and DROP of schemas.
     ///
-    /// Returns: 
-    /// - false: if it doesn't support CREATE and DROP of schemas. 
-    /// - true: if it supports CREATE and DROP of schemas. 
+    /// Returns:
+    /// - false: if it doesn't support CREATE and DROP of schemas.
+    /// - true: if it supports CREATE and DROP of schemas.
     SqlDdlSchema = 501,
     ///
-    /// Indicates whether the Flight SQL Server supports CREATE and DROP of tables. 
+    /// Indicates whether the Flight SQL Server supports CREATE and DROP of tables.
     ///
-    /// Returns: 
-    /// - false: if it doesn't support CREATE and DROP of tables. 
-    /// - true: if it supports CREATE and DROP of tables. 
+    /// Returns:
+    /// - false: if it doesn't support CREATE and DROP of tables.
+    /// - true: if it supports CREATE and DROP of tables.
     SqlDdlTable = 502,
     ///
-    /// Retrieves a uint32 value representing the enum uint32 ordinal for the case sensitivity of catalog, table, schema and table names. 
+    /// Retrieves a uint32 value representing the enum uint32 ordinal for the case sensitivity of catalog, table, schema and table names.
     ///
-    /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. 
+    /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`.
     SqlIdentifierCase = 503,
-    /// Retrieves a UTF-8 string with the supported character(s) used to surround a delimited identifier. 
+    /// Retrieves a UTF-8 string with the supported character(s) used to surround a delimited identifier.
     SqlIdentifierQuoteChar = 504,
     ///
-    /// Retrieves a uint32 value representing the enum uint32 ordinal for the case sensitivity of quoted identifiers. 
+    /// Retrieves a uint32 value representing the enum uint32 ordinal for the case sensitivity of quoted identifiers.
     ///
-    /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`. 
+    /// The possible values are listed in `arrow.flight.protocol.sql.SqlSupportedCaseSensitivity`.
     SqlQuotedIdentifierCase = 505,
     ///
-    /// Retrieves a boolean value indicating whether all tables are selectable. 
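Looking back at DoPutUpdateResult above: record_count uses -1 as an "unknown" sentinel, so a client should special-case it rather than treat it as a count. A minimal sketch, assuming the `arrow_flight::sql` re-export:

```rust
use arrow_flight::sql::DoPutUpdateResult;

fn describe(result: &DoPutUpdateResult) -> String {
    match result.record_count {
        -1 => "updated an unknown number of records".to_string(),
        n => format!("updated {n} records"),
    }
}

fn main() {
    assert_eq!(
        describe(&DoPutUpdateResult { record_count: -1 }),
        "updated an unknown number of records"
    );
}
```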
/// - /// Returns: - /// - false: if not all tables are selectable or if none are; - /// - true: if all tables are selectable. + /// Returns: + /// - false: if not all tables are selectable or if none are; + /// - true: if all tables are selectable. SqlAllTablesAreSelectable = 506, /// - /// Retrieves the null ordering. + /// Retrieves the null ordering. /// - /// Returns a uint32 ordinal for the null ordering being used, as described in - /// `arrow.flight.protocol.sql.SqlNullOrdering`. + /// Returns a uint32 ordinal for the null ordering being used, as described in + /// `arrow.flight.protocol.sql.SqlNullOrdering`. SqlNullOrdering = 507, - /// Retrieves a UTF-8 string list with values of the supported keywords. + /// Retrieves a UTF-8 string list with values of the supported keywords. SqlKeywords = 508, - /// Retrieves a UTF-8 string list with values of the supported numeric functions. + /// Retrieves a UTF-8 string list with values of the supported numeric functions. SqlNumericFunctions = 509, - /// Retrieves a UTF-8 string list with values of the supported string functions. + /// Retrieves a UTF-8 string list with values of the supported string functions. SqlStringFunctions = 510, - /// Retrieves a UTF-8 string list with values of the supported system functions. + /// Retrieves a UTF-8 string list with values of the supported system functions. SqlSystemFunctions = 511, - /// Retrieves a UTF-8 string list with values of the supported datetime functions. + /// Retrieves a UTF-8 string list with values of the supported datetime functions. SqlDatetimeFunctions = 512, /// - /// Retrieves the UTF-8 string that can be used to escape wildcard characters. - /// This is the string that can be used to escape '_' or '%' in the catalog search parameters that are a pattern - /// (and therefore use one of the wildcard characters). - /// The '_' character represents any single character; the '%' character represents any sequence of zero or more - /// characters. + /// Retrieves the UTF-8 string that can be used to escape wildcard characters. + /// This is the string that can be used to escape '_' or '%' in the catalog search parameters that are a pattern + /// (and therefore use one of the wildcard characters). + /// The '_' character represents any single character; the '%' character represents any sequence of zero or more + /// characters. SqlSearchStringEscape = 513, /// - /// Retrieves a UTF-8 string with all the "extra" characters that can be used in unquoted identifier names - /// (those beyond a-z, A-Z, 0-9 and _). + /// Retrieves a UTF-8 string with all the "extra" characters that can be used in unquoted identifier names + /// (those beyond a-z, A-Z, 0-9 and _). SqlExtraNameCharacters = 514, /// - /// Retrieves a boolean value indicating whether column aliasing is supported. - /// If so, the SQL AS clause can be used to provide names for computed columns or to provide alias names for columns - /// as required. + /// Retrieves a boolean value indicating whether column aliasing is supported. + /// If so, the SQL AS clause can be used to provide names for computed columns or to provide alias names for columns + /// as required. /// - /// Returns: - /// - false: if column aliasing is unsupported; - /// - true: if column aliasing is supported. + /// Returns: + /// - false: if column aliasing is unsupported; + /// - true: if column aliasing is supported. 
SqlSupportsColumnAliasing = 515, /// - /// Retrieves a boolean value indicating whether concatenations between null and non-null values being - /// null are supported. + /// Retrieves a boolean value indicating whether concatenations between null and non-null values being + /// null are supported. /// - /// - Returns: - /// - false: if concatenations between null and non-null values being null are unsupported; - /// - true: if concatenations between null and non-null values being null are supported. + /// - Returns: + /// - false: if concatenations between null and non-null values being null are unsupported; + /// - true: if concatenations between null and non-null values being null are supported. SqlNullPlusNullIsNull = 516, /// - /// Retrieves a map where the key is the type to convert from and the value is a list with the types to convert to, - /// indicating the supported conversions. Each key and each item on the list value is a value to a predefined type on - /// SqlSupportsConvert enum. - /// The returned map will be: map> + /// Retrieves a map where the key is the type to convert from and the value is a list with the types to convert to, + /// indicating the supported conversions. Each key and each item on the list value is a value to a predefined type on + /// SqlSupportsConvert enum. + /// The returned map will be: map> SqlSupportsConvert = 517, /// - /// Retrieves a boolean value indicating whether, when table correlation names are supported, - /// they are restricted to being different from the names of the tables. + /// Retrieves a boolean value indicating whether, when table correlation names are supported, + /// they are restricted to being different from the names of the tables. /// - /// Returns: - /// - false: if table correlation names are unsupported; - /// - true: if table correlation names are supported. + /// Returns: + /// - false: if table correlation names are unsupported; + /// - true: if table correlation names are supported. SqlSupportsTableCorrelationNames = 518, /// - /// Retrieves a boolean value indicating whether, when table correlation names are supported, - /// they are restricted to being different from the names of the tables. + /// Retrieves a boolean value indicating whether, when table correlation names are supported, + /// they are restricted to being different from the names of the tables. /// - /// Returns: - /// - false: if different table correlation names are unsupported; - /// - true: if different table correlation names are supported + /// Returns: + /// - false: if different table correlation names are unsupported; + /// - true: if different table correlation names are supported SqlSupportsDifferentTableCorrelationNames = 519, /// - /// Retrieves a boolean value indicating whether expressions in ORDER BY lists are supported. + /// Retrieves a boolean value indicating whether expressions in ORDER BY lists are supported. /// - /// Returns: - /// - false: if expressions in ORDER BY are unsupported; - /// - true: if expressions in ORDER BY are supported; + /// Returns: + /// - false: if expressions in ORDER BY are unsupported; + /// - true: if expressions in ORDER BY are supported; SqlSupportsExpressionsInOrderBy = 520, /// - /// Retrieves a boolean value indicating whether using a column that is not in the SELECT statement in a GROUP BY - /// clause is supported. + /// Retrieves a boolean value indicating whether using a column that is not in the SELECT statement in a GROUP BY + /// clause is supported. 
/// - /// Returns: - /// - false: if using a column that is not in the SELECT statement in a GROUP BY clause is unsupported; - /// - true: if using a column that is not in the SELECT statement in a GROUP BY clause is supported. + /// Returns: + /// - false: if using a column that is not in the SELECT statement in a GROUP BY clause is unsupported; + /// - true: if using a column that is not in the SELECT statement in a GROUP BY clause is supported. SqlSupportsOrderByUnrelated = 521, /// - /// Retrieves the supported GROUP BY commands; + /// Retrieves the supported GROUP BY commands; /// - /// Returns an int32 bitmask value representing the supported commands. - /// The returned bitmask should be parsed in order to retrieve the supported commands. + /// Returns an int32 bitmask value representing the supported commands. + /// The returned bitmask should be parsed in order to retrieve the supported commands. /// - /// For instance: - /// - return 0 (\b0) => [] (GROUP BY is unsupported); - /// - return 1 (\b1) => \[SQL_GROUP_BY_UNRELATED\]; - /// - return 2 (\b10) => \[SQL_GROUP_BY_BEYOND_SELECT\]; - /// - return 3 (\b11) => [SQL_GROUP_BY_UNRELATED, SQL_GROUP_BY_BEYOND_SELECT]. - /// Valid GROUP BY types are described under `arrow.flight.protocol.sql.SqlSupportedGroupBy`. + /// For instance: + /// - return 0 (\b0) => [] (GROUP BY is unsupported); + /// - return 1 (\b1) => \[SQL_GROUP_BY_UNRELATED\]; + /// - return 2 (\b10) => \[SQL_GROUP_BY_BEYOND_SELECT\]; + /// - return 3 (\b11) => [SQL_GROUP_BY_UNRELATED, SQL_GROUP_BY_BEYOND_SELECT]. + /// Valid GROUP BY types are described under `arrow.flight.protocol.sql.SqlSupportedGroupBy`. SqlSupportedGroupBy = 522, /// - /// Retrieves a boolean value indicating whether specifying a LIKE escape clause is supported. + /// Retrieves a boolean value indicating whether specifying a LIKE escape clause is supported. /// - /// Returns: - /// - false: if specifying a LIKE escape clause is unsupported; - /// - true: if specifying a LIKE escape clause is supported. + /// Returns: + /// - false: if specifying a LIKE escape clause is unsupported; + /// - true: if specifying a LIKE escape clause is supported. SqlSupportsLikeEscapeClause = 523, /// - /// Retrieves a boolean value indicating whether columns may be defined as non-nullable. + /// Retrieves a boolean value indicating whether columns may be defined as non-nullable. /// - /// Returns: - /// - false: if columns cannot be defined as non-nullable; - /// - true: if columns may be defined as non-nullable. + /// Returns: + /// - false: if columns cannot be defined as non-nullable; + /// - true: if columns may be defined as non-nullable. SqlSupportsNonNullableColumns = 524, /// - /// Retrieves the supported SQL grammar level as per the ODBC specification. - /// - /// Returns an int32 bitmask value representing the supported SQL grammar level. - /// The returned bitmask should be parsed in order to retrieve the supported grammar levels. - /// - /// For instance: - /// - return 0 (\b0) => [] (SQL grammar is unsupported); - /// - return 1 (\b1) => \[SQL_MINIMUM_GRAMMAR\]; - /// - return 2 (\b10) => \[SQL_CORE_GRAMMAR\]; - /// - return 3 (\b11) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR]; - /// - return 4 (\b100) => \[SQL_EXTENDED_GRAMMAR\]; - /// - return 5 (\b101) => [SQL_MINIMUM_GRAMMAR, SQL_EXTENDED_GRAMMAR]; - /// - return 6 (\b110) => [SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]; - /// - return 7 (\b111) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]. 
- /// Valid SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedSqlGrammar`. + /// Retrieves the supported SQL grammar level as per the ODBC specification. + /// + /// Returns an int32 bitmask value representing the supported SQL grammar level. + /// The returned bitmask should be parsed in order to retrieve the supported grammar levels. + /// + /// For instance: + /// - return 0 (\b0) => [] (SQL grammar is unsupported); + /// - return 1 (\b1) => \[SQL_MINIMUM_GRAMMAR\]; + /// - return 2 (\b10) => \[SQL_CORE_GRAMMAR\]; + /// - return 3 (\b11) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR]; + /// - return 4 (\b100) => \[SQL_EXTENDED_GRAMMAR\]; + /// - return 5 (\b101) => [SQL_MINIMUM_GRAMMAR, SQL_EXTENDED_GRAMMAR]; + /// - return 6 (\b110) => [SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]; + /// - return 7 (\b111) => [SQL_MINIMUM_GRAMMAR, SQL_CORE_GRAMMAR, SQL_EXTENDED_GRAMMAR]. + /// Valid SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedSqlGrammar`. SqlSupportedGrammar = 525, /// - /// Retrieves the supported ANSI92 SQL grammar level. - /// - /// Returns an int32 bitmask value representing the supported ANSI92 SQL grammar level. - /// The returned bitmask should be parsed in order to retrieve the supported commands. - /// - /// For instance: - /// - return 0 (\b0) => [] (ANSI92 SQL grammar is unsupported); - /// - return 1 (\b1) => \[ANSI92_ENTRY_SQL\]; - /// - return 2 (\b10) => \[ANSI92_INTERMEDIATE_SQL\]; - /// - return 3 (\b11) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL]; - /// - return 4 (\b100) => \[ANSI92_FULL_SQL\]; - /// - return 5 (\b101) => [ANSI92_ENTRY_SQL, ANSI92_FULL_SQL]; - /// - return 6 (\b110) => [ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]; - /// - return 7 (\b111) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]. - /// Valid ANSI92 SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedAnsi92SqlGrammarLevel`. + /// Retrieves the supported ANSI92 SQL grammar level. + /// + /// Returns an int32 bitmask value representing the supported ANSI92 SQL grammar level. + /// The returned bitmask should be parsed in order to retrieve the supported commands. + /// + /// For instance: + /// - return 0 (\b0) => [] (ANSI92 SQL grammar is unsupported); + /// - return 1 (\b1) => \[ANSI92_ENTRY_SQL\]; + /// - return 2 (\b10) => \[ANSI92_INTERMEDIATE_SQL\]; + /// - return 3 (\b11) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL]; + /// - return 4 (\b100) => \[ANSI92_FULL_SQL\]; + /// - return 5 (\b101) => [ANSI92_ENTRY_SQL, ANSI92_FULL_SQL]; + /// - return 6 (\b110) => [ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]; + /// - return 7 (\b111) => [ANSI92_ENTRY_SQL, ANSI92_INTERMEDIATE_SQL, ANSI92_FULL_SQL]. + /// Valid ANSI92 SQL grammar levels are described under `arrow.flight.protocol.sql.SupportedAnsi92SqlGrammarLevel`. SqlAnsi92SupportedLevel = 526, /// - /// Retrieves a boolean value indicating whether the SQL Integrity Enhancement Facility is supported. + /// Retrieves a boolean value indicating whether the SQL Integrity Enhancement Facility is supported. /// - /// Returns: - /// - false: if the SQL Integrity Enhancement Facility is supported; - /// - true: if the SQL Integrity Enhancement Facility is supported. + /// Returns: + /// - false: if the SQL Integrity Enhancement Facility is supported; + /// - true: if the SQL Integrity Enhancement Facility is supported. SqlSupportsIntegrityEnhancementFacility = 527, /// - /// Retrieves the support level for SQL OUTER JOINs. 
+ /// Retrieves the support level for SQL OUTER JOINs. /// - /// Returns a uint3 uint32 ordinal for the SQL ordering being used, as described in - /// `arrow.flight.protocol.sql.SqlOuterJoinsSupportLevel`. + /// Returns a uint32 ordinal for the SQL outer join support level, as described in + /// `arrow.flight.protocol.sql.SqlOuterJoinsSupportLevel`. SqlOuterJoinsSupportLevel = 528, - /// Retrieves a UTF-8 string with the preferred term for "schema". + /// Retrieves a UTF-8 string with the preferred term for "schema". SqlSchemaTerm = 529, - /// Retrieves a UTF-8 string with the preferred term for "procedure". + /// Retrieves a UTF-8 string with the preferred term for "procedure". SqlProcedureTerm = 530, - /// Retrieves a UTF-8 string with the preferred term for "catalog". + /// Retrieves a UTF-8 string with the preferred term for "catalog". SqlCatalogTerm = 531, /// - /// Retrieves a boolean value indicating whether a catalog appears at the start of a fully qualified table name. + /// Retrieves a boolean value indicating whether a catalog appears at the start of a fully qualified table name. /// - /// - false: if a catalog does not appear at the start of a fully qualified table name; - /// - true: if a catalog appears at the start of a fully qualified table name. + /// - false: if a catalog does not appear at the start of a fully qualified table name; + /// - true: if a catalog appears at the start of a fully qualified table name. SqlCatalogAtStart = 532, /// - /// Retrieves the supported actions for a SQL schema. - /// - /// Returns an int32 bitmask value representing the supported actions for a SQL schema. - /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL schema. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported actions for SQL schema); - /// - return 1 (\b1) => \[SQL_ELEMENT_IN_PROCEDURE_CALLS\]; - /// - return 2 (\b10) => \[SQL_ELEMENT_IN_INDEX_DEFINITIONS\]; - /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; - /// - return 4 (\b100) => \[SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS\]; - /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. - /// Valid actions for a SQL schema described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. + /// Retrieves the supported actions for a SQL schema. + /// + /// Returns an int32 bitmask value representing the supported actions for a SQL schema. + /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL schema. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported actions for SQL schema); + /// - return 1 (\b1) => \[SQL_ELEMENT_IN_PROCEDURE_CALLS\]; + /// - return 2 (\b10) => \[SQL_ELEMENT_IN_INDEX_DEFINITIONS\]; + /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; + /// - return 4 (\b100) => \[SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS\]; + /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. 
+ /// Valid actions for a SQL schema are described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. SqlSchemasSupportedActions = 533, /// - /// Retrieves the supported actions for a SQL schema. - /// - /// Returns an int32 bitmask value representing the supported actions for a SQL catalog. - /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL catalog. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported actions for SQL catalog); - /// - return 1 (\b1) => \[SQL_ELEMENT_IN_PROCEDURE_CALLS\]; - /// - return 2 (\b10) => \[SQL_ELEMENT_IN_INDEX_DEFINITIONS\]; - /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; - /// - return 4 (\b100) => \[SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS\]; - /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; - /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. - /// Valid actions for a SQL catalog are described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. + /// Retrieves the supported actions for a SQL catalog. + /// + /// Returns an int32 bitmask value representing the supported actions for a SQL catalog. + /// The returned bitmask should be parsed in order to retrieve the supported actions for a SQL catalog. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported actions for SQL catalog); + /// - return 1 (\b1) => \[SQL_ELEMENT_IN_PROCEDURE_CALLS\]; + /// - return 2 (\b10) => \[SQL_ELEMENT_IN_INDEX_DEFINITIONS\]; + /// - return 3 (\b11) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS]; + /// - return 4 (\b100) => \[SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS\]; + /// - return 5 (\b101) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 6 (\b110) => [SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]; + /// - return 7 (\b111) => [SQL_ELEMENT_IN_PROCEDURE_CALLS, SQL_ELEMENT_IN_INDEX_DEFINITIONS, SQL_ELEMENT_IN_PRIVILEGE_DEFINITIONS]. + /// Valid actions for a SQL catalog are described under `arrow.flight.protocol.sql.SqlSupportedElementActions`. SqlCatalogsSupportedActions = 534, /// - /// Retrieves the supported SQL positioned commands. + /// Retrieves the supported SQL positioned commands. /// - /// Returns an int32 bitmask value representing the supported SQL positioned commands. - /// The returned bitmask should be parsed in order to retrieve the supported SQL positioned commands. + /// Returns an int32 bitmask value representing the supported SQL positioned commands. + /// The returned bitmask should be parsed in order to retrieve the supported SQL positioned commands. /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL positioned commands); - /// - return 1 (\b1) => \[SQL_POSITIONED_DELETE\]; - /// - return 2 (\b10) => \[SQL_POSITIONED_UPDATE\]; - /// - return 3 (\b11) => [SQL_POSITIONED_DELETE, SQL_POSITIONED_UPDATE]. 
+ /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlSupportedPositionedCommands`. SqlSupportedPositionedCommands = 535, /// - /// Retrieves a boolean value indicating whether SELECT FOR UPDATE statements are supported. + /// Retrieves a boolean value indicating whether SELECT FOR UPDATE statements are supported. /// - /// Returns: - /// - false: if SELECT FOR UPDATE statements are unsupported; - /// - true: if SELECT FOR UPDATE statements are supported. + /// Returns: + /// - false: if SELECT FOR UPDATE statements are unsupported; + /// - true: if SELECT FOR UPDATE statements are supported. SqlSelectForUpdateSupported = 536, /// - /// Retrieves a boolean value indicating whether stored procedure calls that use the stored procedure escape syntax - /// are supported. + /// Retrieves a boolean value indicating whether stored procedure calls that use the stored procedure escape syntax + /// are supported. /// - /// Returns: - /// - false: if stored procedure calls that use the stored procedure escape syntax are unsupported; - /// - true: if stored procedure calls that use the stored procedure escape syntax are supported. + /// Returns: + /// - false: if stored procedure calls that use the stored procedure escape syntax are unsupported; + /// - true: if stored procedure calls that use the stored procedure escape syntax are supported. SqlStoredProceduresSupported = 537, /// - /// Retrieves the supported SQL subqueries. - /// - /// Returns an int32 bitmask value representing the supported SQL subqueries. - /// The returned bitmask should be parsed in order to retrieve the supported SQL subqueries. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL subqueries); - /// - return 1 (\b1) => \[SQL_SUBQUERIES_IN_COMPARISONS\]; - /// - return 2 (\b10) => \[SQL_SUBQUERIES_IN_EXISTS\]; - /// - return 3 (\b11) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS]; - /// - return 4 (\b100) => \[SQL_SUBQUERIES_IN_INS\]; - /// - return 5 (\b101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS]; - /// - return 6 (\b110) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_EXISTS]; - /// - return 7 (\b111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS]; - /// - return 8 (\b1000) => \[SQL_SUBQUERIES_IN_QUANTIFIEDS\]; - /// - return 9 (\b1001) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 10 (\b1010) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 11 (\b1011) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 12 (\b1100) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 13 (\b1101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 14 (\b1110) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - return 15 (\b1111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; - /// - ... - /// Valid SQL subqueries are described under `arrow.flight.protocol.sql.SqlSupportedSubqueries`. + /// Retrieves the supported SQL subqueries. + /// + /// Returns an int32 bitmask value representing the supported SQL subqueries. + /// The returned bitmask should be parsed in order to retrieve the supported SQL subqueries. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported SQL subqueries); + /// - return 1 (\b1) => \[SQL_SUBQUERIES_IN_COMPARISONS\]; + /// - return 2 (\b10) => \[SQL_SUBQUERIES_IN_EXISTS\]; + /// - return 3 (\b11) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS]; + /// - return 4 (\b100) => \[SQL_SUBQUERIES_IN_INS\]; + /// - return 5 (\b101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS]; + /// - return 6 (\b110) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_EXISTS]; + /// - return 7 (\b111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS]; + /// - return 8 (\b1000) => \[SQL_SUBQUERIES_IN_QUANTIFIEDS\]; + /// - return 9 (\b1001) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 10 (\b1010) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 11 (\b1011) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 12 (\b1100) => [SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 13 (\b1101) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 14 (\b1110) => [SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - return 15 (\b1111) => [SQL_SUBQUERIES_IN_COMPARISONS, SQL_SUBQUERIES_IN_EXISTS, SQL_SUBQUERIES_IN_INS, SQL_SUBQUERIES_IN_QUANTIFIEDS]; + /// - ... + /// Valid SQL subqueries are described under `arrow.flight.protocol.sql.SqlSupportedSubqueries`. SqlSupportedSubqueries = 538, /// - /// Retrieves a boolean value indicating whether correlated subqueries are supported. + /// Retrieves a boolean value indicating whether correlated subqueries are supported. /// - /// Returns: - /// - false: if correlated subqueries are unsupported; - /// - true: if correlated subqueries are supported. + /// Returns: + /// - false: if correlated subqueries are unsupported; + /// - true: if correlated subqueries are supported. SqlCorrelatedSubqueriesSupported = 539, /// - /// Retrieves the supported SQL UNIONs. + /// Retrieves the supported SQL UNIONs. /// - /// Returns an int32 bitmask value representing the supported SQL UNIONs. - /// The returned bitmask should be parsed in order to retrieve the supported SQL UNIONs. + /// Returns an int32 bitmask value representing the supported SQL UNIONs. + /// The returned bitmask should be parsed in order to retrieve the supported SQL UNIONs. /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL positioned commands); - /// - return 1 (\b1) => \[SQL_UNION\]; - /// - return 2 (\b10) => \[SQL_UNION_ALL\]; - /// - return 3 (\b11) => [SQL_UNION, SQL_UNION_ALL]. - /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlSupportedUnions`. + /// For instance: + /// - return 0 (\b0) => [] (no supported SQL UNIONs); + /// - return 1 (\b1) => \[SQL_UNION\]; + /// - return 2 (\b10) => \[SQL_UNION_ALL\]; + /// - return 3 (\b11) => [SQL_UNION, SQL_UNION_ALL]. + /// Valid SQL UNIONs are described under `arrow.flight.protocol.sql.SqlSupportedUnions`. SqlSupportedUnions = 540, - /// Retrieves a uint32 value representing the maximum number of hex characters allowed in an inline binary literal. + /// Retrieves a uint32 value representing the maximum number of hex characters allowed in an inline binary literal. 
SqlMaxBinaryLiteralLength = 541, - /// Retrieves a uint32 value representing the maximum number of characters allowed for a character literal. + /// Retrieves a uint32 value representing the maximum number of characters allowed for a character literal. SqlMaxCharLiteralLength = 542, - /// Retrieves a uint32 value representing the maximum number of characters allowed for a column name. + /// Retrieves a uint32 value representing the maximum number of characters allowed for a column name. SqlMaxColumnNameLength = 543, - /// Retrieves a uint32 value representing the the maximum number of columns allowed in a GROUP BY clause. + /// Retrieves a uint32 value representing the maximum number of columns allowed in a GROUP BY clause. SqlMaxColumnsInGroupBy = 544, - /// Retrieves a uint32 value representing the maximum number of columns allowed in an index. + /// Retrieves a uint32 value representing the maximum number of columns allowed in an index. SqlMaxColumnsInIndex = 545, - /// Retrieves a uint32 value representing the maximum number of columns allowed in an ORDER BY clause. + /// Retrieves a uint32 value representing the maximum number of columns allowed in an ORDER BY clause. SqlMaxColumnsInOrderBy = 546, - /// Retrieves a uint32 value representing the maximum number of columns allowed in a SELECT list. + /// Retrieves a uint32 value representing the maximum number of columns allowed in a SELECT list. SqlMaxColumnsInSelect = 547, - /// Retrieves a uint32 value representing the maximum number of columns allowed in a table. + /// Retrieves a uint32 value representing the maximum number of columns allowed in a table. SqlMaxColumnsInTable = 548, - /// Retrieves a uint32 value representing the maximum number of concurrent connections possible. + /// Retrieves a uint32 value representing the maximum number of concurrent connections possible. SqlMaxConnections = 549, - /// Retrieves a uint32 value the maximum number of characters allowed in a cursor name. + /// Retrieves a uint32 value representing the maximum number of characters allowed in a cursor name. SqlMaxCursorNameLength = 550, /// - /// Retrieves a uint32 value representing the maximum number of bytes allowed for an index, - /// including all of the parts of the index. + /// Retrieves a uint32 value representing the maximum number of bytes allowed for an index, + /// including all of the parts of the index. SqlMaxIndexLength = 551, - /// Retrieves a uint32 value representing the maximum number of characters allowed in a schema name. + /// Retrieves a uint32 value representing the maximum number of characters allowed in a schema name. SqlDbSchemaNameLength = 552, - /// Retrieves a uint32 value representing the maximum number of characters allowed in a procedure name. + /// Retrieves a uint32 value representing the maximum number of characters allowed in a procedure name. SqlMaxProcedureNameLength = 553, - /// Retrieves a uint32 value representing the maximum number of characters allowed in a catalog name. + /// Retrieves a uint32 value representing the maximum number of characters allowed in a catalog name. SqlMaxCatalogNameLength = 554, - /// Retrieves a uint32 value representing the maximum number of bytes allowed in a single row. + /// Retrieves a uint32 value representing the maximum number of bytes allowed in a single row. SqlMaxRowSize = 555, /// - /// Retrieves a boolean indicating whether the return value for the JDBC method getMaxRowSize includes the SQL - /// data types LONGVARCHAR and LONGVARBINARY. 
+ /// Retrieves a boolean indicating whether the return value for the JDBC method getMaxRowSize includes the SQL + /// data types LONGVARCHAR and LONGVARBINARY. /// - /// Returns: - /// - false: if return value for the JDBC method getMaxRowSize does + /// Returns: + /// - false: if the return value for the JDBC method getMaxRowSize does /// not include the SQL data types LONGVARCHAR and LONGVARBINARY; - /// - true: if return value for the JDBC method getMaxRowSize includes + /// - true: if the return value for the JDBC method getMaxRowSize includes /// the SQL data types LONGVARCHAR and LONGVARBINARY. SqlMaxRowSizeIncludesBlobs = 556, /// - /// Retrieves a uint32 value representing the maximum number of characters allowed for an SQL statement; - /// a result of 0 (zero) means that there is no limit or the limit is not known. + /// Retrieves a uint32 value representing the maximum number of characters allowed for an SQL statement; + /// a result of 0 (zero) means that there is no limit or the limit is not known. SqlMaxStatementLength = 557, - /// Retrieves a uint32 value representing the maximum number of active statements that can be open at the same time. + /// Retrieves a uint32 value representing the maximum number of active statements that can be open at the same time. SqlMaxStatements = 558, - /// Retrieves a uint32 value representing the maximum number of characters allowed in a table name. + /// Retrieves a uint32 value representing the maximum number of characters allowed in a table name. SqlMaxTableNameLength = 559, - /// Retrieves a uint32 value representing the maximum number of tables allowed in a SELECT statement. + /// Retrieves a uint32 value representing the maximum number of tables allowed in a SELECT statement. SqlMaxTablesInSelect = 560, - /// Retrieves a uint32 value representing the maximum number of characters allowed in a user name. + /// Retrieves a uint32 value representing the maximum number of characters allowed in a user name. SqlMaxUsernameLength = 561, /// - /// Retrieves this database's default transaction isolation level as described in - /// `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. + /// Retrieves this database's default transaction isolation level as described in + /// `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. /// - /// Returns a uint32 ordinal for the SQL transaction isolation level. + /// Returns a uint32 ordinal for the SQL transaction isolation level. SqlDefaultTransactionIsolation = 562, /// - /// Retrieves a boolean value indicating whether transactions are supported. If not, invoking the method commit is a - /// noop, and the isolation level is `arrow.flight.protocol.sql.SqlTransactionIsolationLevel.TRANSACTION_NONE`. + /// Retrieves a boolean value indicating whether transactions are supported. If not, invoking the method commit is a + /// noop, and the isolation level is `arrow.flight.protocol.sql.SqlTransactionIsolationLevel.TRANSACTION_NONE`. /// - /// Returns: - /// - false: if transactions are unsupported; - /// - true: if transactions are supported. + /// Returns: + /// - false: if transactions are unsupported; + /// - true: if transactions are supported. SqlTransactionsSupported = 563, /// - /// Retrieves the supported transactions isolation levels. - /// - /// Returns an int32 bitmask value representing the supported transactions isolation levels. - /// The returned bitmask should be parsed in order to retrieve the supported transactions isolation levels. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported SQL transactions isolation levels); - /// - return 1 (\b1) => \[SQL_TRANSACTION_NONE\]; - /// - return 2 (\b10) => \[SQL_TRANSACTION_READ_UNCOMMITTED\]; - /// - return 3 (\b11) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED]; - /// - return 4 (\b100) => \[SQL_TRANSACTION_REPEATABLE_READ\]; - /// - return 5 (\b101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 6 (\b110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 7 (\b111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 8 (\b1000) => \[SQL_TRANSACTION_REPEATABLE_READ\]; - /// - return 9 (\b1001) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 10 (\b1010) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 11 (\b1011) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 12 (\b1100) => [SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 13 (\b1101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 14 (\b1110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 15 (\b1111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ, SQL_TRANSACTION_REPEATABLE_READ]; - /// - return 16 (\b10000) => \[SQL_TRANSACTION_SERIALIZABLE\]; - /// - ... - /// Valid SQL positioned commands are described under `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. + /// Retrieves the supported transaction isolation levels. + /// + /// Returns an int32 bitmask value representing the supported transaction isolation levels. + /// The returned bitmask should be parsed in order to retrieve the supported transaction isolation levels. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported SQL transaction isolation levels); + /// - return 1 (\b1) => \[SQL_TRANSACTION_NONE\]; + /// - return 2 (\b10) => \[SQL_TRANSACTION_READ_UNCOMMITTED\]; + /// - return 3 (\b11) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED]; + /// - return 4 (\b100) => \[SQL_TRANSACTION_READ_COMMITTED\]; + /// - return 5 (\b101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_COMMITTED]; + /// - return 6 (\b110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED]; + /// - return 7 (\b111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED]; + /// - return 8 (\b1000) => \[SQL_TRANSACTION_REPEATABLE_READ\]; + /// - return 9 (\b1001) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 10 (\b1010) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 11 (\b1011) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 12 (\b1100) => [SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 13 (\b1101) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 14 (\b1110) => [SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 15 (\b1111) => [SQL_TRANSACTION_NONE, SQL_TRANSACTION_READ_UNCOMMITTED, SQL_TRANSACTION_READ_COMMITTED, SQL_TRANSACTION_REPEATABLE_READ]; + /// - return 16 (\b10000) => \[SQL_TRANSACTION_SERIALIZABLE\]; + /// - ... + /// Valid SQL transaction isolation levels are described under `arrow.flight.protocol.sql.SqlTransactionIsolationLevel`. SqlSupportedTransactionsIsolationLevels = 564, /// - /// Retrieves a boolean value indicating whether a data definition statement within a transaction forces - /// the transaction to commit. + /// Retrieves a boolean value indicating whether a data definition statement within a transaction forces + /// the transaction to commit. /// - /// Returns: - /// - false: if a data definition statement within a transaction does not force the transaction to commit; - /// - true: if a data definition statement within a transaction forces the transaction to commit. + /// Returns: + /// - false: if a data definition statement within a transaction does not force the transaction to commit; + /// - true: if a data definition statement within a transaction forces the transaction to commit. SqlDataDefinitionCausesTransactionCommit = 565, /// - /// Retrieves a boolean value indicating whether a data definition statement within a transaction is ignored. + /// Retrieves a boolean value indicating whether a data definition statement within a transaction is ignored. /// - /// Returns: - /// - false: if a data definition statement within a transaction is taken into account; - /// - true: a data definition statement within a transaction is ignored. + /// Returns: + /// - false: if a data definition statement within a transaction is taken into account; + /// - true: if a data definition statement within a transaction is ignored. SqlDataDefinitionsInTransactionsIgnored = 566, /// - /// Retrieves an int32 bitmask value representing the supported result set types. - /// The returned bitmask should be parsed in order to retrieve the supported result set types. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported result set types); - /// - return 1 (\b1) => \[SQL_RESULT_SET_TYPE_UNSPECIFIED\]; - /// - return 2 (\b10) => \[SQL_RESULT_SET_TYPE_FORWARD_ONLY\]; - /// - return 3 (\b11) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY]; - /// - return 4 (\b100) => \[SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE\]; - /// - return 5 (\b101) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; - /// - return 6 (\b110) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; - /// - return 7 (\b111) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; - /// - return 8 (\b1000) => \[SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE\]; - /// - ... - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetType`. + /// Retrieves an int32 bitmask value representing the supported result set types. + /// The returned bitmask should be parsed in order to retrieve the supported result set types. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported result set types); + /// - return 1 (\b1) => \[SQL_RESULT_SET_TYPE_UNSPECIFIED\]; + /// - return 2 (\b10) => \[SQL_RESULT_SET_TYPE_FORWARD_ONLY\]; + /// - return 3 (\b11) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY]; + /// - return 4 (\b100) => \[SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE\]; + /// - return 5 (\b101) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; + /// - return 6 (\b110) => [SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; + /// - return 7 (\b111) => [SQL_RESULT_SET_TYPE_UNSPECIFIED, SQL_RESULT_SET_TYPE_FORWARD_ONLY, SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE]; + /// - return 8 (\b1000) => \[SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE\]; + /// - ... + /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetType`. SqlSupportedResultSetTypes = 567, /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_UNSPECIFIED`. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] - /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + /// Returns an int32 bitmask value representing the concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_UNSPECIFIED`. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] + /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. SqlSupportedConcurrenciesForResultSetUnspecified = 568, /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_FORWARD_ONLY`. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] - /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + /// Returns an int32 bitmask value representing the concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_FORWARD_ONLY`. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] + /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. SqlSupportedConcurrenciesForResultSetForwardOnly = 569, /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE`. 
- /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] - /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + /// Returns an int32 bitmask value representing the concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_SENSITIVE`. + /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] + /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. SqlSupportedConcurrenciesForResultSetScrollSensitive = 570, /// - /// Returns an int32 bitmask value concurrency types supported for - /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE`. - /// - /// For instance: - /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) - /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] - /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] - /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] - /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] - /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] - /// Valid result set types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. + /// Returns an int32 bitmask value representing the concurrency types supported for + /// `arrow.flight.protocol.sql.SqlSupportedResultSetType.SQL_RESULT_SET_TYPE_SCROLL_INSENSITIVE`. 
+ /// + /// For instance: + /// - return 0 (\b0) => [] (no supported concurrency types for this result set type) + /// - return 1 (\b1) => \[SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED\] + /// - return 2 (\b10) => \[SQL_RESULT_SET_CONCURRENCY_READ_ONLY\] + /// - return 3 (\b11) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY] + /// - return 4 (\b100) => \[SQL_RESULT_SET_CONCURRENCY_UPDATABLE\] + /// - return 5 (\b101) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 6 (\b110) => [SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// - return 7 (\b111) => [SQL_RESULT_SET_CONCURRENCY_UNSPECIFIED, SQL_RESULT_SET_CONCURRENCY_READ_ONLY, SQL_RESULT_SET_CONCURRENCY_UPDATABLE] + /// Valid result set concurrency types are described under `arrow.flight.protocol.sql.SqlSupportedResultSetConcurrency`. SqlSupportedConcurrenciesForResultSetScrollInsensitive = 571, /// - /// Retrieves a boolean value indicating whether this database supports batch updates. + /// Retrieves a boolean value indicating whether this database supports batch updates. /// - /// - false: if this database does not support batch updates; - /// - true: if this database supports batch updates. + /// - false: if this database does not support batch updates; + /// - true: if this database supports batch updates. SqlBatchUpdatesSupported = 572, /// - /// Retrieves a boolean value indicating whether this database supports savepoints. + /// Retrieves a boolean value indicating whether this database supports savepoints. /// - /// Returns: - /// - false: if this database does not support savepoints; - /// - true: if this database supports savepoints. + /// Returns: + /// - false: if this database does not support savepoints; + /// - true: if this database supports savepoints. SqlSavepointsSupported = 573, /// - /// Retrieves a boolean value indicating whether named parameters are supported in callable statements. + /// Retrieves a boolean value indicating whether named parameters are supported in callable statements. /// - /// Returns: - /// - false: if named parameters in callable statements are unsupported; - /// - true: if named parameters in callable statements are supported. + /// Returns: + /// - false: if named parameters in callable statements are unsupported; + /// - true: if named parameters in callable statements are supported. SqlNamedParametersSupported = 574, /// - /// Retrieves a boolean value indicating whether updates made to a LOB are made on a copy or directly to the LOB. + /// Retrieves a boolean value indicating whether updates made to a LOB are made on a copy or directly to the LOB. /// - /// Returns: - /// - false: if updates made to a LOB are made directly to the LOB; - /// - true: if updates made to a LOB are made on a copy. + /// Returns: + /// - false: if updates made to a LOB are made directly to the LOB; + /// - true: if updates made to a LOB are made on a copy. SqlLocatorsUpdateCopy = 575, /// - /// Retrieves a boolean value indicating whether invoking user-defined or vendor functions - /// using the stored procedure escape syntax is supported. + /// Retrieves a boolean value indicating whether invoking user-defined or vendor functions + /// using the stored procedure escape syntax is supported. 
/// - /// Returns: - /// - false: if invoking user-defined or vendor functions using the stored procedure escape syntax is unsupported; - /// - true: if invoking user-defined or vendor functions using the stored procedure escape syntax is supported. + /// Returns: + /// - false: if invoking user-defined or vendor functions using the stored procedure escape syntax is unsupported; + /// - true: if invoking user-defined or vendor functions using the stored procedure escape syntax is supported. SqlStoredFunctionsUsingCallSyntaxSupported = 576, } impl SqlInfo { diff --git a/arrow-integration/Cargo.toml b/arrow-integration/Cargo.toml new file mode 100644 index 00000000000..4be384b88a7 --- /dev/null +++ b/arrow-integration/Cargo.toml @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +[package] +name = "arrow-integration" +description = "" +version = "21.0.0" +homepage = "https://github.com/apache/arrow-rs" +repository = "https://github.com/apache/arrow-rs" +authors = ["Apache Arrow <dev@arrow.apache.org>"] +license = "Apache-2.0" +keywords = ["arrow"] +edition = "2021" +rust-version = "1.62" + +[dependencies] +arrow = { path = "../arrow", default-features = false, features = ["test_utils", "ipc", "ipc_compression"] } +serde = { version = "1.0", default-features = false, features = ["derive"] } +serde_json = { version = "1.0", default-features = false, features = ["std"] } +num = { version = "0.4", default-features = false, features = ["std"] } +hex = { version = "0.4", default-features = false, features = ["std"] } +flate2 = { version = "1", default-features = false, features = ["rust_backend"] } +
[dev-dependencies] +rand = { version = "0.8", default-features = false, features = ["std", "std_rng"] } +tempfile = { version = "3", default-features = false } +chrono = { version = "0.4", default-features = false } + +[build-dependencies] + diff --git a/arrow-integration/README.md b/arrow-integration/README.md new file mode 100644 index 00000000000..79556a33591 --- /dev/null +++ b/arrow-integration/README.md @@ -0,0 +1,3 @@ +# Arrow Integration Testing + +Rust integration tests; see [integration-testing](../integration-testing) for the archery-based integration tests. diff --git a/arrow/Cargo.toml b/arrow/Cargo.toml index ed339bc304a..c2872baa524 100644 --- a/arrow/Cargo.toml +++ b/arrow/Cargo.toml @@ -38,10 +38,10 @@ path = "src/lib.rs" bench = false [target.'cfg(target_arch = "wasm32")'.dependencies] -ahash = { version = "0.8", default-features = false, features=["compile-time-rng"] } +ahash = { version = "0.8", default-features = false, features = ["compile-time-rng"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ahash = { version = "0.8", default-features = false, features=["runtime-rng"] } +ahash = { version = "0.8", default-features = false, features = ["runtime-rng"] } 
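A note on the SqlInfo bitmask convention documented throughout the enum above: each set bit of the returned int32 maps to one member of the referenced enum, so a client decodes the value by testing bits. A minimal sketch of that decoding, using illustrative string constants for the SqlSupportedGroupBy bits (bit 0 = SQL_GROUP_BY_UNRELATED, bit 1 = SQL_GROUP_BY_BEYOND_SELECT, per the comments above) rather than the generated prost types:

// Hypothetical decoder; names are illustrative, not the generated API.
fn decode_supported_group_by(mask: i32) -> Vec<&'static str> {
    let mut supported = Vec::new();
    // Each set bit corresponds to one SqlSupportedGroupBy member.
    if mask & (1 << 0) != 0 {
        supported.push("SQL_GROUP_BY_UNRELATED");
    }
    if mask & (1 << 1) != 0 {
        supported.push("SQL_GROUP_BY_BEYOND_SELECT");
    }
    supported
}

fn main() {
    // Mirrors the documented case: return 3 (\b11) => both commands supported.
    assert_eq!(
        decode_supported_group_by(3),
        ["SQL_GROUP_BY_UNRELATED", "SQL_GROUP_BY_BEYOND_SELECT"]
    );
}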
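Relatedly, the writer.rs test changes further down replace fixed output paths under target/debug/testdata with anonymous temp files that are written, rewound, and read back through a single handle. A self-contained sketch of that round-trip, assuming the arrow and tempfile crates as declared above (error handling collapsed to unwrap for brevity):

use std::io::Seek;
use std::sync::Arc;

use arrow::array::{ArrayRef, Int32Array};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::ipc::reader::FileReader;
use arrow::ipc::writer::FileWriter;
use arrow::record_batch::RecordBatch;

fn main() {
    let schema = Schema::new(vec![Field::new("field1", DataType::Int32, true)]);
    let batch = RecordBatch::try_new(
        Arc::new(schema.clone()),
        vec![Arc::new(Int32Array::from(vec![Some(1), None, Some(3)])) as ArrayRef],
    )
    .unwrap();

    // Write into an anonymous temp file instead of a fixed path.
    let mut file = tempfile::tempfile().unwrap();
    {
        let mut writer = FileWriter::try_new(&mut file, &schema).unwrap();
        writer.write(&batch).unwrap();
        writer.finish().unwrap();
    }

    // Rewind the same handle, then read the batch back and compare.
    file.rewind().unwrap();
    let reader = FileReader::try_new(file, None).unwrap();
    for read_batch in reader {
        assert_eq!(read_batch.unwrap(), batch);
    }
}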
[dependencies] serde = { version = "1.0", default-features = false } @@ -61,7 +61,6 @@ packed_simd = { version = "0.3", default-features = false, optional = true, pack chrono = { version = "0.4", default-features = false, features = ["clock"] } chrono-tz = { version = "0.6", default-features = false, optional = true } flatbuffers = { version = "2.1.2", default-features = false, features = ["thiserror"], optional = true } -hex = { version = "0.4", default-features = false, features = ["std"] } comfy-table = { version = "6.0", optional = true, default-features = false } pyo3 = { version = "0.17", default-features = false, optional = true } lexical-core = { version = "^0.8", default-features = false, features = ["write-integers", "write-floats", "parse-integers", "parse-floats"] } @@ -102,7 +101,7 @@ tempfile = { version = "3", default-features = false } [[example]] name = "dynamic_types" required-features = ["prettyprint"] -path="./examples/dynamic_types.rs" +path = "./examples/dynamic_types.rs" [[bench]] name = "aggregate_kernels" diff --git a/arrow/src/ipc/reader.rs b/arrow/src/ipc/reader.rs index 7ffa9aa5946..969c8c43f02 100644 --- a/arrow/src/ipc/reader.rs +++ b/arrow/src/ipc/reader.rs @@ -1173,336 +1173,8 @@ mod tests { use std::fs::File; - use flate2::read::GzDecoder; - + use crate::datatypes; use crate::datatypes::{ArrowNativeType, Float64Type, Int32Type, Int8Type}; - use crate::{datatypes, util::integration_util::*}; - - #[test] - #[cfg(not(feature = "force_validate"))] - fn read_generated_files_014() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "0.14.1"; - // the test is repetitive, thus we can read all supported files at once - let paths = vec![ - "generated_interval", - "generated_datetime", - "generated_dictionary", - "generated_map", - "generated_nested", - "generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - "generated_decimal", - ]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.arrow_file", - testdata, version, path - )) - .unwrap(); - - let mut reader = FileReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - }); - } - - #[test] - #[should_panic(expected = "Big Endian is not supported for Decimal!")] - fn read_decimal_be_file_should_panic() { - let testdata = crate::util::test_util::arrow_test_data(); - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/1.0.0-bigendian/generated_decimal.arrow_file", - testdata - )) - .unwrap(); - FileReader::try_new(file, None).unwrap(); - } - - #[test] - #[should_panic( - expected = "Last offset 687865856 of Utf8 is larger than values length 41" - )] - fn read_dictionary_be_not_implemented() { - // The offsets are not translated for big-endian files - // https://github.com/apache/arrow-rs/issues/859 - let testdata = crate::util::test_util::arrow_test_data(); - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/1.0.0-bigendian/generated_dictionary.arrow_file", - testdata - )) - .unwrap(); - FileReader::try_new(file, None).unwrap(); - } - - #[test] - fn read_generated_be_files_should_work() { - // complementary to the previous test - let testdata = crate::util::test_util::arrow_test_data(); - let paths = vec![ - "generated_interval", - "generated_datetime", - "generated_map", - "generated_nested", - "generated_null_trivial", - "generated_null", - 
"generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - ]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/1.0.0-bigendian/{}.arrow_file", - testdata, path - )) - .unwrap(); - - FileReader::try_new(file, None).unwrap(); - }); - } - - #[test] - fn projection_should_work() { - // complementary to the previous test - let testdata = crate::util::test_util::arrow_test_data(); - let paths = vec![ - "generated_interval", - "generated_datetime", - "generated_map", - "generated_nested", - "generated_null_trivial", - "generated_null", - "generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - ]; - paths.iter().for_each(|path| { - // We must use littleendian files here. - // The offsets are not translated for big-endian files - // https://github.com/apache/arrow-rs/issues/859 - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/1.0.0-littleendian/{}.arrow_file", - testdata, path - )) - .unwrap(); - - let reader = FileReader::try_new(file, Some(vec![0])).unwrap(); - let datatype_0 = reader.schema().fields()[0].data_type().clone(); - reader.for_each(|batch| { - let batch = batch.unwrap(); - assert_eq!(batch.columns().len(), 1); - assert_eq!(datatype_0, batch.schema().fields()[0].data_type().clone()); - }); - }); - } - - #[test] - #[cfg(not(feature = "force_validate"))] - fn read_generated_streams_014() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "0.14.1"; - // the test is repetitive, thus we can read all supported files at once - let paths = vec![ - "generated_interval", - "generated_datetime", - "generated_dictionary", - "generated_map", - "generated_nested", - "generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - "generated_decimal", - ]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.stream", - testdata, version, path - )) - .unwrap(); - - let mut reader = StreamReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - // the next batch must be empty - assert!(reader.next().is_none()); - // the stream must indicate that it's finished - assert!(reader.is_finished()); - }); - } - - #[test] - fn read_generated_files_100() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "1.0.0-littleendian"; - // the test is repetitive, thus we can read all supported files at once - let paths = vec![ - "generated_interval", - "generated_datetime", - "generated_dictionary", - "generated_map", - // "generated_map_non_canonical", - "generated_nested", - "generated_null_trivial", - "generated_null", - "generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - ]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.arrow_file", - testdata, version, path - )) - .unwrap(); - - let mut reader = FileReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - }); - } - - #[test] - fn read_generated_streams_100() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "1.0.0-littleendian"; - // the test is repetitive, thus we can read all supported files at once - let 
paths = vec![ - "generated_interval", - "generated_datetime", - "generated_dictionary", - "generated_map", - // "generated_map_non_canonical", - "generated_nested", - "generated_null_trivial", - "generated_null", - "generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - ]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.stream", - testdata, version, path - )) - .unwrap(); - - let mut reader = StreamReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - // the next batch must be empty - assert!(reader.next().is_none()); - // the stream must indicate that it's finished - assert!(reader.is_finished()); - }); - } - - #[test] - #[cfg(feature = "ipc_compression")] - fn read_generated_streams_200() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "2.0.0-compression"; - - // the test is repetitive, thus we can read all supported files at once - let paths = vec!["generated_lz4", "generated_zstd"]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.stream", - testdata, version, path - )) - .unwrap(); - - let mut reader = StreamReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - // the next batch must be empty - assert!(reader.next().is_none()); - // the stream must indicate that it's finished - assert!(reader.is_finished()); - }); - } - - #[test] - #[cfg(not(feature = "ipc_compression"))] - fn read_generated_streams_200_negative() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "2.0.0-compression"; - - // the test is repetitive, thus we can read all supported files at once - let cases = vec![("generated_lz4", "LZ4_FRAME"), ("generated_zstd", "ZSTD")]; - cases.iter().for_each(|(path, compression_name)| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.stream", - testdata, version, path - )) - .unwrap(); - - let mut reader = StreamReader::try_new(file, None).unwrap(); - let err = reader.next().unwrap().unwrap_err(); - let expected_error = format!( - "Invalid argument error: compression type {} not supported because arrow was not compiled with the ipc_compression feature", - compression_name - ); - assert_eq!(err.to_string(), expected_error); - }); - } - - #[test] - #[cfg(feature = "ipc_compression")] - fn read_generated_files_200() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "2.0.0-compression"; - // the test is repetitive, thus we can read all supported files at once - let paths = vec!["generated_lz4", "generated_zstd"]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.arrow_file", - testdata, version, path - )) - .unwrap(); - - let mut reader = FileReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - }); - } - - #[test] - #[cfg(not(feature = "ipc_compression"))] - fn read_generated_files_200_negative() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "2.0.0-compression"; - // the test is repetitive, thus we can read all supported files at once - let cases = 
vec![("generated_lz4", "LZ4_FRAME"), ("generated_zstd", "ZSTD")]; - cases.iter().for_each(|(path, compression_name)| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.arrow_file", - testdata, version, path - )) - .unwrap(); - - let mut reader = FileReader::try_new(file, None).unwrap(); - - let err = reader.next().unwrap().unwrap_err(); - let expected_error = format!( - "Invalid argument error: compression type {} not supported because arrow was not compiled with the ipc_compression feature", - compression_name - ); - assert_eq!(err.to_string(), expected_error); - }); - } fn create_test_projection_schema() -> Schema { // define field types @@ -1816,22 +1488,6 @@ mod tests { check_union_with_builder(UnionBuilder::new_sparse()); } - /// Read gzipped JSON file - fn read_gzip_json(version: &str, path: &str) -> ArrowJson { - let testdata = crate::util::test_util::arrow_test_data(); - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.json.gz", - testdata, version, path - )) - .unwrap(); - let mut gz = GzDecoder::new(&file); - let mut s = String::new(); - gz.read_to_string(&mut s).unwrap(); - // convert to Arrow JSON - let arrow_json: ArrowJson = serde_json::from_str(&s).unwrap(); - arrow_json - } - #[test] fn test_roundtrip_stream_nested_dict() { let xs = vec!["AA", "BB", "AA", "CC", "BB"]; diff --git a/arrow/src/ipc/writer.rs b/arrow/src/ipc/writer.rs index 826e49ca87d..63f1520a5e9 100644 --- a/arrow/src/ipc/writer.rs +++ b/arrow/src/ipc/writer.rs @@ -1270,31 +1270,28 @@ mod tests { use super::*; use std::fs::File; - use std::io::Read; + use std::io::Seek; use std::sync::Arc; - use flate2::read::GzDecoder; use ipc::MetadataVersion; use crate::array::*; use crate::datatypes::Field; use crate::ipc::reader::*; - use crate::util::integration_util::*; #[test] #[cfg(feature = "ipc_compression")] fn test_write_empty_record_batch_lz4_compression() { - let file_name = "arrow_lz4_empty"; let schema = Schema::new(vec![Field::new("field1", DataType::Int32, true)]); let values: Vec> = vec![]; let array = Int32Array::from(values); let record_batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![Arc::new(array)]) .unwrap(); + + let mut file = tempfile::tempfile().unwrap(); + { - let file = - File::create(format!("target/debug/testdata/{}.arrow_file", file_name)) - .unwrap(); let write_option = IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5) .unwrap() @@ -1302,15 +1299,14 @@ mod tests { .unwrap(); let mut writer = - FileWriter::try_new_with_options(file, &schema, write_option).unwrap(); + FileWriter::try_new_with_options(&mut file, &schema, write_option) + .unwrap(); writer.write(&record_batch).unwrap(); writer.finish().unwrap(); } + file.rewind().unwrap(); { // read file - let file = - File::open(format!("target/debug/testdata/{}.arrow_file", file_name)) - .unwrap(); let mut reader = FileReader::try_new(file, None).unwrap(); loop { match reader.next() { @@ -1345,9 +1341,9 @@ mod tests { let record_batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![Arc::new(array)]) .unwrap(); + + let mut file = tempfile::tempfile().unwrap(); { - let file = - File::create("target/debug/testdata/arrow_lz4.arrow_file").unwrap(); let write_option = IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5) .unwrap() @@ -1355,15 +1351,14 @@ mod tests { .unwrap(); let mut writer = - FileWriter::try_new_with_options(file, &schema, write_option).unwrap(); + FileWriter::try_new_with_options(&mut file, &schema, write_option) + .unwrap(); 
writer.write(&record_batch).unwrap(); writer.finish().unwrap(); } + file.rewind().unwrap(); { // read file - let file = - File::open(format!("target/debug/testdata/{}.arrow_file", "arrow_lz4")) - .unwrap(); let mut reader = FileReader::try_new(file, None).unwrap(); loop { match reader.next() { @@ -1398,9 +1393,8 @@ mod tests { let record_batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![Arc::new(array)]) .unwrap(); + let mut file = tempfile::tempfile().unwrap(); { - let file = - File::create("target/debug/testdata/arrow_zstd.arrow_file").unwrap(); let write_option = IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5) .unwrap() @@ -1408,15 +1402,14 @@ mod tests { .unwrap(); let mut writer = - FileWriter::try_new_with_options(file, &schema, write_option).unwrap(); + FileWriter::try_new_with_options(&mut file, &schema, write_option) + .unwrap(); writer.write(&record_batch).unwrap(); writer.finish().unwrap(); } + file.rewind().unwrap(); { // read file - let file = - File::open(format!("target/debug/testdata/{}.arrow_file", "arrow_zstd")) - .unwrap(); let mut reader = FileReader::try_new(file, None).unwrap(); loop { match reader.next() { @@ -1462,18 +1455,16 @@ mod tests { vec![Arc::new(array1) as ArrayRef], ) .unwrap(); + let mut file = tempfile::tempfile().unwrap(); { - let file = File::create("target/debug/testdata/arrow.arrow_file").unwrap(); - let mut writer = FileWriter::try_new(file, &schema).unwrap(); + let mut writer = FileWriter::try_new(&mut file, &schema).unwrap(); writer.write(&batch).unwrap(); writer.finish().unwrap(); } + file.rewind().unwrap(); { - let file = - File::open(format!("target/debug/testdata/{}.arrow_file", "arrow")) - .unwrap(); let mut reader = FileReader::try_new(file, None).unwrap(); while let Some(Ok(read_batch)) = reader.next() { read_batch @@ -1569,352 +1560,6 @@ mod tests { ); } - #[test] - #[cfg(not(feature = "force_validate"))] - fn read_and_rewrite_generated_files_014() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "0.14.1"; - // the test is repetitive, thus we can read all supported files at once - let paths = vec![ - "generated_interval", - "generated_datetime", - "generated_dictionary", - "generated_map", - "generated_nested", - "generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - "generated_decimal", - ]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.arrow_file", - testdata, version, path - )) - .unwrap(); - - let mut reader = FileReader::try_new(file, None).unwrap(); - - // read and rewrite the file to a temp location - { - let file = File::create(format!( - "target/debug/testdata/{}-{}.arrow_file", - version, path - )) - .unwrap(); - let mut writer = FileWriter::try_new(file, &reader.schema()).unwrap(); - while let Some(Ok(batch)) = reader.next() { - writer.write(&batch).unwrap(); - } - writer.finish().unwrap(); - } - - let file = File::open(format!( - "target/debug/testdata/{}-{}.arrow_file", - version, path - )) - .unwrap(); - let mut reader = FileReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - }); - } - - #[test] - #[cfg(not(feature = "force_validate"))] - fn read_and_rewrite_generated_streams_014() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "0.14.1"; - // the test is repetitive, thus we can read all supported files at once - let 
-
-    #[test]
-    #[cfg(not(feature = "force_validate"))]
-    fn read_and_rewrite_generated_streams_014() {
-        let testdata = crate::util::test_util::arrow_test_data();
-        let version = "0.14.1";
-        // the test is repetitive, thus we can read all supported files at once
-        let paths = vec![
-            "generated_interval",
-            "generated_datetime",
-            "generated_dictionary",
-            "generated_map",
-            "generated_nested",
-            "generated_primitive_no_batches",
-            "generated_primitive_zerolength",
-            "generated_primitive",
-            "generated_decimal",
-        ];
-        paths.iter().for_each(|path| {
-            let file = File::open(format!(
-                "{}/arrow-ipc-stream/integration/{}/{}.stream",
-                testdata, version, path
-            ))
-            .unwrap();
-
-            let reader = StreamReader::try_new(file, None).unwrap();
-
-            // read and rewrite the stream to a temp location
-            {
-                let file = File::create(format!(
-                    "target/debug/testdata/{}-{}.stream",
-                    version, path
-                ))
-                .unwrap();
-                let mut writer = StreamWriter::try_new(file, &reader.schema()).unwrap();
-                reader.for_each(|batch| {
-                    writer.write(&batch.unwrap()).unwrap();
-                });
-                writer.finish().unwrap();
-            }
-
-            let file =
-                File::open(format!("target/debug/testdata/{}-{}.stream", version, path))
-                    .unwrap();
-            let mut reader = StreamReader::try_new(file, None).unwrap();
-
-            // read expected JSON output
-            let arrow_json = read_gzip_json(version, path);
-            assert!(arrow_json.equals_reader(&mut reader).unwrap());
-        });
-    }
-
-    #[test]
-    fn read_and_rewrite_generated_files_100() {
-        let testdata = crate::util::test_util::arrow_test_data();
-        let version = "1.0.0-littleendian";
-        // the test is repetitive, thus we can read all supported files at once
-        let paths = vec![
-            "generated_custom_metadata",
-            "generated_datetime",
-            "generated_dictionary_unsigned",
-            "generated_dictionary",
-            // "generated_duplicate_fieldnames",
-            "generated_interval",
-            "generated_map",
-            "generated_nested",
-            // "generated_nested_large_offsets",
-            "generated_null_trivial",
-            "generated_null",
-            "generated_primitive_large_offsets",
-            "generated_primitive_no_batches",
-            "generated_primitive_zerolength",
-            "generated_primitive",
-            // "generated_recursive_nested",
-        ];
-        paths.iter().for_each(|path| {
-            let file = File::open(format!(
-                "{}/arrow-ipc-stream/integration/{}/{}.arrow_file",
-                testdata, version, path
-            ))
-            .unwrap();
-
-            let mut reader = FileReader::try_new(file, None).unwrap();
-
-            // read and rewrite the file to a temp location
-            {
-                let file = File::create(format!(
-                    "target/debug/testdata/{}-{}.arrow_file",
-                    version, path
-                ))
-                .unwrap();
-                // write IPC version 5
-                let options =
-                    IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5).unwrap();
-                let mut writer =
-                    FileWriter::try_new_with_options(file, &reader.schema(), options)
-                        .unwrap();
-                while let Some(Ok(batch)) = reader.next() {
-                    writer.write(&batch).unwrap();
-                }
-                writer.finish().unwrap();
-            }
-
-            let file = File::open(format!(
-                "target/debug/testdata/{}-{}.arrow_file",
-                version, path
-            ))
-            .unwrap();
-            let mut reader = FileReader::try_new(file, None).unwrap();
-
-            // read expected JSON output
-            let arrow_json = read_gzip_json(version, path);
-            assert!(arrow_json.equals_reader(&mut reader).unwrap());
-        });
-    }
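// Note the symmetry between the two removed tests above: .arrow_file containers
// go through FileWriter, .stream containers through StreamWriter, but both are
// validated against the same gzipped reference JSON. A sketch of the stream-side
// rewrite core (helper and signature are ours; the calls are taken from the
// tests themselves):
use arrow::datatypes::SchemaRef;
use arrow::error::Result;
use arrow::ipc::writer::StreamWriter;
use arrow::record_batch::RecordBatch;
use std::io::Write;

fn rewrite_stream<W: Write>(
    schema: SchemaRef,
    batches: impl Iterator<Item = Result<RecordBatch>>,
    sink: W,
) -> Result<()> {
    let mut writer = StreamWriter::try_new(sink, &schema)?;
    for batch in batches {
        writer.write(&batch?)?;
    }
    writer.finish()
}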
"generated_primitive_no_batches", - "generated_primitive_zerolength", - "generated_primitive", - // "generated_recursive_nested", - ]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.stream", - testdata, version, path - )) - .unwrap(); - - let reader = StreamReader::try_new(file, None).unwrap(); - - // read and rewrite the stream to a temp location - { - let file = File::create(format!( - "target/debug/testdata/{}-{}.stream", - version, path - )) - .unwrap(); - let options = - IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5).unwrap(); - let mut writer = - StreamWriter::try_new_with_options(file, &reader.schema(), options) - .unwrap(); - reader.for_each(|batch| { - writer.write(&batch.unwrap()).unwrap(); - }); - writer.finish().unwrap(); - } - - let file = - File::open(format!("target/debug/testdata/{}-{}.stream", version, path)) - .unwrap(); - let mut reader = StreamReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - }); - } - - #[test] - #[cfg(feature = "ipc_compression")] - fn read_and_rewrite_compression_files_200() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "2.0.0-compression"; - // the test is repetitive, thus we can read all supported files at once - let paths = vec!["generated_lz4", "generated_zstd"]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.arrow_file", - testdata, version, path - )) - .unwrap(); - - let mut reader = FileReader::try_new(file, None).unwrap(); - - // read and rewrite the file to a temp location - { - let file = File::create(format!( - "target/debug/testdata/{}-{}.arrow_file", - version, path - )) - .unwrap(); - // write IPC version 5 - let options = - IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5) - .unwrap() - .try_with_compression(Some(ipc::CompressionType::LZ4_FRAME)) - .unwrap(); - - let mut writer = - FileWriter::try_new_with_options(file, &reader.schema(), options) - .unwrap(); - while let Some(Ok(batch)) = reader.next() { - writer.write(&batch).unwrap(); - } - writer.finish().unwrap(); - } - - let file = File::open(format!( - "target/debug/testdata/{}-{}.arrow_file", - version, path - )) - .unwrap(); - let mut reader = FileReader::try_new(file, None).unwrap(); - - // read expected JSON output - let arrow_json = read_gzip_json(version, path); - assert!(arrow_json.equals_reader(&mut reader).unwrap()); - }); - } - - #[test] - #[cfg(feature = "ipc_compression")] - fn read_and_rewrite_compression_stream_200() { - let testdata = crate::util::test_util::arrow_test_data(); - let version = "2.0.0-compression"; - // the test is repetitive, thus we can read all supported files at once - let paths = vec!["generated_lz4", "generated_zstd"]; - paths.iter().for_each(|path| { - let file = File::open(format!( - "{}/arrow-ipc-stream/integration/{}/{}.stream", - testdata, version, path - )) - .unwrap(); - - let reader = StreamReader::try_new(file, None).unwrap(); - - // read and rewrite the stream to a temp location - { - let file = File::create(format!( - "target/debug/testdata/{}-{}.stream", - version, path - )) - .unwrap(); - let options = - IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5) - .unwrap() - .try_with_compression(Some(ipc::CompressionType::ZSTD)) - .unwrap(); - - let mut writer = - StreamWriter::try_new_with_options(file, &reader.schema(), 
-
-    #[test]
-    #[cfg(feature = "ipc_compression")]
-    fn read_and_rewrite_compression_stream_200() {
-        let testdata = crate::util::test_util::arrow_test_data();
-        let version = "2.0.0-compression";
-        // the test is repetitive, thus we can read all supported files at once
-        let paths = vec!["generated_lz4", "generated_zstd"];
-        paths.iter().for_each(|path| {
-            let file = File::open(format!(
-                "{}/arrow-ipc-stream/integration/{}/{}.stream",
-                testdata, version, path
-            ))
-            .unwrap();
-
-            let reader = StreamReader::try_new(file, None).unwrap();
-
-            // read and rewrite the stream to a temp location
-            {
-                let file = File::create(format!(
-                    "target/debug/testdata/{}-{}.stream",
-                    version, path
-                ))
-                .unwrap();
-                let options =
-                    IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5)
-                        .unwrap()
-                        .try_with_compression(Some(ipc::CompressionType::ZSTD))
-                        .unwrap();
-
-                let mut writer =
-                    StreamWriter::try_new_with_options(file, &reader.schema(), options)
-                        .unwrap();
-                reader.for_each(|batch| {
-                    writer.write(&batch.unwrap()).unwrap();
-                });
-                writer.finish().unwrap();
-            }
-
-            let file =
-                File::open(format!("target/debug/testdata/{}-{}.stream", version, path))
-                    .unwrap();
-            let mut reader = StreamReader::try_new(file, None).unwrap();
-
-            // read expected JSON output
-            let arrow_json = read_gzip_json(version, path);
-            assert!(arrow_json.equals_reader(&mut reader).unwrap());
-        });
-    }
-
-    /// Read gzipped JSON file
-    fn read_gzip_json(version: &str, path: &str) -> ArrowJson {
-        let testdata = crate::util::test_util::arrow_test_data();
-        let file = File::open(format!(
-            "{}/arrow-ipc-stream/integration/{}/{}.json.gz",
-            testdata, version, path
-        ))
-        .unwrap();
-        let mut gz = GzDecoder::new(&file);
-        let mut s = String::new();
-        gz.read_to_string(&mut s).unwrap();
-        // convert to Arrow JSON
-        let arrow_json: ArrowJson = serde_json::from_str(&s).unwrap();
-        arrow_json
-    }
-
     #[test]
     fn track_union_nested_dict() {
         let inner: DictionaryArray<Int32Type> = vec!["a", "b", "a"].into_iter().collect();
@@ -1982,7 +1627,6 @@ mod tests {
     #[test]
     fn read_union_017() {
         let testdata = crate::util::test_util::arrow_test_data();
-        let version = "0.17.1";
         let data_file = File::open(format!(
             "{}/arrow-ipc-stream/integration/0.17.1/generated_union.stream",
             testdata,
         ))
@@ -1991,26 +1635,18 @@ mod tests {
 
         let reader = StreamReader::try_new(data_file, None).unwrap();
 
+        let mut file = tempfile::tempfile().unwrap();
         // read and rewrite the stream to a temp location
         {
-            let file = File::create(format!(
-                "target/debug/testdata/{}-generated_union.stream",
-                version
-            ))
-            .unwrap();
-            let mut writer = StreamWriter::try_new(file, &reader.schema()).unwrap();
+            let mut writer = StreamWriter::try_new(&mut file, &reader.schema()).unwrap();
             reader.for_each(|batch| {
                 writer.write(&batch.unwrap()).unwrap();
             });
             writer.finish().unwrap();
         }
+        file.rewind().unwrap();
 
         // Compare original file and rewrote file
-        let file = File::open(format!(
-            "target/debug/testdata/{}-generated_union.stream",
-            version
-        ))
-        .unwrap();
         let rewrite_reader = StreamReader::try_new(file, None).unwrap();
 
         let data_file = File::open(format!(
@@ -2053,18 +1689,18 @@ mod tests {
             vec![Arc::new(union) as ArrayRef],
         )
         .unwrap();
-        let file_name = "target/debug/testdata/union.arrow_file";
+
+        let mut file = tempfile::tempfile().unwrap();
         {
-            let file = File::create(&file_name).unwrap();
             let mut writer =
-                FileWriter::try_new_with_options(file, &schema, options).unwrap();
+                FileWriter::try_new_with_options(&mut file, &schema, options).unwrap();
             writer.write(&batch).unwrap();
             writer.finish().unwrap();
         }
+        file.rewind().unwrap();
 
         {
-            let file = File::open(&file_name).unwrap();
             let reader = FileReader::try_new(file, None).unwrap();
             reader.for_each(|maybe_batch| {
                 maybe_batch
diff --git a/arrow/src/util/mod.rs b/arrow/src/util/mod.rs
index 1ee05d8a02c..6f68398e770 100644
--- a/arrow/src/util/mod.rs
+++ b/arrow/src/util/mod.rs
@@ -24,8 +24,6 @@ pub mod bit_util;
 #[cfg(feature = "test_utils")]
 pub mod data_gen;
 pub mod display;
-#[cfg(any(test, feature = "test_utils"))]
-pub mod integration_util;
 #[cfg(feature = "prettyprint")]
 pub mod pretty;
 pub(crate) mod serialization;
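// For reference: `read_union_017` above validates its rewrite by comparing the
// rewritten stream against a second pass over the original file. A sketch of a
// batch-level comparison (helper name is ours; it avoids assuming a PartialEq
// impl on RecordBatch by comparing the underlying ArrayData, which does
// implement PartialEq):
use arrow::record_batch::RecordBatch;

fn assert_batches_equal(a: &RecordBatch, b: &RecordBatch) {
    assert_eq!(a.schema(), b.schema());
    assert_eq!(a.num_rows(), b.num_rows());
    for (ca, cb) in a.columns().iter().zip(b.columns()) {
        // ArrayData equality covers values, offsets and null bitmaps
        assert_eq!(ca.data(), cb.data());
    }
}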
version = "3", default-features = false, features = ["std", "derive"] } futures = { version = "0.3", default-features = false } -hex = { version = "0.4", default-features = false } +hex = { version = "0.4", default-features = false, features = ["std"] } prost = { version = "0.11", default-features = false } -serde = { version = "1.0", default-features = false, features = ["rc"] } -serde_derive = { version = "1.0", default-features = false } +serde = { version = "1.0", default-features = false, features = ["rc", "derive"] } serde_json = { version = "1.0", default-features = false, features = ["std"] } tokio = { version = "1.0", default-features = false } tonic = { version = "0.8", default-features = false } tracing-subscriber = { version = "0.3.1", default-features = false, features = ["fmt"], optional = true } num = { version = "0.4", default-features = false, features = ["std"] } +flate2 = { version = "1", default-features = false, features = ["rust_backend"] } + +[dev-dependencies] +tempfile = { version = "3", default-features = false } diff --git a/arrow/test/data/integration.json b/integration-testing/data/integration.json similarity index 100% rename from arrow/test/data/integration.json rename to integration-testing/data/integration.json diff --git a/integration-testing/src/bin/arrow-json-integration-test.rs b/integration-testing/src/bin/arrow-json-integration-test.rs index b442e8b5ed3..a7d7cf6ee7c 100644 --- a/integration-testing/src/bin/arrow-json-integration-test.rs +++ b/integration-testing/src/bin/arrow-json-integration-test.rs @@ -20,8 +20,7 @@ use arrow::datatypes::{DataType, Field}; use arrow::error::{ArrowError, Result}; use arrow::ipc::reader::FileReader; use arrow::ipc::writer::FileWriter; -use arrow::util::integration_util::*; -use arrow_integration_testing::read_json_file; +use arrow_integration_testing::{read_json_file, util::*}; use clap::Parser; use std::fs::File; diff --git a/integration-testing/src/lib.rs b/integration-testing/src/lib.rs index 5d3da15d3f5..ffe112af72c 100644 --- a/integration-testing/src/lib.rs +++ b/integration-testing/src/lib.rs @@ -19,12 +19,12 @@ use serde_json::Value; -use arrow::util::integration_util::ArrowJsonBatch; +use util::*; use arrow::datatypes::Schema; use arrow::error::Result; use arrow::record_batch::RecordBatch; -use arrow::util::integration_util::*; +use arrow::util::test_util::arrow_test_data; use std::collections::HashMap; use std::fs::File; use std::io::BufReader; @@ -36,6 +36,7 @@ pub const AUTH_PASSWORD: &str = "flight"; pub mod flight_client_scenarios; pub mod flight_server_scenarios; +pub mod util; pub struct ArrowFile { pub schema: Schema, @@ -76,3 +77,22 @@ pub fn read_json_file(json_name: &str) -> Result { batches, }) } + +/// Read gzipped JSON test file +pub fn read_gzip_json(version: &str, path: &str) -> ArrowJson { + use flate2::read::GzDecoder; + use std::io::Read; + + let testdata = arrow_test_data(); + let file = File::open(format!( + "{}/arrow-ipc-stream/integration/{}/{}.json.gz", + testdata, version, path + )) + .unwrap(); + let mut gz = GzDecoder::new(&file); + let mut s = String::new(); + gz.read_to_string(&mut s).unwrap(); + // convert to Arrow JSON + let arrow_json: ArrowJson = serde_json::from_str(&s).unwrap(); + arrow_json +} diff --git a/arrow/src/util/integration_util.rs b/integration-testing/src/util.rs similarity index 99% rename from arrow/src/util/integration_util.rs rename to integration-testing/src/util.rs index 36751ed6aae..531bdc57175 100644 --- a/arrow/src/util/integration_util.rs +++ 
diff --git a/arrow/src/util/integration_util.rs b/integration-testing/src/util.rs
similarity index 99%
rename from arrow/src/util/integration_util.rs
rename to integration-testing/src/util.rs
index 36751ed6aae..531bdc57175 100644
--- a/arrow/src/util/integration_util.rs
+++ b/integration-testing/src/util.rs
@@ -22,19 +22,19 @@
 use hex::decode;
 use num::BigInt;
 use num::Signed;
-use serde_derive::{Deserialize, Serialize};
+use serde::{Deserialize, Serialize};
 use serde_json::{Map as SJMap, Value};
 use std::collections::HashMap;
 use std::sync::Arc;
 
-use crate::array::*;
-use crate::buffer::{Buffer, MutableBuffer};
-use crate::compute;
-use crate::datatypes::*;
-use crate::error::{ArrowError, Result};
-use crate::record_batch::{RecordBatch, RecordBatchReader};
-use crate::util::bit_util;
-use crate::util::decimal::Decimal256;
+use arrow::array::*;
+use arrow::buffer::{Buffer, MutableBuffer};
+use arrow::compute;
+use arrow::datatypes::*;
+use arrow::error::{ArrowError, Result};
+use arrow::record_batch::{RecordBatch, RecordBatchReader};
+use arrow::util::bit_util;
+use arrow::util::decimal::Decimal256;
 
 /// A struct that represents an Arrow file with a schema and record batches
 #[derive(Deserialize, Serialize, Debug)]
@@ -1047,7 +1047,7 @@ mod tests {
     use std::io::Read;
     use std::sync::Arc;
 
-    use crate::buffer::Buffer;
+    use arrow::buffer::Buffer;
 
     #[test]
     fn test_schema_equality() {
@@ -1112,7 +1112,6 @@ mod tests {
     }
 
     #[test]
-    #[cfg_attr(miri, ignore)] // running forever
    fn test_arrow_data_equality() {
         let secs_tz = Some("Europe/Budapest".to_string());
         let millis_tz = Some("America/New_York".to_string());
@@ -1333,7 +1332,7 @@ mod tests {
             ],
         )
         .unwrap();
-        let mut file = File::open("test/data/integration.json").unwrap();
+        let mut file = File::open("data/integration.json").unwrap();
         let mut json = String::new();
         file.read_to_string(&mut json).unwrap();
         let arrow_json: ArrowJson = serde_json::from_str(&json).unwrap();
diff --git a/integration-testing/tests/ipc_reader.rs b/integration-testing/tests/ipc_reader.rs
new file mode 100644
index 00000000000..fc97a193c76
--- /dev/null
+++ b/integration-testing/tests/ipc_reader.rs
@@ -0,0 +1,276 @@
+use arrow::ipc::reader::{FileReader, StreamReader};
+use arrow::util::test_util::arrow_test_data;
+use arrow_integration_testing::read_gzip_json;
+use std::fs::File;
+
+#[test]
+fn read_generated_files_014() {
+    let testdata = arrow_test_data();
+    let version = "0.14.1";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_dictionary",
+        "generated_map",
+        "generated_nested",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+        "generated_decimal",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.arrow_file",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
+
+#[test]
+#[should_panic(expected = "Big Endian is not supported for Decimal!")]
+fn read_decimal_be_file_should_panic() {
+    let testdata = arrow_test_data();
+    let file = File::open(format!(
+        "{}/arrow-ipc-stream/integration/1.0.0-bigendian/generated_decimal.arrow_file",
+        testdata
+    ))
+    .unwrap();
+    FileReader::try_new(file, None).unwrap();
+}
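// Note on the big-endian tests here: `#[should_panic(expected = "...")]`
// matches a *substring* of the panic message, so these assertions survive if
// the full message ever gains extra context. Illustrative sketch only:
#[test]
#[should_panic(expected = "not supported")]
fn panics_with_descriptive_message() {
    panic!("Big Endian is not supported for this sketch");
}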
+
+#[test]
+#[should_panic(
+    expected = "Last offset 687865856 of Utf8 is larger than values length 41"
+)]
+fn read_dictionary_be_not_implemented() {
+    // The offsets are not translated for big-endian files
+    // https://github.com/apache/arrow-rs/issues/859
+    let testdata = arrow_test_data();
+    let file = File::open(format!(
+        "{}/arrow-ipc-stream/integration/1.0.0-bigendian/generated_dictionary.arrow_file",
+        testdata
+    ))
+    .unwrap();
+    FileReader::try_new(file, None).unwrap();
+}
+
+#[test]
+fn read_generated_be_files_should_work() {
+    // complementary to the previous test
+    let testdata = arrow_test_data();
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_map",
+        "generated_nested",
+        "generated_null_trivial",
+        "generated_null",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/1.0.0-bigendian/{}.arrow_file",
+            testdata, path
+        ))
+        .unwrap();
+
+        FileReader::try_new(file, None).unwrap();
+    });
+}
+
+#[test]
+fn projection_should_work() {
+    // complementary to the previous test
+    let testdata = arrow_test_data();
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_map",
+        "generated_nested",
+        "generated_null_trivial",
+        "generated_null",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+    ];
+    paths.iter().for_each(|path| {
+        // We must use littleendian files here.
+        // The offsets are not translated for big-endian files
+        // https://github.com/apache/arrow-rs/issues/859
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/1.0.0-littleendian/{}.arrow_file",
+            testdata, path
+        ))
+        .unwrap();
+
+        let reader = FileReader::try_new(file, Some(vec![0])).unwrap();
+        let datatype_0 = reader.schema().fields()[0].data_type().clone();
+        reader.for_each(|batch| {
+            let batch = batch.unwrap();
+            assert_eq!(batch.columns().len(), 1);
+            assert_eq!(datatype_0, batch.schema().fields()[0].data_type().clone());
+        });
+    });
+}
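// For reference: `projection_should_work` above exercises reader-side
// projection; passing `Some(vec![0])` keeps only column 0 of every batch and
// of the reported schema. A minimal sketch (helper name is ours; assumes any
// little-endian .arrow_file handle):
use arrow::ipc::reader::FileReader;
use std::fs::File;

fn first_column_only(file: File) {
    let reader = FileReader::try_new(file, Some(vec![0])).unwrap();
    for batch in reader {
        assert_eq!(batch.unwrap().num_columns(), 1);
    }
}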
+
+#[test]
+fn read_generated_streams_014() {
+    let testdata = arrow_test_data();
+    let version = "0.14.1";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_dictionary",
+        "generated_map",
+        "generated_nested",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+        "generated_decimal",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.stream",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = StreamReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+        // the next batch must be empty
+        assert!(reader.next().is_none());
+        // the stream must indicate that it's finished
+        assert!(reader.is_finished());
+    });
+}
+
+#[test]
+fn read_generated_files_100() {
+    let testdata = arrow_test_data();
+    let version = "1.0.0-littleendian";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_dictionary",
+        "generated_map",
+        // "generated_map_non_canonical",
+        "generated_nested",
+        "generated_null_trivial",
+        "generated_null",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.arrow_file",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
+
+#[test]
+fn read_generated_streams_100() {
+    let testdata = arrow_test_data();
+    let version = "1.0.0-littleendian";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_dictionary",
+        "generated_map",
+        // "generated_map_non_canonical",
+        "generated_nested",
+        "generated_null_trivial",
+        "generated_null",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.stream",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = StreamReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+        // the next batch must be empty
+        assert!(reader.next().is_none());
+        // the stream must indicate that it's finished
+        assert!(reader.is_finished());
+    });
+}
+
+#[test]
+fn read_generated_streams_200() {
+    let testdata = arrow_test_data();
+    let version = "2.0.0-compression";
+
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec!["generated_lz4", "generated_zstd"];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.stream",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = StreamReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+        // the next batch must be empty
+        assert!(reader.next().is_none());
+        // the stream must indicate that it's finished
+        assert!(reader.is_finished());
+    });
+}
+
+#[test]
+fn read_generated_files_200() {
+    let testdata = arrow_test_data();
+    let version = "2.0.0-compression";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec!["generated_lz4", "generated_zstd"];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.arrow_file",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
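// For reference: every stream test in the new file above ends with the same
// three checks, which together pin down StreamReader's end-of-stream contract.
// A sketch of that shared tail (hypothetical helper around calls that appear
// verbatim above):
use arrow::ipc::reader::StreamReader;
use std::fs::File;

fn assert_fully_consumed(reader: &mut StreamReader<File>) {
    // a well-formed stream yields no batch after the end-of-stream marker...
    assert!(reader.next().is_none());
    // ...and the reader records that the marker was actually seen
    assert!(reader.is_finished());
}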
diff --git a/integration-testing/tests/ipc_writer.rs b/integration-testing/tests/ipc_writer.rs
new file mode 100644
index 00000000000..e159694ecc1
--- /dev/null
+++ b/integration-testing/tests/ipc_writer.rs
@@ -0,0 +1,297 @@
+use arrow::ipc;
+use arrow::ipc::reader::{FileReader, StreamReader};
+use arrow::ipc::writer::{FileWriter, IpcWriteOptions, StreamWriter};
+use arrow::util::test_util::arrow_test_data;
+use arrow_integration_testing::read_gzip_json;
+use std::fs::File;
+use std::io::Seek;
+
+#[test]
+fn read_and_rewrite_generated_files_014() {
+    let testdata = arrow_test_data();
+    let version = "0.14.1";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_dictionary",
+        "generated_map",
+        "generated_nested",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+        "generated_decimal",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.arrow_file",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        let mut file = tempfile::tempfile().unwrap();
+
+        // read and rewrite the file to a temp location
+        {
+            let mut writer = FileWriter::try_new(&mut file, &reader.schema()).unwrap();
+            while let Some(Ok(batch)) = reader.next() {
+                writer.write(&batch).unwrap();
+            }
+            writer.finish().unwrap();
+        }
+        file.rewind().unwrap();
+
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
+
+#[test]
+fn read_and_rewrite_generated_streams_014() {
+    let testdata = arrow_test_data();
+    let version = "0.14.1";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_interval",
+        "generated_datetime",
+        "generated_dictionary",
+        "generated_map",
+        "generated_nested",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+        "generated_decimal",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.stream",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let reader = StreamReader::try_new(file, None).unwrap();
+
+        let mut file = tempfile::tempfile().unwrap();
+
+        // read and rewrite the stream to a temp location
+        {
+            let mut writer = StreamWriter::try_new(&mut file, &reader.schema()).unwrap();
+            reader.for_each(|batch| {
+                writer.write(&batch.unwrap()).unwrap();
+            });
+            writer.finish().unwrap();
+        }
+
+        file.rewind().unwrap();
+        let mut reader = StreamReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
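// Note: both rewrite loops above must call finish() before the temp file is
// rewound and read back. FileWriter::finish writes the footer FileReader
// locates, and StreamWriter::finish writes the end-of-stream marker that
// is_finished() later checks. A sketch of the ordering (hypothetical helper;
// an empty stream with only a schema and the marker is still valid):
use arrow::datatypes::Schema;
use arrow::ipc::writer::StreamWriter;
use std::io::{Seek, Write};

fn finish_then_rewind<W: Write + Seek>(mut file: W, schema: &Schema) {
    {
        let mut writer = StreamWriter::try_new(&mut file, schema).unwrap();
        writer.finish().unwrap(); // emits the end-of-stream marker
    } // writer dropped here, releasing the borrow on `file`
    file.rewind().unwrap(); // rewind only after the writer has flushed
}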
+
+#[test]
+fn read_and_rewrite_generated_files_100() {
+    let testdata = arrow_test_data();
+    let version = "1.0.0-littleendian";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_custom_metadata",
+        "generated_datetime",
+        "generated_dictionary_unsigned",
+        "generated_dictionary",
+        // "generated_duplicate_fieldnames",
+        "generated_interval",
+        "generated_map",
+        "generated_nested",
+        // "generated_nested_large_offsets",
+        "generated_null_trivial",
+        "generated_null",
+        "generated_primitive_large_offsets",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+        // "generated_recursive_nested",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.arrow_file",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        let mut file = tempfile::tempfile().unwrap();
+
+        // read and rewrite the file to a temp location
+        {
+            // write IPC version 5
+            let options =
+                IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5).unwrap();
+            let mut writer =
+                FileWriter::try_new_with_options(&mut file, &reader.schema(), options)
+                    .unwrap();
+            while let Some(Ok(batch)) = reader.next() {
+                writer.write(&batch).unwrap();
+            }
+            writer.finish().unwrap();
+        }
+
+        file.rewind().unwrap();
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
+
+#[test]
+fn read_and_rewrite_generated_streams_100() {
+    let testdata = arrow_test_data();
+    let version = "1.0.0-littleendian";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec![
+        "generated_custom_metadata",
+        "generated_datetime",
+        "generated_dictionary_unsigned",
+        "generated_dictionary",
+        // "generated_duplicate_fieldnames",
+        "generated_interval",
+        "generated_map",
+        "generated_nested",
+        // "generated_nested_large_offsets",
+        "generated_null_trivial",
+        "generated_null",
+        "generated_primitive_large_offsets",
+        "generated_primitive_no_batches",
+        "generated_primitive_zerolength",
+        "generated_primitive",
+        // "generated_recursive_nested",
+    ];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.stream",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let reader = StreamReader::try_new(file, None).unwrap();
+
+        let mut file = tempfile::tempfile().unwrap();
+
+        // read and rewrite the stream to a temp location
+        {
+            let options =
+                IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5).unwrap();
+            let mut writer =
+                StreamWriter::try_new_with_options(&mut file, &reader.schema(), options)
+                    .unwrap();
+            reader.for_each(|batch| {
+                writer.write(&batch.unwrap()).unwrap();
+            });
+            writer.finish().unwrap();
+        }
+
+        file.rewind().unwrap();
+
+        let mut reader = StreamReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
+
+#[test]
+fn read_and_rewrite_compression_files_200() {
+    let testdata = arrow_test_data();
+    let version = "2.0.0-compression";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec!["generated_lz4", "generated_zstd"];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.arrow_file",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        let mut file = tempfile::tempfile().unwrap();
+
+        // read and rewrite the file to a temp location
+        {
+            // write IPC version 5
+            let options = IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5)
+                .unwrap()
+                .try_with_compression(Some(ipc::CompressionType::LZ4_FRAME))
+                .unwrap();
+
+            let mut writer =
+                FileWriter::try_new_with_options(&mut file, &reader.schema(), options)
+                    .unwrap();
+            while let Some(Ok(batch)) = reader.next() {
+                writer.write(&batch).unwrap();
+            }
+            writer.finish().unwrap();
+        }
+
+        file.rewind().unwrap();
+        let mut reader = FileReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}
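// Worth noting: the two compression rewrite tests fix the output codec
// independently of the input (LZ4_FRAME for the file test above, ZSTD for the
// stream test below), so each run also exercises transcoding, e.g. a
// zstd-compressed input rewritten as LZ4. Sketch of the shared codec selection
// (helper name is ours; the calls appear verbatim in both tests):
use arrow::ipc;
use arrow::ipc::writer::IpcWriteOptions;

fn compressed_rewrite_options(codec: ipc::CompressionType) -> IpcWriteOptions {
    // 8-byte alignment, no legacy IPC format, metadata V5, explicit codec
    IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5)
        .unwrap()
        .try_with_compression(Some(codec))
        .unwrap()
}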
+
+#[test]
+fn read_and_rewrite_compression_stream_200() {
+    let testdata = arrow_test_data();
+    let version = "2.0.0-compression";
+    // the test is repetitive, thus we can read all supported files at once
+    let paths = vec!["generated_lz4", "generated_zstd"];
+    paths.iter().for_each(|path| {
+        let file = File::open(format!(
+            "{}/arrow-ipc-stream/integration/{}/{}.stream",
+            testdata, version, path
+        ))
+        .unwrap();
+
+        let reader = StreamReader::try_new(file, None).unwrap();
+
+        let mut file = tempfile::tempfile().unwrap();
+
+        // read and rewrite the stream to a temp location
+        {
+            let options = IpcWriteOptions::try_new(8, false, ipc::MetadataVersion::V5)
+                .unwrap()
+                .try_with_compression(Some(ipc::CompressionType::ZSTD))
+                .unwrap();
+
+            let mut writer =
+                StreamWriter::try_new_with_options(&mut file, &reader.schema(), options)
+                    .unwrap();
+            reader.for_each(|batch| {
+                writer.write(&batch.unwrap()).unwrap();
+            });
+            writer.finish().unwrap();
+        }
+
+        file.rewind().unwrap();
+
+        let mut reader = StreamReader::try_new(file, None).unwrap();
+
+        // read expected JSON output
+        let arrow_json = read_gzip_json(version, path);
+        assert!(arrow_json.equals_reader(&mut reader).unwrap());
+    });
+}