From 46e13636048d958c2ec036d07bdfba24eaf82aea Mon Sep 17 00:00:00 2001 From: steffnay Date: Mon, 4 Oct 2021 12:02:08 -0700 Subject: [PATCH 01/13] feat: add jobs.delete --- src/index.ts | 1 + src/job.ts | 50 ++++++ src/types.d.ts | 340 +++++++++++++++++++++++++++++++++------- system-test/bigquery.ts | 19 +++ 4 files changed, 355 insertions(+), 55 deletions(-) diff --git a/src/index.ts b/src/index.ts index f852cffa..04b347e0 100644 --- a/src/index.ts +++ b/src/index.ts @@ -82,6 +82,7 @@ export { export { CancelCallback, CancelResponse, + DeleteCallback, Job, JobMetadata, JobOptions, diff --git a/src/job.ts b/src/job.ts index dbbb933e..27b8bb92 100644 --- a/src/job.ts +++ b/src/job.ts @@ -46,6 +46,8 @@ export type JobOptions = JobRequest; export type CancelCallback = RequestCallback; export type CancelResponse = [bigquery.IJobCancelResponse]; +export type DeleteCallback = RequestCallback; + export type QueryResultsOptions = { job?: Job; wrapIntegers?: boolean | IntegerTypeCastOptions; @@ -370,6 +372,54 @@ class Job extends Operation { ); } + delete(): Promise; + delete(callback: DeleteCallback): void; + /** + * @callback DeleteJobCallback + * @param {?Error} err Request error, if any. + * @param {object} apiResponse The full API response. + */ + /** + * Delete the job. + * + * @see [Jobs: delete API Documentation]{@link https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/delete} + * + * @method Job#delete + * @param {DeleteJobCallback} [callback] The callback function. + * @param {?error} callback.err An error returned while making this + * request. + * @param {object} callback.apiResponse The full API response. + * @returns {Promise} + * + * @example + * const {BigQuery} = require('@google-cloud/bigquery'); + * const bigquery = new BigQuery(); + * + * const job = bigquery.job(jobId); + * + * job.delete((err, apiResponse) => {}); + * + * @example If the callback is omitted we'll return a Promise. + * const [apiResponse] = await job.delete(); + * @example If successful, the response body is empty. + */ + delete(callback?: DeleteCallback): void | Promise { + let qs; + + if (this.location) { + qs = {location: this.location}; + } + + this.request( + { + method: 'DELETE', + uri: '/delete', + qs, + }, + callback! + ); + } + getQueryResults(options?: QueryResultsOptions): Promise; getQueryResults( options: QueryResultsOptions, diff --git a/src/types.d.ts b/src/types.d.ts index a59da1db..f071122e 100644 --- a/src/types.d.ts +++ b/src/types.d.ts @@ -150,6 +150,18 @@ declare namespace bigquery { * Whether Arima model fitted with drift or not. It is always false when d is not 1. */ hasDrift?: boolean; + /** + * If true, holiday_effect is a part of time series decomposition result. + */ + hasHolidayEffect?: boolean; + /** + * If true, spikes_and_dips is a part of time series decomposition result. + */ + hasSpikesAndDips?: boolean; + /** + * If true, step_changes is a part of time series decomposition result. + */ + hasStepChanges?: boolean; /** * Non-seasonal order. */ @@ -167,9 +179,13 @@ declare namespace bigquery { | 'YEARLY' >; /** - * The id to indicate different time series. + * The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used. */ timeSeriesId?: string; + /** + * The tuple of time_series_ids identifying this time series. 
It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns. + */ + timeSeriesIds?: Array; }; /** @@ -224,6 +240,18 @@ declare namespace bigquery { * Is arima model fitted with drift or not. It is always false when d is not 1. */ hasDrift?: boolean; + /** + * If true, holiday_effect is a part of time series decomposition result. + */ + hasHolidayEffect?: boolean; + /** + * If true, spikes_and_dips is a part of time series decomposition result. + */ + hasSpikesAndDips?: boolean; + /** + * If true, step_changes is a part of time series decomposition result. + */ + hasStepChanges?: boolean; /** * Non-seasonal order. */ @@ -241,9 +269,13 @@ declare namespace bigquery { | 'YEARLY' >; /** - * The id to indicate different time series. + * The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used. */ timeSeriesId?: string; + /** + * The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns. + */ + timeSeriesIds?: Array; }; /** @@ -278,6 +310,35 @@ declare namespace bigquery { | 'DATA_READ'; }; + type IAvroOptions = { + /** + * [Optional] If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). + */ + useAvroLogicalTypes?: boolean; + }; + + type IBiEngineReason = { + /** + * [Output-only] High-level BI Engine reason for partial or disabled acceleration. + */ + code?: string; + /** + * [Output-only] Free form human-readable reason for partial or disabled acceleration. + */ + message?: string; + }; + + type IBiEngineStatistics = { + /** + * [Output-only] Specifies which mode of BI Engine acceleration was performed (if any). + */ + biEngineMode?: string; + /** + * In case of DISABLED or PARTIAL bi_engine_mode, these contain the explanatory reasons as to why BI Engine could not accelerate. In case the full query was accelerated, this field is not populated. + */ + biEngineReasons?: Array; + }; + type IBigQueryModelTraining = { /** * [Output-only, Beta] Index of current ML training iteration. Updated during create model query job to show job progress. @@ -557,7 +618,7 @@ declare namespace bigquery { */ type IClusteringMetrics = { /** - * [Beta] Information for all clusters. + * Information for all clusters. */ clusters?: Array; /** @@ -612,6 +673,10 @@ declare namespace bigquery { * [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). */ fieldDelimiter?: string; + /** + * [Optional] An custom string that will represent a NULL value in CSV import data. + */ + null_marker?: string; /** * [Optional] The value that is used to quote data sections in a CSV file. 
BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true. */ @@ -641,6 +706,10 @@ declare namespace bigquery { * [Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER; */ access?: Array<{ + /** + * [Pick one] A grant authorizing all resources of a particular type in a particular dataset access to this dataset. Only views are supported for now. The role field is not required when this field is set. If that dataset is deleted and re-created, its access needs to be granted again via an update operation. + */ + dataset?: IDatasetAccessEntry; /** * [Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: "example.com". Maps to IAM policy member "domain:DOMAIN". */ @@ -682,6 +751,10 @@ declare namespace bigquery { * [Required] A reference that identifies the dataset. */ datasetReference?: IDatasetReference; + /** + * [Output-only] The default collation of the dataset. + */ + defaultCollation?: string; defaultEncryptionConfiguration?: IEncryptionConfiguration; /** * [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. @@ -707,6 +780,10 @@ declare namespace bigquery { * [Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field. */ id?: string; + /** + * [Optional] Indicates if table names are case insensitive in the dataset. + */ + isCaseInsensitive?: boolean; /** * [Output-only] The resource type. */ @@ -733,6 +810,19 @@ declare namespace bigquery { selfLink?: string; }; + type IDatasetAccessEntry = { + /** + * [Required] The dataset this entry applies to. + */ + dataset?: IDatasetReference; + target_types?: Array<{ + /** + * [Required] Which resources in the dataset this entry applies to. 
Currently, only views are supported, but additional target types may be added in the future. Possible values: VIEWS: This entry applies to all views in the dataset. + */ + targetType?: string; + }>; + }; + type IDatasetList = { /** * An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. This property is omitted when there are no datasets in the project. @@ -803,6 +893,21 @@ declare namespace bigquery { labels?: {[key: string]: string}; }; + type IDmlStatistics = { + /** + * Number of deleted Rows. populated by DML DELETE, MERGE and TRUNCATE statements. + */ + deletedRowCount?: string; + /** + * Number of inserted Rows. Populated by DML INSERT and MERGE statements. + */ + insertedRowCount?: string; + /** + * Number of updated Rows. Populated by DML UPDATE and MERGE statements. + */ + updatedRowCount?: string; + }; + type IEncryptionConfiguration = { /** * [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. @@ -1007,20 +1112,6 @@ declare namespace bigquery { substeps?: Array; }; - /** - * Explanation for a single feature. - */ - type IExplanation = { - /** - * Attribution of feature. - */ - attribution?: number; - /** - * Full name of the feature. For non-numerical features, will be formatted like .. Overall size of feature name will always be truncated to first 120 characters. - */ - featureName?: string; - }; - /** * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. */ @@ -1048,6 +1139,10 @@ declare namespace bigquery { * Try to detect schema and format options automatically. Any option specified explicitly will be honored. */ autodetect?: boolean; + /** + * Additional properties to set if sourceFormat is set to Avro. + */ + avroOptions?: IAvroOptions; /** * [Optional] Additional options if sourceFormat is set to BIGTABLE. */ @@ -1064,12 +1159,16 @@ declare namespace bigquery { * Additional properties to set if sourceFormat is set to CSV. */ csvOptions?: ICsvOptions; + /** + * [Optional] Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. 
In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: (38,9) -> NUMERIC; (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); (76,38) -> BIGNUMERIC; (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. + */ + decimalTargetTypes?: Array; /** * [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS. */ googleSheetsOptions?: IGoogleSheetsOptions; /** - * [Optional, Trusted Tester] Options to configure hive partitioning support. + * [Optional] Options to configure hive partitioning support. */ hivePartitioningOptions?: IHivePartitioningOptions; /** @@ -1080,6 +1179,10 @@ declare namespace bigquery { * [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. */ maxBadRecords?: number; + /** + * Additional properties to set if sourceFormat is set to Parquet. + */ + parquetOptions?: IParquetOptions; /** * [Optional] The schema for the data. Schema is required for CSV and JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats. */ @@ -1194,20 +1297,6 @@ declare namespace bigquery { kind?: string; }; - /** - * Global explanations containing the top most important features after training. - */ - type IGlobalExplanation = { - /** - * Class label for this set of global explanations. Will be empty/null for binary logistic and linear regression models. Sorted alphabetically in descending order. - */ - classLabel?: string; - /** - * A list of the top global explanations. Sorted by absolute value of attribution in descending order. - */ - explanations?: Array; - }; - type IGoogleSheetsOptions = { /** * [Optional] Range of a sheet to query from. Only used when non-empty. Typical format: sheet_name!top_left_cell_id:bottom_right_cell_id For example: sheet1!A1:B20 @@ -1411,7 +1500,7 @@ declare namespace bigquery { */ createDisposition?: string; /** - * [Trusted Tester] Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. 
If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. For example: suppose decimal_target_type = ["NUMERIC", "BIGNUMERIC"]. Then if (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exeeds supported range). For duplicated types in this field, only one will be considered and the rest will be ignored. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. + * [Optional] Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: (38,9) -> NUMERIC; (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); (76,38) -> BIGNUMERIC; (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. */ decimalTargetTypes?: Array; /** @@ -1435,13 +1524,17 @@ declare namespace bigquery { */ fieldDelimiter?: string; /** - * [Optional, Trusted Tester] Options to configure hive partitioning support. + * [Optional] Options to configure hive partitioning support. */ hivePartitioningOptions?: IHivePartitioningOptions; /** * [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names */ ignoreUnknownValues?: boolean; + /** + * [Optional] If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON. + */ + jsonExtension?: string; /** * [Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV and JSON. 
The default value is 0, which requires that all records are valid. */ @@ -1450,6 +1543,10 @@ declare namespace bigquery { * [Optional] Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value. */ nullMarker?: string; + /** + * [Optional] Options to configure parquet support. + */ + parquetOptions?: IParquetOptions; /** * If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result. */ @@ -1495,7 +1592,7 @@ declare namespace bigquery { */ timePartitioning?: ITimePartitioning; /** - * [Optional] If sourceFormat is set to "AVRO", indicates whether to enable interpreting logical types into their corresponding types (ie. TIMESTAMP), instead of only using their raw types (ie. INTEGER). + * [Optional] If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). */ useAvroLogicalTypes?: boolean; /** @@ -1521,6 +1618,10 @@ declare namespace bigquery { * [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion. */ createDisposition?: string; + /** + * If true, creates a new session, where session id will be a server generated random id. If false, runs query with an existing session_id passed in ConnectionProperty, otherwise runs query in non-session mode. + */ + createSession?: boolean; /** * [Optional] Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. */ @@ -1767,6 +1868,10 @@ declare namespace bigquery { * [Output-only] Statistics for a child job of a script. */ scriptStatistics?: IScriptStatistics; + /** + * [Output-only] [Preview] Information of the session if this job is part of one. + */ + sessionInfo?: ISessionInfo; /** * [Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE. */ @@ -1782,10 +1887,14 @@ declare namespace bigquery { /** * [Output-only] [Alpha] Information of the multi-statement transaction if this job is part of one. */ - transactionInfoTemplate?: ITransactionInfo; + transactionInfo?: ITransactionInfo; }; type IJobStatistics2 = { + /** + * BI Engine specific Statistics. [Output-only] BI Engine specific Statistics. + */ + biEngineStatistics?: IBiEngineStatistics; /** * [Output-only] Billing tier for the job. 
*/ @@ -1798,10 +1907,18 @@ declare namespace bigquery { * [Output-only] [Preview] The number of row access policies affected by a DDL statement. Present only for DROP ALL ROW ACCESS POLICIES queries. */ ddlAffectedRowAccessPolicyCount?: string; + /** + * [Output-only] The DDL destination table. Present only for ALTER TABLE RENAME TO queries. Note that ddl_target_table is used just for its type information. + */ + ddlDestinationTable?: ITableReference; /** * The DDL operation performed, possibly dependent on the pre-existence of the DDL target. Possible values (new values might be added in the future): "CREATE": The query created the DDL target. "SKIP": No-op. Example cases: the query is CREATE TABLE IF NOT EXISTS while the table already exists, or the query is DROP TABLE IF EXISTS while the table does not exist. "REPLACE": The query replaced the DDL target. Example case: the query is CREATE OR REPLACE TABLE, and the table already exists. "DROP": The query deleted the DDL target. */ ddlOperationPerformed?: string; + /** + * [Output-only] The DDL target dataset. Present only for CREATE/ALTER/DROP SCHEMA queries. + */ + ddlTargetDataset?: IDatasetReference; /** * The DDL target routine. Present only for CREATE/DROP FUNCTION/PROCEDURE queries. */ @@ -1814,6 +1931,10 @@ declare namespace bigquery { * [Output-only] The DDL target table. Present only for CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. */ ddlTargetTable?: ITableReference; + /** + * [Output-only] Detailed statistics for DML statements Present only for DML statements INSERT, UPDATE, DELETE or TRUNCATE. + */ + dmlStats?: IDmlStatistics; /** * [Output-only] The original estimate of bytes processed for the job. */ @@ -2019,6 +2140,10 @@ declare namespace bigquery { }; type IModel = { + /** + * The best trial_id across all training runs. + */ + bestTrialId?: string; /** * Output only. The time when this model was created, in millisecs since the epoch. */ @@ -2083,7 +2208,8 @@ declare namespace bigquery { | 'BOOSTED_TREE_CLASSIFIER' | 'ARIMA' | 'AUTOML_REGRESSOR' - | 'AUTOML_CLASSIFIER'; + | 'AUTOML_CLASSIFIER' + | 'ARIMA_PLUS'; /** * Output only. Information for all training runs in increasing order of start_time. */ @@ -2134,8 +2260,19 @@ declare namespace bigquery { confusionMatrixList?: Array; }; + type IParquetOptions = { + /** + * [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type. + */ + enableListInference?: boolean; + /** + * [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default. + */ + enumAsString?: boolean; + }; + /** - * An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). + * An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). */ type IPolicy = { /** @@ -2270,6 +2407,10 @@ declare namespace bigquery { * Connection properties. */ connectionProperties?: Array; + /** + * If true, creates a new session, where session id will be a server generated random id. If false, runs query with an existing session_id passed in ConnectionProperty, otherwise runs query in non-session mode. 
+ */ + createSession?: boolean; /** * [Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'. */ @@ -2337,6 +2478,10 @@ declare namespace bigquery { * Whether the query result was fetched from the query cache. */ cacheHit?: boolean; + /** + * [Output-only] Detailed statistics for DML statements Present only for DML statements INSERT, UPDATE, DELETE or TRUNCATE. + */ + dmlStats?: IDmlStatistics; /** * [Output-only] The first errors or warnings encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has completed or was unsuccessful. */ @@ -2369,6 +2514,10 @@ declare namespace bigquery { * The schema of the results. Present only when the query completes successfully. */ schema?: ITableSchema; + /** + * [Output-only] [Preview] Information of the session if this job is part of one. + */ + sessionInfo?: ISessionInfo; /** * The total number of bytes processed for this query. If this query was a dry run, this is the number of bytes that would be processed if the query were run. */ @@ -2469,7 +2618,7 @@ declare namespace bigquery { */ medianAbsoluteError?: number; /** - * R^2 score. + * R^2 score. This corresponds to r2_score in ML.EVALUATE. */ rSquared?: number; }; @@ -2491,11 +2640,11 @@ declare namespace bigquery { */ definitionBody?: string; /** - * Optional. [Experimental] The description of the routine if defined. + * Optional. The description of the routine, if defined. */ description?: string; /** - * Optional. [Experimental] The determinism level of the JavaScript UDF if defined. + * Optional. The determinism level of the JavaScript UDF, if defined. */ determinismLevel?: | 'DETERMINISM_LEVEL_UNSPECIFIED' @@ -2518,7 +2667,11 @@ declare namespace bigquery { */ lastModifiedTime?: string; /** - * Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definition_body at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. For example, for the functions created with the following statements: * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);` * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));` The return_type is `{type_kind: "FLOAT64"}` for `Add` and `Decrement`, and is absent for `Increment` (inferred as FLOAT64 at query time). Suppose the function `Add` is replaced by `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` Then the inferred return type of `Increment` is automatically changed to INT64 at query time, while the return type of `Decrement` remains FLOAT64. + * Optional. Can be set only if routine_type = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definition_body at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specificed in return table type, at query time. + */ + returnTableType?: IStandardSqlTableType; + /** + * Optional if language = "SQL"; required otherwise. Cannot be set if routine_type = "TABLE_VALUED_FUNCTION". If absent, the return type is inferred from definition_body at query time in each query that references this routine. 
If present, then the evaluated result will be cast to the specified returned type at query time. For example, for the functions created with the following statements: * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);` * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));` The return_type is `{type_kind: "FLOAT64"}` for `Add` and `Decrement`, and is absent for `Increment` (inferred as FLOAT64 at query time). Suppose the function `Add` is replaced by `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` Then the inferred return type of `Increment` is automatically changed to INT64 at query time, while the return type of `Decrement` remains FLOAT64. */ returnType?: IStandardSqlDataType; /** @@ -2528,7 +2681,15 @@ declare namespace bigquery { /** * Required. The type of routine. */ - routineType?: 'ROUTINE_TYPE_UNSPECIFIED' | 'SCALAR_FUNCTION' | 'PROCEDURE'; + routineType?: + | 'ROUTINE_TYPE_UNSPECIFIED' + | 'SCALAR_FUNCTION' + | 'PROCEDURE' + | 'TABLE_VALUED_FUNCTION'; + /** + * Optional. Can be set for procedures only. If true (default), the definition body will be validated in the creation and the updates of the procedure. For procedures with an argument of ANY TYPE, the definition body validtion is not supported at creation/update time, and thus this field must be set to false explicitly. + */ + strictMode?: boolean; }; type IRoutineReference = { @@ -2650,6 +2811,13 @@ declare namespace bigquery { stackFrames?: Array; }; + type ISessionInfo = { + /** + * [Output-only] // [Preview] Id of the session. + */ + sessionId?: string; + }; + /** * Request message for `SetIamPolicy` method. */ @@ -2666,11 +2834,11 @@ declare namespace bigquery { type ISnapshotDefinition = { /** - * [Required] Reference describing the ID of the table that is snapshotted. + * [Required] Reference describing the ID of the table that was snapshot. */ baseTableReference?: ITableReference; /** - * [Required] The time at which the base table was snapshot. + * [Required] The time at which the base table was snapshot. This value is reported in the JSON response using RFC3339 format. */ snapshotTime?: string; }; @@ -2701,9 +2869,11 @@ declare namespace bigquery { | 'DATE' | 'TIME' | 'DATETIME' + | 'INTERVAL' | 'GEOGRAPHY' | 'NUMERIC' | 'BIGNUMERIC' + | 'JSON' | 'ARRAY' | 'STRUCT'; }; @@ -2724,6 +2894,16 @@ declare namespace bigquery { type IStandardSqlStructType = {fields?: Array}; + /** + * A table type + */ + type IStandardSqlTableType = { + /** + * The columns in this table type + */ + columns?: Array; + }; + type IStreamingbuffer = { /** * [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer. @@ -2748,6 +2928,10 @@ declare namespace bigquery { * [Output-only] The time when this table was created, in milliseconds since the epoch. */ creationTime?: string; + /** + * [Output-only] The default collation of the table. + */ + defaultCollation?: string; /** * [Optional] A user-friendly description of this table. */ @@ -2945,6 +3129,10 @@ declare namespace bigquery { */ names?: Array; }; + /** + * Optional. Collation specification of the field. It only can be set on string type field. + */ + collationSpec?: string; /** * [Optional] The field description. The maximum length is 1,024 characters. */ @@ -2953,12 +3141,16 @@ declare namespace bigquery { * [Optional] Describes the nested schema fields if the type property is set to RECORD. 
*/ fields?: Array; + /** + * [Optional] Maximum length of values of this field for STRINGS or BYTES. If max_length is not specified, no maximum length constraint is imposed on this field. If type = "STRING", then max_length represents the maximum UTF-8 length of strings in this field. If type = "BYTES", then max_length represents the maximum number of bytes in this field. It is invalid to set this field if type ≠ "STRING" and ≠ "BYTES". + */ + maxLength?: string; /** * [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE. */ mode?: string; /** - * [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters. + * [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 300 characters. */ name?: string; policyTags?: { @@ -2968,7 +3160,15 @@ declare namespace bigquery { names?: Array; }; /** - * [Required] The field data type. Possible values include STRING, BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD (where RECORD indicates that the field contains a nested schema) or STRUCT (same as RECORD). + * [Optional] Precision (maximum number of total digits in base 10) and scale (maximum number of digits in the fractional part in base 10) constraints for values of this field for NUMERIC or BIGNUMERIC. It is invalid to set precision or scale if type ≠ "NUMERIC" and ≠ "BIGNUMERIC". If precision and scale are not specified, no value range constraint is imposed on this field insofar as values are permitted by the type. Values of this NUMERIC or BIGNUMERIC field must be in this range when: - Precision (P) and scale (S) are specified: [-10P-S + 10-S, 10P-S - 10-S] - Precision (P) is specified but not scale (and thus scale is interpreted to be equal to zero): [-10P + 1, 10P - 1]. Acceptable values for precision and scale if both are specified: - If type = "NUMERIC": 1 ≤ precision - scale ≤ 29 and 0 ≤ scale ≤ 9. - If type = "BIGNUMERIC": 1 ≤ precision - scale ≤ 38 and 0 ≤ scale ≤ 38. Acceptable values for precision if only precision is specified but not scale (and thus scale is interpreted to be equal to zero): - If type = "NUMERIC": 1 ≤ precision ≤ 29. - If type = "BIGNUMERIC": 1 ≤ precision ≤ 38. If scale is specified but not precision, then it is invalid. + */ + precision?: string; + /** + * [Optional] See documentation for precision. + */ + scale?: string; + /** + * [Required] The field data type. Possible values include STRING, BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), NUMERIC, BIGNUMERIC, BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, INTERVAL, RECORD (where RECORD indicates that the field contains a nested schema) or STRUCT (same as RECORD). */ type?: string; }; @@ -3115,7 +3315,14 @@ declare namespace bigquery { type?: string; }; + /** + * Options used in model training. + */ type ITrainingOptions = { + /** + * If true, detect step changes and make data adjustment in the input time series. + */ + adjustStepChanges?: boolean; /** * Whether to enable auto ARIMA or not. */ @@ -3128,6 +3335,10 @@ declare namespace bigquery { * Batch size for dnn models. */ batchSize?: string; + /** + * If true, clean spikes and dips in the input time series. 
+ */ + cleanSpikesAndDips?: boolean; /** * The data frequency of a time series. */ @@ -3139,7 +3350,8 @@ declare namespace bigquery { | 'MONTHLY' | 'WEEKLY' | 'DAILY' - | 'HOURLY'; + | 'HOURLY' + | 'PER_MINUTE'; /** * The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties */ @@ -3158,6 +3370,10 @@ declare namespace bigquery { | 'SEQUENTIAL' | 'NO_SPLIT' | 'AUTO_SPLIT'; + /** + * If true, perform decompose time series and save the results. + */ + decomposeTimeSeries?: boolean; /** * Distance type for clustering models. */ @@ -3327,7 +3543,7 @@ declare namespace bigquery { */ minSplitLoss?: number; /** - * [Beta] Google Cloud Storage URI from which the model was imported. Only applicable for imported models. + * Google Cloud Storage URI from which the model was imported. Only applicable for imported models. */ modelUri?: string; /** @@ -3362,9 +3578,13 @@ declare namespace bigquery { */ timeSeriesDataColumn?: string; /** - * The id column that will be used to indicate different time series to forecast in parallel. + * The time series id column that was used during ARIMA model training. */ timeSeriesIdColumn?: string; + /** + * The time series id columns that were used during ARIMA model training. + */ + timeSeriesIdColumns?: Array; /** * Column to be designated as time series timestamp for ARIMA model. */ @@ -3395,10 +3615,6 @@ declare namespace bigquery { * The evaluation metrics over training/eval data that were computed at the end of training. */ evaluationMetrics?: IEvaluationMetrics; - /** - * Global explanations for important features of the model. For multi-class models, there is one entry for each label class. For other models, there is only one entry in the list. - */ - globalExplanations?: Array; /** * Output of each iteration run, results.size() <= max_iterations. */ @@ -3439,6 +3655,10 @@ declare namespace bigquery { * [Required] A query that BigQuery executes when the view is referenced. */ query?: string; + /** + * True if the column names are explicitly specified. For example by using the 'CREATE VIEW v(c1, c2) AS ...' syntax. Can only be set using BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ + */ + useExplicitColumnNames?: boolean; /** * Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ Queries and views that reference this view must use the same flag value. */ @@ -3494,6 +3714,16 @@ declare namespace bigquery { location?: string; }; + /** + * Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview. + */ + type IDeleteParams = { + /** + * The geographic location of the job. Required. See details at: https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + */ + location?: string; + }; + /** * Returns information about a specific job. Job information is available for a six month period after creation. 
Requires that you're the person who ran the job, or have the Is Owner project role. */ @@ -3571,7 +3801,7 @@ declare namespace bigquery { namespace models { /** - * Lists all models in the specified dataset. Requires the READER dataset role. + * Lists all models in the specified dataset. Requires the READER dataset role. After retrieving the list of models, you can get information about a particular model by calling the models.get method. */ type IListParams = { /** diff --git a/system-test/bigquery.ts b/system-test/bigquery.ts index 2810e899..d7e9cb8e 100644 --- a/system-test/bigquery.ts +++ b/system-test/bigquery.ts @@ -646,6 +646,25 @@ describe('BigQuery', () => { }); }); + describe.only('BigQuery/Job', () => { + it('should delete a job', async () => { + const opts = { + configuration: { + query: { + query: 'SELECT 100 as foo', + }, + }, + location: 'us-east1', + }; + + const [job] = await bigquery.createJob(opts); + const [resp] = await job.delete(); + const [exists] = await job.exists(); + assert.deepStrictEqual(resp, {}); + assert.strictEqual(exists, false); + }); + }); + describe('BigQuery/Model', () => { let model: Model; const bucket = storage.bucket(generateName('bucket')); From e0d78201ff9e4e0e992751b05e403809a1fb0978 Mon Sep 17 00:00:00 2001 From: steffnay Date: Fri, 8 Oct 2021 14:32:09 -0700 Subject: [PATCH 02/13] fix test --- system-test/bigquery.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/bigquery.ts b/system-test/bigquery.ts index d7e9cb8e..f5603276 100644 --- a/system-test/bigquery.ts +++ b/system-test/bigquery.ts @@ -646,7 +646,7 @@ describe('BigQuery', () => { }); }); - describe.only('BigQuery/Job', () => { + describe('BigQuery/Job', () => { it('should delete a job', async () => { const opts = { configuration: { From 76bfa9e24df44842abf82f0e388870c011f04798 Mon Sep 17 00:00:00 2001 From: steffnay Date: Sat, 13 Nov 2021 15:45:29 -0800 Subject: [PATCH 03/13] refactor --- src/job.ts | 107 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 60 insertions(+), 47 deletions(-) diff --git a/src/job.ts b/src/job.ts index 7bcae05b..15bfc7ca 100644 --- a/src/job.ts +++ b/src/job.ts @@ -27,6 +27,7 @@ import { import {paginator, ResourceStream} from '@google-cloud/paginator'; import {promisifyAll} from '@google-cloud/promisify'; import * as extend from 'extend'; +import * as r from 'teeny-request'; import { BigQuery, @@ -220,6 +221,18 @@ class Job extends Operation { */ get: true, + delete: { + reqOpts: { + method: 'DELETE', + uri: '/delete', + qs: { + get location() { + return location; + }, + }, + }, + }, + /** * @callback GetJobMetadataCallback * @param {?Error} err Request error, if any. @@ -387,53 +400,53 @@ class Job extends Operation { ); } - delete(): Promise; - delete(callback: DeleteCallback): void; - /** - * @callback DeleteJobCallback - * @param {?Error} err Request error, if any. - * @param {object} apiResponse The full API response. - */ - /** - * Delete the job. - * - * @see [Jobs: delete API Documentation]{@link https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/delete} - * - * @method Job#delete - * @param {DeleteJobCallback} [callback] The callback function. - * @param {?error} callback.err An error returned while making this - * request. - * @param {object} callback.apiResponse The full API response. 
- * @returns {Promise} - * - * @example - * const {BigQuery} = require('@google-cloud/bigquery'); - * const bigquery = new BigQuery(); - * - * const job = bigquery.job(jobId); - * - * job.delete((err, apiResponse) => {}); - * - * @example If the callback is omitted we'll return a Promise. - * const [apiResponse] = await job.delete(); - * @example If successful, the response body is empty. - */ - delete(callback?: DeleteCallback): void | Promise { - let qs; - - if (this.location) { - qs = {location: this.location}; - } - - this.request( - { - method: 'DELETE', - uri: '/delete', - qs, - }, - callback! - ); - } + // delete(): Promise; + // delete(callback: DeleteCallback): void; + // /** + // * @callback DeleteJobCallback + // * @param {?Error} err Request error, if any. + // * @param {object} apiResponse The full API response. + // */ + // /** + // * Delete the job. + // * + // * @see [Jobs: delete API Documentation]{@link https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/delete} + // * + // * @method Job#delete + // * @param {DeleteJobCallback} [callback] The callback function. + // * @param {?error} callback.err An error returned while making this + // * request. + // * @param {object} callback.apiResponse The full API response. + // * @returns {Promise} + // * + // * @example + // * const {BigQuery} = require('@google-cloud/bigquery'); + // * const bigquery = new BigQuery(); + // * + // * const job = bigquery.job(jobId); + // * + // * job.delete((err, apiResponse) => {}); + // * + // * @example If the callback is omitted we'll return a Promise. + // * const [apiResponse] = await job.delete(); + // * @example If successful, the response body is empty. + // */ + // delete(callback?: DeleteCallback): void | Promise { + // let qs; + + // if (this.location) { + // qs = {location: this.location}; + // } + + // this.request( + // { + // method: 'DELETE', + // uri: '/delete', + // qs, + // }, + // callback! + // ); + // } getQueryResults(options?: QueryResultsOptions): Promise; getQueryResults( From 70622a81589461ec6418bdc3f95bcdd387c4c1d6 Mon Sep 17 00:00:00 2001 From: steffnay Date: Sat, 13 Nov 2021 15:52:36 -0800 Subject: [PATCH 04/13] lint --- src/job.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/job.ts b/src/job.ts index 15bfc7ca..8c391fa3 100644 --- a/src/job.ts +++ b/src/job.ts @@ -27,7 +27,6 @@ import { import {paginator, ResourceStream} from '@google-cloud/paginator'; import {promisifyAll} from '@google-cloud/promisify'; import * as extend from 'extend'; -import * as r from 'teeny-request'; import { BigQuery, From 6f5e237c8d2ea9248a459da3f6cddf66bdbb795a Mon Sep 17 00:00:00 2001 From: steffnay Date: Sat, 13 Nov 2021 16:25:12 -0800 Subject: [PATCH 05/13] lint --- src/job.ts | 59 ++++++++++++++++++++++++++++++++++++++++++----------- test/job.ts | 2 +- 2 files changed, 48 insertions(+), 13 deletions(-) diff --git a/src/job.ts b/src/job.ts index 8c391fa3..55907606 100644 --- a/src/job.ts +++ b/src/job.ts @@ -133,6 +133,53 @@ class Job extends Operation { let location: string; const methods = { + /** + * @callback DeleteJobCallback + * @param {?Error} err Request error, if any. + * @param {object} apiResponse The full API response. + */ + /** + * @typedef {array} DeleteJobResponse + * @property {object} 0 The full API response. + */ + /** + * Delete the job. + * + * @see [Jobs: delete API Documentation]{@link https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/delete} + * + * @method Job#delete + * @param {DeleteJobCallback} [callback] The callback function. 
+ * @param {?error} callback.err An error returned while making this + * request. + * @param {object} callback.apiResponse The full API response. + * @returns {Promise} + * + * @example + * const {BigQuery} = require('@google-cloud/bigquery'); + * const bigquery = new BigQuery(); + * + * const job = bigquery.job(jobId); + * job.delete((err, apiResponse) => { + * if (!err) { + * // The job was deleted successfully. + * } + * }); + * + * @example If the callback is omitted a Promise will be returned + * const [apiResponse] = await job.delete(); + */ + delete: { + reqOpts: { + method: 'DELETE', + uri: '/delete', + qs: { + get location() { + return location; + }, + }, + }, + }, + /** * @callback JobExistsCallback * @param {?Error} err Request error, if any. @@ -220,18 +267,6 @@ class Job extends Operation { */ get: true, - delete: { - reqOpts: { - method: 'DELETE', - uri: '/delete', - qs: { - get location() { - return location; - }, - }, - }, - }, - /** * @callback GetJobMetadataCallback * @param {?Error} err Request error, if any. diff --git a/test/job.ts b/test/job.ts index ab195ff8..0f2deee4 100644 --- a/test/job.ts +++ b/test/job.ts @@ -73,7 +73,7 @@ const fakePaginator = { const sandbox = sinon.createSandbox(); -describe('BigQuery/Job', () => { +describe.only('BigQuery/Job', () => { // eslint-disable-next-line @typescript-eslint/no-explicit-any const BIGQUERY: any = { projectId: 'my-project', From af6a343977a11be798399590688a55dcdd061eb9 Mon Sep 17 00:00:00 2001 From: steffnay Date: Sat, 13 Nov 2021 16:33:17 -0800 Subject: [PATCH 06/13] update jobs test --- test/job.ts | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/job.ts b/test/job.ts index 0f2deee4..ce39a7cc 100644 --- a/test/job.ts +++ b/test/job.ts @@ -73,7 +73,7 @@ const fakePaginator = { const sandbox = sinon.createSandbox(); -describe.only('BigQuery/Job', () => { +describe('BigQuery/Job', () => { // eslint-disable-next-line @typescript-eslint/no-explicit-any const BIGQUERY: any = { projectId: 'my-project', @@ -123,6 +123,13 @@ describe.only('BigQuery/Job', () => { assert.strictEqual(calledWith.baseUrl, '/jobs'); assert.strictEqual(calledWith.id, JOB_ID); assert.deepStrictEqual(calledWith.methods, { + delete: { + reqOpts: { + method: 'DELETE', + uri: '/delete', + qs: {location: undefined}, + }, + }, exists: true, get: true, getMetadata: { From 5d3c2a4429b5fb2c903a74559251da65da01d2e8 Mon Sep 17 00:00:00 2001 From: steffnay Date: Mon, 15 Nov 2021 13:49:15 -0800 Subject: [PATCH 07/13] refactor --- src/job.ts | 48 ------------------------------------------------ 1 file changed, 48 deletions(-) diff --git a/src/job.ts b/src/job.ts index 55907606..863307f5 100644 --- a/src/job.ts +++ b/src/job.ts @@ -434,54 +434,6 @@ class Job extends Operation { ); } - // delete(): Promise; - // delete(callback: DeleteCallback): void; - // /** - // * @callback DeleteJobCallback - // * @param {?Error} err Request error, if any. - // * @param {object} apiResponse The full API response. - // */ - // /** - // * Delete the job. - // * - // * @see [Jobs: delete API Documentation]{@link https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/delete} - // * - // * @method Job#delete - // * @param {DeleteJobCallback} [callback] The callback function. - // * @param {?error} callback.err An error returned while making this - // * request. - // * @param {object} callback.apiResponse The full API response. 
- // * @returns {Promise} - // * - // * @example - // * const {BigQuery} = require('@google-cloud/bigquery'); - // * const bigquery = new BigQuery(); - // * - // * const job = bigquery.job(jobId); - // * - // * job.delete((err, apiResponse) => {}); - // * - // * @example If the callback is omitted we'll return a Promise. - // * const [apiResponse] = await job.delete(); - // * @example If successful, the response body is empty. - // */ - // delete(callback?: DeleteCallback): void | Promise { - // let qs; - - // if (this.location) { - // qs = {location: this.location}; - // } - - // this.request( - // { - // method: 'DELETE', - // uri: '/delete', - // qs, - // }, - // callback! - // ); - // } - getQueryResults(options?: QueryResultsOptions): Promise; getQueryResults( options: QueryResultsOptions, From 4d9627f1ae11b294c02ce66be06a0958a0e030c9 Mon Sep 17 00:00:00 2001 From: steffnay Date: Mon, 15 Nov 2021 13:52:44 -0800 Subject: [PATCH 08/13] lint --- src/job.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/job.ts b/src/job.ts index 863307f5..6d17b272 100644 --- a/src/job.ts +++ b/src/job.ts @@ -434,12 +434,6 @@ class Job extends Operation { ); } - getQueryResults(options?: QueryResultsOptions): Promise; - getQueryResults( - options: QueryResultsOptions, - callback: QueryRowsCallback - ): void; - getQueryResults(callback: QueryRowsCallback): void; /** * Get the results of a job. * From 0ac46764ff9f88cd502d741e9c2ecd20499345d1 Mon Sep 17 00:00:00 2001 From: steffnay Date: Mon, 15 Nov 2021 13:55:28 -0800 Subject: [PATCH 09/13] remove unnecessary DeleteCallback --- src/index.ts | 1 - src/job.ts | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/index.ts b/src/index.ts index 04b347e0..f852cffa 100644 --- a/src/index.ts +++ b/src/index.ts @@ -82,7 +82,6 @@ export { export { CancelCallback, CancelResponse, - DeleteCallback, Job, JobMetadata, JobOptions, diff --git a/src/job.ts b/src/job.ts index 6d17b272..a1f28778 100644 --- a/src/job.ts +++ b/src/job.ts @@ -46,7 +46,7 @@ export type JobOptions = JobRequest; export type CancelCallback = RequestCallback; export type CancelResponse = [bigquery.IJobCancelResponse]; -export type DeleteCallback = RequestCallback; +// export type DeleteCallback = RequestCallback; export type QueryResultsOptions = { job?: Job; From 85eab004a4facd98e5acca9bbad73955701a35fb Mon Sep 17 00:00:00 2001 From: steffnay Date: Mon, 15 Nov 2021 13:56:06 -0800 Subject: [PATCH 10/13] remove unnecessary DeleteCallback --- src/job.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/job.ts b/src/job.ts index a1f28778..c20579c8 100644 --- a/src/job.ts +++ b/src/job.ts @@ -46,8 +46,6 @@ export type JobOptions = JobRequest; export type CancelCallback = RequestCallback; export type CancelResponse = [bigquery.IJobCancelResponse]; -// export type DeleteCallback = RequestCallback; - export type QueryResultsOptions = { job?: Job; wrapIntegers?: boolean | IntegerTypeCastOptions; From 7f959646aa2d2914545930508ea07a4f10f72bf1 Mon Sep 17 00:00:00 2001 From: steffnay Date: Tue, 7 Dec 2021 11:58:00 -0800 Subject: [PATCH 11/13] update --- src/types.d.ts | 338 ++++++++----------------------------------------- 1 file changed, 54 insertions(+), 284 deletions(-) diff --git a/src/types.d.ts b/src/types.d.ts index 67162c80..6b74711f 100644 --- a/src/types.d.ts +++ b/src/types.d.ts @@ -150,18 +150,6 @@ declare namespace bigquery { * Whether Arima model fitted with drift or not. It is always false when d is not 1. 
*/ hasDrift?: boolean; - /** - * If true, holiday_effect is a part of time series decomposition result. - */ - hasHolidayEffect?: boolean; - /** - * If true, spikes_and_dips is a part of time series decomposition result. - */ - hasSpikesAndDips?: boolean; - /** - * If true, step_changes is a part of time series decomposition result. - */ - hasStepChanges?: boolean; /** * Non-seasonal order. */ @@ -179,13 +167,9 @@ declare namespace bigquery { | 'YEARLY' >; /** - * The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used. + * The id to indicate different time series. */ timeSeriesId?: string; - /** - * The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns. - */ - timeSeriesIds?: Array; }; /** @@ -240,18 +224,6 @@ declare namespace bigquery { * Is arima model fitted with drift or not. It is always false when d is not 1. */ hasDrift?: boolean; - /** - * If true, holiday_effect is a part of time series decomposition result. - */ - hasHolidayEffect?: boolean; - /** - * If true, spikes_and_dips is a part of time series decomposition result. - */ - hasSpikesAndDips?: boolean; - /** - * If true, step_changes is a part of time series decomposition result. - */ - hasStepChanges?: boolean; /** * Non-seasonal order. */ @@ -269,13 +241,9 @@ declare namespace bigquery { | 'YEARLY' >; /** - * The time_series_id value for this time series. It will be one of the unique values from the time_series_id_column specified during ARIMA model training. Only present when time_series_id_column training option was used. + * The id to indicate different time series. */ timeSeriesId?: string; - /** - * The tuple of time_series_ids identifying this time series. It will be one of the unique tuples of values present in the time_series_id_columns specified during ARIMA model training. Only present when time_series_id_columns training option was used and the order of values here are same as the order of time_series_id_columns. - */ - timeSeriesIds?: Array; }; /** @@ -310,35 +278,6 @@ declare namespace bigquery { | 'DATA_READ'; }; - type IAvroOptions = { - /** - * [Optional] If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). - */ - useAvroLogicalTypes?: boolean; - }; - - type IBiEngineReason = { - /** - * [Output-only] High-level BI Engine reason for partial or disabled acceleration. - */ - code?: string; - /** - * [Output-only] Free form human-readable reason for partial or disabled acceleration. - */ - message?: string; - }; - - type IBiEngineStatistics = { - /** - * [Output-only] Specifies which mode of BI Engine acceleration was performed (if any). - */ - biEngineMode?: string; - /** - * In case of DISABLED or PARTIAL bi_engine_mode, these contain the explanatory reasons as to why BI Engine could not accelerate. In case the full query was accelerated, this field is not populated. - */ - biEngineReasons?: Array; - }; - type IBigQueryModelTraining = { /** * [Output-only, Beta] Index of current ML training iteration. 
Updated during create model query job to show job progress. @@ -618,7 +557,7 @@ declare namespace bigquery { */ type IClusteringMetrics = { /** - * Information for all clusters. + * [Beta] Information for all clusters. */ clusters?: Array; /** @@ -673,10 +612,6 @@ declare namespace bigquery { * [Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (','). */ fieldDelimiter?: string; - /** - * [Optional] An custom string that will represent a NULL value in CSV import data. - */ - null_marker?: string; /** * [Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true. */ @@ -706,10 +641,6 @@ declare namespace bigquery { * [Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER; */ access?: Array<{ - /** - * [Pick one] A grant authorizing all resources of a particular type in a particular dataset access to this dataset. Only views are supported for now. The role field is not required when this field is set. If that dataset is deleted and re-created, its access needs to be granted again via an update operation. - */ - dataset?: IDatasetAccessEntry; /** * [Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: "example.com". Maps to IAM policy member "domain:DOMAIN". */ @@ -751,10 +682,6 @@ declare namespace bigquery { * [Required] A reference that identifies the dataset. */ datasetReference?: IDatasetReference; - /** - * [Output-only] The default collation of the dataset. - */ - defaultCollation?: string; defaultEncryptionConfiguration?: IEncryptionConfiguration; /** * [Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property. 
@@ -780,10 +707,6 @@ declare namespace bigquery { * [Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field. */ id?: string; - /** - * [Optional] Indicates if table names are case insensitive in the dataset. - */ - isCaseInsensitive?: boolean; /** * [Output-only] The resource type. */ @@ -810,19 +733,6 @@ declare namespace bigquery { selfLink?: string; }; - type IDatasetAccessEntry = { - /** - * [Required] The dataset this entry applies to. - */ - dataset?: IDatasetReference; - target_types?: Array<{ - /** - * [Required] Which resources in the dataset this entry applies to. Currently, only views are supported, but additional target types may be added in the future. Possible values: VIEWS: This entry applies to all views in the dataset. - */ - targetType?: string; - }>; - }; - type IDatasetList = { /** * An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. This property is omitted when there are no datasets in the project. @@ -893,21 +803,6 @@ declare namespace bigquery { labels?: {[key: string]: string}; }; - type IDmlStatistics = { - /** - * Number of deleted Rows. populated by DML DELETE, MERGE and TRUNCATE statements. - */ - deletedRowCount?: string; - /** - * Number of inserted Rows. Populated by DML INSERT and MERGE statements. - */ - insertedRowCount?: string; - /** - * Number of updated Rows. Populated by DML UPDATE and MERGE statements. - */ - updatedRowCount?: string; - }; - type IEncryptionConfiguration = { /** * [Optional] Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. @@ -1112,6 +1007,20 @@ declare namespace bigquery { substeps?: Array; }; + /** + * Explanation for a single feature. + */ + type IExplanation = { + /** + * Attribution of feature. + */ + attribution?: number; + /** + * Full name of the feature. For non-numerical features, will be formatted like .. Overall size of feature name will always be truncated to first 120 characters. + */ + featureName?: string; + }; + /** * Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. 
*/ @@ -1139,10 +1048,6 @@ declare namespace bigquery { * Try to detect schema and format options automatically. Any option specified explicitly will be honored. */ autodetect?: boolean; - /** - * Additional properties to set if sourceFormat is set to Avro. - */ - avroOptions?: IAvroOptions; /** * [Optional] Additional options if sourceFormat is set to BIGTABLE. */ @@ -1159,16 +1064,12 @@ declare namespace bigquery { * Additional properties to set if sourceFormat is set to CSV. */ csvOptions?: ICsvOptions; - /** - * [Optional] Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: (38,9) -> NUMERIC; (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); (76,38) -> BIGNUMERIC; (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. - */ - decimalTargetTypes?: Array; /** * [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS. */ googleSheetsOptions?: IGoogleSheetsOptions; /** - * [Optional] Options to configure hive partitioning support. + * [Optional, Trusted Tester] Options to configure hive partitioning support. */ hivePartitioningOptions?: IHivePartitioningOptions; /** @@ -1179,10 +1080,6 @@ declare namespace bigquery { * [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV, JSON, and Google Sheets. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. */ maxBadRecords?: number; - /** - * Additional properties to set if sourceFormat is set to Parquet. - */ - parquetOptions?: IParquetOptions; /** * [Optional] The schema for the data. Schema is required for CSV and JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats. */ @@ -1297,6 +1194,20 @@ declare namespace bigquery { kind?: string; }; + /** + * Global explanations containing the top most important features after training. + */ + type IGlobalExplanation = { + /** + * Class label for this set of global explanations. Will be empty/null for binary logistic and linear regression models. Sorted alphabetically in descending order. + */ + classLabel?: string; + /** + * A list of the top global explanations. Sorted by absolute value of attribution in descending order. + */ + explanations?: Array; + }; + type IGoogleSheetsOptions = { /** * [Optional] Range of a sheet to query from. 
Only used when non-empty. Typical format: sheet_name!top_left_cell_id:bottom_right_cell_id For example: sheet1!A1:B20 @@ -1500,7 +1411,7 @@ declare namespace bigquery { */ createDisposition?: string; /** - * [Optional] Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: (38,9) -> NUMERIC; (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); (76,38) -> BIGNUMERIC; (77,38) -> BIGNUMERIC (error if value exeeds supported range). This field cannot contain duplicate types. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other file formats. + * [Trusted Tester] Defines the list of possible SQL data types to which the source decimal values are converted. This list and the precision and the scale parameters of the decimal field determine the target type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the specified list and if it supports the precision and the scale. STRING supports all precision and scale values. If none of the listed types supports the precision and the scale, the type supporting the widest range in the specified list is picked, and if a value exceeds the supported range when reading the data, an error will be thrown. For example: suppose decimal_target_type = ["NUMERIC", "BIGNUMERIC"]. Then if (precision,scale) is: * (38,9) -> NUMERIC; * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value exeeds supported range). For duplicated types in this field, only one will be considered and the rest will be ignored. The order of the types in this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. */ decimalTargetTypes?: Array; /** @@ -1524,17 +1435,13 @@ declare namespace bigquery { */ fieldDelimiter?: string; /** - * [Optional] Options to configure hive partitioning support. + * [Optional, Trusted Tester] Options to configure hive partitioning support. */ hivePartitioningOptions?: IHivePartitioningOptions; /** * [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. 
The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names */ ignoreUnknownValues?: boolean; - /** - * [Optional] If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited GeoJSON: set to GEOJSON. - */ - jsonExtension?: string; /** * [Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. This is only valid for CSV and JSON. The default value is 0, which requires that all records are valid. */ @@ -1543,10 +1450,6 @@ declare namespace bigquery { * [Optional] Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as an empty value. */ nullMarker?: string; - /** - * [Optional] Options to configure parquet support. - */ - parquetOptions?: IParquetOptions; /** * If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result. */ @@ -1592,7 +1495,7 @@ declare namespace bigquery { */ timePartitioning?: ITimePartitioning; /** - * [Optional] If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER). + * [Optional] If sourceFormat is set to "AVRO", indicates whether to enable interpreting logical types into their corresponding types (ie. TIMESTAMP), instead of only using their raw types (ie. INTEGER). */ useAvroLogicalTypes?: boolean; /** @@ -1618,10 +1521,6 @@ declare namespace bigquery { * [Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion. */ createDisposition?: string; - /** - * If true, creates a new session, where session id will be a server generated random id. If false, runs query with an existing session_id passed in ConnectionProperty, otherwise runs query in non-session mode. - */ - createSession?: boolean; /** * [Optional] Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. */ @@ -1868,10 +1767,6 @@ declare namespace bigquery { * [Output-only] Statistics for a child job of a script. */ scriptStatistics?: IScriptStatistics; - /** - * [Output-only] [Preview] Information of the session if this job is part of one. 
- */ - sessionInfo?: ISessionInfo; /** * [Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE. */ @@ -1887,14 +1782,10 @@ declare namespace bigquery { /** * [Output-only] [Alpha] Information of the multi-statement transaction if this job is part of one. */ - transactionInfo?: ITransactionInfo; + transactionInfoTemplate?: ITransactionInfo; }; type IJobStatistics2 = { - /** - * BI Engine specific Statistics. [Output-only] BI Engine specific Statistics. - */ - biEngineStatistics?: IBiEngineStatistics; /** * [Output-only] Billing tier for the job. */ @@ -1907,18 +1798,10 @@ declare namespace bigquery { * [Output-only] [Preview] The number of row access policies affected by a DDL statement. Present only for DROP ALL ROW ACCESS POLICIES queries. */ ddlAffectedRowAccessPolicyCount?: string; - /** - * [Output-only] The DDL destination table. Present only for ALTER TABLE RENAME TO queries. Note that ddl_target_table is used just for its type information. - */ - ddlDestinationTable?: ITableReference; /** * The DDL operation performed, possibly dependent on the pre-existence of the DDL target. Possible values (new values might be added in the future): "CREATE": The query created the DDL target. "SKIP": No-op. Example cases: the query is CREATE TABLE IF NOT EXISTS while the table already exists, or the query is DROP TABLE IF EXISTS while the table does not exist. "REPLACE": The query replaced the DDL target. Example case: the query is CREATE OR REPLACE TABLE, and the table already exists. "DROP": The query deleted the DDL target. */ ddlOperationPerformed?: string; - /** - * [Output-only] The DDL target dataset. Present only for CREATE/ALTER/DROP SCHEMA queries. - */ - ddlTargetDataset?: IDatasetReference; /** * The DDL target routine. Present only for CREATE/DROP FUNCTION/PROCEDURE queries. */ @@ -1931,10 +1814,6 @@ declare namespace bigquery { * [Output-only] The DDL target table. Present only for CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. */ ddlTargetTable?: ITableReference; - /** - * [Output-only] Detailed statistics for DML statements Present only for DML statements INSERT, UPDATE, DELETE or TRUNCATE. - */ - dmlStats?: IDmlStatistics; /** * [Output-only] The original estimate of bytes processed for the job. */ @@ -2140,10 +2019,6 @@ declare namespace bigquery { }; type IModel = { - /** - * The best trial_id across all training runs. - */ - bestTrialId?: string; /** * Output only. The time when this model was created, in millisecs since the epoch. */ @@ -2208,8 +2083,7 @@ declare namespace bigquery { | 'BOOSTED_TREE_CLASSIFIER' | 'ARIMA' | 'AUTOML_REGRESSOR' - | 'AUTOML_CLASSIFIER' - | 'ARIMA_PLUS'; + | 'AUTOML_CLASSIFIER'; /** * Output only. Information for all training runs in increasing order of start_time. */ @@ -2260,17 +2134,6 @@ declare namespace bigquery { confusionMatrixList?: Array; }; - type IParquetOptions = { - /** - * [Optional] Indicates whether to use schema inference specifically for Parquet LIST logical type. - */ - enableListInference?: boolean; - /** - * [Optional] Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default. - */ - enumAsString?: boolean; - }; - /** * An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. 
Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the {@link https://cloud.google.com/iam/docs/| IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation}. */ @@ -2407,10 +2270,6 @@ declare namespace bigquery { * Connection properties. */ connectionProperties?: Array; - /** - * If true, creates a new session, where session id will be a server generated random id. If false, runs query with an existing session_id passed in ConnectionProperty, otherwise runs query in non-session mode. - */ - createSession?: boolean; /** * [Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'. */ @@ -2478,10 +2337,6 @@ declare namespace bigquery { * Whether the query result was fetched from the query cache. */ cacheHit?: boolean; - /** - * [Output-only] Detailed statistics for DML statements Present only for DML statements INSERT, UPDATE, DELETE or TRUNCATE. - */ - dmlStats?: IDmlStatistics; /** * [Output-only] The first errors or warnings encountered during the running of the job. The final message includes the number of errors that caused the process to stop. Errors here do not necessarily mean that the job has completed or was unsuccessful. */ @@ -2514,10 +2369,6 @@ declare namespace bigquery { * The schema of the results. Present only when the query completes successfully. */ schema?: ITableSchema; - /** - * [Output-only] [Preview] Information of the session if this job is part of one. - */ - sessionInfo?: ISessionInfo; /** * The total number of bytes processed for this query. If this query was a dry run, this is the number of bytes that would be processed if the query were run. */ @@ -2618,7 +2469,7 @@ declare namespace bigquery { */ medianAbsoluteError?: number; /** - * R^2 score. This corresponds to r2_score in ML.EVALUATE. + * R^2 score. 
*/ rSquared?: number; }; @@ -2640,11 +2491,11 @@ declare namespace bigquery { */ definitionBody?: string; /** - * Optional. The description of the routine, if defined. + * Optional. [Experimental] The description of the routine if defined. */ description?: string; /** - * Optional. The determinism level of the JavaScript UDF, if defined. + * Optional. [Experimental] The determinism level of the JavaScript UDF if defined. */ determinismLevel?: | 'DETERMINISM_LEVEL_UNSPECIFIED' @@ -2667,11 +2518,7 @@ declare namespace bigquery { */ lastModifiedTime?: string; /** - * Optional. Can be set only if routine_type = "TABLE_VALUED_FUNCTION". If absent, the return table type is inferred from definition_body at query time in each query that references this routine. If present, then the columns in the evaluated table result will be cast to match the column types specificed in return table type, at query time. - */ - returnTableType?: IStandardSqlTableType; - /** - * Optional if language = "SQL"; required otherwise. Cannot be set if routine_type = "TABLE_VALUED_FUNCTION". If absent, the return type is inferred from definition_body at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. For example, for the functions created with the following statements: * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);` * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));` The return_type is `{type_kind: "FLOAT64"}` for `Add` and `Decrement`, and is absent for `Increment` (inferred as FLOAT64 at query time). Suppose the function `Add` is replaced by `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` Then the inferred return type of `Increment` is automatically changed to INT64 at query time, while the return type of `Decrement` remains FLOAT64. + * Optional if language = "SQL"; required otherwise. If absent, the return type is inferred from definition_body at query time in each query that references this routine. If present, then the evaluated result will be cast to the specified returned type at query time. For example, for the functions created with the following statements: * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);` * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));` The return_type is `{type_kind: "FLOAT64"}` for `Add` and `Decrement`, and is absent for `Increment` (inferred as FLOAT64 at query time). Suppose the function `Add` is replaced by `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` Then the inferred return type of `Increment` is automatically changed to INT64 at query time, while the return type of `Decrement` remains FLOAT64. */ returnType?: IStandardSqlDataType; /** @@ -2681,15 +2528,7 @@ declare namespace bigquery { /** * Required. The type of routine. */ - routineType?: - | 'ROUTINE_TYPE_UNSPECIFIED' - | 'SCALAR_FUNCTION' - | 'PROCEDURE' - | 'TABLE_VALUED_FUNCTION'; - /** - * Optional. Can be set for procedures only. If true (default), the definition body will be validated in the creation and the updates of the procedure. For procedures with an argument of ANY TYPE, the definition body validtion is not supported at creation/update time, and thus this field must be set to false explicitly. 
- */ - strictMode?: boolean; + routineType?: 'ROUTINE_TYPE_UNSPECIFIED' | 'SCALAR_FUNCTION' | 'PROCEDURE'; }; type IRoutineReference = { @@ -2811,13 +2650,6 @@ declare namespace bigquery { stackFrames?: Array; }; - type ISessionInfo = { - /** - * [Output-only] // [Preview] Id of the session. - */ - sessionId?: string; - }; - /** * Request message for `SetIamPolicy` method. */ @@ -2834,11 +2666,11 @@ declare namespace bigquery { type ISnapshotDefinition = { /** - * [Required] Reference describing the ID of the table that was snapshot. + * [Required] Reference describing the ID of the table that is snapshotted. */ baseTableReference?: ITableReference; /** - * [Required] The time at which the base table was snapshot. This value is reported in the JSON response using RFC3339 format. + * [Required] The time at which the base table was snapshot. */ snapshotTime?: string; }; @@ -2869,11 +2701,9 @@ declare namespace bigquery { | 'DATE' | 'TIME' | 'DATETIME' - | 'INTERVAL' | 'GEOGRAPHY' | 'NUMERIC' | 'BIGNUMERIC' - | 'JSON' | 'ARRAY' | 'STRUCT'; }; @@ -2894,16 +2724,6 @@ declare namespace bigquery { type IStandardSqlStructType = {fields?: Array}; - /** - * A table type - */ - type IStandardSqlTableType = { - /** - * The columns in this table type - */ - columns?: Array; - }; - type IStreamingbuffer = { /** * [Output-only] A lower-bound estimate of the number of bytes currently in the streaming buffer. @@ -2928,10 +2748,6 @@ declare namespace bigquery { * [Output-only] The time when this table was created, in milliseconds since the epoch. */ creationTime?: string; - /** - * [Output-only] The default collation of the table. - */ - defaultCollation?: string; /** * [Optional] A user-friendly description of this table. */ @@ -3129,10 +2945,6 @@ declare namespace bigquery { */ names?: Array; }; - /** - * Optional. Collation specification of the field. It only can be set on string type field. - */ - collationSpec?: string; /** * [Optional] The field description. The maximum length is 1,024 characters. */ @@ -3141,16 +2953,12 @@ declare namespace bigquery { * [Optional] Describes the nested schema fields if the type property is set to RECORD. */ fields?: Array; - /** - * [Optional] Maximum length of values of this field for STRINGS or BYTES. If max_length is not specified, no maximum length constraint is imposed on this field. If type = "STRING", then max_length represents the maximum UTF-8 length of strings in this field. If type = "BYTES", then max_length represents the maximum number of bytes in this field. It is invalid to set this field if type ≠ "STRING" and ≠ "BYTES". - */ - maxLength?: string; /** * [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE. */ mode?: string; /** - * [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 300 characters. + * [Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters. */ name?: string; policyTags?: { @@ -3160,15 +2968,7 @@ declare namespace bigquery { names?: Array; }; /** - * [Optional] Precision (maximum number of total digits in base 10) and scale (maximum number of digits in the fractional part in base 10) constraints for values of this field for NUMERIC or BIGNUMERIC. 
It is invalid to set precision or scale if type ≠ "NUMERIC" and ≠ "BIGNUMERIC". If precision and scale are not specified, no value range constraint is imposed on this field insofar as values are permitted by the type. Values of this NUMERIC or BIGNUMERIC field must be in this range when: - Precision (P) and scale (S) are specified: [-10P-S + 10-S, 10P-S - 10-S] - Precision (P) is specified but not scale (and thus scale is interpreted to be equal to zero): [-10P + 1, 10P - 1]. Acceptable values for precision and scale if both are specified: - If type = "NUMERIC": 1 ≤ precision - scale ≤ 29 and 0 ≤ scale ≤ 9. - If type = "BIGNUMERIC": 1 ≤ precision - scale ≤ 38 and 0 ≤ scale ≤ 38. Acceptable values for precision if only precision is specified but not scale (and thus scale is interpreted to be equal to zero): - If type = "NUMERIC": 1 ≤ precision ≤ 29. - If type = "BIGNUMERIC": 1 ≤ precision ≤ 38. If scale is specified but not precision, then it is invalid. - */ - precision?: string; - /** - * [Optional] See documentation for precision. - */ - scale?: string; - /** - * [Required] The field data type. Possible values include STRING, BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), NUMERIC, BIGNUMERIC, BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, INTERVAL, RECORD (where RECORD indicates that the field contains a nested schema) or STRUCT (same as RECORD). + * [Required] The field data type. Possible values include STRING, BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD (where RECORD indicates that the field contains a nested schema) or STRUCT (same as RECORD). */ type?: string; }; @@ -3315,14 +3115,7 @@ declare namespace bigquery { type?: string; }; - /** - * Options used in model training. - */ type ITrainingOptions = { - /** - * If true, detect step changes and make data adjustment in the input time series. - */ - adjustStepChanges?: boolean; /** * Whether to enable auto ARIMA or not. */ @@ -3335,10 +3128,6 @@ declare namespace bigquery { * Batch size for dnn models. */ batchSize?: string; - /** - * If true, clean spikes and dips in the input time series. - */ - cleanSpikesAndDips?: boolean; /** * The data frequency of a time series. */ @@ -3350,8 +3139,7 @@ declare namespace bigquery { | 'MONTHLY' | 'WEEKLY' | 'DAILY' - | 'HOURLY' - | 'PER_MINUTE'; + | 'HOURLY'; /** * The column to split data with. This column won't be used as a feature. 1. When data_split_method is CUSTOM, the corresponding column should be boolean. The rows with true value tag are eval data, and the false are training data. 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the corresponding column are used as training data, and the rest are eval data. It respects the order in Orderable data types: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties */ @@ -3370,10 +3158,6 @@ declare namespace bigquery { | 'SEQUENTIAL' | 'NO_SPLIT' | 'AUTO_SPLIT'; - /** - * If true, perform decompose time series and save the results. - */ - decomposeTimeSeries?: boolean; /** * Distance type for clustering models. */ @@ -3543,7 +3327,7 @@ declare namespace bigquery { */ minSplitLoss?: number; /** - * Google Cloud Storage URI from which the model was imported. Only applicable for imported models. + * [Beta] Google Cloud Storage URI from which the model was imported. Only applicable for imported models. 
*/ modelUri?: string; /** @@ -3578,13 +3362,9 @@ declare namespace bigquery { */ timeSeriesDataColumn?: string; /** - * The time series id column that was used during ARIMA model training. + * The id column that will be used to indicate different time series to forecast in parallel. */ timeSeriesIdColumn?: string; - /** - * The time series id columns that were used during ARIMA model training. - */ - timeSeriesIdColumns?: Array; /** * Column to be designated as time series timestamp for ARIMA model. */ @@ -3615,6 +3395,10 @@ declare namespace bigquery { * The evaluation metrics over training/eval data that were computed at the end of training. */ evaluationMetrics?: IEvaluationMetrics; + /** + * Global explanations for important features of the model. For multi-class models, there is one entry for each label class. For other models, there is only one entry in the list. + */ + globalExplanations?: Array; /** * Output of each iteration run, results.size() <= max_iterations. */ @@ -3655,10 +3439,6 @@ declare namespace bigquery { * [Required] A query that BigQuery executes when the view is referenced. */ query?: string; - /** - * True if the column names are explicitly specified. For example by using the 'CREATE VIEW v(c1, c2) AS ...' syntax. Can only be set using BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ - */ - useExplicitColumnNames?: boolean; /** * Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ Queries and views that reference this view must use the same flag value. */ @@ -3714,16 +3494,6 @@ declare namespace bigquery { location?: string; }; - /** - * Requests that a job is deleted. This call will return when the job is deleted. This method is available in limited preview. - */ - type IDeleteParams = { - /** - * The geographic location of the job. Required. See details at: https://cloud.google.com/bigquery/docs/locations#specifying_your_location. - */ - location?: string; - }; - /** * Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role. */ @@ -3801,7 +3571,7 @@ declare namespace bigquery { namespace models { /** - * Lists all models in the specified dataset. Requires the READER dataset role. After retrieving the list of models, you can get information about a particular model by calling the models.get method. + * Lists all models in the specified dataset. Requires the READER dataset role. */ type IListParams = { /** From 314457d273dc90eb06aebca066c971a0441bbd82 Mon Sep 17 00:00:00 2001 From: steffnay Date: Sat, 1 Jan 2022 19:07:07 -0800 Subject: [PATCH 12/13] add deleteJobs() to system-test to delete stale job resources --- system-test/bigquery.ts | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/system-test/bigquery.ts b/system-test/bigquery.ts index f5603276..bd5aebb4 100644 --- a/system-test/bigquery.ts +++ b/system-test/bigquery.ts @@ -107,6 +107,9 @@ describe('BigQuery', () => { // Remove datasets created for the tests. await deleteDatasets(); + // Remove jobs created for the tests. + await deleteJobs(); + // Create the test dataset with a label tagging this as a test run. 
await dataset.create({labels: {[GCLOUD_TESTS_PREFIX]: ''}}); @@ -655,6 +658,7 @@ describe('BigQuery', () => { }, }, location: 'us-east1', + jobPrefix: GCLOUD_TESTS_PREFIX, }; const [job] = await bigquery.createJob(opts); @@ -1764,4 +1768,22 @@ describe('BigQuery', () => { } } } + + async function deleteJobs() { + const oneDayMs = 86400000; + const now = new Date(); + const maxCreationTime = (now.getTime() - oneDayMs).toString(); + const [jobs] = await bigquery.getJobs({maxCreationTime}); + + for (const job of jobs) { + if (job.metadata.id.includes(GCLOUD_TESTS_PREFIX)) { + try { + await job.delete(); + } catch (e) { + console.log(`job(${job.id}).delete() failed`); + console.log(e); + } + } + } + } }); From 54bd2fb7d9af0cd9dceac0de41d3e57f6f9aae90 Mon Sep 17 00:00:00 2001 From: steffnay Date: Mon, 10 Jan 2022 13:12:54 -0800 Subject: [PATCH 13/13] remove unnecessary jobs cleanup --- system-test/bigquery.ts | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/system-test/bigquery.ts b/system-test/bigquery.ts index bd5aebb4..f5603276 100644 --- a/system-test/bigquery.ts +++ b/system-test/bigquery.ts @@ -107,9 +107,6 @@ describe('BigQuery', () => { // Remove datasets created for the tests. await deleteDatasets(); - // Remove jobs created for the tests. - await deleteJobs(); - // Create the test dataset with a label tagging this as a test run. await dataset.create({labels: {[GCLOUD_TESTS_PREFIX]: ''}}); @@ -658,7 +655,6 @@ describe('BigQuery', () => { }, }, location: 'us-east1', - jobPrefix: GCLOUD_TESTS_PREFIX, }; const [job] = await bigquery.createJob(opts); @@ -1768,22 +1764,4 @@ describe('BigQuery', () => { } } } - - async function deleteJobs() { - const oneDayMs = 86400000; - const now = new Date(); - const maxCreationTime = (now.getTime() - oneDayMs).toString(); - const [jobs] = await bigquery.getJobs({maxCreationTime}); - - for (const job of jobs) { - if (job.metadata.id.includes(GCLOUD_TESTS_PREFIX)) { - try { - await job.delete(); - } catch (e) { - console.log(`job(${job.id}).delete() failed`); - console.log(e); - } - } - } - } });
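---

A note on the `qs: { get location() { return location; } }` shape that the method config introduces and keeps through the refactor: defining `location` as a getter means the query string is resolved when a request is actually made, not when the `methods` map is built in the constructor, so a location assigned to the job later is still picked up. The snippet below is a minimal standalone sketch of that behavior (the variable names are hypothetical illustration, not code from the library):

// Minimal sketch of the lazy `location` lookup used in the delete method
// config. Here `location` stands in for the Job's location property, which
// may be assigned after the config object is created.
let location: string | undefined;

const deleteConfig = {
  reqOpts: {
    method: 'DELETE',
    uri: '/delete',
    qs: {
      // The getter re-reads `location` every time the query string is built.
      get location() {
        return location;
      },
    },
  },
};

console.log(deleteConfig.reqOpts.qs.location); // undefined — no location yet
location = 'us-east1';
console.log(deleteConfig.reqOpts.qs.location); // 'us-east1' — picked up lazily

This is the same deferral the hand-written `delete()` body (commented out in the refactor commit and then removed) achieved by computing `qs` from `this.location` inside the method at call time.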
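Relatedly, the unit-test expectation updated in the "update jobs test" commit — `qs: {location: undefined}` — passes against the getter-based config because `assert.deepStrictEqual` compares the values of enumerable own properties rather than their property descriptors: reading the getter on a job with no location yields `undefined`. A small self-contained check of that assumed `assert` behavior (not test code from the repo):

import * as assert from 'assert';

let location: string | undefined; // unset, as in the unit test's Job fixture

const qs = {
  get location() {
    return location;
  },
};

// deepStrictEqual invokes the getter while walking enumerable properties,
// so a plain object with an explicit `undefined` value compares as equal.
assert.deepStrictEqual(qs, {location: undefined});
console.log('getter-backed qs matches {location: undefined}');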