diff --git a/src/commands/functions-delete.ts b/src/commands/functions-delete.ts
index 1d639dfe4a2..3310581f0cf 100644
--- a/src/commands/functions-delete.ts
+++ b/src/commands/functions-delete.ts
@@ -1,16 +1,21 @@
-import { Command } from "../command";
 import * as clc from "cli-color";
 import * as functionsConfig from "../functionsConfig";
-import { deleteFunctions } from "../functionsDelete";
+
+import { Command } from "../command";
+import { FirebaseError } from "../error";
+import { Options } from "../options";
 import { needProjectId } from "../projectUtils";
 import { promptOnce } from "../prompt";
-import * as helper from "../deploy/functions/functionsDeployHelper";
+import { reduceFlat } from "../functional";
 import { requirePermissions } from "../requirePermissions";
-import * as utils from "../utils";
 import * as args from "../deploy/functions/args";
+import * as helper from "../deploy/functions/functionsDeployHelper";
+import * as utils from "../utils";
 import * as backend from "../deploy/functions/backend";
-import { Options } from "../options";
-import { FirebaseError } from "../error";
+import * as planner from "../deploy/functions/release/planner";
+import * as fabricator from "../deploy/functions/release/fabricator";
+import * as executor from "../deploy/functions/release/executor";
+import * as reporter from "../deploy/functions/release/reporter";
 
 export default new Command("functions:delete [filters...]")
   .description("delete one or more Cloud Functions by name or group name.")
@@ -26,72 +31,73 @@ export default new Command("functions:delete [filters...]")
       return utils.reject("Must supply at least function or group name.");
     }
 
-    const context = {
+    const context: args.Context = {
       projectId: needProjectId(options),
-    } as args.Context;
+      filters: filters.map((f) => f.split(".")),
+    };
+
+    const [config, existingBackend] = await Promise.all([
+      functionsConfig.getFirebaseConfig(options),
+      backend.existingBackend(context),
+    ]);
+    await backend.checkAvailability(context, /* want=*/ backend.empty());
+    const appEngineLocation = functionsConfig.getAppEngineLocation(config);
 
-    // Dot notation can be used to indicate function inside of a group
-    const filterChunks = filters.map((filter: string) => {
-      return filter.split(".");
+    if (options.region) {
+      existingBackend.endpoints = { [options.region]: existingBackend.endpoints[options.region] };
+    }
+    const plan = planner.createDeploymentPlan(/* want= */ backend.empty(), existingBackend, {
+      filters: context.filters,
+      deleteAll: true,
     });
+    const allEpToDelete = Object.values(plan)
+      .map((changes) => changes.endpointsToDelete)
+      .reduce(reduceFlat, [])
+      .sort(backend.compareFunctions);
+    if (allEpToDelete.length === 0) {
+      throw new FirebaseError(
+        `The specified filters do not match any existing functions in project ${clc.bold(
+          context.projectId
+        )}.`
+      );
+    }
 
-    try {
-      const [config, existingBackend] = await Promise.all([
-        functionsConfig.getFirebaseConfig(options),
-        backend.existingBackend(context),
-      ]);
-      await backend.checkAvailability(context, /* want=*/ backend.empty());
-      const appEngineLocation = functionsConfig.getAppEngineLocation(config);
+    const deleteList = allEpToDelete.map((func) => `\t${helper.getFunctionLabel(func)}`).join("\n");
+    const confirmDeletion = await promptOnce(
+      {
+        type: "confirm",
+        name: "force",
+        default: false,
+        message:
+          "You are about to delete the following Cloud Functions:\n" +
+          deleteList +
+          "\n Are you sure?",
+      },
+      options
+    );
+    if (!confirmDeletion) {
+      throw new FirebaseError("Command aborted.");
FirebaseError("Command aborted."); + } - const functionsToDelete = existingBackend.cloudFunctions.filter((fn) => { - const regionMatches = options.region ? fn.region === options.region : true; - const nameMatches = helper.functionMatchesAnyGroup(fn, filterChunks); - return regionMatches && nameMatches; - }); - if (functionsToDelete.length === 0) { - throw new Error( - `The specified filters do not match any existing functions in project ${clc.bold( - context.projectId - )}.` - ); - } + const functionExecutor: executor.QueueExecutor = new executor.QueueExecutor({ + retries: 30, + backoff: 20000, + concurrency: 40, + maxBackoff: 40000, + }); - const schedulesToDelete = existingBackend.schedules.filter((schedule) => { - functionsToDelete.some(backend.sameFunctionName(schedule.targetService)); - }); - const topicsToDelete = existingBackend.topics.filter((topic) => { - functionsToDelete.some(backend.sameFunctionName(topic.targetService)); + try { + const fab = new fabricator.Fabricator({ + functionExecutor, + executor: new executor.QueueExecutor({}), + appEngineLocation, }); - - const deleteList = functionsToDelete - .map((func) => { - return "\t" + helper.getFunctionLabel(func); - }) - .join("\n"); - const confirmDeletion = await promptOnce( - { - type: "confirm", - name: "force", - default: false, - message: - "You are about to delete the following Cloud Functions:\n" + - deleteList + - "\n Are you sure?", - }, - options - ); - if (!confirmDeletion) { - throw new Error("Command aborted."); - } - return await deleteFunctions( - functionsToDelete, - schedulesToDelete, - topicsToDelete, - appEngineLocation - ); + const summary = await fab.applyPlan(plan); + await reporter.logAndTrackDeployStats(summary); + reporter.printErrors(summary); } catch (err) { throw new FirebaseError("Failed to delete functions", { - original: err, + original: err as Error, exit: 1, }); } diff --git a/src/commands/functions-list.ts b/src/commands/functions-list.ts index 921df7d9812..929d40804bb 100644 --- a/src/commands/functions-list.ts +++ b/src/commands/functions-list.ts @@ -5,7 +5,6 @@ import { needProjectId } from "../projectUtils"; import { Options } from "../options"; import { requirePermissions } from "../requirePermissions"; import * as backend from "../deploy/functions/backend"; -import { listFunctions } from "../functions/listFunctions"; import { previews } from "../previews"; import { logger } from "../logger"; import Table = require("cli-table"); @@ -18,7 +17,8 @@ export default new Command("functions:list") const context = { projectId: needProjectId(options), } as args.Context; - const functionList = await listFunctions(context); + const existing = await backend.existingBackend(context); + const endpointsList = backend.allEndpoints(existing).sort(backend.compareFunctions); const table = previews.functionsv2 ? new Table({ head: ["Function", "Version", "Trigger", "Location", "Memory", "Runtime"], @@ -28,23 +28,23 @@ export default new Command("functions:list") head: ["Function", "Trigger", "Location", "Memory", "Runtime"], style: { head: ["yellow"] }, }); - for (const fnSpec of functionList.functions) { - const trigger = backend.isEventTrigger(fnSpec.trigger) ? fnSpec.trigger.eventType : "https"; - const availableMemoryMb = fnSpec.availableMemoryMb || "---"; + for (const endpoint of endpointsList) { + const trigger = backend.endpointTriggerType(endpoint); + const availableMemoryMb = endpoint.availableMemoryMb || "---"; const entry = previews.functionsv2 ? 
-            fnSpec.entryPoint,
+            endpoint.id,
-            fnSpec.platform === "gcfv2" ? "v2" : "v1",
+            endpoint.platform === "gcfv2" ? "v2" : "v1",
             trigger,
-            fnSpec.region,
+            endpoint.region,
             availableMemoryMb,
-            fnSpec.runtime,
+            endpoint.runtime,
           ]
-        : [fnSpec.entryPoint, trigger, fnSpec.region, availableMemoryMb, fnSpec.runtime];
+        : [endpoint.id, trigger, endpoint.region, availableMemoryMb, endpoint.runtime];
       table.push(entry);
     }
     logger.info(table.toString());
-    return functionList;
+    return endpointsList;
   } catch (err) {
     throw new FirebaseError("Failed to list functions", {
       exit: 1,
diff --git a/src/deploy/functions/backend.ts b/src/deploy/functions/backend.ts
index c0d40df4d88..1d4fda39f3a 100644
--- a/src/deploy/functions/backend.ts
+++ b/src/deploy/functions/backend.ts
@@ -6,7 +6,6 @@ import * as runtimes from "./runtimes";
 import { FirebaseError } from "../../error";
 import { Context } from "./args";
 import { previews } from "../../previews";
-import { backendFromV1Alpha1 } from "./runtimes/discovery/v1alpha1";
 
 /** Retry settings for a ScheduleSpec. */
 export interface ScheduleRetryConfig {
@@ -17,16 +16,6 @@ export interface ScheduleRetryConfig {
   maxDoublings?: number;
 }
 
-/** API agnostic version of a Pub/Sub topic. */
-export interface PubSubSpec {
-  id: string;
-  project: string;
-  labels?: Record<string, string>;
-
-  // What we're actually planning to invoke with this topic
-  targetService: TargetIds;
-}
-
 export interface ScheduleTrigger {
   // Note: schedule is missing in the existingBackend because we
   // don't actually spend the API call looking up the schedule;
@@ -36,16 +25,6 @@ export interface ScheduleTrigger {
   retryConfig?: ScheduleRetryConfig;
 }
 
-/** API agnostic version of a CloudScheduler Job */
-export interface ScheduleSpec extends ScheduleTrigger {
-  id: string;
-  project: string;
-  transport: "pubsub" | "https";
-
-  // What we're actually planning to invoke with this schedule
-  targetService: TargetIds;
-}
-
 /** Something that has a ScheduleTrigger */
 export interface ScheduleTriggered {
   scheduleTrigger: ScheduleTrigger;
 }
@@ -110,32 +89,17 @@ export interface EventTriggered {
   eventTrigger: EventTrigger;
 }
 
-/** Type deduction helper for a function trigger. */
-export function isEventTrigger(trigger: HttpsTrigger | EventTrigger): trigger is EventTrigger {
-  return "eventType" in trigger;
-}
-
-/** Friendly name to label a function in stats */
-export function triggerTag(fn: FunctionSpec): string {
-  if (fn.labels?.["deployment-scheduled"]) {
-    if (fn.platform === "gcfv1") {
-      return "v1.scheduled";
-    }
-    return "v2.scheduled";
-  }
-  if (fn.labels?.["deployment-callable"]) {
-    if (fn.platform === "gcfv1") {
-      return "v1.callable";
-    }
-    return "v2.callable";
-  }
-  if (!isEventTrigger(fn.trigger)) {
-    if (fn.platform === "gcfv1") {
-      return "v1.https";
-    }
-    return "v2.https";
+/** A user-friendly string for the kind of trigger of an endpoint. */
+export function endpointTriggerType(endpoint: Endpoint): string {
+  if (isScheduleTriggered(endpoint)) {
+    return "scheduled";
+  } else if (isHttpsTriggered(endpoint)) {
+    return "https";
+  } else if (isEventTriggered(endpoint)) {
+    return endpoint.eventTrigger.eventType;
+  } else {
+    throw new Error("Unexpected trigger type for endpoint " + JSON.stringify(endpoint));
   }
-  return fn.trigger.eventType;
 }
 
 // TODO(inlined): Enum types should be singularly named
@@ -188,22 +152,6 @@ export interface ServiceConfiguration {
   serviceAccountEmail?: "default" | string;
 }
 
-/** An API agnostic definition of a Cloud Function. */
-export type FunctionSpec = TargetIds &
-  ServiceConfiguration & {
-    entryPoint: string;
-    platform: FunctionsPlatform;
-    runtime: runtimes.Runtime | runtimes.DeprecatedRuntime;
-    trigger: EventTrigger | HttpsTrigger;
-
-    // Output only
-
-    // URI is available on GCFv1 for HTTPS triggers and
-    // on GCFv2 always
-    uri?: string;
-    sourceUploadUrl?: string;
-  };
-
 export type FunctionsPlatform = "gcfv1" | "gcfv2";
 
 export type Triggered = HttpsTriggered | EventTriggered | ScheduleTriggered;
@@ -252,11 +200,9 @@ export interface Backend {
    * E.g. "scheduler" => "cloudscheduler.googleapis.com"
    */
   requiredAPIs: Record<string, string>;
-  cloudFunctions: FunctionSpec[];
-  schedules: ScheduleSpec[];
-  topics: PubSubSpec[];
   environmentVariables: EnvironmentVariables;
-  endpoints: Endpoint[];
+  // region -> id -> Endpoint
+  endpoints: Record<string, Record<string, Endpoint>>;
 }
 
 /**
@@ -267,14 +213,27 @@ export interface Backend {
 export function empty(): Backend {
   return {
     requiredAPIs: {},
-    endpoints: [],
-    cloudFunctions: [],
-    schedules: [],
-    topics: [],
+    endpoints: {},
     environmentVariables: {},
   };
 }
 
+/**
+ * A helper utility to create a backend from a list of endpoints.
+ * Useful in unit tests.
+ */
+export function of(...endpoints: Endpoint[]): Backend {
+  const bkend = { ...empty() };
+  for (const endpoint of endpoints) {
+    bkend.endpoints[endpoint.region] = bkend.endpoints[endpoint.region] || {};
+    if (bkend.endpoints[endpoint.region][endpoint.id]) {
+      throw new Error("Trying to create a backend with the same endpoint twice");
+    }
+    bkend.endpoints[endpoint.region][endpoint.id] = endpoint;
+  }
+  return bkend;
+}
+
 /**
  * A helper utility to test whether a backend is empty.
 * Consumers should use this before assuming a backend is empty (e.g. nooping
@@ -282,10 +241,7 @@ export function empty(): Backend {
 */
 export function isEmptyBackend(backend: Backend): boolean {
   return (
-    Object.keys(backend.requiredAPIs).length == 0 &&
-    backend.cloudFunctions.length === 0 &&
-    backend.schedules.length === 0 &&
-    backend.topics.length === 0
+    Object.keys(backend.requiredAPIs).length == 0 && Object.keys(backend.endpoints).length === 0
   );
 }
@@ -309,32 +265,6 @@ export function functionName(cloudFunction: TargetIds): string {
   return `projects/${cloudFunction.project}/locations/${cloudFunction.region}/functions/${cloudFunction.id}`;
 }
 
-/**
- * Creates a matcher function that detects whether two functions match.
- * This is useful for list comprehensions, e.g.
- * const newFunctions = wantFunctions.filter(fn => !haveFunctions.some(sameFunctionName(fn)));
- */
-export const sameFunctionName = (func: TargetIds) => (test: TargetIds): boolean => {
-  return func.id === test.id && func.region === test.region && func.project == test.project;
-};
-
-/**
- * Gets the formal resource name for a Cloud Scheduler job.
- * @param appEngineLocation Must be the region where the customer has enabled App Engine.
- */
-export function scheduleName(schedule: ScheduleSpec, appEngineLocation: string): string {
-  return `projects/${schedule.project}/locations/${appEngineLocation}/jobs/${schedule.id}`;
-}
-
-/**
- * Gets the formal resource name for a Pub/Sub topic.
- * @param topic Something that implements project/id. This is intentionally vauge so
- * that a schedule can be passed and the topic name generated.
- */
-export function topicName(topic: { project: string; id: string }): string {
-  return `projects/${topic.project}/topics/${topic.id}`;
-}
-
 /**
  * The naming pattern used to create a Pub/Sub Topic or Scheduler Job ID for a given scheduled function.
 * This pattern is hard-coded and assumed throughout tooling, both in the Firebase Console and in the CLI.
@@ -394,32 +324,10 @@ async function loadExistingBackend(ctx: Context & PrivateContextFields): Promise<void> {
   };
   const gcfV1Results = await gcf.listAllFunctions(ctx.projectId);
   for (const apiFunction of gcfV1Results.functions) {
-    const specFunction = gcf.specFromFunction(apiFunction);
-    ctx.existingBackend.cloudFunctions.push(specFunction);
-    const isScheduled = apiFunction.labels?.["deployment-scheduled"] === "true";
-    if (isScheduled) {
-      const id = scheduleIdForFunction(specFunction);
-      ctx.existingBackend.schedules.push({
-        id,
-        project: specFunction.project,
-        transport: "pubsub",
-        targetService: {
-          id: specFunction.id,
-          region: specFunction.region,
-          project: specFunction.project,
-        },
-      });
-      ctx.existingBackend.topics.push({
-        id,
-        project: specFunction.project,
-        labels: SCHEDULED_FUNCTION_LABEL,
-        targetService: {
-          id: specFunction.id,
-          region: specFunction.region,
-          project: specFunction.project,
-        },
-      });
-    }
+    const endpoint = gcf.endpointFromFunction(apiFunction);
+    ctx.existingBackend.endpoints[endpoint.region] =
+      ctx.existingBackend.endpoints[endpoint.region] || {};
+    ctx.existingBackend.endpoints[endpoint.region][endpoint.id] = endpoint;
   }
   ctx.unreachableRegions.gcfV1 = gcfV1Results.unreachable;
@@ -437,46 +345,10 @@ async function loadExistingBackend(ctx: Context & PrivateContextFields): Promise<void> {
       throw err;
     }
     for (const apiFunction of gcfV2Results.functions) {
-      const specFunction = gcfV2.specFromFunction(apiFunction);
-      ctx.existingBackend.cloudFunctions.push(specFunction);
-      const pubsubScheduled = apiFunction.labels?.["deployment-scheduled"] === "true";
-      const httpsScheduled = apiFunction.labels?.["deployment-scheduled"] === "https";
-      if (pubsubScheduled) {
-        const id = scheduleIdForFunction(specFunction);
-        ctx.existingBackend.schedules.push({
-          id,
-          project: specFunction.project,
-          transport: "pubsub",
-          targetService: {
-            id: specFunction.id,
-            region: specFunction.region,
-            project: specFunction.project,
-          },
-        });
-        ctx.existingBackend.topics.push({
-          id,
-          project: specFunction.project,
-          labels: SCHEDULED_FUNCTION_LABEL,
-          targetService: {
-            id: specFunction.id,
-            region: specFunction.region,
-            project: specFunction.project,
-          },
-        });
-      }
-      if (httpsScheduled) {
-        const id = scheduleIdForFunction(specFunction);
-        ctx.existingBackend.schedules.push({
-          id,
-          project: specFunction.project,
-          transport: "https",
-          targetService: {
-            id: specFunction.id,
-            region: specFunction.region,
-            project: specFunction.project,
-          },
-        });
-      }
+      const endpoint = gcfV2.endpointFromFunction(apiFunction);
+      ctx.existingBackend.endpoints[endpoint.region] =
+        ctx.existingBackend.endpoints[endpoint.region] || {};
+      ctx.existingBackend.endpoints[endpoint.region][endpoint.id] = endpoint;
     }
     ctx.unreachableRegions.gcfV2 = gcfV2Results.unreachable;
   }
@@ -496,11 +368,11 @@ export async function checkAvailability(context: Context, want: Backend): Promise<void> {
   }
   const gcfV1Regions = new Set();
   const gcfV2Regions = new Set();
-  for (const fn of want.cloudFunctions) {
-    if (fn.platform == "gcfv1") {
-      gcfV1Regions.add(fn.region);
+  for (const ep of allEndpoints(want)) {
+    if (ep.platform == "gcfv1") {
+      gcfV1Regions.add(ep.region);
     } else {
-      gcfV2Regions.add(fn.region);
+      gcfV2Regions.add(ep.region);
     }
   }
@@ -545,10 +417,67 @@ export async function checkAvailability(context: Context, want: Backend): Promise<void> {
   }
 }
 
-// To be a bit more deterministic, print function lists in a prescribed order.
-// Future versions might want to compare regions by GCF/Run pricing tier before
-// location.
-export function compareFunctions(left: FunctionSpec, right: FunctionSpec): number {
+/** A helper utility for flattening all endpoints in a backend since typing is a bit wonky. */
+export function allEndpoints(backend: Backend): Endpoint[] {
+  return Object.values(backend.endpoints).reduce((accum, perRegion) => {
+    return [...accum, ...Object.values(perRegion)];
+  }, [] as Endpoint[]);
+}
+
+/** A helper utility for checking whether an endpoint matches a predicate. */
+export function someEndpoint(
+  backend: Backend,
+  predicate: (endpoint: Endpoint) => boolean
+): boolean {
+  for (const endpoints of Object.values(backend.endpoints)) {
+    if (Object.values(endpoints).some(predicate)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+/** A helper utility function that returns a subset of the backend that includes only matching endpoints */
+export function matchingBackend(
+  backend: Backend,
+  predicate: (endpoint: Endpoint) => boolean
+): Backend {
+  const filtered: Backend = {
+    ...empty(),
+  };
+  for (const endpoint of allEndpoints(backend)) {
+    if (!predicate(endpoint)) {
+      continue;
+    }
+    filtered.endpoints[endpoint.region] = filtered.endpoints[endpoint.region] || {};
+    filtered.endpoints[endpoint.region][endpoint.id] = endpoint;
+  }
+  return filtered;
+}
+
+/** A helper utility for flattening all endpoints in a region since typing is a bit wonky. */
+export function regionalEndpoints(backend: Backend, region: string): Endpoint[] {
+  return backend.endpoints[region] ? Object.values(backend.endpoints[region]) : [];
+}
+
+/** A curried function used for filters, returns a matcher for functions in a backend. */
+export const hasEndpoint = (backend: Backend) => (endpoint: Endpoint): boolean => {
+  return !!backend.endpoints[endpoint.region] && !!backend.endpoints[endpoint.region][endpoint.id];
+};
+
+/** A curried function that is the opposite of hasEndpoint */
+export const missingEndpoint = (backend: Backend) => (endpoint: Endpoint): boolean => {
+  return !hasEndpoint(backend)(endpoint);
+};
+
+/** A standard method for sorting endpoints for display.
+ * Future versions might consider sorting region by pricing tier before
+ * alphabetically
+ */
+export function compareFunctions(
+  left: TargetIds & { platform: FunctionsPlatform },
+  right: TargetIds & { platform: FunctionsPlatform }
+): number {
   if (left.platform != right.platform) {
     return right.platform < left.platform ? -1 : 1;
   }
diff --git a/src/deploy/functions/checkIam.ts b/src/deploy/functions/checkIam.ts
index e4eaaafc48e..f58a17ac5b3 100644
--- a/src/deploy/functions/checkIam.ts
+++ b/src/deploy/functions/checkIam.ts
@@ -57,25 +57,23 @@ export async function checkHttpIam(
   options: Options,
   payload: args.Payload
 ): Promise<void> {
-  const functions = payload.functions!.backend.cloudFunctions;
   const filterGroups = context.filters || getFilterGroups(options);
-  const httpFunctions = functions
-    .filter((f) => !backend.isEventTrigger(f.trigger))
+  const httpEndpoints = backend
+    .allEndpoints(payload.functions!.backend)
+    .filter(backend.isHttpsTriggered)
     .filter((f) => functionMatchesAnyGroup(f, filterGroups));
-  const existingFunctions = (await backend.existingBackend(context)).cloudFunctions;
-  const newHttpFunctions = httpFunctions.filter(
-    (func) => !existingFunctions.find(backend.sameFunctionName(func))
-  );
+  const existing = await backend.existingBackend(context);
+  const newHttpsEndpoints = httpEndpoints.filter(backend.missingEndpoint(existing));
 
-  if (newHttpFunctions.length === 0) {
+  if (newHttpsEndpoints.length === 0) {
     return;
   }
 
   logger.debug(
     "[functions] found",
-    newHttpFunctions.length,
+    newHttpsEndpoints.length,
     "new HTTP functions, testing setIamPolicy permission..."
   );
 
@@ -100,7 +98,7 @@ export async function checkHttpIam(
       )} to deploy new HTTPS functions. The permission ${bold(
         PERMISSION
       )} is required to deploy the following functions:\n\n- ` +
-      newHttpFunctions.map((func) => func.id).join("\n- ") +
+      newHttpsEndpoints.map((func) => func.id).join("\n- ") +
       `\n\nTo address this error, please ask a project Owner to assign your account the "Cloud Functions Admin" role at the following URL:\n\nhttps://console.cloud.google.com/iam-admin/iam?project=${context.projectId}`
   );
 }
diff --git a/src/deploy/functions/containerCleaner.ts b/src/deploy/functions/containerCleaner.ts
index 5785b1b76b0..8fb878951c4 100644
--- a/src/deploy/functions/containerCleaner.ts
+++ b/src/deploy/functions/containerCleaner.ts
@@ -64,7 +64,7 @@ async function retry<Return>(func: () => Promise<Return>): Promise<Return> {
   }
 }
 
-export async function cleanupBuildImages(functions: backend.FunctionSpec[]): Promise<void> {
+export async function cleanupBuildImages(functions: backend.TargetIds[]): Promise<void> {
   utils.logBullet(clc.bold.cyan("functions: ") + "cleaning up build files...");
   const gcrCleaner = new ContainerRegistryCleaner();
   const failedDomains: Set<string> = new Set();
@@ -123,7 +123,7 @@ export class ContainerRegistryCleaner {
   // The underlying Helper's caching should make this expensive for
   // the first function and free for the next functions in the same
   // region.
-  async cleanupFunction(func: backend.FunctionSpec): Promise<void> {
+  async cleanupFunction(func: backend.TargetIds): Promise<void> {
     const helper = this.helper(func.region);
     const uuids = (await helper.ls(`${func.project}/gcf/${func.region}`)).children;
@@ -295,7 +295,7 @@ export class DockerHelper {
   // While we can't guarantee all promises will succeed, we can do our darndest
   // to expunge as much as possible before throwing.
   async rm(path: string): Promise<void> {
-    let toThrowLater: any = undefined;
+    let toThrowLater: unknown = undefined;
     const stat = await this.ls(path);
     const recursive = stat.children.map((child) =>
       (async () => {
diff --git a/src/deploy/functions/deploy.ts b/src/deploy/functions/deploy.ts
index 8d4c7bd1265..5604dad7662 100644
--- a/src/deploy/functions/deploy.ts
+++ b/src/deploy/functions/deploy.ts
@@ -11,6 +11,7 @@ import * as gcs from "../../gcp/storage";
 import * as gcf from "../../gcp/cloudfunctions";
 import * as gcfv2 from "../../gcp/cloudfunctionsv2";
 import * as utils from "../../utils";
+import * as backend from "./backend";
 
 const GCP_REGION = functionsUploadRegion;
 
@@ -62,21 +63,15 @@ export async function deploy(
   try {
     const want = payload.functions!.backend;
     const uploads: Promise<void>[] = [];
-    if (want.cloudFunctions.some((fn) => fn.platform === "gcfv1")) {
+    if (backend.allEndpoints(want).some((endpoint) => endpoint.platform === "gcfv1")) {
       uploads.push(uploadSourceV1(context));
     }
-    if (want.cloudFunctions.some((fn) => fn.platform === "gcfv2")) {
+
+    for (const region of Object.keys(want.endpoints)) {
       // GCFv2 cares about data residency and will possibly block deploys coming from other
       // regions. At minimum, the implementation would consider it user-owned source and
       // would break download URLs + console source viewing.
-      const functions = payload.functions!.backend.cloudFunctions;
-      const regions: string[] = [];
-      for (const func of functions) {
-        if (func.platform === "gcfv2" && -1 === regions.indexOf(func.region)) {
-          regions.push(func.region);
-        }
-      }
-      for (const region of regions) {
+      if (backend.regionalEndpoints(want, region).some((e) => e.platform === "gcfv2")) {
         uploads.push(uploadSourceV2(context, region));
       }
     }
diff --git a/src/deploy/functions/deploymentPlanner.ts b/src/deploy/functions/deploymentPlanner.ts
deleted file mode 100644
index 2e29dd8482f..00000000000
--- a/src/deploy/functions/deploymentPlanner.ts
+++ /dev/null
@@ -1,185 +0,0 @@
-import { functionMatchesAnyGroup } from "./functionsDeployHelper";
-import { checkForInvalidChangeOfTrigger } from "./validate";
-import { isFirebaseManaged } from "../../deploymentTool";
-import { logLabeledBullet } from "../../utils";
-import * as backend from "./backend";
-import * as gcfv2 from "../../gcp/cloudfunctionsv2";
-
-export interface RegionalFunctionChanges {
-  functionsToCreate: backend.FunctionSpec[];
-  functionsToUpdate: {
-    func: backend.FunctionSpec;
-    deleteAndRecreate: boolean;
-  }[];
-  functionsToDelete: backend.FunctionSpec[];
-}
-
-export interface DeploymentPlan {
-  regionalDeployments: Record<string, RegionalFunctionChanges>;
-  schedulesToUpsert: backend.ScheduleSpec[];
-  schedulesToDelete: backend.ScheduleSpec[];
-
-  // NOTE(inlined):
-  // Topics aren't created yet explicitly because the Functions API creates them
-  // automatically. This may change in GCFv2 and would certainly change in Run,
-  // so we should be ready to start creating topics before schedules or functions.
-  // OTOH, we could just say that schedules targeting Pub/Sub are just a v1 thing
-  // and save ourselves the topic management in GCFv2 or Run.
-  topicsToDelete: backend.PubSubSpec[];
-}
-
-// export for testing
-export function functionsByRegion(
-  allFunctions: backend.FunctionSpec[]
-): Record<string, backend.FunctionSpec[]> {
-  const partitioned: Record<string, backend.FunctionSpec[]> = {};
-  for (const fn of allFunctions) {
-    partitioned[fn.region] = partitioned[fn.region] || [];
-    partitioned[fn.region].push(fn);
-  }
-  return partitioned;
-}
-
-export function allRegions(
-  spec: Record<string, backend.FunctionSpec[]>,
-  existing: Record<string, backend.FunctionSpec[]>
-): string[] {
-  return Object.keys({ ...spec, ...existing });
-}
-
-const matchesId = (hasId: { id: string }) => (test: { id: string }) => {
-  return hasId.id === test.id;
-};
-
-// export for testing
-// Assumes we don't have cross-project functions and that, per function name, functions exist
-// in the same region.
-export function calculateRegionalFunctionChanges(
-  want: backend.FunctionSpec[],
-  have: backend.FunctionSpec[],
-  options: {
-    filters: string[][];
-    overwriteEnvs?: boolean;
-  }
-): RegionalFunctionChanges {
-  want = want.filter((fn) => functionMatchesAnyGroup(fn, options.filters));
-  have = have.filter((fn) => functionMatchesAnyGroup(fn, options.filters));
-  let upgradedToGCFv2WithoutSettingConcurrency = false;
-
-  const functionsToCreate = want.filter((fn) => !have.some(matchesId(fn)));
-  const functionsToUpdate = want
-    .filter((fn) => {
-      const haveFn = have.find(matchesId(fn));
-      if (!haveFn) {
-        return false;
-      }
-
-      checkForInvalidChangeOfTrigger(fn, haveFn);
-
-      if (!options.overwriteEnvs) {
-        // Remember old environment variables that might have been set with gcloud or the cloud console.
-        fn.environmentVariables = {
-          ...haveFn.environmentVariables,
-          ...fn.environmentVariables,
-        };
-      }
-
-      if (haveFn.platform === "gcfv1" && fn.platform === "gcfv2" && !fn.concurrency) {
-        upgradedToGCFv2WithoutSettingConcurrency = true;
-      }
-      return true;
-    })
-    .map((fn) => {
-      const haveFn = have.find(matchesId(fn));
-      const deleteAndRecreate = needsDeleteAndRecreate(haveFn!, fn);
-      return {
-        func: fn,
-        deleteAndRecreate,
-      };
-    });
-  const functionsToDelete = have
-    .filter((fn) => !want.some(matchesId(fn)))
-    .filter((fn) => isFirebaseManaged(fn.labels || {}));
-
-  if (upgradedToGCFv2WithoutSettingConcurrency) {
-    logLabeledBullet(
-      "functions",
-      "You are updating one or more functions to Google Cloud Functions v2, " +
-        "which introduces support for concurrent execution. New functions " +
-        "default to 80 concurrent executions, but existing functions keep the " +
-        "old default of 1. You can change this with the 'concurrency' option."
-    );
-  }
-  return { functionsToCreate, functionsToUpdate, functionsToDelete };
-}
-
-/**
- * Create a plan for deploying all functions in one region.
- * @param region The region of this deployment
- * @param loclFunctionsByRegion The functions present in the code currently being deployed.
- * @param existingFunctionNames The names of all functions that already exist.
- * @param existingScheduledFunctionNames The names of all schedules functions that already exist.
- * @param filters The filters, passed in by the user via `--only functions:`
- */
-export function createDeploymentPlan(
-  want: backend.Backend,
-  have: backend.Backend,
-  options: {
-    filters: string[][];
-    overwriteEnvs?: boolean;
-  }
-): DeploymentPlan {
-  const deployment: DeploymentPlan = {
-    regionalDeployments: {},
-    schedulesToUpsert: [],
-    schedulesToDelete: [],
-    topicsToDelete: [],
-  };
-
-  const wantRegionalFunctions = functionsByRegion(want.cloudFunctions);
-  const haveRegionalFunctions = functionsByRegion(have.cloudFunctions);
-  for (const region of allRegions(wantRegionalFunctions, haveRegionalFunctions)) {
-    const want = wantRegionalFunctions[region] || [];
-    const have = haveRegionalFunctions[region] || [];
-    deployment.regionalDeployments[region] = calculateRegionalFunctionChanges(want, have, options);
-  }
-
-  deployment.schedulesToUpsert = want.schedules.filter((schedule) =>
-    functionMatchesAnyGroup(schedule.targetService, options.filters)
-  );
-  deployment.schedulesToDelete = have.schedules
-    .filter((schedule) => !want.schedules.some(matchesId(schedule)))
-    .filter((schedule) => functionMatchesAnyGroup(schedule.targetService, options.filters));
-  deployment.topicsToDelete = have.topics
-    .filter((topic) => !want.topics.some(matchesId(topic)))
-    .filter((topic) => functionMatchesAnyGroup(topic.targetService, options.filters));
-
-  return deployment;
-}
-
-function needsDeleteAndRecreate(exFn: backend.FunctionSpec, fn: backend.FunctionSpec): boolean {
-  return changedV2PubSubTopic(exFn, fn);
-  // TODO: is scheduled function upgrading from v1 to v2
-}
-
-function changedV2PubSubTopic(exFn: backend.FunctionSpec, fn: backend.FunctionSpec): boolean {
-  if (exFn.platform !== "gcfv2") {
-    return false;
-  }
-  if (fn.platform !== "gcfv2") {
-    return false;
-  }
-  if (!backend.isEventTrigger(exFn.trigger)) {
-    return false;
-  }
-  if (!backend.isEventTrigger(fn.trigger)) {
-    return false;
-  }
-  if (exFn.trigger.eventType !== gcfv2.PUBSUB_PUBLISH_EVENT) {
-    return false;
-  }
-  if (fn.trigger.eventType != gcfv2.PUBSUB_PUBLISH_EVENT) {
-    return false;
-  }
-  return exFn.trigger.eventFilters["resource"] != fn.trigger.eventFilters["resource"];
-}
diff --git a/src/deploy/functions/deploymentTimer.ts b/src/deploy/functions/deploymentTimer.ts
deleted file mode 100644
index 37f81463f2f..00000000000
--- a/src/deploy/functions/deploymentTimer.ts
+++ /dev/null
@@ -1,32 +0,0 @@
-import { logger } from "../../logger";
-import * as track from "../../track";
-
-interface Timing {
-  type?: string;
-  t0?: [number, number]; // [seconds, nanos]
-}
-
-export class DeploymentTimer {
-  timings: { [name: string]: Timing } = {};
-
-  startTimer(name: string, type: string) {
-    this.timings[name] = { type: type, t0: process.hrtime() };
-  }
-
-  endTimer(name: string): number {
-    if (!this.timings[name]) {
-      logger.debug("[functions] no timer initialized for", name);
-      return 0;
-    }
-
-    // hrtime returns a duration as an array of [seconds, nanos]
-    const duration = process.hrtime(this.timings[name].t0);
-    track(
-      "Functions Deploy (Duration)",
-      this.timings[name].type,
-      duration[0] * 1000 + Math.round(duration[1] * 1e-6)
-    );
-
-    return duration[0] * 1000 * Math.round(duration[1] * 1e-6);
-  }
-}
diff --git a/src/deploy/functions/errorHandler.ts b/src/deploy/functions/errorHandler.ts
deleted file mode 100644
index 5cda7f81a73..00000000000
--- a/src/deploy/functions/errorHandler.ts
+++ /dev/null
@@ -1,100 +0,0 @@
-import * as clc from "cli-color";
-
-import { logger } from "../../logger";
-import { getFunctionId, getFunctionLabel } from "./functionsDeployHelper";
-import { FirebaseError } from "../../error";
-import { OperationType } from "./tasks";
-
-type Level = "error" | "warning";
-
-interface ErrorInfo {
-  functionName: string;
-  operationType: OperationType;
-  message: string;
-}
-
-export class ErrorHandler {
-  errors: ErrorInfo[] = [];
-  warnings: ErrorInfo[] = [];
-
-  record(level: Level, functionName: string, operationType: OperationType, message: string): void {
-    const info: ErrorInfo = {
-      functionName,
-      operationType,
-      message,
-    };
-    if (level === "error") {
-      this.errors.push(info);
-    } else if (level === "warning") {
-      this.warnings.push(info);
-    }
-  }
-
-  printErrors() {
-    if (this.errors.length === 0) {
-      return;
-    }
-    logger.info("");
-    logger.info("Functions deploy had errors with the following functions:");
-    for (const failedDeployment of this.errors) {
-      logger.info(`\t${getFunctionLabel(failedDeployment.functionName)}`);
-    }
-
-    const failedIamCalls = this.errors.filter((e) => e.operationType === "set invoker");
-    if (failedIamCalls.length) {
-      logger.info("");
-      logger.info("Unable to set the invoker for the IAM policy on the following functions:");
-      for (const failedDep of failedIamCalls) {
-        logger.info(`\t${failedDep.functionName}`);
-      }
-      logger.info("");
-      logger.info("Some common causes of this:");
-      logger.info("");
-      logger.info(
-        "- You may not have the roles/functions.admin IAM role. Note that roles/functions.developer does not allow you to change IAM policies."
-      );
-      logger.info("");
-      logger.info("- An organization policy that restricts Network Access on your project.");
-    }
-
-    logger.info("");
-    logger.info("To try redeploying those functions, run:");
-    logger.info(
-      " " +
-        clc.bold("firebase deploy --only ") +
-        clc.bold('"') +
-        clc.bold(
-          this.errors
-            .map(
-              (failedDeployment) =>
-                `functions:${getFunctionId(failedDeployment.functionName).replace(/-/g, ".")}`
-            )
-            .join(",")
-        ) +
-        clc.bold('"')
-    );
-    logger.info("");
-    logger.info("To continue deploying other features (such as database), run:");
-    logger.info(" " + clc.bold("firebase deploy --except functions"));
-    // Print all the original messages at debug level.
-    for (const failedDeployment of this.errors) {
-      logger.debug(
-        `\tError during ${failedDeployment.operationType} for ${failedDeployment.functionName}: ${failedDeployment.message}`
-      );
-    }
-    throw new FirebaseError("Functions did not deploy properly.");
-  }
-
-  printWarnings() {
-    if (this.warnings.length === 0) {
-      return;
-    }
-
-    // Print all the original messages at debug level.
-    for (const failedDeployment of this.warnings) {
-      logger.debug(
-        `\tWarning during${failedDeployment.operationType} for ${failedDeployment.functionName}: ${failedDeployment.message}`
-      );
-    }
-  }
-}
diff --git a/src/deploy/functions/functionsDeployHelper.ts b/src/deploy/functions/functionsDeployHelper.ts
index 89ea25b55c4..edc3553e840 100644
--- a/src/deploy/functions/functionsDeployHelper.ts
+++ b/src/deploy/functions/functionsDeployHelper.ts
@@ -1,16 +1,4 @@
-import * as clc from "cli-color";
-
-import Queue from "../../throttler/queue";
-import { ErrorHandler } from "./errorHandler";
-import { logger } from "../../logger";
-import * as args from "./args";
 import * as backend from "./backend";
-import * as deploymentTool from "../../deploymentTool";
-import * as track from "../../track";
-import * as utils from "../../utils";
-
-// Note: it seems like almost all of these matcher methods use IDs under the covers.
-// Consider updating methods and call sites to work on ID.
 
 export function functionMatchesAnyGroup(func: backend.TargetIds, filterGroups: string[][]) {
   if (!filterGroups.length) {
@@ -49,84 +37,6 @@ export function getFilterGroups(options: { only?: string }): string[][] {
   });
 }
 
-// TODO(inlined): this should eventually go away as we migrate to backend.FunctionSpec
-export function getFunctionId(fullName: string): string {
-  return fullName.split("/")[5];
-}
-
-// TOOD(inlined): this should eventually go away as we migrate to backend.FunctionSpec
-function getRegion(fullName: string): string {
-  return fullName.split("/")[3];
-}
-
-export function getFunctionLabel(fn: backend.TargetIds): string;
-
-// TODO(inlined) get rid of this version
-export function getFunctionLabel(fullName: string): string;
-
-export function getFunctionLabel(fnOrName: string | backend.TargetIds): string {
-  if (typeof fnOrName === "string") {
-    return getFunctionId(fnOrName) + "(" + getRegion(fnOrName) + ")";
-  } else {
-    return `${fnOrName.id}(${fnOrName.region})`;
-  }
-}
-
-export function logAndTrackDeployStats(queue: Queue<any, any>, errorHandler: ErrorHandler) {
-  const stats = queue.stats();
-  logger.debug(`Total Function Deployment time: ${stats.elapsed}`);
-  logger.debug(`${stats.total} Functions Deployed`);
-  logger.debug(`${errorHandler.errors.length} Functions Errored`);
-  logger.debug(`Average Function Deployment time: ${stats.avg}`);
-  if (stats.total > 0) {
-    if (errorHandler.errors.length === 0) {
-      track("functions_deploy_result", "success", stats.total);
-    } else if (errorHandler.errors.length < stats.total) {
-      track("functions_deploy_result", "partial_success", stats.total - errorHandler.errors.length);
-      track("functions_deploy_result", "partial_failure", errorHandler.errors.length);
-      track(
-        "functions_deploy_result",
-        "partial_error_ratio",
-        errorHandler.errors.length / stats.total
-      );
-    } else {
-      track("functions_deploy_result", "failure", stats.total);
-    }
-  }
-  // TODO: Track other stats here - maybe time of full deployment?
-  // TODO(inlined): Track functions deploy by API version
-}
-
-export function printSuccess(func: backend.TargetIds, type: string) {
-  utils.logSuccess(
-    clc.bold.green("functions[" + getFunctionLabel(func) + "]: ") +
-      "Successful " +
-      type +
-      " operation. "
-  );
-}
-
-export async function printTriggerUrls(context: args.Context, want: backend.Backend) {
-  // TODO: We can cut an RPC out of our workflow if we record the
-  // results of our deploy tasks. This will also be important for scheduled functions
-  // that are deployed directly to HTTP endpoints.
-  const have = await backend.existingBackend(context, /* forceRefresh= */ true);
-  const httpsFunctions = have.cloudFunctions.filter((fn) => {
-    if (backend.isEventTrigger(fn.trigger)) {
-      return false;
-    }
-
-    return want.cloudFunctions.some(backend.sameFunctionName(fn));
-  });
-  if (httpsFunctions.length === 0) {
-    return;
-  }
-
-  for (const httpsFunc of httpsFunctions) {
-    if (!httpsFunc.uri) {
-      logger.debug("Missing URI for HTTPS function in printTriggerUrls. This shouldn't happen");
-      continue;
-    }
-    logger.info(clc.bold("Function URL"), `(${getFunctionLabel(httpsFunc)}):`, httpsFunc.uri);
-  }
+export function getFunctionLabel(fn: backend.TargetIds): string {
+  return `${fn.id}(${fn.region})`;
 }
diff --git a/src/deploy/functions/prepare.ts b/src/deploy/functions/prepare.ts
index 74117e414ce..d0fb661c23c 100644
--- a/src/deploy/functions/prepare.ts
+++ b/src/deploy/functions/prepare.ts
@@ -18,7 +18,7 @@ import * as runtimes from "./runtimes";
 import * as validate from "./validate";
 import * as utils from "../../utils";
 import { logger } from "../../logger";
-import { setTriggerRegion } from "./triggerRegionHelper";
+import { lookupMissingTriggerRegions } from "./triggerRegionHelper";
 
 function hasUserConfig(config: Record<string, unknown>): boolean {
   // "firebase" key is always going to exist in runtime config.
@@ -77,14 +77,15 @@ export async function prepare(
     projectAlias: options.projectAlias,
   };
   const userEnvs = functionsEnv.loadUserEnvs(userEnvOpt);
+  const usedDotenv = hasDotenv(userEnvOpt);
   const tag = hasUserConfig(runtimeConfig)
-    ? hasDotenv(userEnvOpt)
+    ? usedDotenv
       ? "mixed"
       : "runtime_config"
-    : hasDotenv(userEnvOpt)
+    : usedDotenv
     ? "dotenv"
     : "none";
-  track("functions_codebase_deploy_env_method", tag);
+  await track("functions_codebase_deploy_env_method", tag);
 
   logger.debug(`Analyzing ${runtimeDelegate.name} backend spec`);
   const wantBackend = await runtimeDelegate.discoverSpec(runtimeConfig, firebaseEnvs);
@@ -94,21 +95,21 @@ export async function prepare(
   // Note: Some of these are premium APIs that require billing to be enabled.
   // We'd eventually have to add special error handling for billing APIs, but
   // enableCloudBuild is called above and has this special casing already.
-  if (wantBackend.cloudFunctions.find((f) => f.platform === "gcfv2")) {
-    const V2_APIS = {
-      artifactregistry: "artifactregistry.googleapis.com",
-      cloudrun: "run.googleapis.com",
-      eventarc: "eventarc.googleapis.com",
-      pubsub: "pubsub.googleapis.com",
-      storage: "storage.googleapis.com",
-    };
-    const enablements = Object.entries(V2_APIS).map(([tag, api]) => {
-      return ensureApiEnabled.ensure(context.projectId, api, tag);
+  if (backend.someEndpoint(wantBackend, (e) => e.platform === "gcfv2")) {
+    const V2_APIS = [
+      "artifactregistry.googleapis.com",
+      "run.googleapis.com",
+      "eventarc.googleapis.com",
+      "pubsub.googleapis.com",
+      "storage.googleapis.com",
+    ];
+    const enablements = V2_APIS.map((api) => {
+      return ensureApiEnabled.ensure(context.projectId, api, "functions");
     });
     await Promise.all(enablements);
   }
 
-  if (wantBackend.cloudFunctions.length) {
+  if (backend.someEndpoint(wantBackend, () => true)) {
     logBullet(
       clc.cyan.bold("functions:") +
         " preparing " +
         " directory for uploading..."
     );
   }
-  if (wantBackend.cloudFunctions.find((fn) => fn.platform === "gcfv1")) {
+  if (backend.someEndpoint(wantBackend, (e) => e.platform === "gcfv1")) {
     context.functionsSourceV1 = await prepareFunctionsUpload(runtimeConfig, options);
   }
-  if (wantBackend.cloudFunctions.find((fn) => fn.platform === "gcfv2")) {
+  if (backend.someEndpoint(wantBackend, (e) => e.platform === "gcfv2")) {
     context.functionsSourceV2 = await prepareFunctionsUpload(
       /* runtimeConfig= */ undefined,
       options
@@ -127,40 +128,83 @@ export async function prepare(
     );
   }
 
   // Setup environment variables on each function.
-  wantBackend.cloudFunctions.forEach((fn: backend.FunctionSpec) => {
-    fn.environmentVariables = wantBackend.environmentVariables;
-  });
+  for (const endpoint of backend.allEndpoints(wantBackend)) {
+    endpoint.environmentVariables = wantBackend.environmentVariables;
+  }
 
   // Enable required APIs. This may come implicitly from triggers (e.g. scheduled triggers
   // require cloudscheduler and, in v1, require pub/sub), or can eventually come from
   // explicit dependencies.
   await Promise.all(
-    Object.keys(wantBackend.requiredAPIs).map((friendlyName) => {
-      ensureApiEnabled.ensure(
-        projectId,
-        wantBackend.requiredAPIs[friendlyName],
-        friendlyName,
-        /* silent=*/ false
-      );
+    Object.values(wantBackend.requiredAPIs).map((api) => {
+      return ensureApiEnabled.ensure(projectId, api, "functions", /* silent=*/ false);
     })
   );
 
   // Validate the function code that is being deployed.
-  validate.functionIdsAreValid(wantBackend.cloudFunctions);
+  validate.functionIdsAreValid(backend.allEndpoints(wantBackend));
 
   // Check what --only filters have been passed in.
   context.filters = getFilterGroups(options);
-  const wantFunctions = wantBackend.cloudFunctions.filter((fn: backend.FunctionSpec) => {
-    return functionMatchesAnyGroup(fn, context.filters);
+  const matchingBackend = backend.matchingBackend(wantBackend, (endpoint) => {
+    return functionMatchesAnyGroup(endpoint, context.filters);
   });
-  const haveFunctions = (await backend.existingBackend(context)).cloudFunctions;
 
-  // sets the trigger region from cached values or api lookup
-  await setTriggerRegion(wantFunctions, haveFunctions);
+  const haveBackend = await backend.existingBackend(context);
+  inferDetailsFromExisting(wantBackend, haveBackend, usedDotenv);
+  await lookupMissingTriggerRegions(wantBackend);
 
   // Display a warning and prompt if any functions in the release have failurePolicies.
-  await promptForFailurePolicies(options, wantFunctions, haveFunctions);
-  await promptForMinInstances(options, wantFunctions, haveFunctions);
+  await promptForFailurePolicies(options, matchingBackend, haveBackend);
+  await promptForMinInstances(options, matchingBackend, haveBackend);
 
   await backend.checkAvailability(context, wantBackend);
 }
+
+/**
+ * Adds information to the want backend types based on what we can infer from prod.
+ * This can help us preserve environment variables set out of band, remember the
+ * location of a trigger w/o lookup, etc.
+ */
+export function inferDetailsFromExisting(
+  want: backend.Backend,
+  have: backend.Backend,
+  usedDotenv: boolean
+): void {
+  for (const wantE of backend.allEndpoints(want)) {
+    const haveE = have.endpoints[wantE.region]?.[wantE.id];
+    if (!haveE) {
+      continue;
+    }
+
+    // By default, preserve existing environment variables.
+    // Only overwrite environment variables when the dotenv preview is enabled
+    // AND there are user specified environment variables.
+    if (!usedDotenv) {
+      wantE.environmentVariables = {
+        ...haveE.environmentVariables,
+        ...wantE.environmentVariables,
+      };
+    }
+
+    maybeCopyTriggerRegion(wantE, haveE);
+  }
+}
+
+function maybeCopyTriggerRegion(wantE: backend.Endpoint, haveE: backend.Endpoint): void {
+  if (!backend.isEventTriggered(wantE) || !backend.isEventTriggered(haveE)) {
+    return;
+  }
+  if (wantE.eventTrigger.region || !haveE.eventTrigger.region) {
+    return;
+  }
+
+  // Don't copy the region if anything about the trigger changed. It's possible
+  // they changed the resource.
+  const oldTrigger: Record<string, unknown> = { ...haveE.eventTrigger };
+  delete oldTrigger.region;
+  if (JSON.stringify(oldTrigger) !== JSON.stringify(wantE.eventTrigger)) {
+    return;
+  }
+  wantE.eventTrigger.region = haveE.eventTrigger.region;
+}
diff --git a/src/deploy/functions/pricing.ts b/src/deploy/functions/pricing.ts
index 99548154f88..37208388200 100644
--- a/src/deploy/functions/pricing.ts
+++ b/src/deploy/functions/pricing.ts
@@ -127,24 +127,25 @@ const MB_TO_GHZ = {
   8192: 4.8,
 };
 
-export function canCalculateMinInstanceCost(functionSpec: backend.FunctionSpec): boolean {
-  if (!functionSpec.minInstances) {
+/** Whether we have information in our price sheet to calculate the minInstance cost. */
+export function canCalculateMinInstanceCost(endpoint: backend.Endpoint): boolean {
+  if (!endpoint.minInstances) {
     return true;
   }
 
-  if (functionSpec.platform == "gcfv1") {
-    if (!MB_TO_GHZ[functionSpec.availableMemoryMb || 256]) {
+  if (endpoint.platform == "gcfv1") {
+    if (!MB_TO_GHZ[endpoint.availableMemoryMb || 256]) {
       return false;
     }
 
-    if (!V1_REGION_TO_TIER[functionSpec.region]) {
+    if (!V1_REGION_TO_TIER[endpoint.region]) {
       return false;
     }
 
     return true;
   }
 
-  if (!V2_REGION_TO_TIER[functionSpec.region]) {
+  if (!V2_REGION_TO_TIER[endpoint.region]) {
     return false;
   }
 
@@ -154,7 +155,9 @@
 // A hypothetical month has 30d. ALWAYS PRINT THIS ASSUMPTION when printing
 // a cost estimate.
 const SECONDS_PER_MONTH = 30 * 24 * 60 * 60;
-export function monthlyMinInstanceCost(functions: backend.FunctionSpec[]): number {
+
+/** The cost of a series of endpoints at 100% idle in a 30d month. */
+export function monthlyMinInstanceCost(endpoints: backend.Endpoint[]): number {
   // Assertion: canCalculateMinInstanceCost
   type Usage = {
     ram: number;
     cpu: number;
   };
   const usage: Record<backend.FunctionsPlatform, Record<number, Usage>> = {
     gcfv1: { 1: { ram: 0, cpu: 0 }, 2: { ram: 0, cpu: 0 } },
     gcfv2: { 1: { ram: 0, cpu: 0 }, 2: { ram: 0, cpu: 0 } },
   };
 
-  for (const func of functions) {
-    if (!func.minInstances) {
+  for (const endpoint of endpoints) {
+    if (!endpoint.minInstances) {
       continue;
     }
 
-    const ramMb = func.availableMemoryMb || 256;
+    const ramMb = endpoint.availableMemoryMb || 256;
     const ramGb = ramMb / 1024;
-    if (func.platform === "gcfv1") {
+    if (endpoint.platform === "gcfv1") {
       const cpu = MB_TO_GHZ[ramMb];
-      const tier = V1_REGION_TO_TIER[func.region];
+      const tier = V1_REGION_TO_TIER[endpoint.region];
       usage["gcfv1"][tier].ram =
-        usage["gcfv1"][tier].ram + ramGb * SECONDS_PER_MONTH * func.minInstances;
+        usage["gcfv1"][tier].ram + ramGb * SECONDS_PER_MONTH * endpoint.minInstances;
       usage["gcfv1"][tier].cpu =
-        usage["gcfv1"][tier].cpu + MB_TO_GHZ[ramMb] * SECONDS_PER_MONTH * func.minInstances;
+        usage["gcfv1"][tier].cpu + cpu * SECONDS_PER_MONTH * endpoint.minInstances;
     } else {
       // V2 is currently fixed at 1vCPU.
       const cpu = 1;
-      const tier = V2_REGION_TO_TIER[func.region];
+      const tier = V2_REGION_TO_TIER[endpoint.region];
       usage["gcfv2"][tier].ram =
-        usage["gcfv2"][tier].ram + ramGb * SECONDS_PER_MONTH * func.minInstances;
+        usage["gcfv2"][tier].ram + ramGb * SECONDS_PER_MONTH * endpoint.minInstances;
       usage["gcfv2"][tier].cpu =
-        usage["gcfv2"][tier].cpu + cpu * SECONDS_PER_MONTH * func.minInstances;
+        usage["gcfv2"][tier].cpu + cpu * SECONDS_PER_MONTH * endpoint.minInstances;
     }
   }
diff --git a/src/deploy/functions/prompts.ts b/src/deploy/functions/prompts.ts
index 07d5f7d1053..dc25e253c04 100644
--- a/src/deploy/functions/prompts.ts
+++ b/src/deploy/functions/prompts.ts
@@ -4,7 +4,6 @@ import { getFunctionLabel } from "./functionsDeployHelper";
 import { FirebaseError } from "../../error";
 import { promptOnce } from "../../prompt";
 import { logger } from "../../logger";
-import * as args from "./args";
 import * as backend from "./backend";
 import * as pricing from "./pricing";
 import * as utils from "../../utils";
@@ -19,33 +18,30 @@ import { Options } from "../../options";
  */
 export async function promptForFailurePolicies(
   options: Options,
-  want: backend.FunctionSpec[],
-  have: backend.FunctionSpec[]
+  want: backend.Backend,
+  have: backend.Backend
 ): Promise<void> {
   // Collect all the functions that have a retry policy
-  const retryFunctions = want.filter((fn) => {
-    return backend.isEventTrigger(fn.trigger) && fn.trigger.retry;
+  const retryEndpoints = backend.allEndpoints(want).filter((e) => {
+    return backend.isEventTriggered(e) && e.eventTrigger.retry;
   });
-  if (retryFunctions.length === 0) {
+  if (retryEndpoints.length === 0) {
     return;
   }
 
-  const existingRetryFunctions = have.filter((fn) => {
-    return backend.isEventTrigger(fn.trigger) && fn.trigger.retry;
+  const newRetryEndpoints = retryEndpoints.filter((endpoint) => {
+    const existing = have.endpoints[endpoint.region]?.[endpoint.id];
+    return !(existing && backend.isEventTriggered(existing) && existing.eventTrigger.retry);
   });
 
-  const newRetryFunctions = retryFunctions.filter((fn) => {
-    return !existingRetryFunctions.some(backend.sameFunctionName(fn));
-  });
-
-  if (newRetryFunctions.length == 0) {
+  if (newRetryEndpoints.length == 0) {
     return;
   }
 
   const warnMessage =
     "The following functions will newly be retried in case of failure: " +
-    clc.bold(newRetryFunctions.sort(backend.compareFunctions).map(getFunctionLabel).join(", ")) +
+    clc.bold(newRetryEndpoints.sort(backend.compareFunctions).map(getFunctionLabel).join(", ")) +
     ". " +
     "Retried executions are billed as any other execution, and functions are retried repeatedly until they either successfully execute or the maximum retry period has elapsed, which can be up to 7 days. " +
     "For safety, you might want to ensure that your functions are idempotent; see https://firebase.google.com/docs/functions/retries to learn more.";
@@ -78,7 +74,7 @@
  * @param functions A list of functions to be deleted.
  */
 export async function promptForFunctionDeletion(
-  functionsToDelete: backend.FunctionSpec[],
+  functionsToDelete: (backend.TargetIds & { platform: backend.FunctionsPlatform })[],
   force: boolean,
   nonInteractive: boolean
 ): Promise<boolean> {
@@ -133,27 +129,27 @@
  */
 export async function promptForMinInstances(
   options: Options,
-  want: backend.FunctionSpec[],
-  have: backend.FunctionSpec[]
+  want: backend.Backend,
+  have: backend.Backend
 ): Promise<void> {
   if (options.force) {
     return;
   }
 
-  const increasesCost = want.some((wantFn) => {
+  const increasesCost = backend.someEndpoint(want, (wantE) => {
     // If we don't know how much this will cost, be pessimal
-    if (!pricing.canCalculateMinInstanceCost(wantFn)) {
+    if (!pricing.canCalculateMinInstanceCost(wantE)) {
       return true;
     }
-    const wantCost = pricing.monthlyMinInstanceCost([wantFn]);
-    const haveFn = have.find(backend.sameFunctionName(wantFn));
+    const wantCost = pricing.monthlyMinInstanceCost([wantE]);
+    const haveE = have.endpoints[wantE.region]?.[wantE.id];
     let haveCost;
-    if (!haveFn) {
+    if (!haveE) {
       haveCost = 0;
-    } else if (!pricing.canCalculateMinInstanceCost(wantFn)) {
+    } else if (!pricing.canCalculateMinInstanceCost(wantE)) {
       return true;
     } else {
-      haveCost = pricing.monthlyMinInstanceCost([haveFn]);
+      haveCost = pricing.monthlyMinInstanceCost([haveE]);
     }
     return wantCost > haveCost;
   });
@@ -174,7 +170,8 @@
   // Considerations for future versions:
   // Group Tier 1 and Tier 2 regions
   // Add Tier 1 or Tier 2 annotations to functionLines
-  const functionLines = want
+  const functionLines = backend
+    .allEndpoints(want)
     .filter((fn) => fn.minInstances)
     .sort(backend.compareFunctions)
     .map((fn) => {
@@ -187,16 +184,16 @@
     .join("\n");
 
   let costLine;
-  if (want.some((fn) => !pricing.canCalculateMinInstanceCost(fn))) {
+  if (backend.someEndpoint(want, (fn) => !pricing.canCalculateMinInstanceCost(fn))) {
     costLine =
       "Cannot calculate the minimum monthly bill for this configuration. Consider running " +
       clc.bold("npm install -g firebase-tools");
   } else {
-    const cost = pricing.monthlyMinInstanceCost(want).toFixed(2);
+    const cost = pricing.monthlyMinInstanceCost(backend.allEndpoints(want)).toFixed(2);
     costLine = `With these options, your minimum bill will be $${cost} in a 30-day month`;
   }
   let cudAnnotation = "";
-  if (want.some((fn) => fn.platform == "gcfv2" && fn.minInstances)) {
+  if (backend.someEndpoint(want, (fn) => fn.platform == "gcfv2" && !!fn.minInstances)) {
     cudAnnotation =
       "\nThis bill can be lowered with a one year commitment. See https://cloud.google.com/run/cud for more";
   }
diff --git a/src/deploy/functions/release.ts b/src/deploy/functions/release.ts
deleted file mode 100644
index 94f3d8a732e..00000000000
--- a/src/deploy/functions/release.ts
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * If you make any changes to this file, run the integration test in scripts/test-functions-deploy.js
- */
-import Queue from "../../throttler/queue";
-import { createDeploymentPlan } from "./deploymentPlanner";
-import { getAppEngineLocation } from "../../functionsConfig";
-import { hasUserEnvs } from "../../functions/env";
-import { promptForFunctionDeletion } from "./prompts";
-import { DeploymentTimer } from "./deploymentTimer";
-import { ErrorHandler } from "./errorHandler";
-import { Options } from "../../options";
-import { previews } from "../../previews";
-import * as args from "./args";
-import * as backend from "./backend";
-import * as containerCleaner from "./containerCleaner";
-import * as helper from "./functionsDeployHelper";
-import * as tasks from "./tasks";
-import * as utils from "../../utils";
-import { track } from "../../track";
-
-export async function release(context: args.Context, options: Options, payload: args.Payload) {
-  if (!options.config.has("functions")) {
-    return;
-  }
-
-  const projectId = context.projectId;
-  const sourceUrl = context.uploadUrl!;
-  const functionsSource = options.config.get("functions.source") as string;
-  const appEngineLocation = getAppEngineLocation(context.firebaseConfig);
-
-  const timer = new DeploymentTimer();
-  const errorHandler = new ErrorHandler();
-
-  const fullDeployment = createDeploymentPlan(
-    payload.functions!.backend,
-    await backend.existingBackend(context),
-    {
-      filters: context.filters,
-      // By default, preserve existing environment variables.
-      // Only overwrite environment variables when the dotenv preview is enabled
-      // AND there are user specified environment variables.
-      overwriteEnvs:
-        previews.dotenv &&
-        hasUserEnvs({
-          functionsSource: options.config.path(functionsSource),
-          projectId,
-          projectAlias: options.projectAlias,
-        }),
-    }
-  );
-
-  // This queue needs to retry quota errors.
-  // The main quotas that can be exceeded are per 1 minute quotas,
-  // so we start with a larger backoff to reduce the liklihood of extra retries.
-  const cloudFunctionsQueue = new Queue<tasks.DeploymentTask, void>({
-    retries: 30,
-    backoff: 20000,
-    concurrency: 40,
-    maxBackoff: 40000,
-    handler: tasks.functionsDeploymentHandler(timer, errorHandler),
-  });
-  const schedulerQueue = new Queue<tasks.DeploymentTask, void>({
-    handler: tasks.schedulerDeploymentHandler(errorHandler),
-  });
-  const pubSubQueue = new Queue<tasks.DeploymentTask, void>({
-    // We can actually use the same handler for Scheduler and Pub/Sub
-    handler: tasks.schedulerDeploymentHandler(errorHandler),
-  });
-  const regionPromises = [];
-
-  const taskParams: tasks.TaskParams = {
-    projectId,
-    sourceUrl,
-    storage: context.storage!,
-    errorHandler,
-  };
-
-  // Note(inlined): We might increase consistency if we tried a fully regional strategy, but
-  // the existing code was written to process deletes before creates and updates.
- const allFnsToDelete = Object.values(fullDeployment.regionalDeployments).reduce( - (accum, region) => [...accum, ...region.functionsToDelete], - [] as backend.FunctionSpec[] - ); - const shouldDeleteFunctions = await promptForFunctionDeletion( - allFnsToDelete, - options.force, - options.nonInteractive - ); - if (!shouldDeleteFunctions) { - // If we shouldn't delete functions, don't clean up their schedules either - fullDeployment.schedulesToDelete = fullDeployment.schedulesToDelete.filter((schedule) => { - return !allFnsToDelete.find(backend.sameFunctionName(schedule.targetService)); - }); - fullDeployment.topicsToDelete = fullDeployment.topicsToDelete.filter((topic) => { - return !allFnsToDelete.find(backend.sameFunctionName(topic.targetService)); - }); - for (const regionalDeployment of Object.values(fullDeployment.regionalDeployments)) { - regionalDeployment.functionsToDelete = []; - } - } - - for (const [region, deployment] of Object.entries(fullDeployment.regionalDeployments)) { - // Run the create and update function calls for the region. - regionPromises.push( - tasks.runRegionalFunctionDeployment(taskParams, region, deployment, cloudFunctionsQueue) - ); - } - - for (const schedule of fullDeployment.schedulesToUpsert) { - const task = tasks.upsertScheduleTask(taskParams, schedule, appEngineLocation); - void schedulerQueue.run(task); - } - for (const schedule of fullDeployment.schedulesToDelete) { - const task = tasks.deleteScheduleTask(taskParams, schedule, appEngineLocation); - void schedulerQueue.run(task); - } - for (const topic of fullDeployment.topicsToDelete) { - const task = tasks.deleteTopicTask(taskParams, topic); - void pubSubQueue.run(task); - } - - // Once everything has been added to queues, starting processing. - // Note: We need to set up these wait before calling process and close. - const queuePromises = [cloudFunctionsQueue.wait(), schedulerQueue.wait(), pubSubQueue.wait()]; - cloudFunctionsQueue.process(); - schedulerQueue.process(); - pubSubQueue.process(); - schedulerQueue.close(); - pubSubQueue.close(); - - // Wait until the second round of creates/updates are added to the queue before closing it. - await Promise.all(regionPromises); - cloudFunctionsQueue.close(); - - // Wait for the first function in each region to be deployed, and all the other calls to be queued, - // then close the queue. - // Wait for all of the deployments to complete. - try { - await Promise.all(queuePromises); - } catch (err) { - utils.reject( - "Exceeded maximum retries while deploying functions. " + - "If you are deploying a large number of functions, " + - "please deploy your functions in batches by using the --only flag, " + - "and wait a few minutes before deploying again. " + - "Go to https://firebase.google.com/docs/cli/#partial_deploys to learn more.", - { - original: err, - } - ); - } - const functions = payload.functions!.backend.cloudFunctions; - const gcfv1 = functions.find((fn) => fn.platform === "gcfv1"); - const gcfv2 = functions.find((fn) => fn.platform === "gcfv2"); - const tag = gcfv1 && gcfv2 ? "v1+v2" : gcfv1 ? 
"v1" : "v2"; - track("functions_codebase_deploy", tag, functions.length); - - helper.logAndTrackDeployStats(cloudFunctionsQueue, errorHandler); - await containerCleaner.cleanupBuildImages(payload.functions!.backend.cloudFunctions); - await helper.printTriggerUrls(context, payload.functions!.backend); - errorHandler.printWarnings(); - errorHandler.printErrors(); -} diff --git a/src/deploy/functions/release/executor.ts b/src/deploy/functions/release/executor.ts new file mode 100644 index 00000000000..a32eca51dea --- /dev/null +++ b/src/deploy/functions/release/executor.ts @@ -0,0 +1,70 @@ +import { Queue } from "../../../throttler/queue"; +import { ThrottlerOptions } from "../../../throttler/throttler"; + +/** + * An Executor runs lambdas (which may be async). + */ +export interface Executor { + run(func: () => Promise): Promise; +} + +interface Operation { + func: () => any; + result?: any; + error?: any; +} + +async function handler(op: Operation): Promise { + try { + op.result = await op.func(); + } catch (err) { + // Throw retry functions back to the queue where they will be retried + // with backoffs. To do this we cast a wide net for possible error codes. + // These can be either TOO MANY REQUESTS (429) errors or CONFLICT (409) + // errors. This can be a raw error with the correct HTTP code, a raw + // error with the HTTP code stashed where GCP puts it, or a FirebaseError + // wrapping either of the previous two cases. + const code = + err.status || + err.code || + err.context?.response?.statusCode || + err.original?.code || + err.original?.context?.response?.statusCode; + if (code === 429 || code === 409) { + throw err; + } + op.error = err; + } + return; +} + +/** + * A QueueExecutor implements the executor interface on top of a throttler queue. + * Any 429 will be retried within the ThrottlerOptions parameters, but all + * other errors are rethrown. + */ +export class QueueExecutor implements Executor { + private readonly queue: Queue; + constructor(options: Omit, "handler">) { + this.queue = new Queue({ ...options, handler }); + } + + async run(func: () => Promise): Promise { + const op: Operation = { func }; + await this.queue.run(op); + if (op.error) { + throw op.error; + } + return op.result as T; + } +} + +/** + * Inline executors run their functions right away. + * Useful for testing. 
+ */ +export class InlineExecutor { + run<T>(func: () => Promise<T>): Promise<T> { + return func(); + } +} diff --git a/src/deploy/functions/release/fabricator.ts b/src/deploy/functions/release/fabricator.ts new file mode 100644 index 00000000000..849c27bdaf2 --- /dev/null +++ b/src/deploy/functions/release/fabricator.ts @@ -0,0 +1,487 @@ +import * as clc from "cli-color"; + +import { Executor } from "./executor"; +import { FirebaseError } from "../../../error"; +import { SourceTokenScraper } from "./sourceTokenScraper"; +import { Timer } from "./timer"; +import { assertExhaustive } from "../../../functional"; +import { getHumanFriendlyRuntimeName } from "../runtimes"; +import { functionsOrigin, functionsV2Origin } from "../../../api"; +import { logger } from "../../../logger"; +import * as backend from "../backend"; +import * as deploymentTool from "../../../deploymentTool"; +import * as gcf from "../../../gcp/cloudfunctions"; +import * as gcfV2 from "../../../gcp/cloudfunctionsv2"; +import * as helper from "../functionsDeployHelper"; +import * as planner from "./planner"; +import * as poller from "../../../operation-poller"; +import * as pubsub from "../../../gcp/pubsub"; +import * as reporter from "./reporter"; +import * as run from "../../../gcp/run"; +import * as scheduler from "../../../gcp/cloudscheduler"; +import * as utils from "../../../utils"; + +// TODO: Tune this for better performance. +const gcfV1PollerOptions = { + apiOrigin: functionsOrigin, + apiVersion: gcf.API_VERSION, + masterTimeout: 25 * 60 * 1000, // 25 minutes is the maximum build time for a function +}; + +const gcfV2PollerOptions = { + apiOrigin: functionsV2Origin, + apiVersion: gcfV2.API_VERSION, + masterTimeout: 25 * 60 * 1000, // 25 minutes is the maximum build time for a function +}; + +const DEFAULT_GCFV2_CONCURRENCY = 80; + +export interface FabricatorArgs { + executor: Executor; + functionExecutor: Executor; + appEngineLocation: string; + + // Required if creating or updating any GCFv1 functions + sourceUrl?: string; + + // Required if creating or updating any GCFv2 functions + storage?: Record<string, gcfV2.StorageSource>; +} + +const rethrowAs = <T>(endpoint: backend.Endpoint, op: reporter.OperationType) => ( + err: unknown ): T => { + throw new reporter.DeploymentError(endpoint, op, err); +}; + +/** Fabricators make a customer's backend match a spec by applying a plan. */ +export class Fabricator { + executor: Executor; + functionExecutor: Executor; + sourceUrl: string | undefined; + storage: Record<string, gcfV2.StorageSource> | undefined; + appEngineLocation: string; + + constructor(args: FabricatorArgs) { + this.executor = args.executor; + this.functionExecutor = args.functionExecutor; + this.sourceUrl = args.sourceUrl; + this.storage = args.storage; + this.appEngineLocation = args.appEngineLocation; + } + + async applyPlan(plan: planner.DeploymentPlan): Promise<reporter.Summary> { + const timer = new Timer(); + const summary: reporter.Summary = { + totalTime: 0, + results: [], + }; + const deployRegions = Object.values(plan).map( + async (changes): Promise<void> => { + const results = await this.applyRegionalChanges(changes); + summary.results.push(...results); + return; + } + ); + const promiseResults = await utils.allSettled(deployRegions); + + const errs = promiseResults + .filter((r) => r.status === "rejected") + .map((r) => (r as utils.PromiseRejectedResult).reason); + if (errs.length) { + logger.debug( + "Fabricator.applyRegionalChanges returned an unhandled exception. 
This should never happen", + JSON.stringify(errs, null, 2) + ); + } + + summary.totalTime = timer.stop(); + return summary; + } + + async applyRegionalChanges( + changes: planner.RegionalChanges + ): Promise<Array<reporter.DeployResult>> { + const deployResults: reporter.DeployResult[] = []; + const handle = async ( + op: reporter.OperationType, + endpoint: backend.Endpoint, + fn: () => Promise<void> + ): Promise<void> => { + const timer = new Timer(); + const result: Partial<reporter.DeployResult> = { endpoint }; + try { + await fn(); + this.logOpSuccess(op, endpoint); + } catch (err) { + result.error = err as Error; + } + result.durationMs = timer.stop(); + deployResults.push(result as reporter.DeployResult); + }; + + const upserts: Array<Promise<void>> = []; + const scraper = new SourceTokenScraper(); + for (const endpoint of changes.endpointsToCreate) { + this.logOpStart("creating", endpoint); + upserts.push(handle("create", endpoint, () => this.createEndpoint(endpoint, scraper))); + } + for (const update of changes.endpointsToUpdate) { + this.logOpStart("updating", update.endpoint); + upserts.push(handle("update", update.endpoint, () => this.updateEndpoint(update, scraper))); + } + await utils.allSettled(upserts); + + // Note: every promise is generated by handle, which records errors in deployResults. + // We take a shortcut and scan deployResults for errors instead of inspecting the + // results of allSettled. + if (deployResults.find((r) => r.error)) { + for (const endpoint of changes.endpointsToDelete) { + deployResults.push({ + endpoint, + durationMs: 0, + error: new reporter.AbortedDeploymentError(endpoint), + }); + } + return deployResults; + } + + const deletes: Array<Promise<void>> = []; + for (const endpoint of changes.endpointsToDelete) { + this.logOpStart("deleting", endpoint); + deletes.push(handle("delete", endpoint, () => this.deleteEndpoint(endpoint))); + } + await utils.allSettled(deletes); + + return deployResults; + } + + async createEndpoint(endpoint: backend.Endpoint, scraper: SourceTokenScraper): Promise<void> { + endpoint.labels = { ...endpoint.labels, ...deploymentTool.labels() }; + if (endpoint.platform === "gcfv1") { + await this.createV1Function(endpoint, scraper); + } else if (endpoint.platform === "gcfv2") { + await this.createV2Function(endpoint); + } else { + assertExhaustive(endpoint.platform); + } + + await this.setTrigger(endpoint); + } + + async updateEndpoint(update: planner.EndpointUpdate, scraper: SourceTokenScraper): Promise<void> { + update.endpoint.labels = { ...update.endpoint.labels, ...deploymentTool.labels() }; + if (update.deleteAndRecreate) { + await this.deleteEndpoint(update.deleteAndRecreate); + await this.createEndpoint(update.endpoint, scraper); + return; + } + + if (update.endpoint.platform === "gcfv1") { + await this.updateV1Function(update.endpoint, scraper); + } else if (update.endpoint.platform === "gcfv2") { + await this.updateV2Function(update.endpoint); + } else { + assertExhaustive(update.endpoint.platform); + } + + await this.setTrigger(update.endpoint); + } + + async deleteEndpoint(endpoint: backend.Endpoint): Promise<void> { + await this.deleteTrigger(endpoint); + if (endpoint.platform === "gcfv1") { + await this.deleteV1Function(endpoint); + } else { + await this.deleteV2Function(endpoint); + } + } + + async createV1Function(endpoint: backend.Endpoint, scraper: SourceTokenScraper): Promise<void> { + if (!this.sourceUrl) { + logger.debug("Precondition failed. 
Cannot create a GCF function without sourceUrl"); + throw new Error("Precondition failed"); + } + const apiFunction = gcf.functionFromEndpoint(endpoint, this.sourceUrl); + apiFunction.sourceToken = await scraper.tokenPromise(); + const resultFunction = await this.functionExecutor + .run(async () => { + const op: { name: string } = await gcf.createFunction(apiFunction); + return poller.pollOperation<gcf.CloudFunction>({ + ...gcfV1PollerOptions, + pollerName: `create-${endpoint.region}-${endpoint.id}`, + operationResourceName: op.name, + onPoll: scraper.poller, + }); + }) + .catch(rethrowAs(endpoint, "create")); + + endpoint.uri = resultFunction?.httpsTrigger?.url; + if (backend.isHttpsTriggered(endpoint)) { + const invoker = endpoint.httpsTrigger.invoker || ["public"]; + if (!invoker.includes("private")) { + await this.executor + .run(async () => { + await gcf.setInvokerCreate(endpoint.project, backend.functionName(endpoint), invoker); + }) + .catch(rethrowAs(endpoint, "set invoker")); + } + } + } + + async createV2Function(endpoint: backend.Endpoint): Promise<void> { + if (!this.storage) { + logger.debug("Precondition failed. Cannot create a GCFv2 function without storage"); + throw new Error("Precondition failed"); + } + const apiFunction = gcfV2.functionFromEndpoint(endpoint, this.storage[endpoint.region]); + + // N.B. As of GCFv2 private preview GCF no longer creates Pub/Sub topics + // for Pub/Sub event handlers. This may change, at which point this code + // could be deleted. + const topic = apiFunction.eventTrigger?.pubsubTopic; + if (topic) { + await this.executor + .run(async () => { + try { + await pubsub.createTopic({ name: topic }); + } catch (err) { + // Pub/Sub uses HTTP 409 (CONFLICT) with a status message of + // ALREADY_EXISTS if the topic already exists. + if (err.status === 409) { + return; + } + throw new FirebaseError("Unexpected error creating Pub/Sub topic", { + original: err as Error, + }); + } + }) + .catch(rethrowAs(endpoint, "create topic")); + } + + const resultFunction = (await this.functionExecutor + .run(async () => { + const op: { name: string } = await gcfV2.createFunction(apiFunction); + return await poller.pollOperation({ + ...gcfV2PollerOptions, + pollerName: `create-${endpoint.region}-${endpoint.id}`, + operationResourceName: op.name, + }); + }) + .catch(rethrowAs(endpoint, "create"))) as gcfV2.CloudFunction; + + endpoint.uri = resultFunction.serviceConfig.uri; + const serviceName = resultFunction.serviceConfig.service!; + if (backend.isHttpsTriggered(endpoint)) { + const invoker = endpoint.httpsTrigger.invoker || ["public"]; + if (!invoker.includes("private")) { + await this.executor + .run(() => run.setInvokerCreate(endpoint.project, serviceName, invoker)) + .catch(rethrowAs(endpoint, "set invoker")); + } + } + + await this.setConcurrency( + endpoint, + serviceName, + endpoint.concurrency || DEFAULT_GCFV2_CONCURRENCY + ); + } + + async updateV1Function(endpoint: backend.Endpoint, scraper: SourceTokenScraper): Promise<void> { + if (!this.sourceUrl) { + logger.debug("Precondition failed. 
Cannot update a GCF function without sourceUrl"); + throw new Error("Precondition failed"); + } + const apiFunction = gcf.functionFromEndpoint(endpoint, this.sourceUrl); + apiFunction.sourceToken = await scraper.tokenPromise(); + const resultFunction = await this.functionExecutor + .run(async () => { + const op: { name: string } = await gcf.updateFunction(apiFunction); + return await poller.pollOperation<gcf.CloudFunction>({ + ...gcfV1PollerOptions, + pollerName: `update-${endpoint.region}-${endpoint.id}`, + operationResourceName: op.name, + onPoll: scraper.poller, + }); + }) + .catch(rethrowAs(endpoint, "update")); + + endpoint.uri = resultFunction?.httpsTrigger?.url; + if (backend.isHttpsTriggered(endpoint) && endpoint.httpsTrigger.invoker) { + await this.executor + .run(async () => { + await gcf.setInvokerUpdate( + endpoint.project, + backend.functionName(endpoint), + endpoint.httpsTrigger.invoker! + ); + return; + }) + .catch(rethrowAs(endpoint, "set invoker")); + } + } + + async updateV2Function(endpoint: backend.Endpoint): Promise<void> { + if (!this.storage) { + logger.debug("Precondition failed. Cannot update a GCFv2 function without storage"); + throw new Error("Precondition failed"); + } + const apiFunction = gcfV2.functionFromEndpoint(endpoint, this.storage[endpoint.region]); + + // N.B. As of GCFv2 private preview the API chokes on any update call that + // includes the pub/sub topic even if that topic is unchanged. + // We know that the user hasn't changed the topic between deploys because + // of checkForInvalidChangeOfTrigger(). + if (apiFunction.eventTrigger?.pubsubTopic) { + delete apiFunction.eventTrigger.pubsubTopic; + } + + const resultFunction = (await this.functionExecutor + .run(async () => { + const op: { name: string } = await gcfV2.updateFunction(apiFunction); + return await poller.pollOperation({ + ...gcfV2PollerOptions, + pollerName: `update-${endpoint.region}-${endpoint.id}`, + operationResourceName: op.name, + }); + }) + .catch(rethrowAs(endpoint, "update"))) as gcfV2.CloudFunction; + + endpoint.uri = resultFunction.serviceConfig.uri; + const serviceName = resultFunction.serviceConfig.service!; + if (backend.isHttpsTriggered(endpoint) && endpoint.httpsTrigger.invoker) { + await this.executor + .run(() => + run.setInvokerUpdate(endpoint.project, serviceName, endpoint.httpsTrigger.invoker!) 
+ ) + .catch(rethrowAs(endpoint, "set invoker")); + } + + if (endpoint.concurrency) { + await this.setConcurrency(endpoint, serviceName, endpoint.concurrency); + } + } + + async deleteV1Function(endpoint: backend.Endpoint): Promise<void> { + const fnName = backend.functionName(endpoint); + await this.functionExecutor + .run(async () => { + const op: { name: string } = await gcf.deleteFunction(fnName); + const pollerOptions = { + ...gcfV1PollerOptions, + pollerName: `delete-${endpoint.region}-${endpoint.id}`, + operationResourceName: op.name, + }; + await poller.pollOperation(pollerOptions); + }) + .catch(rethrowAs(endpoint, "delete")); + } + + async deleteV2Function(endpoint: backend.Endpoint): Promise<void> { + const fnName = backend.functionName(endpoint); + await this.functionExecutor + .run(async () => { + const op: { name: string } = await gcfV2.deleteFunction(fnName); + const pollerOptions = { + ...gcfV2PollerOptions, + pollerName: `delete-${endpoint.region}-${endpoint.id}`, + operationResourceName: op.name, + }; + await poller.pollOperation(pollerOptions); + }) + .catch(rethrowAs(endpoint, "delete")); + } + + async setConcurrency( + endpoint: backend.Endpoint, + serviceName: string, + concurrency: number + ): Promise<void> { + await this.functionExecutor + .run(async () => { + const service = await run.getService(serviceName); + if (service.spec.template.spec.containerConcurrency === concurrency) { + logger.debug("Skipping setConcurrency on", serviceName, " because it already matches"); + return; + } + + delete service.status; + delete (service.spec.template.metadata as any).name; + service.spec.template.spec.containerConcurrency = concurrency; + await run.replaceService(serviceName, service); + }) + .catch(rethrowAs(endpoint, "set concurrency")); + } + + // Set/Delete trigger is responsible for wiring up a function with any trigger not owned + // by the GCF API. This includes schedules, task queues, and blocking function triggers. 
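+ // For example (values illustrative): an endpoint deployed with + // scheduleTrigger: { schedule: "every 5 minutes" } gets its Cloud Scheduler job written by + // upsertScheduleV1/upsertScheduleV2 below, via this.executor, after the function itself deploys.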
+ async setTrigger(endpoint: backend.Endpoint): Promise<void> { + if (backend.isScheduleTriggered(endpoint)) { + if (endpoint.platform === "gcfv1") { + await this.upsertScheduleV1(endpoint); + return; + } else if (endpoint.platform === "gcfv2") { + await this.upsertScheduleV2(endpoint); + return; + } + assertExhaustive(endpoint.platform); + } + } + + async deleteTrigger(endpoint: backend.Endpoint): Promise<void> { + if (backend.isScheduleTriggered(endpoint)) { + if (endpoint.platform === "gcfv1") { + await this.deleteScheduleV1(endpoint); + return; + } else if (endpoint.platform === "gcfv2") { + await this.deleteScheduleV2(endpoint); + return; + } + assertExhaustive(endpoint.platform); + } + } + + async upsertScheduleV1(endpoint: backend.Endpoint & backend.ScheduleTriggered): Promise<void> { + // The Pub/Sub topic is already created + const job = scheduler.jobFromEndpoint(endpoint, this.appEngineLocation); + await this.executor + .run(() => scheduler.createOrReplaceJob(job)) + .catch(rethrowAs(endpoint, "upsert schedule")); + } + + upsertScheduleV2(endpoint: backend.Endpoint & backend.ScheduleTriggered): Promise<void> { + return Promise.reject( + new reporter.DeploymentError(endpoint, "upsert schedule", new Error("Not implemented")) + ); + } + + async deleteScheduleV1(endpoint: backend.Endpoint & backend.ScheduleTriggered): Promise<void> { + const job = scheduler.jobFromEndpoint(endpoint, this.appEngineLocation); + await this.executor + .run(() => scheduler.deleteJob(job.name)) + .catch(rethrowAs(endpoint, "delete schedule")); + + await this.executor + .run(() => pubsub.deleteTopic(job.pubsubTarget!.topicName)) + .catch(rethrowAs(endpoint, "delete topic")); + } + + deleteScheduleV2(endpoint: backend.Endpoint & backend.ScheduleTriggered): Promise<void> { + return Promise.reject( + new reporter.DeploymentError(endpoint, "delete schedule", new Error("Not implemented")) + ); + } + + logOpStart(op: string, endpoint: backend.Endpoint): void { + const runtime = getHumanFriendlyRuntimeName(endpoint.runtime); + const label = helper.getFunctionLabel(endpoint); + utils.logBullet( + `${clc.bold.cyan("functions:")} ${op} ${runtime} function ${clc.bold(label)}...` + ); + } + + logOpSuccess(op: string, endpoint: backend.Endpoint): void { + const label = helper.getFunctionLabel(endpoint); + utils.logSuccess(`${clc.bold.green(`functions[${label}]`)} Successful ${op} operation.`); + } +} diff --git a/src/deploy/functions/release/index.ts b/src/deploy/functions/release/index.ts new file mode 100644 index 00000000000..9cab1d5a20b --- /dev/null +++ b/src/deploy/functions/release/index.ts @@ -0,0 +1,101 @@ +import * as clc from "cli-color"; + +import { Options } from "../../../options"; +import { logger } from "../../../logger"; +import { reduceFlat } from "../../../functional"; +import * as args from "../args"; +import * as backend from "../backend"; +import * as containerCleaner from "../containerCleaner"; +import * as planner from "./planner"; +import * as fabricator from "./fabricator"; +import * as reporter from "./reporter"; +import * as executor from "./executor"; +import * as prompts from "../prompts"; +import { getAppEngineLocation } from "../../../functionsConfig"; +import { getFunctionLabel } from "../functionsDeployHelper"; +import { FirebaseError } from "../../../error"; + +/** Releases new versions of functions to prod. 
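+ * + * In outline, the flow implemented below is (want, have, and args stand in for the values computed in this function): + * + * const plan = planner.createDeploymentPlan(want, have, { filters: context.filters }); + * const summary = await new fabricator.Fabricator(args).applyPlan(plan); + * await reporter.logAndTrackDeployStats(summary); + * reporter.printErrors(summary);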
*/ +export async function release( + context: args.Context, + options: Options, + payload: args.Payload +): Promise<void> { + if (!options.config.has("functions")) { + return; + } + + const plan = planner.createDeploymentPlan( + payload.functions!.backend, + await backend.existingBackend(context), + { filters: context.filters } + ); + + const fnsToDelete = Object.values(plan) + .map((regionalChanges) => regionalChanges.endpointsToDelete) + .reduce(reduceFlat, []); + const shouldDelete = await prompts.promptForFunctionDeletion( + fnsToDelete, + options.force, + options.nonInteractive + ); + if (!shouldDelete) { + for (const change of Object.values(plan)) { + change.endpointsToDelete = []; + } + } + + const functionExecutor: executor.QueueExecutor = new executor.QueueExecutor({ + retries: 30, + backoff: 20000, + concurrency: 40, + maxBackoff: 40000, + }); + + const fab = new fabricator.Fabricator({ + functionExecutor, + executor: new executor.QueueExecutor({}), + sourceUrl: context.uploadUrl!, + storage: context.storage!, + appEngineLocation: getAppEngineLocation(context.firebaseConfig), + }); + + const summary = await fab.applyPlan(plan); + + await reporter.logAndTrackDeployStats(summary); + reporter.printErrors(summary); + + // N.B. Fabricator::applyPlan updates the endpoints it deploys to include the + // uri field. createDeploymentPlan copies endpoints by reference. Both of these + // subtleties are so we can take out a round trip API call to get the latest + // trigger URLs by calling existingBackend again. + printTriggerUrls(payload.functions!.backend); + + await containerCleaner.cleanupBuildImages(backend.allEndpoints(payload.functions!.backend)); + + const allErrors = summary.results.filter((r) => r.error).map((r) => r.error) as Error[]; + if (allErrors.length) { + const opts = allErrors.length == 1 ? { original: allErrors[0] } : { children: allErrors }; + throw new FirebaseError("There was an error deploying functions", { ...opts, exit: 2 }); + } +} + +/** + * Prints the URLs of HTTPS functions. + * Caller must either force refresh the backend or assume the fabricator + * has updated the URI of endpoints after deploy. + */ +export function printTriggerUrls(results: backend.Backend): void { + const httpsFunctions = backend.allEndpoints(results).filter(backend.isHttpsTriggered); + if (httpsFunctions.length === 0) { + return; + } + + for (const httpsFunc of httpsFunctions) { + if (!httpsFunc.uri) { + logger.debug("Missing URI for HTTPS function in printTriggerUrls. 
This shouldn't happen"); + continue; + } + logger.info(clc.bold("Function URL"), `(${getFunctionLabel(httpsFunc)}):`, httpsFunc.uri); + } +} diff --git a/src/deploy/functions/release/planner.ts b/src/deploy/functions/release/planner.ts new file mode 100644 index 00000000000..81b38424137 --- /dev/null +++ b/src/deploy/functions/release/planner.ts @@ -0,0 +1,246 @@ +import { functionMatchesAnyGroup } from "../functionsDeployHelper"; +import { getFunctionLabel } from "../functionsDeployHelper"; +import { isFirebaseManaged } from "../../../deploymentTool"; +import { FirebaseError } from "../../../error"; +import * as utils from "../../../utils"; +import * as backend from "../backend"; +import * as gcfv2 from "../../../gcp/cloudfunctionsv2"; + +export interface EndpointUpdate { + endpoint: backend.Endpoint; + deleteAndRecreate?: backend.Endpoint; +} + +export interface RegionalChanges { + endpointsToCreate: backend.Endpoint[]; + endpointsToUpdate: EndpointUpdate[]; + endpointsToDelete: backend.Endpoint[]; +} + +export type DeploymentPlan = Record<string, RegionalChanges>; + +export interface Options { + filters?: string[][]; + // Unless set to true, only functions that are managed by Firebase will be deleted + deleteAll?: boolean; +} + +/** Calculate the changes needed for a given region. */ +export function calculateRegionalChanges( + want: Record<string, backend.Endpoint>, + have: Record<string, backend.Endpoint>, + options: Options ): RegionalChanges { + const endpointsToCreate = Object.keys(want) + .filter((id) => !have[id]) + .map((id) => want[id]); + + const endpointsToDelete = Object.keys(have) + .filter((id) => !want[id]) + .filter((id) => options.deleteAll || isFirebaseManaged(have[id].labels || {})) + .map((id) => have[id]); + + const endpointsToUpdate = Object.keys(want) + .filter((id) => have[id]) + .map((id) => calculateUpdate(want[id], have[id])); + return { endpointsToCreate, endpointsToUpdate, endpointsToDelete }; +} + +/** + * Calculates the update object for a given endpoint. + * Throws if the update is illegal. + * Forces a delete & recreate if the underlying API doesn't allow an upgrade but + * CF3 does. + */ +export function calculateUpdate(want: backend.Endpoint, have: backend.Endpoint): EndpointUpdate { + checkForIllegalUpdate(want, have); + + const update: EndpointUpdate = { + endpoint: want, + }; + const needsDelete = + changedTriggerRegion(want, have) || + changedV2PubSubTopic(want, have) || + upgradedScheduleFromV1ToV2(want, have); + if (needsDelete) { + update.deleteAndRecreate = have; + } + return update; +} + +/** + * Create a plan for deploying all functions, organized by region. 
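+ * For a backend spanning two regions, the resulting plan is shaped like (illustrative): + * + * { + * "us-central1": { endpointsToCreate: [...], endpointsToUpdate: [...], endpointsToDelete: [...] }, + * "europe-west1": { ... }, + * }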
+ * @param want the desired state + * @param have the current state + * @param filters The filters, passed in by the user via `--only functions:` + */ +export function createDeploymentPlan( + want: backend.Backend, + have: backend.Backend, + options: Options = {} +): DeploymentPlan { + const deployment: DeploymentPlan = {}; + want = backend.matchingBackend(want, (endpoint) => { + return functionMatchesAnyGroup(endpoint, options.filters || []); + }); + have = backend.matchingBackend(have, (endpoint) => { + return functionMatchesAnyGroup(endpoint, options.filters || []); + }); + + const regions = new Set([...Object.keys(want.endpoints), ...Object.keys(have.endpoints)]); + for (const region of regions) { + deployment[region] = calculateRegionalChanges( + want.endpoints[region] || {}, + have.endpoints[region] || {}, + options + ); + } + + if (upgradedToGCFv2WithoutSettingConcurrency(want, have)) { + utils.logLabeledBullet( + "functions", + "You are updating one or more functions to Google Cloud Functions v2, " + + "which introduces support for concurrent execution. New functions " + + "default to 80 concurrent executions, but existing functions keep the " + + "old default of 1. You can change this with the 'concurrency' option." + ); + } + return deployment; +} + +/** Whether a user upgraded any endpoints to GCFv2 without setting concurrency. */ +export function upgradedToGCFv2WithoutSettingConcurrency( + want: backend.Backend, + have: backend.Backend ): boolean { + return backend.someEndpoint(want, (endpoint) => { + // If there is not an existing v1 function + if (have.endpoints[endpoint.region]?.[endpoint.id]?.platform !== "gcfv1") { + return false; + } + + if (endpoint.platform !== "gcfv2") { + return false; + } + + if (endpoint.concurrency) { + return false; + } + + return true; + }); +} + +/** Whether a trigger changed regions. This can happen if, for example, + * a user listens to a different bucket, which happens to have a different region. + */ +export function changedTriggerRegion(want: backend.Endpoint, have: backend.Endpoint): boolean { + if (want.platform != "gcfv2") { + return false; + } + if (have.platform != "gcfv2") { + return false; + } + if (!backend.isEventTriggered(want)) { + return false; + } + if (!backend.isEventTriggered(have)) { + return false; + } + return want.eventTrigger.region != have.eventTrigger.region; +} + +/** Whether a user changed the Pub/Sub topic of a GCFv2 function (which isn't allowed in the API). */ +export function changedV2PubSubTopic(want: backend.Endpoint, have: backend.Endpoint): boolean { + if (want.platform !== "gcfv2") { + return false; + } + if (have.platform !== "gcfv2") { + return false; + } + if (!backend.isEventTriggered(want)) { + return false; + } + if (!backend.isEventTriggered(have)) { + return false; + } + if (want.eventTrigger.eventType != gcfv2.PUBSUB_PUBLISH_EVENT) { + return false; + } + if (have.eventTrigger.eventType !== gcfv2.PUBSUB_PUBLISH_EVENT) { + return false; + } + return have.eventTrigger.eventFilters["resource"] != want.eventTrigger.eventFilters["resource"]; +} + +/** Whether a user upgraded a scheduled function (which goes from Pub/Sub to HTTPS). 
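+ * Concretely, a v1 schedule is delivered through a Pub/Sub topic plus a Cloud Scheduler job, while a v2 schedule is expected to invoke the function over HTTP (and is not yet implemented; see fabricator.upsertScheduleV2), so calculateUpdate() answers this case with deleteAndRecreate rather than an in-place update.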
*/ +export function upgradedScheduleFromV1ToV2( + want: backend.Endpoint, + have: backend.Endpoint +): boolean { + if (have.platform !== "gcfv1") { + return false; + } + if (want.platform !== "gcfv2") { + return false; + } + if (!backend.isScheduleTriggered(have)) { + return false; + } + // should not be possible + if (!backend.isScheduleTriggered(want)) { + return false; + } + + return true; +} + +/** Throws if there is an illegal update to a function. */ +export function checkForIllegalUpdate(want: backend.Endpoint, have: backend.Endpoint): void { + const triggerType = (e: backend.Endpoint): string => { + if (backend.isHttpsTriggered(e)) { + return "an HTTPS"; + } else if (backend.isEventTriggered(e)) { + return "a background triggered"; + } else if (backend.isScheduleTriggered(e)) { + return "a scheduled"; + } + // Unfortunately TypeScript isn't like Scala and I can't prove to it + // that all cases have been handled + throw Error("Functions release planner is not able to handle an unknown trigger type"); + }; + const wantType = triggerType(want); + const haveType = triggerType(have); + if (wantType != haveType) { + throw new FirebaseError( + `[${getFunctionLabel( + want + )}] Changing from ${haveType} function to ${wantType} function is not allowed. Please delete your function and create a new one instead.` + ); + } + if (want.platform == "gcfv1" && have.platform == "gcfv2") { + throw new FirebaseError( + `[${getFunctionLabel(want)}] Functions cannot be downgraded from GCFv2 to GCFv1` + ); + } + + // We need to call from module exports so tests can stub this behavior, but that + // breaks the type system. + // eslint-disable-next-line + exports.checkForV2Upgrade(want, have); +} + +/** + * Throws an error when upgrading/downgrading GCF versions. + * This is a separate function that is designed to be stubbed in tests to allow + * upgrading to v2 in tests before production is ready + */ +export function checkForV2Upgrade(want: backend.Endpoint, have: backend.Endpoint): void { + if (want.platform == "gcfv2" && have.platform == "gcfv1") { + throw new FirebaseError( + `[${getFunctionLabel( + have + )}] Upgrading from GCFv1 to GCFv2 is not yet supported. Please delete your old function or wait for this feature to be ready.` + ); + } +} diff --git a/src/deploy/functions/release/reporter.ts b/src/deploy/functions/release/reporter.ts new file mode 100644 index 00000000000..2c805f29bdb --- /dev/null +++ b/src/deploy/functions/release/reporter.ts @@ -0,0 +1,233 @@ +import * as backend from "../backend"; +import * as clc from "cli-color"; + +import { logger } from "../../../logger"; +import * as track from "../../../track"; +import * as utils from "../../../utils"; +import { getFunctionLabel } from "../functionsDeployHelper"; + +export interface DeployResult { + endpoint: backend.Endpoint; + durationMs: number; + error?: Error; +} + +export interface Summary { + totalTime: number; + results: DeployResult[]; +} + +export type OperationType = + | "create" + | "update" + | "delete" + | "upsert schedule" + | "delete schedule" + | "create topic" + | "delete topic" + | "set invoker" + | "set concurrency"; + +/** An error with a deployment phase. 
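+ * The fabricator attributes each failed API call to an endpoint and an operation, e.g. (condensed; doDeploy stands for the deploy closure): + * + * await this.functionExecutor.run(doDeploy).catch(rethrowAs(endpoint, "create")); + * + * which lets the reporter group failures per endpoint and operation type.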
*/ +export class DeploymentError extends Error { + constructor( + readonly endpoint: backend.Endpoint, + readonly op: OperationType, + readonly original: unknown + ) { + super(`Failed to ${op} function ${endpoint.id} in region ${endpoint.region}`); + } +} + +/** + * A specific error used to indicate that a function was not deleted because + * other errors happened during deploy. + */ +export class AbortedDeploymentError extends DeploymentError { + constructor(readonly endpoint: backend.Endpoint) { + super(endpoint, "delete", new Error("aborted")); + } +} + +/** Add debugger logs and GA metrics for deploy stats. */ +export async function logAndTrackDeployStats(summary: Summary): Promise<void> { + let totalTime = 0; + let totalErrors = 0; + let totalSuccesses = 0; + let totalAborts = 0; + const reports: Array<Promise<void>> = []; + + for (const result of summary.results) { + const tag = triggerTag(result.endpoint); + totalTime += result.durationMs; + if (!result.error) { + totalSuccesses++; + reports.push(track.track("function_deploy_success", tag, result.durationMs)); + } else if (result.error instanceof AbortedDeploymentError) { + totalAborts++; + reports.push(track.track("function_deploy_abort", tag, result.durationMs)); + } else { + totalErrors++; + reports.push(track.track("function_deploy_failure", tag, result.durationMs)); + } + } + + const gcfv1 = summary.results.find((r) => r.endpoint.platform === "gcfv1"); + const gcfv2 = summary.results.find((r) => r.endpoint.platform === "gcfv2"); + const tag = gcfv1 && gcfv2 ? "v1+v2" : gcfv1 ? "v1" : "v2"; + reports.push(track.track("functions_codebase_deploy", tag, summary.results.length)); + + const avgTime = totalTime / (totalSuccesses + totalErrors); + + logger.debug(`Total Function Deployment time: ${summary.totalTime}`); + logger.debug(`${totalErrors + totalSuccesses + totalAborts} Functions Deployed`); + logger.debug(`${totalErrors} Functions Errored`); + logger.debug(`${totalAborts} Function Deployments Aborted`); + logger.debug(`Average Function Deployment time: ${avgTime}`); + if (totalErrors + totalSuccesses > 0) { + if (totalErrors === 0) { + reports.push(track.track("functions_deploy_result", "success", totalSuccesses)); + } else if (totalSuccesses > 0) { + reports.push(track.track("functions_deploy_result", "partial_success", totalSuccesses)); + reports.push(track.track("functions_deploy_result", "partial_failure", totalErrors)); + reports.push( + track.track( + "functions_deploy_result", + "partial_error_ratio", + totalErrors / (totalSuccesses + totalErrors) + ) + ); + } else { + reports.push(track.track("functions_deploy_result", "failure", totalErrors)); + } + } + + await utils.allSettled(reports); +} + +/** Print error messages for failures in summary. */ +export function printErrors(summary: Summary): void { + const errored = summary.results.filter((r) => r.error) as Array<Required<DeployResult>>; + if (errored.length === 0) { + return; + } + + errored.sort((left, right) => backend.compareFunctions(left.endpoint, right.endpoint)); + logger.info(""); + logger.info( + "Functions deploy had errors with the following functions:" + + errored + .filter((r) => !(r.error instanceof AbortedDeploymentError)) + .map((result) => `\n\t${getFunctionLabel(result.endpoint)}`) + .join("") + ); + + printIamErrors(errored); + printQuotaErrors(errored); + printAbortedErrors(errored); +} + +/** Print errors for failures to set invoker. 
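+ * The suggested fix is an explicit invoker on the endpoint, e.g. httpsTrigger: { invoker: ["public"] } (value illustrative; the field is the one checked below).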
*/ +function printIamErrors(results: Array<Required<DeployResult>>): void { + const iamFailures = results.filter( + (r) => r.error instanceof DeploymentError && r.error.op === "set invoker" + ); + if (!iamFailures.length) { + return; + } + + logger.info(""); + logger.info( + "Unable to set the invoker for the IAM policy on the following functions:" + + iamFailures.map((result) => `\n\t${getFunctionLabel(result.endpoint)}`).join("") + ); + + logger.info(""); + logger.info("Some common causes of this:"); + logger.info(""); + logger.info( + "- You may not have the roles/functions.admin IAM role. Note that " + + "roles/functions.developer does not allow you to change IAM policies." + ); + logger.info(""); + logger.info("- An organization policy that restricts Network Access on your project."); + + // We implicitly set IAM permissions to public invoker when creating a function that + // has no explicit invoker set. If these failures were on an inferred setInvoker command + // we need to let the customer know that it needs to be explicit next time. + const hadImplicitMakePublic = iamFailures.find( + (r) => backend.isHttpsTriggered(r.endpoint) && !r.endpoint.httpsTrigger.invoker + ); + if (!hadImplicitMakePublic) { + return; + } + logger.info(""); + logger.info( + "One or more functions were being implicitly made publicly available on function create." + ); + logger.info( + "Functions are not implicitly made public on updates. To try to make " + + "these functions public on next deploy, configure these functions with " + + `${clc.bold("invoker")} set to ${clc.bold(`"public"`)}` + ); +} + +/** Print errors for failures with the GCF API. */ +function printQuotaErrors(results: Array<Required<DeployResult>>): void { + const hadQuotaError = results.find((r) => { + if (!(r.error instanceof DeploymentError)) { + return false; + } + const original = r.error.original as any; + const code: number | undefined = + original?.status || + original?.code || + original?.context?.response?.statusCode || + original?.original?.code || + original?.original?.context?.response?.statusCode; + return code === 429 || code === 409; + }); + if (!hadQuotaError) { + return; + } + logger.info(""); + logger.info( + "Exceeded maximum retries while deploying functions. " + + "If you are deploying a large number of functions, " + + "please deploy your functions in batches by using the --only flag, " + + "and wait a few minutes before deploying again. " + + "Go to https://firebase.google.com/docs/cli/#partial_deploys to learn more." + ); +} + +/** Print errors for aborted deletes. */ +export function printAbortedErrors(results: Array<Required<DeployResult>>): void { + const aborted = results.filter((r) => r.error instanceof AbortedDeploymentError); + if (!aborted.length) { + return; + } + logger.info(""); + logger.info( + "Because there were errors creating or updating functions, the following " + + "functions were not deleted" + + aborted.map((result) => `\n\t${getFunctionLabel(result.endpoint)}`).join("") + ); + logger.info(`To delete these, use ${clc.bold("firebase functions:delete")}`); +} + +/** Get a short synopsis of trigger type for analytics */ +export function triggerTag(endpoint: backend.Endpoint): string { + const prefix = endpoint.platform === "gcfv1" ? 
"v1" : "v2"; + if (backend.isScheduleTriggered(endpoint)) { + return `${prefix}.scheduled`; + } + + if (backend.isHttpsTriggered(endpoint)) { + if (endpoint.labels?.["deployment-callable"]) { + return `${prefix}.callable`; + } + return `${prefix}.https`; + } + + return endpoint.eventTrigger.eventType; +} diff --git a/src/deploy/functions/release/sourceTokenScraper.ts b/src/deploy/functions/release/sourceTokenScraper.ts new file mode 100644 index 00000000000..482e3d77406 --- /dev/null +++ b/src/deploy/functions/release/sourceTokenScraper.ts @@ -0,0 +1,38 @@ +import { logger } from "../../../logger"; + +/** + * GCF v1 deploys support reusing a build between function deploys. + * This class will return a resolved promise for its first call to tokenPromise() + * and then will always return a promise that is resolved by the poller function. + */ +export class SourceTokenScraper { + private firstCall = true; + private resolve!: (token: string) => void; + private promise: Promise; + + constructor() { + this.promise = new Promise((resolve) => (this.resolve = resolve)); + } + + // Token Promise will return undefined for the first caller + // (because we presume it's this function's source token we'll scrape) + // and then returns the promise generated from the first function's onCall + tokenPromise(): Promise { + if (this.firstCall) { + this.firstCall = false; + return Promise.resolve(undefined); + } + return this.promise; + } + + get poller() { + return (op: any) => { + if (op.metadata?.sourceToken || op.done) { + const [, , , /* projects*/ /* project*/ /* regions*/ region] = + op.metadata?.target?.split("/") || []; + logger.debug(`Got source token ${op.metadata?.sourceToken} for region ${region as string}`); + this.resolve(op.metadata?.sourceToken); + } + }; + } +} diff --git a/src/deploy/functions/release/timer.ts b/src/deploy/functions/release/timer.ts new file mode 100644 index 00000000000..eaf880ca561 --- /dev/null +++ b/src/deploy/functions/release/timer.ts @@ -0,0 +1,14 @@ +/** Measures the time taken from construction to the call to stop() */ +export class Timer { + private readonly start: bigint; + + constructor() { + this.start = process.hrtime.bigint(); + } + + stop(): number { + const stop = process.hrtime.bigint(); + const elapsedNanos = stop - this.start; + return Number(elapsedNanos / BigInt(1e6)); + } +} diff --git a/src/deploy/functions/runtimes/discovery/v1alpha1.ts b/src/deploy/functions/runtimes/discovery/v1alpha1.ts index d3c8abe42ce..08b5d793ddc 100644 --- a/src/deploy/functions/runtimes/discovery/v1alpha1.ts +++ b/src/deploy/functions/runtimes/discovery/v1alpha1.ts @@ -1,170 +1,179 @@ import * as backend from "../../backend"; import * as runtimes from ".."; +import { copyIfPresent } from "../../../../gcp/proto"; import { assertKeyTypes, requireKeys } from "./parsing"; +import { FirebaseError } from "../../../../error"; +export type ManifestEndpoint = backend.ServiceConfiguration & + backend.Triggered & + Partial & + Partial & + Partial & { + region?: string[]; + entryPoint: string; + platform?: backend.FunctionsPlatform; + }; + +export interface Manifest { + specVersion: string; + requiredAPIs?: Record; + endpoints: Record; +} + +/** Returns a Backend from a v1alpha1 Manifest. 
*/ export function backendFromV1Alpha1( - yaml: any, + yaml: unknown, project: string, region: string, runtime: runtimes.Runtime ): backend.Backend { - const bkend: backend.Backend = JSON.parse(JSON.stringify(yaml)); - delete (bkend as any).specVersion; - tryValidate(bkend); - fillDefaults(bkend, project, region, runtime); + const manifest = JSON.parse(JSON.stringify(yaml)) as Manifest; + const bkend: backend.Backend = backend.empty(); + bkend.requiredAPIs = parseRequiredAPIs(manifest); + requireKeys("", manifest, "endpoints"); + assertKeyTypes("", manifest, { + specVersion: "string", + requiredAPIs: "object", + endpoints: "object", + }); + for (const id of Object.keys(manifest.endpoints)) { + for (const parsed of parseEndpoints(manifest, id, project, region, runtime)) { + bkend.endpoints[parsed.region] = bkend.endpoints[parsed.region] || {}; + bkend.endpoints[parsed.region][parsed.id] = parsed; + } + } return bkend; } -function tryValidate(typed: backend.Backend) { - // Use a helper type to help guide code complete when writing this function - assertKeyTypes("", typed, { - requiredAPIs: "object", - endpoints: "array", - cloudFunctions: "array", - topics: "array", - schedules: "array", +function parseRequiredAPIs(manifest: Manifest): Record<string, string> { + const requiredAPIs: Record<string, string> = {}; + // Note: this intentionally allows undefined to slip through as {} + if (manifest.requiredAPIs && (typeof manifest.requiredAPIs !== "object" || Array.isArray(manifest.requiredAPIs))) { + throw new FirebaseError("Expected requiredApis to be a map of string to string"); + } + for (const [api, reason] of Object.entries(manifest.requiredAPIs || {})) { + if (typeof reason !== "string") { + throw new FirebaseError( + `Invalid reason "${JSON.stringify(reason)}" for API ${api}. Expected string` + ); + } + requiredAPIs[api] = reason; + } + return requiredAPIs; +} + +function parseEndpoints( + manifest: Manifest, + id: string, + project: string, + defaultRegion: string, + runtime: runtimes.Runtime +): backend.Endpoint[] { + const allParsed: backend.Endpoint[] = []; + const prefix = `endpoints[${id}]`; + const ep = manifest.endpoints[id]; + + assertKeyTypes(prefix, ep, { + region: "array", + platform: "string", + entryPoint: "string", + availableMemoryMb: "number", + maxInstances: "number", + minInstances: "number", + concurrency: "number", + serviceAccountEmail: "string", + timeout: "string", + vpcConnector: "string", + vpcConnectorEgressSettings: "string", + labels: "object", + ingressSettings: "string", environmentVariables: "object", + httpsTrigger: "object", + eventTrigger: "object", + scheduleTrigger: "object", }); - requireKeys("", typed, "cloudFunctions"); - - for (let ndx = 0; ndx < typed.cloudFunctions.length; ndx++) { - const prefix = `cloudFunctions[${ndx}]`; - const func = typed.cloudFunctions[ndx]; - requireKeys(prefix, func, "platform", "id", "entryPoint", "trigger"); - assertKeyTypes(prefix, func, { - platform: "string", - id: "string", - region: "string", - project: "string", - runtime: "string", - entryPoint: "string", - availableMemoryMb: "number", - maxInstances: "number", - minInstances: "number", - concurrency: "number", - serviceAccountEmail: "string", - timeout: "string", - trigger: "object", - vpcConnector: "string", - vpcConnectorEgressSettings: "string", - labels: "object", - ingressSettings: "string", - environmentVariables: "omit", - uri: "omit", - sourceUploadUrl: "omit", - }); - if (backend.isEventTrigger(func.trigger)) { - requireKeys(prefix + ".trigger", func.trigger, "eventType", "eventFilters"); - assertKeyTypes(prefix + ".trigger", 
func.trigger, { + let triggerCount = 0; + if (ep.httpsTrigger) { + triggerCount++; + } + if (ep.eventTrigger) { + triggerCount++; + } + if (ep.scheduleTrigger) { + triggerCount++; + } + if (!triggerCount) { + throw new FirebaseError("Expected trigger in endpoint " + id); + } + if (triggerCount > 1) { + throw new FirebaseError("Multiple triggers defined for endpoint " + id); + } + for (const region of ep.region || [defaultRegion]) { + let triggered: backend.Triggered; + if (backend.isEventTriggered(ep)) { + requireKeys(prefix + ".eventTrigger", ep.eventTrigger, "eventType", "eventFilters"); + assertKeyTypes(prefix + ".eventTrigger", ep.eventTrigger, { eventFilters: "object", eventType: "string", retry: "boolean", region: "string", serviceAccountEmail: "string", }); - } else { - assertKeyTypes(prefix + ".trigger", func.trigger, { + triggered = { eventTrigger: ep.eventTrigger }; + } else if (backend.isHttpsTriggered(ep)) { + assertKeyTypes(prefix + ".httpsTrigger", ep.httpsTrigger, { invoker: "array", }); + triggered = { httpsTrigger: {} }; + copyIfPresent(triggered.httpsTrigger, ep.httpsTrigger, "invoker"); + } else if (backend.isScheduleTriggered(ep)) { + assertKeyTypes(prefix + ".scheduleTrigger", ep.scheduleTrigger, { + schedule: "string", + timeZone: "string", + retryConfig: "object", + }); + assertKeyTypes(prefix + ".scheduleTrigger.retryConfig", ep.scheduleTrigger.retryConfig, { + retryCount: "number", + maxDoublings: "number", + minBackoffDuration: "string", + maxBackoffDuration: "string", + maxRetryDuration: "string", + }); + triggered = { scheduleTrigger: ep.scheduleTrigger }; + } else { + throw new FirebaseError( + `Do not recognize trigger type for endpoint ${id}. Try upgrading ` + + "firebase-tools with npm install -g firebase-tools@latest" + ); } - } - - for (let ndx = 0; ndx < typed.topics?.length; ndx++) { - let prefix = `topics[${ndx}]`; - const topic = typed.topics[ndx]; - requireKeys(prefix, topic, "id", "targetService"); - assertKeyTypes(prefix, topic, { - id: "string", - labels: "object", - project: "string", - targetService: "object", - }); - - prefix += ".targetService"; - requireKeys(prefix, topic.targetService, "id"); - assertKeyTypes(prefix, topic.targetService, { - id: "string", - project: "string", - region: "string", - }); - } - - for (let ndx = 0; ndx < typed.schedules?.length; ndx++) { - let prefix = `schedules[${ndx}]`; - const schedule = typed.schedules[ndx]; - requireKeys(prefix, schedule, "id", "schedule", "transport", "targetService"); - assertKeyTypes(prefix, schedule, { - id: "string", - project: "string", - retryConfig: "object", - schedule: "string", - timeZone: "string", - transport: "string", - targetService: "object", - }); - - assertKeyTypes(prefix + ".retryConfig", schedule.retryConfig, { - maxBackoffDuration: "string", - minBackoffDuration: "string", - maxDoublings: "number", - maxRetryDuration: "string", - retryCount: "number", - }); - - requireKeys((prefix = ".targetService"), schedule.targetService, "id"); - assertKeyTypes(prefix + ".targetService", schedule.targetService, { - id: "string", - project: "string", - region: "string", - }); - } -} - -function fillDefaults( - want: backend.Backend, - project: string, - region: string, - runtime: runtimes.Runtime -) { - want.requiredAPIs = want.requiredAPIs || {}; - want.environmentVariables = want.environmentVariables || {}; - want.schedules = want.schedules || []; - want.topics = want.topics || []; - want.endpoints = want.endpoints || []; - - for (const cloudFunction of want.cloudFunctions) { - if 
(!cloudFunction.project) { - cloudFunction.project = project; - } - if (!cloudFunction.region) { - cloudFunction.region = region; - } - if (!cloudFunction.runtime) { - cloudFunction.runtime = runtime; - } - } - for (const topic of want.topics) { - if (!topic.project) { - topic.project = project; - } - if (!topic.targetService.project) { - topic.targetService.project = project; - } - if (!topic.targetService.region) { - topic.targetService.region = region; - } + requireKeys(prefix, ep, "entryPoint"); + const parsed: backend.Endpoint = { + platform: ep.platform || "gcfv2", + id, + region, + project, + runtime, + entryPoint: ep.entryPoint, + ...triggered, + }; + copyIfPresent( + parsed, + ep, + "availableMemoryMb", + "maxInstances", + "minInstances", + "concurrency", + "serviceAccountEmail", + "timeout", + "vpcConnector", + "vpcConnectorEgressSettings", + "labels", + "ingressSettings", + "environmentVariables" + ); + allParsed.push(parsed); } - for (const schedule of want.schedules) { - if (!schedule.project) { - schedule.project = project; - } - if (!schedule.targetService.project) { - schedule.targetService.project = project; - } - if (!schedule.targetService.region) { - schedule.targetService.region = region; - } - } + return allParsed; } diff --git a/src/deploy/functions/runtimes/node/parseTriggers.ts b/src/deploy/functions/runtimes/node/parseTriggers.ts index 374eec8a5fb..942e1a22784 100644 --- a/src/deploy/functions/runtimes/node/parseTriggers.ts +++ b/src/deploy/functions/runtimes/node/parseTriggers.ts @@ -146,11 +146,11 @@ export function addResourcesToBackend( runtime: runtimes.Runtime, annotation: TriggerAnnotation, want: backend.Backend -) { +): void { Object.freeze(annotation); // Every trigger annotation is at least a function for (const region of annotation.regions || [api.functionsDefaultRegion]) { - let trigger: backend.HttpsTrigger | backend.EventTrigger; + let triggered: backend.Triggered; // Missing both or have both trigger types if (!!annotation.httpsTrigger == !!annotation.eventTrigger) { @@ -160,47 +160,53 @@ export function addResourcesToBackend( } if (annotation.httpsTrigger) { - trigger = {}; + const trigger: backend.HttpsTrigger = {}; if (annotation.failurePolicy) { logger.warn(`Ignoring retry policy for HTTPS function ${annotation.name}`); } - proto.copyIfPresent(trigger, annotation.httpsTrigger, "invoker", "invoker"); + proto.copyIfPresent(trigger, annotation.httpsTrigger, "invoker"); + triggered = { httpsTrigger: trigger }; + } else if (annotation.schedule) { + want.requiredAPIs["pubsub"] = "pubsub.googleapis.com"; + want.requiredAPIs["scheduler"] = "cloudscheduler.googleapis.com"; + triggered = { scheduleTrigger: annotation.schedule }; } else { - trigger = { - eventType: annotation.eventTrigger!.eventType, - eventFilters: { - resource: annotation.eventTrigger!.resource, + triggered = { + eventTrigger: { + eventType: annotation.eventTrigger!.eventType, + eventFilters: { + resource: annotation.eventTrigger!.resource, + }, + retry: !!annotation.failurePolicy, }, - retry: !!annotation.failurePolicy, }; + // TODO: yank this edge case for a v2 trigger on the pre-container contract + // once we use container contract for the functionsv2 experiment. 
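+ // For example (illustrative), an annotation with eventType "google.storage.object.finalize" and + // resource "projects/_/buckets/my-bucket" is rewritten to eventFilters: { bucket: "projects/_/buckets/my-bucket" } + // instead of the usual { resource: ... } filter.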
if (GCS_EVENTS.has(annotation.eventTrigger?.eventType || "")) { - trigger.eventFilters = { + triggered.eventTrigger.eventFilters = { bucket: annotation.eventTrigger!.resource, }; } } - const cloudFunctionName: backend.TargetIds = { + const endpoint: backend.Endpoint = { + platform: annotation.platform || "gcfv1", id: annotation.name, region: region, project: projectId, - }; - const cloudFunction: backend.FunctionSpec = { - platform: annotation.platform || "gcfv1", - ...cloudFunctionName, entryPoint: annotation.entryPoint, runtime: runtime, - trigger: trigger, + ...triggered, }; if (annotation.vpcConnector) { let maybeId = annotation.vpcConnector; if (!maybeId.includes("/")) { maybeId = `projects/${projectId}/locations/${region}/connectors/${maybeId}`; } - cloudFunction.vpcConnector = maybeId; + endpoint.vpcConnector = maybeId; } proto.copyIfPresent( - cloudFunction, + endpoint, annotation, "concurrency", "serviceAccountEmail", @@ -212,41 +218,7 @@ export function addResourcesToBackend( "minInstances", "availableMemoryMb" ); - - if (annotation.schedule) { - want.requiredAPIs["pubsub"] = "pubsub.googleapis.com"; - want.requiredAPIs["scheduler"] = "cloudscheduler.googleapis.com"; - - const id = backend.scheduleIdForFunction(cloudFunctionName); - const schedule: backend.ScheduleSpec = { - id, - project: projectId, - schedule: annotation.schedule.schedule, - transport: "pubsub", - targetService: cloudFunctionName, - }; - proto.copyIfPresent(schedule, annotation.schedule, "timeZone", "retryConfig"); - want.schedules.push(schedule); - const topic: backend.PubSubSpec = { - id, - project: projectId, - labels: backend.SCHEDULED_FUNCTION_LABEL, - targetService: cloudFunctionName, - }; - want.topics.push(topic); - - // The firebase-functions SDK is missing the topic ID in the event trigger for - // scheduled functions. 
- if (backend.isEventTrigger(cloudFunction.trigger)) { - cloudFunction.trigger.eventFilters.resource = `${cloudFunction.trigger.eventFilters.resource}/${id}`; - } - - cloudFunction.labels = { - ...cloudFunction.labels, - "deployment-scheduled": "true", - }; - } - - want.cloudFunctions.push(cloudFunction); + want.endpoints[region] = want.endpoints[region] || {}; + want.endpoints[region][endpoint.id] = endpoint; } } diff --git a/src/deploy/functions/tasks.ts b/src/deploy/functions/tasks.ts deleted file mode 100644 index 3ba7404bac5..00000000000 --- a/src/deploy/functions/tasks.ts +++ /dev/null @@ -1,457 +0,0 @@ -import * as clc from "cli-color"; - -import Queue from "../../throttler/queue"; -import { logger } from "../../logger"; -import { RegionalFunctionChanges } from "./deploymentPlanner"; -import { OperationResult, OperationPollerOptions, pollOperation } from "../../operation-poller"; -import { functionsOrigin, functionsV2Origin } from "../../api"; -import { getHumanFriendlyRuntimeName } from "./runtimes"; -import { DeploymentTimer } from "./deploymentTimer"; -import { ErrorHandler } from "./errorHandler"; -import * as backend from "./backend"; -import * as cloudscheduler from "../../gcp/cloudscheduler"; -import * as deploymentTool from "../../deploymentTool"; -import * as gcf from "../../gcp/cloudfunctions"; -import * as gcfV2 from "../../gcp/cloudfunctionsv2"; -import * as cloudrun from "../../gcp/run"; -import * as helper from "./functionsDeployHelper"; -import * as pubsub from "../../gcp/pubsub"; -import * as utils from "../../utils"; -import { FirebaseError } from "../../error"; -import { track } from "../../track"; - -interface PollerOptions { - apiOrigin: string; - apiVersion: string; - masterTimeout: number; -} - -// TODO: Tune this for better performance. -const gcfV1PollerOptions = { - apiOrigin: functionsOrigin, - apiVersion: gcf.API_VERSION, - masterTimeout: 25 * 60 * 1000, // 25 minutes is the maximum build time for a function -}; - -const gcfV2PollerOptions = { - apiOrigin: functionsV2Origin, - apiVersion: gcfV2.API_VERSION, - masterTimeout: 25 * 60 * 1000, // 25 minutes is the maximum build time for a function -}; - -const pollerOptionsByPlatform: Record<backend.FunctionsPlatform, PollerOptions> = { - gcfv1: gcfV1PollerOptions, - gcfv2: gcfV2PollerOptions, -}; - -export type OperationType = - | "create" - | "update" - | "delete" - | "upsert schedule" - | "delete schedule" - | "delete topic" - | "set invoker"; - -export interface DeploymentTask<T> { - run: () => Promise<any>; - data: T; - operationType: OperationType; -} - -export interface TaskParams { - projectId: string; - sourceUrl?: string; - storage?: Record<string, gcfV2.StorageSource>; - errorHandler: ErrorHandler; -} - -/** - * Cloud Functions Deployments Tasks and Handler - */ - -export function createFunctionTask( - params: TaskParams, - fn: backend.FunctionSpec, - sourceToken?: string, - onPoll?: (op: OperationResult<backend.FunctionSpec>) => void -): DeploymentTask<backend.FunctionSpec> { - const fnName = backend.functionName(fn); - const run = async () => { - utils.logBullet( - clc.bold.cyan("functions: ") + - "creating " + - getHumanFriendlyRuntimeName(fn.runtime) + - " function " + - clc.bold(helper.getFunctionLabel(fn)) + - "..." - ); - let op: { name: string }; - if (fn.platform === "gcfv1") { - const apiFunction = gcf.functionFromSpec(fn, params.sourceUrl!); - if (sourceToken) { - apiFunction.sourceToken = sourceToken; - } - op = await gcf.createFunction(apiFunction); - } else { - const apiFunction = gcfV2.functionFromSpec(fn, params.storage![fn.region]); - // N.B. 
As of GCFv2 private preview GCF no longer creates Pub/Sub topics - // for Pub/Sub event handlers. This may change, at which point this code - // could be deleted. - if (apiFunction.eventTrigger?.pubsubTopic) { - try { - await pubsub.getTopic(apiFunction.eventTrigger.pubsubTopic); - } catch (err) { - if (err.status !== 404) { - throw new FirebaseError("Unexpected error looking for Pub/Sub topic", { - original: err, - }); - } - await pubsub.createTopic({ - name: apiFunction.eventTrigger.pubsubTopic, - }); - } - } - op = await gcfV2.createFunction(apiFunction); - } - const cloudFunction = await pollOperation({ - ...pollerOptionsByPlatform[fn.platform], - pollerName: `create-${fnName}`, - operationResourceName: op.name, - onPoll, - }); - if (!backend.isEventTrigger(fn.trigger)) { - const invoker = fn.trigger.invoker || ["public"]; - if (invoker[0] !== "private") { - try { - if (fn.platform === "gcfv1") { - await gcf.setInvokerCreate(params.projectId, fnName, invoker); - } else { - const serviceName = (cloudFunction as gcfV2.CloudFunction).serviceConfig.service!; - cloudrun.setInvokerCreate(params.projectId, serviceName, invoker); - } - } catch (err) { - params.errorHandler.record("error", fnName, "set invoker", err.message); - } - } - } - if (fn.platform !== "gcfv1") { - // GCFv2 has a default concurrency of 1, but CF3 has a default concurrency of 80. - await setConcurrency( - (cloudFunction as gcfV2.CloudFunction).serviceConfig.service!, - fn.concurrency || 80 - ); - } - }; - return { - run, - data: fn, - operationType: "create", - }; -} - -export function updateFunctionTask( - params: TaskParams, - fn: backend.FunctionSpec, - sourceToken?: string, - onPoll?: (op: OperationResult) => void -): DeploymentTask<backend.FunctionSpec> { - const fnName = backend.functionName(fn); - const run = async () => { - utils.logBullet( - clc.bold.cyan("functions: ") + - "updating " + - getHumanFriendlyRuntimeName(fn.runtime) + - " function " + - clc.bold(helper.getFunctionLabel(fn)) + - "..." - ); - - let opName; - if (fn.platform == "gcfv1") { - const apiFunction = gcf.functionFromSpec(fn, params.sourceUrl!); - if (sourceToken) { - apiFunction.sourceToken = sourceToken; - } - opName = (await gcf.updateFunction(apiFunction)).name; - } else { - const apiFunction = gcfV2.functionFromSpec(fn, params.storage![fn.region]); - // N.B. As of GCFv2 private preview the API chokes on any update call that - // includes the pub/sub topic even if that topic is unchanged. - // We know that the user hasn't changed the topic between deploys because - // of checkForInvalidChangeOfTrigger().
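// [Editor's note] The get-then-create dance a few lines above (look up the
// Pub/Sub topic, create it only on a 404) restated as a hedged standalone
// sketch. The function parameter names and the err.status shape are assumptions
// taken from the surrounding deleted code, not a firebase-tools API.
async function ensureTopicExists(
  name: string,
  getTopic: (name: string) => Promise<unknown>,
  createTopic: (req: { name: string }) => Promise<unknown>
): Promise<void> {
  try {
    await getTopic(name); // topic already exists: nothing to do
  } catch (err: any) {
    if (err.status !== 404) {
      throw err; // anything but "not found" is a real failure
    }
    await createTopic({ name });
  }
}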
- if (apiFunction.eventTrigger?.pubsubTopic) { - delete apiFunction.eventTrigger.pubsubTopic; - } - opName = (await gcfV2.updateFunction(apiFunction)).name; - } - const pollerOptions: OperationPollerOptions = { - ...pollerOptionsByPlatform[fn.platform], - pollerName: `update-${fnName}`, - operationResourceName: opName, - onPoll, - }; - const cloudFunction = await pollOperation(pollerOptions); - if (!backend.isEventTrigger(fn.trigger) && fn.trigger.invoker) { - try { - if (fn.platform === "gcfv1") { - await gcf.setInvokerUpdate(params.projectId, fnName, fn.trigger.invoker); - } else { - const serviceName = (cloudFunction as gcfV2.CloudFunction).serviceConfig.service!; - cloudrun.setInvokerUpdate(params.projectId, serviceName, fn.trigger.invoker); - } - } catch (err) { - params.errorHandler.record("error", fnName, "set invoker", err.message); - } - } - - if ("concurrency" in fn) { - if (fn.platform === "gcfv1") { - throw new FirebaseError("Precondition failed: GCFv1 does not support concurrency"); - } else { - await setConcurrency( - (cloudFunction as gcfV2.CloudFunction).serviceConfig.service!, - fn.concurrency || 80 - ); - } - } - }; - return { - run, - data: fn, - operationType: "update", - }; -} - -export function deleteFunctionTask( - params: TaskParams, - fn: backend.FunctionSpec -): DeploymentTask<backend.FunctionSpec> { - const fnName = backend.functionName(fn); - const run = async () => { - utils.logBullet( - clc.bold.cyan("functions: ") + - "deleting function " + - clc.bold(helper.getFunctionLabel(fnName)) + - "..." - ); - let res: { name: string }; - if (fn.platform == "gcfv1") { - res = await gcf.deleteFunction(fnName); - } else { - res = await gcfV2.deleteFunction(fnName); - } - const pollerOptions: OperationPollerOptions = { - ...pollerOptionsByPlatform[fn.platform], - pollerName: `delete-${fnName}`, - operationResourceName: res.name, - }; - await pollOperation(pollerOptions); - }; - return { - run, - data: fn, - operationType: "delete", - }; -} - -async function setConcurrency(name: string, concurrency: number) { - const err: any = null; - while (true) { - const service = await cloudrun.getService(name); - - delete service.status; - delete (service.spec.template.metadata as any).name; - service.spec.template.spec.containerConcurrency = concurrency; - - try { - await cloudrun.replaceService(name, service); - return; - } catch (err) { - // We might get a 409 if resourceVersion does not match - if (err.status !== 409) { - throw new FirebaseError("Unexpected error while trying to set concurrency", { - original: err, - }); - } - } - } -} - -export function functionsDeploymentHandler( - timer: DeploymentTimer, - errorHandler: ErrorHandler -): (task: DeploymentTask<backend.FunctionSpec>) => Promise<any> { - return async (task: DeploymentTask<backend.FunctionSpec>) => { - let result; - const fnName = backend.functionName(task.data); - try { - timer.startTimer(fnName, task.operationType); - result = await task.run(); - helper.printSuccess(task.data, task.operationType); - const duration = timer.endTimer(fnName); - track("function_deploy_success", backend.triggerTag(task.data), duration); - } catch (err) { - if (err.original?.context?.response?.statusCode === 429) { - // Throw quota errors so that throttler retries them.
- throw err; - } - errorHandler.record("error", fnName, task.operationType, err.original?.message || ""); - const duration = timer.endTimer(fnName); - track("function_deploy_failure", backend.triggerTag(task.data), duration); - } - return result; - }; -} - -/** - * Adds tasks to execute all function creates and updates for a region to the provided queue. - */ -export async function runRegionalFunctionDeployment( - params: TaskParams, - region: string, - regionalDeployment: RegionalFunctionChanges, - queue: Queue<DeploymentTask<backend.FunctionSpec>, void> -): Promise<void> { - let resolveToken: (token: string | undefined) => void; - const getRealToken = new Promise<string | undefined>((resolve) => (resolveToken = resolve)); - let firstToken = true; - const getToken = (): Promise<string | undefined> => { - // The first time we get a token, it must be undefined. - // After that we'll get it from the operation promise. - if (firstToken) { - firstToken = false; - return Promise.resolve(undefined); - } - return getRealToken; - }; - - // On operation poll (for a V1 function) we may get a source token. If we get a source token or if - // GCF isn't returning one for some reason, resolve getRealToken to unblock deploys that are waiting - // for the source token. - // This function should not be run with a GCF version that doesn't support sourceTokens or else we will - // call resolveToken(undefined) - const onPollFn = (op: any) => { - if (op.metadata?.sourceToken || op.done) { - logger.debug(`Got sourceToken ${op.metadata.sourceToken} for region ${region}`); - resolveToken(op.metadata?.sourceToken); - } - }; - - const deploy = async (functionSpec: backend.FunctionSpec, createTask: Function) => { - functionSpec.labels = { - ...(functionSpec.labels || {}), - ...deploymentTool.labels(), - }; - let task: DeploymentTask<backend.FunctionSpec>; - // GCF v2 doesn't support tokens yet. If we were to pass onPoll to a GCFv2 function, then - // it would complete deployment and resolve the getRealToken promies as undefined.
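// [Editor's note] A hedged sketch of the source-token hand-off implemented
// above: the first deploy in a region proceeds with no token, and every later
// deploy waits on a promise that the first operation's poll callback resolves.
// makeSourceTokenSource is an illustrative name, not the deleted code's API.
function makeSourceTokenSource(): {
  getToken: () => Promise<string | undefined>;
  resolveToken: (token: string | undefined) => void;
} {
  let firstToken = true;
  let resolveToken!: (token: string | undefined) => void;
  const realToken = new Promise<string | undefined>((resolve) => (resolveToken = resolve));
  return {
    getToken: () => {
      if (firstToken) {
        firstToken = false; // the first caller kicks off the build that mints the token
        return Promise.resolve(undefined);
      }
      return realToken; // everyone else waits for the first poll to resolve it
    },
    resolveToken,
  };
}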
- if (functionSpec.platform == "gcfv2") { - task = createTask( - params, - functionSpec, - /* sourceToken= */ undefined, - /* onPoll= */ () => undefined - ); - } else { - const sourceToken = await getToken(); - task = createTask(params, functionSpec, sourceToken, onPollFn); - } - return queue.run(task); - }; - - const deploys: Promise<void>[] = []; - deploys.push(...regionalDeployment.functionsToCreate.map((fn) => deploy(fn, createFunctionTask))); - deploys.push( - ...regionalDeployment.functionsToUpdate.map(async (update) => { - if (update.deleteAndRecreate) { - await queue.run(deleteFunctionTask(params, update.func)); - return deploy(update.func, createFunctionTask); - } else { - return deploy(update.func, updateFunctionTask); - } - }) - ); - - await Promise.all(deploys); - - const deletes = regionalDeployment.functionsToDelete.map(async (fn) => { - const task = deleteFunctionTask(params, fn); - await queue.run(task); - }); - await Promise.all(deletes); -} - -/** - * Cloud Scheduler Deployments Tasks and Handler - */ - -export function upsertScheduleTask( - params: TaskParams, - schedule: backend.ScheduleSpec, - appEngineLocation: string -): DeploymentTask<backend.ScheduleSpec> { - const run = async () => { - const job = cloudscheduler.jobFromSpec(schedule, appEngineLocation); - await cloudscheduler.createOrReplaceJob(job); - }; - return { - run, - data: schedule, - operationType: "upsert schedule", - }; -} - -export function deleteScheduleTask( - params: TaskParams, - schedule: backend.ScheduleSpec, - appEngineLocation: string -): DeploymentTask<backend.ScheduleSpec> { - const run = async () => { - const jobName = backend.scheduleName(schedule, appEngineLocation); - await cloudscheduler.deleteJob(jobName); - }; - return { - run, - data: schedule, - operationType: "delete schedule", - }; -} - -export function deleteTopicTask( - params: TaskParams, - topic: backend.PubSubSpec -): DeploymentTask<backend.PubSubSpec> { - const run = async () => { - const topicName = backend.topicName(topic); - await pubsub.deleteTopic(topicName); - }; - return { - run, - data: topic, - operationType: "delete topic", - }; -} - -export const schedulerDeploymentHandler = (errorHandler: ErrorHandler) => async ( - task: DeploymentTask<backend.ScheduleSpec | backend.PubSubSpec> -): Promise<any> => { - try { - const result = await task.run(); - helper.printSuccess(task.data.targetService, task.operationType); - return result; - } catch (err) { - if (err.status === 429) { - // Throw quota errors so that throttler retries them. - throw err; - } else if (err.status !== 404) { - // Ignore 404 errors from scheduler calls since they may be deleted out of band.
- errorHandler.record( - "error", - backend.functionName(task.data.targetService), - task.operationType, - err.message || "" - ); - } - } -}; diff --git a/src/deploy/functions/triggerRegionHelper.ts b/src/deploy/functions/triggerRegionHelper.ts index 4ad05f87a8a..04af642d7b9 100644 --- a/src/deploy/functions/triggerRegionHelper.ts +++ b/src/deploy/functions/triggerRegionHelper.ts @@ -1,45 +1,51 @@ import * as backend from "./backend"; import * as storage from "../../gcp/storage"; import { FirebaseError } from "../../error"; +import { logger } from "../../logger"; + +const noop = (): Promise<void> => Promise.resolve(); + +const LOOKUP_BY_EVENT_TYPE: Record<string, (ep: backend.Endpoint & backend.EventTriggered) => Promise<void>> = { + "google.cloud.pubsub.topic.v1.messagePublished": noop, + "google.cloud.storage.object.v1.finalized": lookupBucketRegion, + "google.cloud.storage.object.v1.archived": lookupBucketRegion, + "google.cloud.storage.object.v1.deleted": lookupBucketRegion, + "google.cloud.storage.object.v1.metadataUpdated": lookupBucketRegion, +}; /** * Sets the trigger region to what we currently have deployed * @param want the list of function specs we want to deploy * @param have the list of function specs we have deployed */ -export async function setTriggerRegion( - want: backend.FunctionSpec[], - have: backend.FunctionSpec[] -): Promise<void> { - for (const wantFn of want) { - if (wantFn.platform === "gcfv1" || !backend.isEventTrigger(wantFn.trigger)) { +export async function lookupMissingTriggerRegions(want: backend.Backend): Promise<void> { + const regionLookups: Array<Promise<void>> = []; + for (const ep of backend.allEndpoints(want)) { + if (ep.platform === "gcfv1" || !backend.isEventTriggered(ep) || ep.eventTrigger.region) { continue; } - const match = have.find(backend.sameFunctionName(wantFn))?.trigger as backend.EventTrigger; - if (match?.region) { - wantFn.trigger.region = match.region; - } else { - await setTriggerRegionFromTriggerType(wantFn.trigger); + const lookup = LOOKUP_BY_EVENT_TYPE[ep.eventTrigger.eventType]; + if (!lookup) { + logger.debug( + "Don't know how to look up trigger region for event type", + ep.eventTrigger.eventType, + ". Deploy will fail unless this event type is global" + ); + continue; } + regionLookups.push(lookup(ep)); } + await Promise.all(regionLookups); } -/** - * Sets the event trigger region by calling finding the region of the underlying resource - * @param trigger the event trigger with a missing region - * - * @throws {@link FirebaseError} when the region is not found - */ -async function setTriggerRegionFromTriggerType(trigger: backend.EventTrigger): Promise<void> { - if (trigger.eventFilters.bucket) { - // GCS function - try { - trigger.region = ( - await storage.getBucket(trigger.eventFilters.bucket) - ).location.toLowerCase(); - } catch (err) { - throw new FirebaseError("Can't find the storage bucket region", { original: err }); - } +/** Sets a GCS event trigger's region to the region of its bucket. */ +async function lookupBucketRegion(endpoint: backend.EventTriggered): Promise<void> { + try { + const bucket: { location: string } = await storage.getBucket( + endpoint.eventTrigger.eventFilters.bucket!
+ ); + endpoint.eventTrigger.region = bucket.location.toLowerCase(); + } catch (err) { + throw new FirebaseError("Can't find the storage bucket region", { original: err }); } - // TODO: add more trigger types } diff --git a/src/deploy/functions/validate.ts b/src/deploy/functions/validate.ts index ee4c8652baf..659a44df0a0 100644 --- a/src/deploy/functions/validate.ts +++ b/src/deploy/functions/validate.ts @@ -54,44 +54,3 @@ export function functionIdsAreValid(functions: { id: string; platform: string }[ throw new FirebaseError(msg); } } - -export function checkForInvalidChangeOfTrigger( - fn: backend.FunctionSpec, - exFn: backend.FunctionSpec -) { - const wantEventTrigger = backend.isEventTrigger(fn.trigger); - const haveEventTrigger = backend.isEventTrigger(exFn.trigger); - if (!wantEventTrigger && haveEventTrigger) { - throw new FirebaseError( - `[${getFunctionLabel( - fn - )}] Changing from a background triggered function to an HTTPS function is not allowed. Please delete your function and create a new one instead.` - ); - } - if (wantEventTrigger && !haveEventTrigger) { - throw new FirebaseError( - `[${getFunctionLabel( - fn - )}] Changing from an HTTPS function to an background triggered function is not allowed. Please delete your function and create a new one instead.` - ); - } - if (fn.platform == "gcfv2" && exFn.platform == "gcfv1") { - throw new FirebaseError( - `[${getFunctionLabel( - fn - )}] Upgrading from GCFv1 to GCFv2 is not yet supported. Please delete your old function or wait for this feature to be ready.` - ); - } - if (fn.platform == "gcfv1" && exFn.platform == "gcfv2") { - throw new FirebaseError( - `[${getFunctionLabel(fn)}] Functions cannot be downgraded from GCFv2 to GCFv1` - ); - } - if (exFn.labels?.["deployment-scheduled"] && !fn.labels?.["deployment-scheduled"]) { - throw new FirebaseError( - `[${getFunctionLabel( - fn - )}] Scheduled functions cannot be changed to event handler or HTTP functions` - ); - } -} diff --git a/src/functions/listFunctions.ts b/src/functions/listFunctions.ts deleted file mode 100644 index 1599115398c..00000000000 --- a/src/functions/listFunctions.ts +++ /dev/null @@ -1,15 +0,0 @@ -import * as backend from "../deploy/functions/backend"; -import { Context } from "../deploy/functions/args"; - -/** - * Lists all functions of the Firebase project in order - * @param context the Context of the project - * @returns a mapping that contains an array of {@link FunctionSpec} in order under the 'functions' key - */ -export async function listFunctions( - context: Context -): Promise<{ functions: backend.FunctionSpec[] }> { - const functionSpecs = (await backend.existingBackend(context, true)).cloudFunctions; - functionSpecs.sort(backend.compareFunctions); - return { functions: functionSpecs }; -} diff --git a/src/functionsConfig.ts b/src/functionsConfig.ts index bcd7dd840dc..e33109e46c5 100644 --- a/src/functionsConfig.ts +++ b/src/functionsConfig.ts @@ -58,6 +58,7 @@ export function idsToVarName(projectId: string, configId: string, varId: string) return _.join(["projects", projectId, "configs", configId, "variables", varId], "/"); } +// TODO(inlined): Yank and inline into Fabricator export function getAppEngineLocation(config: any): string { let appEngineLocation = config.locationId; if (appEngineLocation && appEngineLocation.match(/[^\d]$/)) { diff --git a/src/functionsDelete.ts b/src/functionsDelete.ts deleted file mode 100644 index c217e5ea67e..00000000000 --- a/src/functionsDelete.ts +++ /dev/null @@ -1,68 +0,0 @@ -import * as helper from 
"./deploy/functions/functionsDeployHelper"; -import { Queue } from "./throttler/queue"; -import * as tasks from "./deploy/functions/tasks"; -import { DeploymentTimer } from "./deploy/functions/deploymentTimer"; -import { ErrorHandler } from "./deploy/functions/errorHandler"; -import * as backend from "./deploy/functions/backend"; - -/** delete functions, schedules, and topics. */ -export async function deleteFunctions( - functionsToDelete: backend.FunctionSpec[], - schedulesToDelete: backend.ScheduleSpec[], - topicsToDelete: backend.PubSubSpec[], - appEngineLocation: string -): Promise { - const timer = new DeploymentTimer(); - const errorHandler = new ErrorHandler(); - const cloudFunctionsQueue = new Queue, void>({ - handler: tasks.functionsDeploymentHandler(timer, errorHandler), - retries: 30, - backoff: 10000, - concurrency: 40, - maxBackoff: 40000, - }); - const schedulerQueue = new Queue, void>({ - handler: tasks.schedulerDeploymentHandler(errorHandler), - }); - const topicQueue = new Queue, void>({ - handler: tasks.schedulerDeploymentHandler(errorHandler), - }); - - functionsToDelete.forEach((fn) => { - const taskParams = { - projectId: fn.project, - errorHandler, - }; - const deleteFunctionTask = tasks.deleteFunctionTask(taskParams, fn); - void cloudFunctionsQueue.run(deleteFunctionTask); - }); - schedulesToDelete.forEach((schedule) => { - const taskParams = { - projectId: schedule.project, - errorHandler, - }; - const deleteSchedulerTask = tasks.deleteScheduleTask(taskParams, schedule, appEngineLocation); - void schedulerQueue.run(deleteSchedulerTask); - }); - topicsToDelete.forEach((topic) => { - const taskParams = { - projectId: topic.project, - errorHandler, - }; - const deleteTopicTask = tasks.deleteTopicTask(taskParams, topic); - void topicQueue.run(deleteTopicTask); - }); - const queuePromises = [cloudFunctionsQueue.wait(), schedulerQueue.wait(), topicQueue.wait()]; - - cloudFunctionsQueue.close(); - schedulerQueue.close(); - topicQueue.close(); - cloudFunctionsQueue.process(); - schedulerQueue.process(); - topicQueue.process(); - - await Promise.all(queuePromises); - - helper.logAndTrackDeployStats(cloudFunctionsQueue, errorHandler); - errorHandler.printErrors(); -} diff --git a/src/gcp/cloudfunctions.ts b/src/gcp/cloudfunctions.ts index b024f48a6f4..3b0921db2ae 100644 --- a/src/gcp/cloudfunctions.ts +++ b/src/gcp/cloudfunctions.ts @@ -459,123 +459,6 @@ export async function listAllFunctions(projectId: string): Promise - * and code may have to call this method explicitly. 
- */ -export function specFromFunction(gcfFunction: CloudFunction): backend.FunctionSpec { - const [, project, , region, , id] = gcfFunction.name.split("/"); - let trigger: backend.EventTrigger | backend.HttpsTrigger; - let uri: string | undefined; - if (gcfFunction.httpsTrigger) { - trigger = {}; - uri = gcfFunction.httpsTrigger.url; - } else { - trigger = { - eventType: gcfFunction.eventTrigger!.eventType, - eventFilters: { - resource: gcfFunction.eventTrigger!.resource, - }, - retry: !!gcfFunction.eventTrigger!.failurePolicy?.retry, - }; - } - - if (!runtimes.isValidRuntime(gcfFunction.runtime)) { - logger.debug("GCFv1 function has a deprecated runtime:", JSON.stringify(gcfFunction, null, 2)); - } - - const cloudFunction: backend.FunctionSpec = { - platform: "gcfv1", - id, - project, - region, - trigger, - entryPoint: gcfFunction.entryPoint, - runtime: gcfFunction.runtime, - }; - if (uri) { - cloudFunction.uri = uri; - } - proto.copyIfPresent( - cloudFunction, - gcfFunction, - "serviceAccountEmail", - "availableMemoryMb", - "timeout", - "minInstances", - "maxInstances", - "vpcConnector", - "vpcConnectorEgressSettings", - "ingressSettings", - "labels", - "environmentVariables", - "sourceUploadUrl" - ); - - return cloudFunction; -} - -/** - * Convert the API agnostic FunctionSpec struct to a CloudFunction proto for the v1 API. - */ -export function functionFromSpec( - cloudFunction: backend.FunctionSpec, - sourceUploadUrl: string -): Omit<CloudFunction, OutputOnlyFields> { - if (cloudFunction.platform != "gcfv1") { - throw new FirebaseError( - "Trying to create a v1 CloudFunction with v2 API. This should never happen" - ); - } - - if (!runtimes.isValidRuntime(cloudFunction.runtime)) { - throw new FirebaseError( - "Failed internal assertion. Trying to deploy a new function with a deprecated runtime." + - " This should never happen" - ); - } - const gcfFunction: Omit<CloudFunction, OutputOnlyFields> = { - name: backend.functionName(cloudFunction), - sourceUploadUrl: sourceUploadUrl, - entryPoint: cloudFunction.entryPoint, - runtime: cloudFunction.runtime, - }; - - if (backend.isEventTrigger(cloudFunction.trigger)) { - gcfFunction.eventTrigger = { - eventType: cloudFunction.trigger.eventType, - resource: cloudFunction.trigger.eventFilters.resource, - // Service is unnecessary and deprecated - }; - - // For field masks to pick up a deleted failure policy we must inject an undefined - // when retry is false - gcfFunction.eventTrigger.failurePolicy = cloudFunction.trigger.retry - ? { retry: {} } - : undefined; - } else { - gcfFunction.httpsTrigger = {}; - } - - proto.copyIfPresent( - gcfFunction, - cloudFunction, - "serviceAccountEmail", - "timeout", - "availableMemoryMb", - "minInstances", - "maxInstances", - "vpcConnector", - "vpcConnectorEgressSettings", - "ingressSettings", - "labels", - "environmentVariables" - ); - - return gcfFunction; -} - /** * Converts a Cloud Function from the v1 API into a version-agnostic FunctionSpec struct.
* This API exists outside the GCF namespace because GCF returns an Operation<CloudFunction> diff --git a/src/gcp/cloudfunctionsv2.ts b/src/gcp/cloudfunctionsv2.ts index f3ccd4c522d..fe8d1af2bc6 100644 --- a/src/gcp/cloudfunctionsv2.ts +++ b/src/gcp/cloudfunctionsv2.ts @@ -8,7 +8,6 @@ import * as backend from "../deploy/functions/backend"; import * as runtimes from "../deploy/functions/runtimes"; import * as proto from "./proto"; import * as utils from "../utils"; -import * as storage from "./storage"; export const API_VERSION = "v2alpha"; @@ -144,7 +143,9 @@ export interface OperationMetadata { export interface Operation { name: string; - metadata: OperationMetadata; + // Note: this field is always present, but not used in prod and is a PITA + // to add in tests. + metadata?: OperationMetadata; done: boolean; error?: { code: number; message: string; details: unknown }; response?: CloudFunction; @@ -323,160 +324,6 @@ export async function deleteFunction(cloudFunction: string): Promise<Operation> } } -export function functionFromSpec(cloudFunction: backend.FunctionSpec, source: StorageSource) { - if (cloudFunction.platform != "gcfv2") { - throw new FirebaseError( - "Trying to create a v2 CloudFunction with v1 API. This should never happen" - ); - } - - if (!runtimes.isValidRuntime(cloudFunction.runtime)) { - throw new FirebaseError( - "Failed internal assertion. Trying to deploy a new function with a deprecated runtime." + - " This should never happen" - ); - } - - const gcfFunction: Omit<CloudFunction, OutputOnlyFields> = { - name: backend.functionName(cloudFunction), - buildConfig: { - runtime: cloudFunction.runtime, - entryPoint: cloudFunction.entryPoint, - source: { - storageSource: source, - }, - // We don't use build environment variables, - environmentVariables: {}, - }, - serviceConfig: {}, - }; - - proto.copyIfPresent( - gcfFunction.serviceConfig, - cloudFunction, - "availableMemoryMb", - "environmentVariables", - "vpcConnector", - "vpcConnectorEgressSettings", - "serviceAccountEmail", - "ingressSettings" - ); - proto.renameIfPresent( - gcfFunction.serviceConfig, - cloudFunction, - "timeoutSeconds", - "timeout", - proto.secondsFromDuration - ); - proto.renameIfPresent( - gcfFunction.serviceConfig, - cloudFunction, - "minInstanceCount", - "minInstances" - ); - proto.renameIfPresent( - gcfFunction.serviceConfig, - cloudFunction, - "maxInstanceCount", - "maxInstances" - ); - - if (backend.isEventTrigger(cloudFunction.trigger)) { - gcfFunction.eventTrigger = { - eventType: cloudFunction.trigger.eventType, - }; - if (cloudFunction.trigger.region) { - gcfFunction.eventTrigger.triggerRegion = cloudFunction.trigger.region; - } - if (gcfFunction.eventTrigger.eventType === PUBSUB_PUBLISH_EVENT) { - gcfFunction.eventTrigger.pubsubTopic = cloudFunction.trigger.eventFilters.resource; - } else { - gcfFunction.eventTrigger.eventFilters = []; - for (const [attribute, value] of Object.entries(cloudFunction.trigger.eventFilters)) { - gcfFunction.eventTrigger.eventFilters.push({ attribute, value }); - } - } - - if (cloudFunction.trigger.retry) { - logger.warn("Cannot set a retry policy on Cloud Function", cloudFunction.id); - } - } - proto.copyIfPresent(gcfFunction, cloudFunction, "labels"); - - return gcfFunction; -} - -export function specFromFunction(gcfFunction: CloudFunction): backend.FunctionSpec { - const [, project, , region, , id] = gcfFunction.name.split("/"); - let trigger: backend.EventTrigger | backend.HttpsTrigger; - if (gcfFunction.eventTrigger) { - trigger = { - eventType: gcfFunction.eventTrigger!.eventType, - eventFilters: {}, - retry:
false, - }; - if (gcfFunction.eventTrigger!.triggerRegion) { - trigger.region = gcfFunction.eventTrigger.triggerRegion; - } - if (gcfFunction.eventTrigger.pubsubTopic) { - trigger.eventFilters.resource = gcfFunction.eventTrigger.pubsubTopic; - } else { - for (const { attribute, value } of gcfFunction.eventTrigger.eventFilters || []) { - trigger.eventFilters[attribute] = value; - } - } - } else { - trigger = {}; - } - - if (!runtimes.isValidRuntime(gcfFunction.buildConfig.runtime)) { - logger.debug("GCFv2 function has a deprecated runtime:", JSON.stringify(gcfFunction, null, 2)); - } - - const cloudFunction: backend.FunctionSpec = { - platform: "gcfv2", - id, - project, - region, - trigger, - entryPoint: gcfFunction.buildConfig.entryPoint, - runtime: gcfFunction.buildConfig.runtime, - uri: gcfFunction.serviceConfig.uri, - }; - proto.copyIfPresent( - cloudFunction, - gcfFunction.serviceConfig, - "serviceAccountEmail", - "availableMemoryMb", - "vpcConnector", - "vpcConnectorEgressSettings", - "ingressSettings", - "environmentVariables" - ); - proto.renameIfPresent( - cloudFunction, - gcfFunction.serviceConfig, - "timeout", - "timeoutSeconds", - proto.durationFromSeconds - ); - proto.renameIfPresent( - cloudFunction, - gcfFunction.serviceConfig, - "minInstances", - "minInstanceCount" - ); - proto.renameIfPresent( - cloudFunction, - gcfFunction.serviceConfig, - "maxInstances", - "maxInstanceCount" - ); - proto.copyIfPresent(cloudFunction, gcfFunction, "labels"); - - return cloudFunction; -} - export function functionFromEndpoint(endpoint: backend.Endpoint, source: StorageSource) { if (endpoint.platform != "gcfv2") { throw new FirebaseError( @@ -572,6 +419,12 @@ export function endpointFromFunction(gcfFunction: CloudFunction): backend.Endpoi trigger.eventTrigger.eventFilters[attribute] = value; } } + proto.renameIfPresent( + trigger.eventTrigger, + gcfFunction.eventTrigger, + "region", + "triggerRegion" + ); } else { trigger = { httpsTrigger: {} }; } diff --git a/src/gcp/cloudscheduler.ts b/src/gcp/cloudscheduler.ts index 607c4695f05..729c2c4aaa6 100644 --- a/src/gcp/cloudscheduler.ts +++ b/src/gcp/cloudscheduler.ts @@ -5,6 +5,7 @@ import { logger } from "../logger"; import * as api from "../api"; import * as backend from "../deploy/functions/backend"; import * as proto from "./proto"; +import { assertExhaustive } from "../functional"; const VERSION = "v1beta1"; const DEFAULT_TIME_ZONE = "America/Los_Angeles"; @@ -194,21 +195,35 @@ function isIdentical(job: Job, otherJob: Job): boolean { ); } -/** Converts a version agnostic ScheduleSpec to a CloudScheduler v1 Job. 
*/ -export function jobFromSpec(schedule: backend.ScheduleSpec, appEngineLocation: string): Job { - const job: Job = { - name: backend.scheduleName(schedule, appEngineLocation), - schedule: schedule.schedule!, - }; - proto.copyIfPresent(job, schedule, "timeZone", "retryConfig"); - if (schedule.transport === "https") { - throw new FirebaseError("HTTPS transport for scheduled functions is not yet supported"); +/** Converts an Endpoint to a CloudScheduler v1 job */ +export function jobFromEndpoint( + endpoint: backend.Endpoint & backend.ScheduleTriggered, + appEngineLocation: string +): Job { + const job: Partial<Job> = {}; + if (endpoint.platform === "gcfv1") { + const id = backend.scheduleIdForFunction(endpoint); + const region = appEngineLocation; + job.name = `projects/${endpoint.project}/locations/${region}/jobs/${id}`; + job.pubsubTarget = { + topicName: `projects/${endpoint.project}/topics/${id}`, + attributes: { + scheduled: "true", + }, + }; + } else if (endpoint.platform === "gcfv2") { + // NB: We should figure out whether there's a good service account we can use + // to get OIDC tokens from while invoking the function. Hopefully either + // CloudScheduler has an account we can use or we can use the default compute + // account credentials (it's a project editor, so it should have permissions + // to invoke a function and editor deployers should have permission to actAs + // it) + throw new FirebaseError("Do not know how to create a scheduled GCFv2 function"); + } else { + assertExhaustive(endpoint.platform); } - job.pubsubTarget = { - topicName: backend.topicName(schedule), - attributes: { - scheduled: "true", - }, - }; - return job; + proto.copyIfPresent(job, endpoint.scheduleTrigger, "schedule", "retryConfig", "timeZone"); + + // TypeScript compiler isn't noticing that name is defined in all code paths. + return job as Job; } diff --git a/src/gcp/pubsub.ts b/src/gcp/pubsub.ts index 86a6b5e7e3d..eb6d2617739 100644 --- a/src/gcp/pubsub.ts +++ b/src/gcp/pubsub.ts @@ -53,13 +53,6 @@ export async function deleteTopic(name: string): Promise<void> { await client.delete(name); } -export function topicFromSpec(spec: backend.PubSubSpec): Topic { - return { - name: backend.topicName(spec), - labels: { ...spec.labels }, - }; -} - // NOTE: We currently don't need or have specFromTopic. // backend.ExistingBackend infers actual topics by the fact that it sees a function // with a scheduled annotation.
This may not be good enough when we're diff --git a/src/test/deploy/functions/backend.spec.ts b/src/test/deploy/functions/backend.spec.ts index 9e2d1424b92..0cd049fc4bd 100644 --- a/src/test/deploy/functions/backend.spec.ts +++ b/src/test/deploy/functions/backend.spec.ts @@ -16,10 +16,9 @@ describe("Backend", () => { project: "project", }; - const FUNCTION_SPEC: backend.FunctionSpec = { + const ENDPOINT: Omit<backend.Endpoint, "httpsTrigger"> = { platform: "gcfv1", ...FUNCTION_NAME, - trigger: {}, entryPoint: "function", runtime: "nodejs16", }; @@ -67,33 +66,7 @@ describe("Backend", () => { status: "ACTIVE", }; - const SCHEDULE: backend.ScheduleSpec = { - id: backend.scheduleIdForFunction(FUNCTION_SPEC), - project: "project", - schedule: "every 1 minutes", - transport: "pubsub", - targetService: FUNCTION_NAME, - }; - - const TOPIC: backend.PubSubSpec = { - id: backend.scheduleIdForFunction(FUNCTION_SPEC), - project: "project", - labels: { deployment: "firebase-schedule" }, - targetService: FUNCTION_NAME, - }; - describe("Helper functions", () => { - it("isEventTrigger", () => { - const httpsTrigger: backend.HttpsTrigger = {}; - expect(backend.isEventTrigger(httpsTrigger)).to.be.false; - const eventTrigger: backend.EventTrigger = { - eventType: "google.pubsub.topic.publish", - eventFilters: {}, - retry: false, - }; - expect(backend.isEventTrigger(eventTrigger)).to.be.true; - }); - it("isEmptyBackend", () => { expect(backend.isEmptyBackend(backend.empty())).to.be.true; expect( @@ -102,150 +75,13 @@ describe("Backend", () => { requiredAPIs: { foo: "foo.googleapis.com" }, }) ).to.be.false; - expect( - backend.isEmptyBackend({ - ...backend.empty(), - cloudFunctions: [FUNCTION_SPEC], - }) - ).to.be.false; - expect( - backend.isEmptyBackend({ - ...backend.empty(), - schedules: [SCHEDULE], - }) - ).to.be.false; - expect( - backend.isEmptyBackend({ - ...backend.empty(), - topics: [TOPIC], - }) - ).to.be.false; + expect(backend.isEmptyBackend(backend.of({ ...ENDPOINT, httpsTrigger: {} }))); }); it("names", () => { - expect(backend.functionName(FUNCTION_SPEC)).to.equal( + expect(backend.functionName(ENDPOINT)).to.equal( "projects/project/locations/region/functions/id" ); - expect(backend.scheduleName(SCHEDULE, "appEngineRegion")).to.equal( - "projects/project/locations/appEngineRegion/jobs/firebase-schedule-id-region" - ); - expect(backend.topicName(TOPIC)).to.equal( - "projects/project/topics/firebase-schedule-id-region" - ); - }); - - it("sameFunctionName", () => { - const matcher = backend.sameFunctionName(FUNCTION_SPEC); - expect(matcher(FUNCTION_SPEC)).to.be.true; - expect(matcher({ ...FUNCTION_SPEC, id: "other" })).to.be.false; - expect(matcher({ ...FUNCTION_SPEC, region: "other" })).to.be.false; - expect(matcher({ ...FUNCTION_SPEC, project: "other" })).to.be.false; - }); - }); - - describe("triggerTag", () => { - it("detects v1.https", () => { - expect( - backend.triggerTag({ - ...FUNCTION_NAME, - platform: "gcfv1", - entryPoint: "id", - runtime: "node14", - trigger: {}, - }) - ).to.equal("v1.https"); - }); - - it("detects v2.https", () => { - expect( - backend.triggerTag({ - ...FUNCTION_NAME, - platform: "gcfv2", - entryPoint: "id", - runtime: "node14", - trigger: {}, - }) - ).to.equal("v2.https"); - }); - - it("detects v1.callable", () => { - expect( - backend.triggerTag({ - ...FUNCTION_NAME, - platform: "gcfv1", - entryPoint: "id", - runtime: "node14", - trigger: {}, - labels: { - "deployment-callable": "true", - }, - }) - ).to.equal("v1.callable"); - });  - - it("detects v2.callable", () => { - expect(
backend.triggerTag({ - ...FUNCTION_NAME, - platform: "gcfv2", - entryPoint: "id", - runtime: "node14", - trigger: {}, - labels: { - "deployment-callable": "true", - }, - }) - ).to.equal("v2.callable"); - }); - - it("detects v1.scheduled", () => { - expect( - backend.triggerTag({ - ...FUNCTION_NAME, - platform: "gcfv1", - entryPoint: "id", - runtime: "node14", - trigger: { - eventType: "google.pubsub.topoic.publish", - eventFilters: {}, - retry: false, - }, - labels: { - "deployment-scheduled": "true", - }, - }) - ).to.equal("v1.scheduled"); - }); - - it("detects v2.scheduled", () => { - expect( - backend.triggerTag({ - ...FUNCTION_NAME, - platform: "gcfv2", - entryPoint: "id", - runtime: "node14", - trigger: {}, - labels: { - "deployment-scheduled": "true", - }, - }) - ).to.equal("v2.scheduled"); - }); - - it("detects others", () => { - expect( - backend.triggerTag({ - ...FUNCTION_NAME, - platform: "gcfv2", - entryPoint: "id", - runtime: "node14", - trigger: { - eventType: "google.pubsub.topic.publish", - eventFilters: {}, - retry: false, - }, - }) - ).to.equal("google.pubsub.topic.publish"); }); }); @@ -312,10 +148,7 @@ describe("Backend", () => { }); const have = await backend.existingBackend(newContext()); - expect(have).to.deep.equal({ - ...backend.empty(), - cloudFunctions: [FUNCTION_SPEC], - }); + expect(have).to.deep.equal(backend.of({ ...ENDPOINT, httpsTrigger: {} })); }); it("should throw an error if v2 list api throws an error", async () => { @@ -350,10 +183,42 @@ describe("Backend", () => { const have = await backend.existingBackend(newContext()); - expect(have).to.deep.equal({ - ...backend.empty(), - cloudFunctions: [FUNCTION_SPEC], + expect(have).to.deep.equal(backend.of({ ...ENDPOINT, httpsTrigger: {} })); + }); + + it("should throw an error if v2 list api throws an error", async () => { + previews.functionsv2 = true; + listAllFunctions.onFirstCall().resolves({ + functions: [], + unreachable: [], }); + listAllFunctionsV2.throws( + new FirebaseError("HTTP Error: 500, Internal Error", { status: 500 }) + ); + + await expect(backend.existingBackend(newContext())).to.be.rejectedWith( + "HTTP Error: 500, Internal Error" + ); + }); + + it("should read v1 functions only when user is not allowlisted for v2", async () => { + previews.functionsv2 = true; + listAllFunctions.onFirstCall().resolves({ + functions: [ + { + ...HAVE_CLOUD_FUNCTION, + httpsTrigger: {}, + }, + ], + unreachable: [], + }); + listAllFunctionsV2.throws( + new FirebaseError("HTTP Error: 404, Method not found", { status: 404 }) + ); + + const have = await backend.existingBackend(newContext()); + + expect(have).to.deep.equal(backend.of({ ...ENDPOINT, httpsTrigger: {} })); }); it("should read v2 functions when enabled", async () => { @@ -368,16 +233,14 @@ describe("Backend", () => { }); const have = await backend.existingBackend(newContext()); - expect(have).to.deep.equal({ - ...backend.empty(), - cloudFunctions: [ - { - ...FUNCTION_SPEC, - platform: "gcfv2", - uri: HAVE_CLOUD_FUNCTION_V2.serviceConfig.uri, - }, - ], - }); + expect(have).to.deep.equal( + backend.of({ + ...ENDPOINT, + platform: "gcfv2", + httpsTrigger: {}, + uri: HAVE_CLOUD_FUNCTION_V2.serviceConfig.uri, + }) + ); }); it("should deduce features of scheduled functions", async () => { @@ -387,7 +250,7 @@ describe("Backend", () => { ...HAVE_CLOUD_FUNCTION, eventTrigger: { eventType: "google.pubsub.topic.publish", - resource: backend.topicName(TOPIC), + resource: "projects/project/topics/topic", }, labels: { "deployment-scheduled": "true", @@ -397,38 +260,13 
@@ describe("Backend", () => { unreachable: [], }); const have = await backend.existingBackend(newContext()); - const functionSpec: backend.FunctionSpec = { - ...FUNCTION_SPEC, - trigger: { - eventType: "google.pubsub.topic.publish", - eventFilters: { - resource: backend.topicName(TOPIC), - }, - retry: false, - }, + const want = backend.of({ + ...ENDPOINT, + scheduleTrigger: {}, labels: { "deployment-scheduled": "true", }, - }; - const schedule: backend.ScheduleSpec = { - ...SCHEDULE, - targetService: FUNCTION_NAME, - }; - // We don't actually make an API call to cloud scheduler, - // so we don't have the real schedule. - delete schedule.schedule; - - const want = { - ...backend.empty(), - cloudFunctions: [functionSpec], - schedules: [schedule], - topics: [ - { - ...TOPIC, - targetService: FUNCTION_NAME, - }, - ], - }; + }); expect(have).to.deep.equal(want); }); @@ -509,10 +347,7 @@ describe("Backend", () => { functions: [], unreachable: ["region"], }); - const want = { - ...backend.empty(), - cloudFunctions: [FUNCTION_SPEC], - }; + const want = backend.of({ ...ENDPOINT, httpsTrigger: {} }); await expect(backend.checkAvailability(newContext(), want)).to.eventually.be.rejectedWith( FirebaseError, /The following Cloud Functions regions are currently unreachable:/ @@ -529,15 +364,11 @@ describe("Backend", () => { functions: [], unreachable: ["region"], }); - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...FUNCTION_SPEC, - platform: "gcfv2", - }, - ], - }; + const want: backend.Backend = backend.of({ + ...ENDPOINT, + platform: "gcfv2", + httpsTrigger: {}, + }); await expect(backend.checkAvailability(newContext(), want)).to.eventually.be.rejectedWith( FirebaseError, @@ -556,10 +387,7 @@ describe("Backend", () => { unreachable: ["us-central1"], }); - const want = { - ...backend.empty(), - cloudFunctions: [FUNCTION_SPEC], - }; + const want = backend.of({ ...ENDPOINT, httpsTrigger: {} }); await backend.checkAvailability(newContext(), want); expect(listAllFunctions).to.have.been.called; @@ -578,15 +406,7 @@ describe("Backend", () => { unreachable: [], }); - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...FUNCTION_SPEC, - platform: "gcfv2", - }, - ], - }; + const want: backend.Backend = backend.of({ ...ENDPOINT, httpsTrigger: {} }); await backend.checkAvailability(newContext(), want); expect(listAllFunctions).to.have.been.called; @@ -600,18 +420,18 @@ describe("Backend", () => { const fnMembers = { project: "project", runtime: "nodejs14", - trigger: {}, + httpsTrigger: {}, }; it("should compare different platforms", () => { - const left: backend.FunctionSpec = { + const left: backend.Endpoint = { id: "v1", region: "us-central1", platform: "gcfv1", entryPoint: "v1", ...fnMembers, }; - const right: backend.FunctionSpec = { + const right: backend.Endpoint = { id: "v2", region: "us-west1", platform: "gcfv2", @@ -624,14 +444,14 @@ describe("Backend", () => { }); it("should compare different regions, same platform", () => { - const left: backend.FunctionSpec = { + const left: backend.Endpoint = { id: "v1", region: "us-west1", platform: "gcfv1", entryPoint: "v1", ...fnMembers, }; - const right: backend.FunctionSpec = { + const right: backend.Endpoint = { id: "newV1", region: "us-central1", platform: "gcfv1", @@ -644,14 +464,14 @@ describe("Backend", () => { }); it("should compare different ids, same platform & region", () => { - const left: backend.FunctionSpec = { + const left: backend.Endpoint = { id: "v1", region: "us-central1", 
platform: "gcfv1", entryPoint: "v1", ...fnMembers, }; - const right: backend.FunctionSpec = { + const right: backend.Endpoint = { id: "newV1", region: "us-central1", platform: "gcfv1", @@ -664,14 +484,14 @@ describe("Backend", () => { }); it("should compare same ids", () => { - const left: backend.FunctionSpec = { + const left: backend.Endpoint = { id: "v1", region: "us-central1", platform: "gcfv1", entryPoint: "v1", ...fnMembers, }; - const right: backend.FunctionSpec = { + const right: backend.Endpoint = { id: "v1", region: "us-central1", platform: "gcfv1", @@ -682,4 +502,70 @@ describe("Backend", () => { expect(backend.compareFunctions(left, right)).to.eq(0); }); }); + + describe("comprehension helpers", () => { + const endpointUS: backend.Endpoint = { + id: "endpointUS", + project: "project", + region: "us-west1", + platform: "gcfv2", + runtime: "nodejs16", + entryPoint: "ep", + httpsTrigger: {}, + }; + + const endpointEU: backend.Endpoint = { + ...endpointUS, + id: "endpointEU", + region: "europe-west1", + }; + + const bkend: backend.Backend = { + ...backend.empty(), + }; + bkend.endpoints[endpointUS.region] = { [endpointUS.id]: endpointUS }; + bkend.endpoints[endpointEU.region] = { [endpointEU.id]: endpointEU }; + + it("allEndpoints", () => { + const have = backend.allEndpoints(bkend).sort(backend.compareFunctions); + const want = [endpointUS, endpointEU].sort(backend.compareFunctions); + expect(have).to.deep.equal(want); + }); + + it("matchingBackend", () => { + const have = backend.matchingBackend(bkend, (fn) => fn.id === "endpointUS"); + const want: backend.Backend = { + ...backend.empty(), + endpoints: { + [endpointUS.region]: { + [endpointUS.id]: endpointUS, + }, + }, + }; + expect(have).to.deep.equal(want); + }); + + it("someEndpoint", () => { + expect(backend.someEndpoint(bkend, (fn) => fn.id === "endpointUS")).to.be.true; + expect(backend.someEndpoint(bkend, (fn) => fn.id === "missing")).to.be.false; + }); + + it("regionalEndpoints", () => { + const have = backend.regionalEndpoints(bkend, endpointUS.region); + const want = [endpointUS]; + expect(have).to.deep.equal(want); + }); + + it("hasEndpoint", () => { + const smallBackend = backend.matchingBackend(bkend, (fn) => fn.id === "endpointUS"); + expect(backend.hasEndpoint(smallBackend)(endpointUS)).to.be.true; + expect(backend.hasEndpoint(smallBackend)(endpointEU)).to.be.false; + }); + + it("missingEndpoint", () => { + const smallBackend = backend.matchingBackend(bkend, (fn) => fn.id === "endpointUS"); + expect(backend.missingEndpoint(smallBackend)(endpointUS)).to.be.false; + expect(backend.missingEndpoint(smallBackend)(endpointEU)).to.be.true; + }); + }); }); diff --git a/src/test/deploy/functions/containerCleaner.spec.ts b/src/test/deploy/functions/containerCleaner.spec.ts index 4dbcf38b2e6..7b32aa0d4a7 100644 --- a/src/test/deploy/functions/containerCleaner.spec.ts +++ b/src/test/deploy/functions/containerCleaner.spec.ts @@ -1,5 +1,4 @@ import { expect } from "chai"; -import _ from "lodash"; import * as sinon from "sinon"; import * as backend from "../../../deploy/functions/backend"; @@ -129,14 +128,14 @@ describe("DockerHelper", () => { }); describe("ContainerRegistryCleaner", () => { - const FUNCTION: backend.FunctionSpec = { + const ENDPOINT: backend.Endpoint = { platform: "gcfv1", project: "project", region: "us-central1", id: "id", entryPoint: "function", runtime: "nodejs16", - trigger: {}, + httpsTrigger: {}, }; // The first function in a region has subdirectories "cache/" and "worker/" in it. 
@@ -163,7 +162,7 @@ describe("ContainerRegistryCleaner", () => { }) ); - await cleaner.cleanupFunction(FUNCTION); + await cleaner.cleanupFunction(ENDPOINT); expect(stub.rm).to.have.been.calledOnceWith("project/gcf/us-central1/uuid"); }); @@ -192,7 +191,7 @@ describe("ContainerRegistryCleaner", () => { }) ); - await cleaner.cleanupFunction(FUNCTION); + await cleaner.cleanupFunction(ENDPOINT); expect(stub.rm).to.have.been.calledOnceWith("project/gcf/us-central1/uuid"); }); @@ -220,7 +219,7 @@ describe("ContainerRegistryCleaner", () => { }) ); - await cleaner.cleanupFunction(FUNCTION); + await cleaner.cleanupFunction(ENDPOINT); expect(stub.rm).to.not.have.been.called; }); diff --git a/src/test/deploy/functions/deploymentPlanner.spec.ts b/src/test/deploy/functions/deploymentPlanner.spec.ts deleted file mode 100644 index bfb07203964..00000000000 --- a/src/test/deploy/functions/deploymentPlanner.spec.ts +++ /dev/null @@ -1,476 +0,0 @@ -import { expect } from "chai"; - -import * as backend from "../../../deploy/functions/backend"; - import * as deploymentPlanner from "../../../deploy/functions/deploymentPlanner"; -import * as deploymentTool from "../../../deploymentTool"; -import * as gcfv2 from "../../../gcp/cloudfunctionsv2"; - -describe("deploymentPlanner", () => { - const CLOUD_FUNCTION: Omit<backend.FunctionSpec, "id" | "region"> = { - platform: "gcfv1", - project: "project", - runtime: "nodejs16", - entryPoint: "function", - trigger: {}, - }; - - const DEPLOYED_BY_CLI = { - labels: deploymentTool.labels(), - }; - - function func(id: string, region: string) { - return { - ...CLOUD_FUNCTION, - id, - region, - }; - } - - function schedule(schedule: string, target: backend.FunctionSpec): backend.ScheduleSpec { - return { - id: backend.scheduleIdForFunction(target), - project: "p", - schedule, - transport: "pubsub", - targetService: target, - }; - } - - function topic(target: backend.FunctionSpec): backend.PubSubSpec { - return { - id: backend.scheduleIdForFunction(target), - project: "p", - targetService: target, - }; - } - - describe("utility functions", () => { - it("should partition functions by region", () => { - const r1f1 = func("r1f1", "us-central1"); - const r1f2 = func("r1f2", "us-central1"); - const r2f1 = func("r2f1", "asia-northeast1"); - const byRegion = deploymentPlanner.functionsByRegion([r1f1, r1f2, r2f1]); - - expect(Object.keys(byRegion).sort()).to.deep.equal(["us-central1", "asia-northeast1"].sort()); - expect(byRegion["us-central1"].sort()).to.deep.equal([r1f1, r1f2].sort()); - expect(byRegion["asia-northeast1"]).to.deep.equal([r2f1]); - }); - - it("should iterate all regions", () => { - const have = deploymentPlanner.functionsByRegion([ - func("r1f1", "us-central1"), - func("r2f1", "asia-northeast1"), - ]); - const want = deploymentPlanner.functionsByRegion([ - func("r1f1", "us-central1"), - func("r3f1", "europe-west1"), - ]); - const regions = deploymentPlanner.allRegions(have, want); - expect(regions.sort()).to.deep.equal( - ["us-central1", "asia-northeast1", "europe-west1"].sort() - ); - }); - }); - - describe("createDeploymentPlan", () => { - it("should put new functions into functionsToCreate", () => { - const r1f1 = func("c", "us-east1"); - const r1f2 = func("d", "us-east1"); - const r2f1 = func("d", "us-west1"); - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [r1f1, r1f2, r2f1], - }; - const have: backend.Backend = backend.empty(); - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - - const expected:
deploymentPlanner.DeploymentPlan = { - regionalDeployments: { - "us-east1": { - functionsToCreate: [r1f1, r1f2], - functionsToUpdate: [], - functionsToDelete: [], - }, - "us-west1": { - functionsToCreate: [r2f1], - functionsToUpdate: [], - functionsToDelete: [], - }, - }, - topicsToDelete: [], - schedulesToUpsert: [], - schedulesToDelete: [], - }; - expect(deploymentPlan).to.deep.equal(expected); - }); - - it("should put existing functions being deployed into functionsToUpdate", () => { - const r1f1 = func("c", "us-east1"); - const r1f2 = func("d", "us-east1"); - const r2f1 = func("d", "us-west1"); - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [r1f1, r1f2, r2f1], - }; - const have: backend.Backend = backend.empty(); - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - - const expected: deploymentPlanner.DeploymentPlan = { - regionalDeployments: { - "us-east1": { - functionsToCreate: [], - functionsToUpdate: [ - { - func: r1f1, - deleteAndRecreate: false, - }, - { - func: r1f2, - deleteAndRecreate: false, - }, - ], - functionsToDelete: [], - }, - "us-west1": { - functionsToCreate: [], - functionsToUpdate: [ - { - func: r2f1, - deleteAndRecreate: false, - }, - ], - functionsToDelete: [], - }, - }, - topicsToDelete: [], - schedulesToUpsert: [], - schedulesToDelete: [], - }; - }); - - it("should delete existing functions not in local code, only if they were deployed via CLI", () => { - const pantheonFunc = func("c", "us-east1"); - const cf3FuncR1 = { - ...CLOUD_FUNCTION, - ...DEPLOYED_BY_CLI, - id: "cf3", - region: "us-east1", - }; - const cf3FuncR2 = { - ...CLOUD_FUNCTION, - ...DEPLOYED_BY_CLI, - id: "cf3", - region: "us-west1", - }; - const have: backend.Backend = { - ...backend.empty(), - cloudFunctions: [pantheonFunc, cf3FuncR1, cf3FuncR2], - }; - const want = backend.empty(); - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - - const expected: deploymentPlanner.DeploymentPlan = { - regionalDeployments: { - "us-east1": { - functionsToCreate: [], - functionsToUpdate: [], - functionsToDelete: [cf3FuncR1], - }, - "us-west1": { - functionsToCreate: [], - functionsToUpdate: [], - functionsToDelete: [cf3FuncR2], - }, - }, - topicsToDelete: [], - schedulesToUpsert: [], - schedulesToDelete: [], - }; - expect(deploymentPlan).to.deep.equal(expected); - }); - - it("should create schedules for new or updated scheduled functions", () => { - // Existing function, existing schedule - const r1f1 = func("c", "us-east1"); - // New function, HTTPS schedule - const r1f2 = func("d", "us-east1"); - // Existing function, previously not scheduled - const r2f1 = func("d", "us-west1"); - const r1sched1 = schedule("every 20 minutes", r1f1); - const r1sched2 = schedule("every 5 minutes", r1f2); - const r2sched1 = schedule("every 5 minutes", r2f1); - const topic1 = topic(r1f1); - // Schedule 2 uses HTTP transport: - r1sched2.transport = "https"; - const topic2 = topic(r2f1); - - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [r1f1, r1f2, r2f1], - schedules: [r1sched1, r1sched2, r2sched1], - topics: [topic1, topic2], - }; - const have: backend.Backend = { - ...backend.empty(), - cloudFunctions: [r1f1, r2f1], - schedules: [r1sched1], - topics: [topic1], - }; - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - - const expected: 
deploymentPlanner.DeploymentPlan = { - regionalDeployments: { - "us-east1": { - functionsToCreate: [r1f2], - functionsToUpdate: [ - { - func: r1f1, - deleteAndRecreate: false, - }, - ], - functionsToDelete: [], - }, - "us-west1": { - functionsToCreate: [], - functionsToUpdate: [ - { - func: r2f1, - deleteAndRecreate: false, - }, - ], - functionsToDelete: [], - }, - }, - schedulesToUpsert: [r1sched1, r1sched2, r2sched1], - schedulesToDelete: [], - topicsToDelete: [], - }; - expect(deploymentPlan).to.deep.equal(expected); - }); - - it("should delete and recreate v2 pubsub functions with changes in topics", () => { - const f1: backend.FunctionSpec = { - ...func("pubsub", "us-west1"), - ...DEPLOYED_BY_CLI, - platform: "gcfv2", - trigger: { - eventType: gcfv2.PUBSUB_PUBLISH_EVENT, - eventFilters: { - resource: "projects/aproject/topics/atopic", - }, - retry: false, - }, - }; - const f2: backend.FunctionSpec = { - ...func("pubsub", "us-west1"), - ...DEPLOYED_BY_CLI, - platform: "gcfv2", - trigger: { - eventType: gcfv2.PUBSUB_PUBLISH_EVENT, - eventFilters: { - resource: "projects/aproject/topics/anothertopic", - }, - retry: false, - }, - }; - - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [f2], - }; - const have: backend.Backend = { - ...backend.empty(), - cloudFunctions: [f1], - }; - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - - const expected: deploymentPlanner.DeploymentPlan = { - regionalDeployments: { - "us-west1": { - functionsToCreate: [], - functionsToUpdate: [ - { - func: f2, - deleteAndRecreate: true, - }, - ], - functionsToDelete: [], - }, - }, - schedulesToUpsert: [], - schedulesToDelete: [], - topicsToDelete: [], - }; - expect(deploymentPlan).to.deep.equal(expected); - }); - - it("should delete schedules if the function is deleted or updated to another type", () => { - const f1 = { ...func("c", "us-east1"), ...DEPLOYED_BY_CLI }; - const f2 = { ...func("d", "us-east1"), ...DEPLOYED_BY_CLI }; - const schedule1 = schedule("every 1 minutes", f1); - const schedule2 = schedule("every 1 minutes", f2); - const topic1 = topic(f1); - const topic2 = topic(f2); - - // Deployment plan: delete f1 and the schedule from f2 - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [f2], - topics: [topic2], - }; - const have: backend.Backend = { - ...backend.empty(), - cloudFunctions: [f1, f2], - schedules: [schedule1, schedule2], - topics: [topic1, topic2], - }; - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - - const expected: deploymentPlanner.DeploymentPlan = { - regionalDeployments: { - "us-east1": { - functionsToCreate: [], - functionsToUpdate: [ - { - func: f2, - deleteAndRecreate: false, - }, - ], - functionsToDelete: [f1], - }, - }, - schedulesToUpsert: [], - schedulesToDelete: [schedule1, schedule2], - topicsToDelete: [topic1], - }; - expect(deploymentPlan).to.deep.equal(expected); - }); - - it("should only create, update, and delete matching functions if filters are passed in.", () => { - // want - const group1func1 = func("group-a", "us-east1"); - const group1func2 = func("group-d", "us-east1"); - const group2func1 = func("differentGroup-a", "us-east1"); - const group1schedule1 = schedule("every 1 minutes", group1func1); - const group1topic1 = schedule("every 1 minutes", group1func1); - const group2schedule1 = schedule("every 1 minutes", group2func1); - const group2topic1 = 
topic(group2func1); - - // have: - // group1func1 - const group1func3 = { ...func("group-c", "us-east1"), ...DEPLOYED_BY_CLI }; - const group1func4 = { ...func("group-c", "us-east1"), ...DEPLOYED_BY_CLI }; - const group2func2 = { ...func("differentGroup-b", "us-east1"), ...DEPLOYED_BY_CLI }; - const group1schedule3 = schedule("every 1 minutes", group1func3); - const group2schedule2 = schedule("every 1 minutes", group2func2); - const group1topic3 = topic(group1func3); - const group2topic2 = topic(group2func2); - - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [group1func1, group1func2, group2func1], - schedules: [group1schedule1, group2schedule1], - topics: [group1topic1, group2topic1], - }; - - const have: backend.Backend = { - ...backend.empty(), - cloudFunctions: [group1func1, group1func3, group1func4, group2func2], - schedules: [group1schedule1, group1schedule3, group2schedule2], - topics: [group1topic1, group1topic3, group2topic2], - }; - - const filters = [["group"]]; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - - const expected: deploymentPlanner.DeploymentPlan = { - regionalDeployments: { - "us-east1": { - functionsToCreate: [group1func2], - functionsToUpdate: [ - { - func: group1func1, - deleteAndRecreate: false, - }, - ], - functionsToDelete: [group1func3, group1func4], - }, - }, - schedulesToUpsert: [group1schedule1], - schedulesToDelete: [group1schedule3], - topicsToDelete: [group1topic3], - }; - expect(deploymentPlan).to.deep.equal(expected); - }); - - it("should preserve environment variables", () => { - const wantSpec = { - ...func("a", "us-west1"), - environmentVariables: { BAR: "baz" }, - }; - const haveSpec = { - ...func("a", "us-west1"), - environmentVariables: { FOO: "bar" }, - }; - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [wantSpec], - }; - const have: backend.Backend = { - ...backend.empty(), - cloudFunctions: [haveSpec], - }; - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { filters }); - expect( - deploymentPlan.regionalDeployments["us-west1"].functionsToUpdate.map( - (spec) => spec.func.environmentVariables - ) - ).to.be.deep.equals([{ FOO: "bar", BAR: "baz" }]); - }); - - it("should overwrite environment variables when specified", () => { - const wantSpec = { - ...func("a", "us-west1"), - environmentVariables: { BAR: "baz" }, - }; - const haveSpec = { - ...func("a", "us-west1"), - environmentVariables: { FOO: "bar" }, - }; - const want: backend.Backend = { - ...backend.empty(), - cloudFunctions: [wantSpec], - }; - const have: backend.Backend = { - ...backend.empty(), - cloudFunctions: [haveSpec], - }; - const filters: string[][] = []; - - const deploymentPlan = deploymentPlanner.createDeploymentPlan(want, have, { - filters, - overwriteEnvs: true, - }); - expect( - deploymentPlan.regionalDeployments["us-west1"].functionsToUpdate.map( - (spec) => spec.func.environmentVariables - ) - ).to.be.deep.equals([{ BAR: "baz" }]); - }); - }); -}); diff --git a/src/test/deploy/functions/functionsDeployHelper.spec.ts b/src/test/deploy/functions/functionsDeployHelper.spec.ts index ae198530698..469ff205884 100644 --- a/src/test/deploy/functions/functionsDeployHelper.spec.ts +++ b/src/test/deploy/functions/functionsDeployHelper.spec.ts @@ -5,52 +5,51 @@ import * as helper from "../../../deploy/functions/functionsDeployHelper"; import { Options } from "../../../options"; describe("functionsDeployHelper", () => { - 
const CLOUD_FUNCTION: Omit<backend.FunctionSpec, "id"> = { + const ENDPOINT: Omit<backend.Endpoint, "id" | "httpsTrigger"> = { platform: "gcfv1", project: "project", region: "us-central1", runtime: "nodejs16", entryPoint: "function", - trigger: {}, }; describe("functionMatchesGroup", () => { it("should match empty filters", () => { - const func = { ...CLOUD_FUNCTION, id: "id" }; + const func = { ...ENDPOINT, id: "id" }; expect(helper.functionMatchesGroup(func, [])).to.be.true; }); it("should match full names", () => { - const func = { ...CLOUD_FUNCTION, id: "id" }; + const func = { ...ENDPOINT, id: "id" }; expect(helper.functionMatchesGroup(func, ["id"])).to.be.true; }); it("should match group prefixes", () => { - const func = { ...CLOUD_FUNCTION, id: "group-subgroup-func" }; + const func = { ...ENDPOINT, id: "group-subgroup-func" }; expect(helper.functionMatchesGroup(func, ["group", "subgroup", "func"])).to.be.true; expect(helper.functionMatchesGroup(func, ["group", "subgroup"])).to.be.true; expect(helper.functionMatchesGroup(func, ["group"])).to.be.true; }); it("should exclude functions that don't match", () => { - const func = { ...CLOUD_FUNCTION, id: "id" }; + const func = { ...ENDPOINT, id: "id" }; expect(helper.functionMatchesGroup(func, ["group"])).to.be.false; }); }); describe("functionMatchesAnyGroup", () => { it("should match empty filters", () => { - const func = { ...CLOUD_FUNCTION, id: "id" }; + const func = { ...ENDPOINT, id: "id" }; expect(helper.functionMatchesAnyGroup(func, [[]])).to.be.true; }); it("should match against one filter", () => { - const func = { ...CLOUD_FUNCTION, id: "id" }; + const func = { ...ENDPOINT, id: "id" }; expect(helper.functionMatchesAnyGroup(func, [["id"], ["group"]])).to.be.true; }); it("should exclude functions that don't match", () => { - const func = { ...CLOUD_FUNCTION, id: "id" }; + const func = { ...ENDPOINT, id: "id" }; expect(helper.functionMatchesAnyGroup(func, [["group"], ["other-group"]])).to.be.false; }); }); diff --git a/src/test/deploy/functions/prepare.spec.ts b/src/test/deploy/functions/prepare.spec.ts new file mode 100644 index 00000000000..36f32991d80 --- /dev/null +++ b/src/test/deploy/functions/prepare.spec.ts @@ -0,0 +1,96 @@ +import { expect } from "chai"; + +import * as backend from "../../../deploy/functions/backend"; +import * as prepare from "../../../deploy/functions/prepare"; + +describe("prepare", () => { + describe("inferDetailsFromExisting", () => { + const ENDPOINT_BASE: Omit<backend.Endpoint, "httpsTrigger"> = { + platform: "gcfv2", + id: "id", + region: "region", + project: "project", + entryPoint: "entry", + runtime: "nodejs16", + }; + + const ENDPOINT: backend.Endpoint = { + ...ENDPOINT_BASE, + httpsTrigger: {}, + }; + + it("merges env vars if .env is not used", () => { + const oldE = { + ...ENDPOINT, + environmentVariables: { + foo: "old value", + old: "value", + }, + }; + const newE = { + ...ENDPOINT, + environmentVariables: { + foo: "new value", + new: "value", + }, + }; + + prepare.inferDetailsFromExisting(backend.of(newE), backend.of(oldE), /* usedDotenv= */ false); + + expect(newE.environmentVariables).to.deep.equals({ + old: "value", + new: "value", + foo: "new value", + }); + }); + + it("overwrites env vars if .env is used", () => { + const oldE = { + ...ENDPOINT, + environmentVariables: { + foo: "old value", + old: "value", + }, + }; + const newE = { + ...ENDPOINT, + environmentVariables: { + foo: "new value", + new: "value", + }, + }; + + prepare.inferDetailsFromExisting(backend.of(newE), backend.of(oldE), /* usedDotEnv= */ true); + + expect(newE.environmentVariables).to.deep.equals({ + new: "value", + 
foo: "new value", + }); + }); + + it("can noop when there is no prior endpoint", () => { + const e = { ...ENDPOINT }; + prepare.inferDetailsFromExisting(backend.of(e), backend.of(), /* usedDotEnv= */ false); + expect(e).to.deep.equal(ENDPOINT); + }); + + it("can fill in regions from last deploy", () => { + const want: backend.Endpoint = { + ...ENDPOINT_BASE, + eventTrigger: { + eventType: "google.cloud.storage.object.v1.finalized", + eventFilters: { + bucket: "bucket", + }, + retry: false, + }, + }; + // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment + const have: backend.Endpoint & backend.EventTriggered = JSON.parse(JSON.stringify(want)); + have.eventTrigger.region = "us"; + + prepare.inferDetailsFromExisting(backend.of(want), backend.of(have), /* usedDotEnv= */ false); + expect(want.eventTrigger.region).to.equal("us"); + }); + }); +}); diff --git a/src/test/deploy/functions/pricing.spec.ts b/src/test/deploy/functions/pricing.spec.ts index 8a87881438c..f2d89704aed 100644 --- a/src/test/deploy/functions/pricing.spec.ts +++ b/src/test/deploy/functions/pricing.spec.ts @@ -1,15 +1,17 @@ import { expect } from "chai"; -import { v1 } from "uuid"; import * as backend from "../../../deploy/functions/backend"; import * as pricing from "../../../deploy/functions/pricing"; -const FUNCTION_FRAGMENT: Omit = { +// N.B. I'm not sure why, but if I don't add back backend.HttpsTriggered +// then I can't add the trigger to the Omit<>, which means it can't be +// passed to test methods. +const ENDPOINT_FRAGMENT: Omit & backend.HttpsTriggered = { id: "function", project: "project", entryPoint: "foobar", runtime: "nodejs16", - trigger: {}, + httpsTrigger: {}, }; const INVALID_REGION = { region: "fillory" }; @@ -18,7 +20,7 @@ describe("Functions Pricing", () => { it("Can calculate the $0 cost of a function without min instances", () => { expect( pricing.canCalculateMinInstanceCost({ - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", }) @@ -26,7 +28,7 @@ describe("Functions Pricing", () => { expect( pricing.canCalculateMinInstanceCost({ - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv2", ...INVALID_REGION, }) @@ -36,7 +38,7 @@ describe("Functions Pricing", () => { it("Can calculate the cost of a well formed v1 function", () => { expect( pricing.canCalculateMinInstanceCost({ - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 10, @@ -47,7 +49,7 @@ describe("Functions Pricing", () => { it("Can calculate the cost of a well formed v2 function", () => { expect( pricing.canCalculateMinInstanceCost({ - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv2", region: "us-central1", minInstances: 10, @@ -58,7 +60,7 @@ describe("Functions Pricing", () => { it("Cannot calculate the cost of an unknown instance size", () => { expect( pricing.canCalculateMinInstanceCost({ - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 10, @@ -70,7 +72,7 @@ describe("Functions Pricing", () => { it("Cannot calculate the cost for an unknown region", () => { expect( pricing.canCalculateMinInstanceCost({ - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, ...INVALID_REGION, platform: "gcfv1", minInstances: 10, @@ -97,7 +99,7 @@ describe("Functions Pricing", () => { it("can calculate a v1 tier1 bill", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 1, @@ 
-115,14 +117,14 @@ describe("Functions Pricing", () => { it("doesn't estimate bills for unreserved instances", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 1, availableMemoryMb: 256, }, { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 0, @@ -139,7 +141,7 @@ describe("Functions Pricing", () => { it("can calculate a bill for two reserved instances", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 2, @@ -157,14 +159,14 @@ describe("Functions Pricing", () => { it("Can calculate a v1 tier1 bill for two reserved instances between two functions", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 1, availableMemoryMb: 256, }, { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 1, @@ -181,7 +183,7 @@ describe("Functions Pricing", () => { it("can calculate a v1 tier2 bill", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "europe-west3", minInstances: 1, @@ -199,7 +201,7 @@ describe("Functions Pricing", () => { it("can calculate a v1 bill for large instances", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "europe-west3", minInstances: 1, @@ -217,7 +219,7 @@ describe("Functions Pricing", () => { it("can calculate a v2 tier1 bill", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv2", region: "us-central1", minInstances: 1, @@ -235,7 +237,7 @@ describe("Functions Pricing", () => { it("can calculate a v2 tier2 bill", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv2", region: "europe-west3", minInstances: 1, @@ -253,7 +255,7 @@ describe("Functions Pricing", () => { it("can calculate a v2 bill for large instances", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv2", region: "europe-west3", minInstances: 1, @@ -271,13 +273,13 @@ describe("Functions Pricing", () => { it("calculates v1 and v2 discounts separately", () => { const cost = pricing.monthlyMinInstanceCost([ { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv1", region: "us-central1", minInstances: 1, }, { - ...FUNCTION_FRAGMENT, + ...ENDPOINT_FRAGMENT, platform: "gcfv2", region: "us-central1", minInstances: 1, diff --git a/src/test/deploy/functions/prompts.spec.ts b/src/test/deploy/functions/prompts.spec.ts index fad2f85bd75..eb25cf5f4dc 100644 --- a/src/test/deploy/functions/prompts.spec.ts +++ b/src/test/deploy/functions/prompts.spec.ts @@ -17,7 +17,7 @@ const SAMPLE_EVENT_TRIGGER: backend.EventTrigger = { retry: false, }; -const SAMPLE_FUNC: backend.FunctionSpec = { +const SAMPLE_ENDPOINT: backend.Endpoint = { platform: "gcfv1", id: "c", region: "us-central1", @@ -26,7 +26,7 @@ const SAMPLE_FUNC: backend.FunctionSpec = { labels: {}, environmentVariables: {}, runtime: "nodejs16", - trigger: SAMPLE_EVENT_TRIGGER, + eventTrigger: SAMPLE_EVENT_TRIGGER, }; const SAMPLE_OPTIONS: Options = { @@ -57,142 +57,158 @@ 
describe("promptForFailurePolicies", () => { }); it("should prompt if there are new functions with failure policies", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - trigger: { - ...SAMPLE_EVENT_TRIGGER, - retry: true, - }, + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { + ...SAMPLE_EVENT_TRIGGER, + retry: true, }, - ]; + }; promptStub.resolves(true); - await expect(functionPrompts.promptForFailurePolicies(SAMPLE_OPTIONS, funcs, [])).not.to.be - .rejected; + await expect( + functionPrompts.promptForFailurePolicies( + SAMPLE_OPTIONS, + backend.of(endpoint), + backend.empty() + ) + ).not.to.be.rejected; expect(promptStub).to.have.been.calledOnce; }); it("should not prompt if all functions with failure policies already had failure policies", async () => { - const func = { - ...SAMPLE_FUNC, - trigger: { + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { ...SAMPLE_EVENT_TRIGGER, retry: true, }, }; - await expect(functionPrompts.promptForFailurePolicies(SAMPLE_OPTIONS, [func], [func])).to - .eventually.be.fulfilled; + await expect( + functionPrompts.promptForFailurePolicies( + SAMPLE_OPTIONS, + backend.of(endpoint), + backend.of(endpoint) + ) + ).eventually.be.fulfilled; expect(promptStub).to.not.have.been.called; }); it("should throw if user declines the prompt", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - trigger: { - ...SAMPLE_EVENT_TRIGGER, - retry: true, - }, + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { + ...SAMPLE_EVENT_TRIGGER, + retry: true, }, - ]; + }; promptStub.resolves(false); await expect( - functionPrompts.promptForFailurePolicies(SAMPLE_OPTIONS, funcs, []) + functionPrompts.promptForFailurePolicies( + SAMPLE_OPTIONS, + backend.of(endpoint), + backend.empty() + ) ).to.eventually.be.rejectedWith(FirebaseError, /Deployment canceled/); expect(promptStub).to.have.been.calledOnce; }); it("should prompt if an existing function adds a failure policy", async () => { - const func = { - ...SAMPLE_FUNC, - trigger: { + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { ...SAMPLE_EVENT_TRIGGER, }, }; - const newFunc = { - ...SAMPLE_FUNC, - trigger: { + const newEndpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { ...SAMPLE_EVENT_TRIGGER, retry: true, }, }; promptStub.resolves(true); - await expect(functionPrompts.promptForFailurePolicies(SAMPLE_OPTIONS, [newFunc], [func])).to - .eventually.be.fulfilled; + await expect( + functionPrompts.promptForFailurePolicies( + SAMPLE_OPTIONS, + backend.of(newEndpoint), + backend.of(endpoint) + ) + ).eventually.be.fulfilled; expect(promptStub).to.have.been.calledOnce; }); it("should throw if there are any functions with failure policies and the user doesn't accept the prompt", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - trigger: { - ...SAMPLE_EVENT_TRIGGER, - retry: true, - }, + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { + ...SAMPLE_EVENT_TRIGGER, + retry: true, }, - ]; + }; promptStub.resolves(false); await expect( - functionPrompts.promptForFailurePolicies(SAMPLE_OPTIONS, funcs, []) + functionPrompts.promptForFailurePolicies( + SAMPLE_OPTIONS, + backend.of(endpoint), + backend.empty() + ) ).to.eventually.be.rejectedWith(FirebaseError, /Deployment canceled/); expect(promptStub).to.have.been.calledOnce; }); it("should not prompt if there are no functions with failure policies", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - trigger: { - ...SAMPLE_EVENT_TRIGGER, - }, + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { + ...SAMPLE_EVENT_TRIGGER, }, 
- ]; + }; promptStub.resolves(); - await expect(functionPrompts.promptForFailurePolicies(SAMPLE_OPTIONS, funcs, [])).to.eventually - .be.fulfilled; + await expect( + functionPrompts.promptForFailurePolicies( + SAMPLE_OPTIONS, + backend.of(endpoint), + backend.empty() + ) + ).to.eventually.be.fulfilled; expect(promptStub).not.to.have.been.called; }); it("should throw if there are any functions with failure policies, in noninteractive mode, without the force flag set", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - trigger: { - ...SAMPLE_EVENT_TRIGGER, - retry: true, - }, + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { + ...SAMPLE_EVENT_TRIGGER, + retry: true, }, - ]; + }; const options = { ...SAMPLE_OPTIONS, nonInteractive: true }; - await expect(functionPrompts.promptForFailurePolicies(options, funcs, [])).to.be.rejectedWith( - FirebaseError, - /--force option/ - ); + await expect( + functionPrompts.promptForFailurePolicies(options, backend.of(endpoint), backend.empty()) + ).to.be.rejectedWith(FirebaseError, /--force option/); expect(promptStub).not.to.have.been.called; }); it("should not throw if there are any functions with failure policies, in noninteractive mode, with the force flag set", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - trigger: { - ...SAMPLE_EVENT_TRIGGER, - retry: true, - }, + const endpoint = { + ...SAMPLE_ENDPOINT, + eventTrigger: { + ...SAMPLE_EVENT_TRIGGER, + retry: true, }, - ]; + }; const options = { ...SAMPLE_OPTIONS, nonInteractive: true, force: true }; - await expect(functionPrompts.promptForFailurePolicies(options, funcs, [])).to.eventually.be - .fulfilled; + await expect( + functionPrompts.promptForFailurePolicies(options, backend.of(endpoint), backend.empty()) + ).to.eventually.be.fulfilled; expect(promptStub).not.to.have.been.called; }); }); @@ -212,199 +228,207 @@ describe("promptForMinInstances", () => { }); it("should prompt if there are new functions with minInstances", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - minInstances: 1, - }, - ]; + const endpoint = { + ...SAMPLE_ENDPOINT, + minInstances: 1, + }; promptStub.resolves(true); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, funcs, [])).not.to.be - .rejected; + await expect( + functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, backend.of(endpoint), backend.empty()) + ).not.to.be.rejected; expect(promptStub).to.have.been.calledOnce; }); it("should not prompt if no function has minInstances", async () => { - await expect( - functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, [SAMPLE_FUNC], [SAMPLE_FUNC]) - ).to.eventually.be.fulfilled; + const bkend = backend.of(SAMPLE_ENDPOINT); + await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, bkend, bkend)).to.eventually + .be.fulfilled; expect(promptStub).to.not.have.been.called; }); it("should not prompt if all functions with minInstances already had the same number of minInstances", async () => { - const func = { - ...SAMPLE_FUNC, + const bkend = backend.of({ + ...SAMPLE_ENDPOINT, minInstances: 1, - }; + }); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, [func], [func])).to - .eventually.be.fulfilled; + await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, bkend, bkend)).to.eventually + .be.fulfilled; expect(promptStub).to.not.have.been.called; }); it("should not prompt if functions decrease in minInstances", async () => { - const func = { - ...SAMPLE_FUNC, + const endpoint = { + ...SAMPLE_ENDPOINT, minInstances: 2, }; - const newFunc = { - ...SAMPLE_FUNC, + const newEndpoint = { + ...SAMPLE_ENDPOINT, minInstances: 1, }; - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, [newFunc], [func])).to - .eventually.be.fulfilled; + await expect( + functionPrompts.promptForMinInstances( + SAMPLE_OPTIONS, + backend.of(newEndpoint), + backend.of(endpoint) + ) + ).eventually.be.fulfilled; expect(promptStub).to.not.have.been.called; }); it("should throw if user declines the prompt", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - minInstances: 1, - }, - ]; + const bkend = backend.of({ + ...SAMPLE_ENDPOINT, + minInstances: 1, + }); promptStub.resolves(false); - await expect( - functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, funcs, []) + functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, bkend, backend.empty()) ).to.eventually.be.rejectedWith(FirebaseError, /Deployment canceled/); expect(promptStub).to.have.been.calledOnce; }); it("should prompt if an existing function sets minInstances", async () => { - const func = { - ...SAMPLE_FUNC, - }; - const newFunc = { - ...SAMPLE_FUNC, + const newEndpoint = { + ...SAMPLE_ENDPOINT, minInstances: 1, }; promptStub.resolves(true); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, [newFunc], [func])).to - .eventually.be.fulfilled; + await expect( + functionPrompts.promptForMinInstances( + SAMPLE_OPTIONS, + backend.of(newEndpoint), + backend.of(SAMPLE_ENDPOINT) + ) + ).eventually.be.fulfilled; expect(promptStub).to.have.been.calledOnce; }); it("should prompt if an existing function increases minInstances", async () => { - const func = { - ...SAMPLE_FUNC, + const endpoint = { + ...SAMPLE_ENDPOINT, minInstances: 1, }; - const newFunc = { - ...SAMPLE_FUNC, + const newEndpoint = { + ...SAMPLE_ENDPOINT, minInstances: 2, }; promptStub.resolves(true); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, [newFunc], [func])).to - .eventually.be.fulfilled; + await expect( + functionPrompts.promptForMinInstances( + SAMPLE_OPTIONS, + backend.of(newEndpoint), + backend.of(endpoint) + ) + ).eventually.be.fulfilled; expect(promptStub).to.have.been.calledOnce; }); it("should prompt if a minInstance function increases resource reservations", async () => { - const func: backend.FunctionSpec = { - ...SAMPLE_FUNC, + const endpoint: backend.Endpoint = { + ...SAMPLE_ENDPOINT, minInstances: 2, availableMemoryMb: 1024, }; - const newFunc: backend.FunctionSpec = { - ...SAMPLE_FUNC, + const newEndpoint: backend.Endpoint = { + ...SAMPLE_ENDPOINT, minInstances: 2, availableMemoryMb: 2048, }; promptStub.resolves(true); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, [newFunc], [func])).to - .eventually.be.fulfilled; + await expect( + functionPrompts.promptForMinInstances( + SAMPLE_OPTIONS, + backend.of(newEndpoint), + backend.of(endpoint) + ) + ).eventually.be.fulfilled; expect(promptStub).to.have.been.calledOnce; }); it("should throw if there are any functions with minInstances and the user doesn't accept the prompt", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - minInstances: 2, - }, - ]; + const endpoint = { + ...SAMPLE_ENDPOINT, + minInstances: 2, + }; promptStub.resolves(false); await expect( - functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, funcs, []) + functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, backend.of(endpoint), backend.empty()) ).to.eventually.be.rejectedWith(FirebaseError, /Deployment canceled/); expect(promptStub).to.have.been.calledOnce; }); it("should not prompt if there are no functions with 
minInstances", async () => { - const funcs = [SAMPLE_FUNC]; promptStub.resolves(); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, funcs, [])).to.eventually.be - .fulfilled; + await expect( + functionPrompts.promptForMinInstances( + SAMPLE_OPTIONS, + backend.of(SAMPLE_ENDPOINT), + backend.empty() + ) + ).to.eventually.be.fulfilled; expect(promptStub).not.to.have.been.called; }); it("should throw if there are any functions with minInstances, in noninteractive mode, without the force flag set", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - minInstances: 1, - }, - ]; + const endpoint = { + ...SAMPLE_ENDPOINT, + minInstances: 1, + }; const options = { ...SAMPLE_OPTIONS, nonInteractive: true }; - await expect(functionPrompts.promptForMinInstances(options, funcs, [])).to.be.rejectedWith( - FirebaseError, - /--force option/ - ); + await expect( + functionPrompts.promptForMinInstances(options, backend.of(endpoint), backend.empty()) + ).to.be.rejectedWith(FirebaseError, /--force option/); expect(promptStub).not.to.have.been.called; }); it("should not throw if there are any functions with minInstances, in noninteractive mode, with the force flag set", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - minInstances: 1, - }, - ]; + const endpoint = { + ...SAMPLE_ENDPOINT, + minInstances: 1, + }; const options = { ...SAMPLE_OPTIONS, nonInteractive: true, force: true }; - await expect(functionPrompts.promptForMinInstances(options, funcs, [])).to.eventually.be - .fulfilled; + await expect( + functionPrompts.promptForMinInstances(options, backend.of(endpoint), backend.empty()) + ).to.eventually.be.fulfilled; expect(promptStub).not.to.have.been.called; }); it("Should disclaim if a bill cannot be calculated", async () => { - const funcs = [ - { - ...SAMPLE_FUNC, - region: "fillory", - minInstances: 1, - }, - ]; + const endpoint = { + ...SAMPLE_ENDPOINT, + region: "fillory", + minInstances: 1, + }; promptStub.resolves(true); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, funcs, [])).to.eventually.be - .fulfilled; + await expect( + functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, backend.of(endpoint), backend.empty()) + ).to.eventually.be.fulfilled; expect(promptStub).to.have.been.called; expect(logStub.firstCall.args[1]).to.match(/Cannot calculate the minimum monthly bill/); }); it("Should advise customers of possible discounts", async () => { - const funcs: backend.FunctionSpec[] = [ - { - ...SAMPLE_FUNC, - region: "fillory", - platform: "gcfv2", - minInstances: 2, - }, - ]; + const endpoint: backend.Endpoint = { + ...SAMPLE_ENDPOINT, + platform: "gcfv2", + minInstances: 2, + }; promptStub.resolves(true); - await expect(functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, funcs, [])).to.eventually.be - .fulfilled; + await expect( + functionPrompts.promptForMinInstances(SAMPLE_OPTIONS, backend.of(endpoint), backend.empty()) + ).to.eventually.be.fulfilled; expect(promptStub).to.have.been.called; expect(logStub.firstCall.args[1]).to.match(new RegExp("https://cloud.google.com/run/cud")); }); diff --git a/src/test/deploy/functions/release/executor.spec.ts b/src/test/deploy/functions/release/executor.spec.ts new file mode 100644 index 00000000000..f819b3b3d59 --- /dev/null +++ b/src/test/deploy/functions/release/executor.spec.ts @@ -0,0 +1,49 @@ +import { expect } from "chai"; + +import * as executor from "../../../../deploy/functions/release/executor"; + +describe("Executor", () => { + describe("QueueExecutor", () => { + const exec = new 
executor.QueueExecutor({ + retries: 20, + maxBackoff: 1, + backoff: 1, + }); + + it("supports arbitrary return types", async () => { + await expect(exec.run(() => Promise.resolve(42))).to.eventually.equal(42); + await expect(exec.run(() => Promise.resolve({ hello: "world" }))).to.eventually.deep.equal({ + hello: "world", + }); + }); + + it("throws errors", async () => { + const handler = (): Promise<void> => Promise.reject(new Error("Fatal")); + await expect(exec.run(handler)).to.eventually.be.rejectedWith("Fatal"); + }); + + it("retries temporary errors", async () => { + let throwCount = 0; + const handler = (): Promise<number> => { + if (throwCount < 2) { + throwCount++; + const err = new Error("Retryable"); + (err as any).code = 429; + return Promise.reject(err); + } + return Promise.resolve(42); + }; + + await expect(exec.run(handler)).to.eventually.equal(42); + }); + + it("eventually gives up on retryable errors", async () => { + const handler = (): Promise<void> => { + const err = new Error("Retryable"); + (err as any).code = 429; + throw err; + }; + await expect(exec.run(handler)).to.eventually.be.rejectedWith("Retryable"); + }); + }); +}); diff --git a/src/test/deploy/functions/release/fabricator.spec.ts b/src/test/deploy/functions/release/fabricator.spec.ts new file mode 100644 index 00000000000..c12b6cca274 --- /dev/null +++ b/src/test/deploy/functions/release/fabricator.spec.ts @@ -0,0 +1,963 @@ +import { expect } from "chai"; +import * as sinon from "sinon"; + +import * as fabricator from "../../../../deploy/functions/release/fabricator"; +import * as reporter from "../../../../deploy/functions/release/reporter"; +import * as executor from "../../../../deploy/functions/release/executor"; +import * as gcfNSV2 from "../../../../gcp/cloudfunctionsv2"; +import * as gcfNS from "../../../../gcp/cloudfunctions"; +import * as pollerNS from "../../../../operation-poller"; +import * as pubsubNS from "../../../../gcp/pubsub"; +import * as schedulerNS from "../../../../gcp/cloudscheduler"; +import * as runNS from "../../../../gcp/run"; +import * as backend from "../../../../deploy/functions/backend"; +import * as scraper from "../../../../deploy/functions/release/sourceTokenScraper"; +import * as planner from "../../../../deploy/functions/release/planner"; + +describe("Fabricator", () => { + // Stub all GCP APIs to make sure this test is hermetic + let gcf: sinon.SinonStubbedInstance<typeof gcfNS>; + let gcfv2: sinon.SinonStubbedInstance<typeof gcfNSV2>; + let poller: sinon.SinonStubbedInstance<typeof pollerNS>; + let pubsub: sinon.SinonStubbedInstance<typeof pubsubNS>; + let scheduler: sinon.SinonStubbedInstance<typeof schedulerNS>; + let run: sinon.SinonStubbedInstance<typeof runNS>; + + beforeEach(() => { + gcf = sinon.stub(gcfNS); + gcfv2 = sinon.stub(gcfNSV2); + poller = sinon.stub(pollerNS); + pubsub = sinon.stub(pubsubNS); + scheduler = sinon.stub(schedulerNS); + run = sinon.stub(runNS); + + gcf.functionFromEndpoint.restore(); + gcfv2.functionFromEndpoint.restore(); + scheduler.jobFromEndpoint.restore(); + gcf.createFunction.rejects(new Error("unexpected gcf.createFunction")); + gcf.updateFunction.rejects(new Error("unexpected gcf.updateFunction")); + gcf.deleteFunction.rejects(new Error("unexpected gcf.deleteFunction")); + gcf.getIamPolicy.rejects(new Error("unexpected gcf.getIamPolicy")); + gcf.setIamPolicy.rejects(new Error("unexpected gcf.setIamPolicy")); + gcf.setInvokerCreate.rejects(new Error("unexpected gcf.setInvokerCreate")); + gcf.setInvokerUpdate.rejects(new Error("unexpected gcf.setInvokerUpdate")); + gcfv2.createFunction.rejects(new Error("unexpected gcfv2.createFunction")); + 
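// Any API call a test does not explicitly arrange for rejects with an "unexpected ..." error, keeping these tests hermetic. + 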
gcfv2.updateFunction.rejects(new Error("unexpected gcfv2.updateFunction")); + gcfv2.deleteFunction.rejects(new Error("unexpected gcfv2.deleteFunction")); + run.getIamPolicy.rejects(new Error("unexpected run.getIamPolicy")); + run.setIamPolicy.rejects(new Error("unexpected run.setIamPolicy")); + run.setInvokerCreate.rejects(new Error("unexpected run.setInvokerCreate")); + run.setInvokerUpdate.rejects(new Error("unexpected run.setInvokerUpdate")); + run.replaceService.rejects(new Error("unexpected run.replaceService")); + poller.pollOperation.rejects(new Error("unexpected poller.pollOperation")); + pubsub.createTopic.rejects(new Error("unexpected pubsub.createTopic")); + pubsub.deleteTopic.rejects(new Error("unexpected pubsub.deleteTopic")); + scheduler.createOrReplaceJob.rejects(new Error("unexpected scheduler.createOrReplaceJob")); + scheduler.deleteJob.rejects(new Error("unexpected scheduler.deleteJob")); + }); + + afterEach(() => { + sinon.verifyAndRestore(); + }); + + const storage: gcfNSV2.StorageSource = { + bucket: "bucket", + object: "object", + generation: 42, + }; + const ctorArgs: fabricator.FabricatorArgs = { + executor: new executor.InlineExecutor(), + functionExecutor: new executor.InlineExecutor(), + sourceUrl: "https://example.com", + storage: { + "us-central1": storage, + "us-west1": storage, + }, + appEngineLocation: "us-central1", + }; + let fab: fabricator.Fabricator; + beforeEach(() => { + fab = new fabricator.Fabricator(ctorArgs); + }); + + afterEach(() => { + sinon.verifyAndRestore(); + }); + + function endpoint( + trigger: backend.Triggered = { httpsTrigger: {} }, + base: Partial = {} + ): backend.Endpoint { + return { + platform: "gcfv1", + id: "id", + region: "us-central1", + entryPoint: "entrypoint", + runtime: "nodejs16", + ...JSON.parse(JSON.stringify(base)), + ...trigger, + } as backend.Endpoint; + } + + describe("createV1Function", () => { + it("throws on create function failure", async () => { + gcf.createFunction.rejects(new Error("Server failure")); + + await expect( + fab.createV1Function(endpoint(), new scraper.SourceTokenScraper()) + ).to.be.rejectedWith(reporter.DeploymentError, "create"); + + gcf.createFunction.resolves({ name: "op", type: "create", done: false }); + poller.pollOperation.rejects(new Error("Fail whale")); + await expect( + fab.createV1Function(endpoint(), new scraper.SourceTokenScraper()) + ).to.be.rejectedWith(reporter.DeploymentError, "create"); + }); + + it("throws on set invoker failure", async () => { + gcf.createFunction.resolves({ name: "op", type: "create", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerCreate.rejects(new Error("Boom")); + + await expect( + fab.createV1Function(endpoint(), new scraper.SourceTokenScraper()) + ).to.be.rejectedWith(reporter.DeploymentError, "set invoker"); + }); + + it("sets invoker by default", async () => { + gcf.createFunction.resolves({ name: "op", type: "create", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerCreate.resolves(); + const ep = endpoint(); + + await fab.createV1Function(ep, new scraper.SourceTokenScraper()); + expect(gcf.setInvokerCreate).to.have.been.calledWith(ep.project, backend.functionName(ep), [ + "public", + ]); + }); + + it("sets explicit invoker", async () => { + gcf.createFunction.resolves({ name: "op", type: "create", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerCreate.resolves(); + const ep = endpoint({ + httpsTrigger: { + invoker: ["custom@"], + }, + }); + + await fab.createV1Function(ep, new 
scraper.SourceTokenScraper()); + expect(gcf.setInvokerCreate).to.have.been.calledWith(ep.project, backend.functionName(ep), [ + "custom@", + ]); + }); + + it("doesn't set private invoker on create", async () => { + gcf.createFunction.resolves({ name: "op", type: "create", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerCreate.resolves(); + const ep = endpoint({ + httpsTrigger: { + invoker: ["private"], + }, + }); + + await fab.createV1Function(ep, new scraper.SourceTokenScraper()); + expect(gcf.setInvokerCreate).to.not.have.been.called; + }); + + it("doesn't set invoker on non-http functions", async () => { + gcf.createFunction.resolves({ name: "op", type: "create", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerCreate.resolves(); + const ep = endpoint({ + scheduleTrigger: {}, + }); + + await fab.createV1Function(ep, new scraper.SourceTokenScraper()); + expect(gcf.setInvokerCreate).to.not.have.been.called; + }); + }); + + describe("updateV1Function", () => { + it("throws on update function failure", async () => { + gcf.updateFunction.rejects(new Error("Server failure")); + + await expect( + fab.updateV1Function(endpoint(), new scraper.SourceTokenScraper()) + ).to.be.rejectedWith(reporter.DeploymentError, "update"); + + gcf.updateFunction.resolves({ name: "op", type: "update", done: false }); + poller.pollOperation.rejects(new Error("Fail whale")); + await expect( + fab.updateV1Function(endpoint(), new scraper.SourceTokenScraper()) + ).to.be.rejectedWith(reporter.DeploymentError, "update"); + }); + + it("throws on set invoker failure", async () => { + gcf.updateFunction.resolves({ name: "op", type: "update", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerUpdate.rejects(new Error("Boom")); + + const ep = endpoint({ + httpsTrigger: { + invoker: ["private"], + }, + }); + await expect(fab.updateV1Function(ep, new scraper.SourceTokenScraper())).to.be.rejectedWith( + reporter.DeploymentError, + "set invoker" + ); + }); + + it("sets explicit invoker", async () => { + gcf.updateFunction.resolves({ name: "op", type: "create", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerUpdate.resolves(); + const ep = endpoint({ + httpsTrigger: { + invoker: ["custom@"], + }, + }); + + await fab.updateV1Function(ep, new scraper.SourceTokenScraper()); + expect(gcf.setInvokerUpdate).to.have.been.calledWith(ep.project, backend.functionName(ep), [ + "custom@", + ]); + }); + + it("does not set invoker by default", async () => { + gcf.updateFunction.resolves({ name: "op", type: "update", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerUpdate.resolves(); + const ep = endpoint(); + + await fab.updateV1Function(ep, new scraper.SourceTokenScraper()); + expect(gcf.setInvokerUpdate).to.not.have.been.called; + }); + + it("doesn't set invoker on non-http functions", async () => { + gcf.updateFunction.resolves({ name: "op", type: "update", done: false }); + poller.pollOperation.resolves(); + gcf.setInvokerUpdate.resolves(); + const ep = endpoint({ + scheduleTrigger: {}, + }); + + await fab.updateV1Function(ep, new scraper.SourceTokenScraper()); + expect(gcf.setInvokerUpdate).to.not.have.been.called; + }); + }); + + describe("deleteV1Function", () => { + it("throws on delete function failure", async () => { + gcf.deleteFunction.rejects(new Error("404")); + const ep = endpoint(); + + await expect(fab.deleteV1Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "delete"); + + gcf.deleteFunction.resolves({ name: "op", type: 
"delete", done: false }); + poller.pollOperation.rejects(new Error("5xx")); + + await expect(fab.deleteV1Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "delete"); + }); + }); + + describe("createV2Function", () => { + let setConcurrency: sinon.SinonStub; + + beforeEach(() => { + setConcurrency = sinon.stub(fab, "setConcurrency"); + setConcurrency.resolves(); + }); + + it("handles topiocs that already exist", async () => { + pubsub.createTopic.callsFake(() => { + const err = new Error("Already exists"); + (err as any).status = 409; + return Promise.reject(err); + }); + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + + const ep = endpoint( + { + eventTrigger: { + eventType: gcfv2.PUBSUB_PUBLISH_EVENT, + eventFilters: { + resource: "topic", + }, + retry: false, + }, + }, + { + platform: "gcfv2", + } + ); + + await fab.createV2Function(ep); + expect(pubsub.createTopic).to.have.been.called; + expect(gcfv2.createFunction).to.have.been.called; + }); + + it("handles failures to create a topic", async () => { + pubsub.createTopic.rejects(new Error("🤷‍♂️")); + + const ep = endpoint( + { + eventTrigger: { + eventType: gcfv2.PUBSUB_PUBLISH_EVENT, + eventFilters: { + resource: "topic", + }, + retry: false, + }, + }, + { + platform: "gcfv2", + } + ); + + await expect(fab.createV2Function(ep)).to.be.rejectedWith( + reporter.DeploymentError, + "create topic" + ); + }); + + it("throws on create function failure", async () => { + gcfv2.createFunction.rejects(new Error("Server failure")); + + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + await expect(fab.createV2Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "create"); + + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.rejects(new Error("Fail whale")); + + await expect(fab.createV2Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "create"); + }); + + it("throws on set invoker failure", async () => { + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerCreate.rejects(new Error("Boom")); + + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + await expect(fab.createV2Function(ep)).to.be.rejectedWith( + reporter.DeploymentError, + "set invoker" + ); + }); + + it("sets invoker and concurrency by default", async () => { + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerCreate.resolves(); + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + + await fab.createV2Function(ep); + expect(run.setInvokerCreate).to.have.been.calledWith(ep.project, "service", ["public"]); + expect(setConcurrency).to.have.been.calledWith(ep, "service", 80); + }); + + it("sets explicit invoker", async () => { + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerCreate.resolves(); + const ep = endpoint( + { + httpsTrigger: { + invoker: ["custom@"], + }, + }, + { platform: "gcfv2" } + ); + + await fab.createV2Function(ep); + expect(run.setInvokerCreate).to.have.been.calledWith(ep.project, "service", ["custom@"]); + }); + + it("doesn't set private invoker on create", async () => { + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ 
serviceConfig: { service: "service" } }); + run.setInvokerCreate.resolves(); + const ep = endpoint({ httpsTrigger: { invoker: ["private"] } }, { platform: "gcfv2" }); + + await fab.createV2Function(ep); + expect(gcf.setInvokerCreate).to.not.have.been.called; + }); + + it("doesn't set invoker on non-http functions", async () => { + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerCreate.resolves(); + const ep = endpoint({ scheduleTrigger: {} }, { platform: "gcfv2" }); + + await fab.createV2Function(ep); + expect(run.setInvokerCreate).to.not.have.been.called; + }); + + it("sets explicit concurrency", async () => { + gcfv2.createFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerCreate.resolves(); + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2", concurrency: 1 }); + + await fab.createV2Function(ep); + expect(setConcurrency).to.have.been.calledWith(ep, "service", 1); + }); + }); + + describe("updateV2Function", () => { + it("throws on update function failure", async () => { + gcfv2.updateFunction.rejects(new Error("Server failure")); + + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + await expect(fab.updateV2Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "update"); + + gcfv2.updateFunction.resolves({ name: "op", done: false }); + poller.pollOperation.rejects(new Error("Fail whale")); + await expect(fab.updateV2Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "update"); + }); + + it("throws on set invoker failure", async () => { + gcfv2.updateFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerUpdate.rejects(new Error("Boom")); + + const ep = endpoint({ httpsTrigger: { invoker: ["private"] } }, { platform: "gcfv2" }); + await expect(fab.updateV2Function(ep)).to.be.rejectedWith( + reporter.DeploymentError, + "set invoker" + ); + }); + + it("sets explicit invoker", async () => { + gcfv2.updateFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerUpdate.resolves(); + const ep = endpoint( + { + httpsTrigger: { + invoker: ["custom@"], + }, + }, + { platform: "gcfv2" } + ); + + await fab.updateV2Function(ep); + expect(run.setInvokerUpdate).to.have.been.calledWith(ep.project, "service", ["custom@"]); + }); + + it("does not set invoker by default", async () => { + gcfv2.updateFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerUpdate.resolves(); + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + + await fab.updateV2Function(ep); + expect(run.setInvokerUpdate).to.not.have.been.called; + }); + + it("doesn't set invoker on non-http functions", async () => { + gcfv2.updateFunction.resolves({ name: "op", done: false }); + poller.pollOperation.resolves({ serviceConfig: { service: "service" } }); + run.setInvokerUpdate.resolves(); + const ep = endpoint({ scheduleTrigger: {} }, { platform: "gcfv2" }); + + await fab.updateV2Function(ep); + expect(run.setInvokerUpdate).to.not.have.been.called; + }); + }); + + describe("deleteV2Function", () => { + it("throws on delete function failure", async () => { + gcfv2.deleteFunction.rejects(new Error("404")); + const ep = endpoint({ httpsTrigger: {} 
}, { platform: "gcfv2" }); + + await expect(fab.deleteV2Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "delete"); + + gcfv2.deleteFunction.resolves({ name: "op", done: false }); + poller.pollOperation.rejects(new Error("5xx")); + + await expect(fab.deleteV2Function(ep)).to.be.rejectedWith(reporter.DeploymentError, "delete"); + }); + }); + + describe("setConcurrency", () => { + let service: runNS.Service; + beforeEach(() => { + service = { + apiVersion: "serving.knative.dev/v1", + kind: "service", + metadata: { + name: "service", + namespace: "project", + }, + spec: { + template: { + metadata: { + name: "service", + namespace: "project", + }, + spec: { + containerConcurrency: 80, + }, + }, + traffic: [], + }, + }; + }); + + it("sets concurrency when necessary", async () => { + run.getService.resolves(service); + run.replaceService.callsFake((name: string, svc: runNS.Service) => { + expect(svc.spec.template.spec.containerConcurrency).equals(1); + // Run throws if this field is set + expect(svc.spec.template.metadata.name).is.undefined; + return Promise.resolve(service); + }); + + await fab.setConcurrency(endpoint(), "service", 1); + expect(run.replaceService).to.have.been.called; + }); + + it("doesn't set concurrency when already at the correct value", async () => { + run.getService.resolves(service); + + await fab.setConcurrency( + endpoint(), + "service", + service.spec.template.spec.containerConcurrency! + ); + expect(run.replaceService).to.not.have.been.called; + }); + + it("wraps errors", async () => { + run.getService.rejects(new Error("Oh noes!")); + + await expect(fab.setConcurrency(endpoint(), "service", 1)).to.eventually.be.rejectedWith( + reporter.DeploymentError, + "set concurrency" + ); + + run.getService.resolves(service); + run.replaceService.rejects(new Error("read only")); + await expect(fab.setConcurrency(endpoint(), "service", 1)).to.eventually.be.rejectedWith( + reporter.DeploymentError, + "set concurrency" + ); + }); + }); + + describe("upsertScheduleV1", () => { + const ep = endpoint({ + scheduleTrigger: { + schedule: "every 5 minutes", + }, + }) as backend.Endpoint & backend.ScheduleTriggered; + + it("upserts schedules", async () => { + scheduler.createOrReplaceJob.resolves(); + await fab.upsertScheduleV1(ep); + expect(scheduler.createOrReplaceJob).to.have.been.called; + }); + + it("wraps errors", async () => { + scheduler.createOrReplaceJob.rejects(new Error("Fail")); + await expect(fab.upsertScheduleV1(ep)).to.eventually.be.rejectedWith( + reporter.DeploymentError, + "upsert schedule" + ); + }); + }); + + describe("deleteScheduleV1", () => { + const ep = endpoint({ + scheduleTrigger: { + schedule: "every 5 minutes", + }, + }) as backend.Endpoint & backend.ScheduleTriggered; + + it("deletes schedules and topics", async () => { + scheduler.deleteJob.resolves(); + pubsub.deleteTopic.resolves(); + await fab.deleteScheduleV1(ep); + expect(scheduler.deleteJob).to.have.been.called; + expect(pubsub.deleteTopic).to.have.been.called; + }); + + it("wraps errors", async () => { + scheduler.deleteJob.rejects(new Error("Fail")); + await expect(fab.deleteScheduleV1(ep)).to.eventually.be.rejectedWith( + reporter.DeploymentError, + "delete schedule" + ); + + scheduler.deleteJob.resolves(); + pubsub.deleteTopic.rejects(new Error("Fail")); + await expect(fab.deleteScheduleV1(ep)).to.eventually.be.rejectedWith( + reporter.DeploymentError, + "delete topic" + ); + }); + }); + + describe("setTrigger", () => { + it("does nothing for HTTPS functions", async () => { + // all APIs 
throw by default + await fab.setTrigger(endpoint({ httpsTrigger: {} })); + }); + + it("does nothing for event triggers", async () => { + // all APIs throw by default + const ep = endpoint({ + eventTrigger: { + eventType: gcfNSV2.PUBSUB_PUBLISH_EVENT, + eventFilters: { + resource: "topic", + }, + retry: false, + }, + }); + await fab.setTrigger(ep); + }); + + it("sets schedule triggers", async () => { + const ep = endpoint({ + scheduleTrigger: { + schedule: "every 5 minutes", + }, + }); + const upsertScheduleV1 = sinon.stub(fab, "upsertScheduleV1"); + upsertScheduleV1.resolves(); + + await fab.setTrigger(ep); + expect(upsertScheduleV1).to.have.been.called; + upsertScheduleV1.restore(); + + ep.platform = "gcfv2"; + const upsertScheduleV2 = sinon.stub(fab, "upsertScheduleV2"); + upsertScheduleV2.resolves(); + + await fab.setTrigger(ep); + expect(upsertScheduleV2).to.have.been.called; + }); + }); + + describe("deleteTrigger", () => { + it("does nothing for HTTPS functions", async () => { + // all APIs throw by default + await fab.deleteTrigger(endpoint({ httpsTrigger: {} })); + }); + + it("does nothing for event triggers", async () => { + // all APIs throw by default + const ep = endpoint({ + eventTrigger: { + eventType: gcfNSV2.PUBSUB_PUBLISH_EVENT, + eventFilters: { + resource: "topic", + }, + retry: false, + }, + }); + await fab.deleteTrigger(ep); + }); + + it("deletes schedule triggers", async () => { + const ep = endpoint({ + scheduleTrigger: { + schedule: "every 5 minutes", + }, + }); + const deleteScheduleV1 = sinon.stub(fab, "deleteScheduleV1"); + deleteScheduleV1.resolves(); + + await fab.deleteTrigger(ep); + expect(deleteScheduleV1).to.have.been.called; + deleteScheduleV1.restore(); + + ep.platform = "gcfv2"; + const deleteScheduleV2 = sinon.stub(fab, "deleteScheduleV2"); + deleteScheduleV2.resolves(); + + await fab.deleteTrigger(ep); + expect(deleteScheduleV2).to.have.been.called; + }); + }); + + describe("createEndpoint", () => { + it("creates v1 functions", async () => { + const ep = endpoint(); + const setTrigger = sinon.stub(fab, "setTrigger"); + setTrigger.resolves(); + const createV1Function = sinon.stub(fab, "createV1Function"); + createV1Function.resolves(); + + await fab.createEndpoint(ep, new scraper.SourceTokenScraper()); + expect(createV1Function).is.calledOnce; + expect(setTrigger).is.calledOnce; + expect(setTrigger).is.calledAfter(createV1Function); + }); + + it("creates v2 functions", async () => { + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + const setTrigger = sinon.stub(fab, "setTrigger"); + setTrigger.resolves(); + const createV2Function = sinon.stub(fab, "createV2Function"); + createV2Function.resolves(); + + await fab.createEndpoint(ep, new scraper.SourceTokenScraper()); + expect(createV2Function).is.calledOnce; + expect(setTrigger).is.calledOnce; + expect(setTrigger).is.calledAfter(createV2Function); + }); + + it("aborts for failures midway", async () => { + const ep = endpoint(); + const setTrigger = sinon.stub(fab, "setTrigger"); + const createV1Function = sinon.stub(fab, "createV1Function"); + createV1Function.rejects(new reporter.DeploymentError(ep, "set invoker", undefined)); + + await expect(fab.createEndpoint(ep, new scraper.SourceTokenScraper())).to.be.rejectedWith( + reporter.DeploymentError, + "set invoker" + ); + expect(createV1Function).is.calledOnce; + expect(setTrigger).is.not.called; + }); + }); + + describe("updateEndpoint", () => { + it("updates v1 functions", async () => { + const ep = endpoint(); + const setTrigger = 
sinon.stub(fab, "setTrigger"); + setTrigger.resolves(); + const updateV1Function = sinon.stub(fab, "updateV1Function"); + updateV1Function.resolves(); + + await fab.updateEndpoint({ endpoint: ep }, new scraper.SourceTokenScraper()); + expect(updateV1Function).is.calledOnce; + expect(setTrigger).is.calledOnce; + expect(setTrigger).is.calledAfter(updateV1Function); + }); + + it("updates v2 functions", async () => { + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + const setTrigger = sinon.stub(fab, "setTrigger"); + setTrigger.resolves(); + const updateV2Function = sinon.stub(fab, "updateV2Function"); + updateV2Function.resolves(); + + await fab.updateEndpoint({ endpoint: ep }, new scraper.SourceTokenScraper()); + expect(updateV2Function).is.calledOnce; + expect(setTrigger).is.calledOnce; + expect(setTrigger).is.calledAfter(updateV2Function); + }); + + it("aborts for failures midway", async () => { + const ep = endpoint(); + const setTrigger = sinon.stub(fab, "setTrigger"); + const updateV1Function = sinon.stub(fab, "updateV1Function"); + updateV1Function.rejects(new reporter.DeploymentError(ep, "set invoker", undefined)); + + await expect( + fab.updateEndpoint({ endpoint: ep }, new scraper.SourceTokenScraper()) + ).to.be.rejectedWith(reporter.DeploymentError, "set invoker"); + expect(updateV1Function).is.calledOnce; + expect(setTrigger).is.not.called; + }); + + it("can delete and create", async () => { + const target = endpoint( + { scheduleTrigger: { schedule: "every 5 minutes" } }, + { platform: "gcfv2" } + ); + const before = endpoint( + { scheduleTrigger: { schedule: "every 5 minutes" } }, + { platform: "gcfv1" } + ); + const update = { + endpoint: target, + deleteAndRecreate: before, + }; + + const deleteTrigger = sinon.stub(fab, "deleteTrigger"); + deleteTrigger.resolves(); + const setTrigger = sinon.stub(fab, "setTrigger"); + setTrigger.resolves(); + const deleteV1Function = sinon.stub(fab, "deleteV1Function"); + deleteV1Function.resolves(); + const createV2Function = sinon.stub(fab, "createV2Function"); + createV2Function.resolves(); + + await fab.updateEndpoint(update, new scraper.SourceTokenScraper()); + + expect(deleteTrigger).to.have.been.called; + expect(deleteV1Function).to.have.been.calledImmediatelyAfter(deleteTrigger); + expect(createV2Function).to.have.been.calledImmediatelyAfter(deleteV1Function); + expect(setTrigger).to.have.been.calledImmediatelyAfter(createV2Function); + }); + }); + + describe("deleteEndpoint", () => { + it("deletes v1 functions", async () => { + const ep = endpoint(); + const deleteTrigger = sinon.stub(fab, "deleteTrigger"); + deleteTrigger.resolves(); + const deleteV1Function = sinon.stub(fab, "deleteV1Function"); + deleteV1Function.resolves(); + + await fab.deleteEndpoint(ep); + expect(deleteTrigger).to.have.been.called; + expect(deleteV1Function).to.have.been.calledImmediatelyAfter(deleteTrigger); + }); + + it("deletes v2 functions", async () => { + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + const deleteTrigger = sinon.stub(fab, "deleteTrigger"); + deleteTrigger.resolves(); + const deleteV2Function = sinon.stub(fab, "deleteV2Function"); + deleteV2Function.resolves(); + + await fab.deleteEndpoint(ep); + expect(deleteTrigger).to.have.been.called; + expect(deleteV2Function).to.have.been.calledImmediatelyAfter(deleteTrigger); + }); + + it("does not delete functions with triggers outstanding", async () => { + const ep = endpoint({ httpsTrigger: {} }, { platform: "gcfv2" }); + const deleteV2Function = 
sinon.stub(fab, "deleteV2Function"); + const deleteTrigger = sinon.stub(fab, "deleteTrigger"); + deleteTrigger.rejects(new reporter.DeploymentError(ep, "delete schedule", undefined)); + deleteV2Function.resolves(); + + await expect(fab.deleteEndpoint(ep)).to.eventually.be.rejected; + expect(deleteV2Function).to.not.have.been.called; + }); + }); + + describe("applyRegionalChanges", () => { + it("shares source token scrapers across upserts", async () => { + const ep1 = endpoint({ httpsTrigger: {} }, { id: "A" }); + const ep2 = endpoint({ httpsTrigger: {} }, { id: "B" }); + const ep3 = endpoint({ httpsTrigger: {} }, { id: "C" }); + const changes: planner.RegionalChanges = { + endpointsToCreate: [ep1, ep2], + endpointsToUpdate: [{ endpoint: ep3 }], + endpointsToDelete: [], + }; + + let sourceTokenScraper: scraper.SourceTokenScraper | undefined; + let callCount = 0; + const fakeUpsert = ( + unused: backend.Endpoint | planner.EndpointUpdate, + s: scraper.SourceTokenScraper + ): Promise<void> => { + callCount++; + if (!sourceTokenScraper) { + expect(callCount).to.equal(1); + sourceTokenScraper = s; + } + expect(s).to.equal(sourceTokenScraper); + return Promise.resolve(); + }; + + const createEndpoint = sinon.stub(fab, "createEndpoint"); + createEndpoint.callsFake(fakeUpsert); + const updateEndpoint = sinon.stub(fab, "updateEndpoint"); + updateEndpoint.callsFake(fakeUpsert); + + await fab.applyRegionalChanges(changes); + }); + + it("handles errors and wraps them in results", async () => { + // when it hits a real API it will fail. + const ep = endpoint(); + const changes: planner.RegionalChanges = { + endpointsToCreate: [ep], + endpointsToUpdate: [], + endpointsToDelete: [], + }; + + const results = await fab.applyRegionalChanges(changes); + expect(results[0].error).to.be.instanceOf(reporter.DeploymentError); + expect(results[0].error?.message).to.match(/create function/); + }); + }); + + it("does not delete if there are upsert errors", async () => { + // when it hits a real API it will fail. 
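+ // All GCP stubs reject by default, so the create below is guaranteed to fail; + // the delete in the same region should then be aborted rather than attempted.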
+ const createEP = endpoint({ httpsTrigger: {} }, { id: "A" }); + const deleteEP = endpoint({ httpsTrigger: {} }, { id: "B" }); + const changes: planner.RegionalChanges = { + endpointsToCreate: [createEP], + endpointsToUpdate: [], + endpointsToDelete: [deleteEP], + }; + + const results = await fab.applyRegionalChanges(changes); + const result = results.find((r) => r.endpoint.id === deleteEP.id); + expect(result?.error).to.be.instanceOf(reporter.AbortedDeploymentError); + expect(result?.durationMs).to.equal(0); + }); + + it("applies all kinds of changes", async () => { + const createEP = endpoint({ httpsTrigger: {} }, { id: "A" }); + const updateEP = endpoint({ httpsTrigger: {} }, { id: "B" }); + const deleteEP = endpoint({ httpsTrigger: {} }, { id: "C" }); + const update: planner.EndpointUpdate = { endpoint: updateEP }; + const changes: planner.RegionalChanges = { + endpointsToCreate: [createEP], + endpointsToUpdate: [update], + endpointsToDelete: [deleteEP], + }; + + const createEndpoint = sinon.stub(fab, "createEndpoint"); + createEndpoint.resolves(); + const updateEndpoint = sinon.stub(fab, "updateEndpoint"); + updateEndpoint.resolves(); + const deleteEndpoint = sinon.stub(fab, "deleteEndpoint"); + deleteEndpoint.resolves(); + + const results = await fab.applyRegionalChanges(changes); + expect(createEndpoint).to.have.been.calledWithMatch(createEP); + expect(updateEndpoint).to.have.been.calledWithMatch(update); + expect(deleteEndpoint).to.have.been.calledWith(deleteEP); + + // We can't actually verify that the timing isn't zero because tests + // have run in <1ms and failed. + expect(results[0].error).to.be.undefined; + expect(results[1].error).to.be.undefined; + expect(results[2].error).to.be.undefined; + }); + + describe("applyPlan", () => { + it("fans out to regions", async () => { + const ep1 = endpoint({ httpsTrigger: {} }, { region: "us-central1" }); + const ep2 = endpoint({ httpsTrigger: {} }, { region: "us-west1" }); + const plan: planner.DeploymentPlan = { + "us-central1": { + endpointsToCreate: [ep1], + endpointsToUpdate: [], + endpointsToDelete: [], + }, + "us-west1": { + endpointsToCreate: [], + endpointsToUpdate: [], + endpointsToDelete: [ep2], + }, + }; + + // Will fail when it hits actual API calls + const summary = await fab.applyPlan(plan); + const ep1Result = summary.results.find((r) => r.endpoint.region == ep1.region); + expect(ep1Result?.error).to.be.instanceOf(reporter.DeploymentError); + expect(ep1Result?.error?.message).to.match(/create function/); + + const ep2Result = summary.results.find((r) => r.endpoint.region === ep2.region); + expect(ep2Result?.error).to.be.instanceOf(reporter.DeploymentError); + expect(ep2Result?.error?.message).to.match(/delete function/); + }); + }); +}); diff --git a/src/test/deploy/functions/release/planner.spec.ts b/src/test/deploy/functions/release/planner.spec.ts new file mode 100644 index 00000000000..d5dd56b6bbc --- /dev/null +++ b/src/test/deploy/functions/release/planner.spec.ts @@ -0,0 +1,386 @@ +import { expect } from "chai"; +import * as sinon from "sinon"; + +import * as backend from "../../../../deploy/functions/backend"; +import * as planner from "../../../../deploy/functions/release/planner"; +import * as deploymentTool from "../../../../deploymentTool"; +import * as gcfv2 from "../../../../gcp/cloudfunctionsv2"; +import * as utils from "../../../../utils"; + +describe("planner", () => { + let logLabeledBullet: sinon.SinonStub; + + function allowV2Upgrades(): void { + sinon.stub(planner, "checkForV2Upgrade"); + } + + 
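// Stubbing checkForV2Upgrade turns the planner's v1 -> v2 upgrade guard into a no-op so the upgrade scenarios below are not rejected by it. + 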
beforeEach(() => { + logLabeledBullet = sinon.stub(utils, "logLabeledBullet"); + }); + + afterEach(() => { + sinon.verifyAndRestore(); + }); + + function func( + id: string, + region: string, + triggered: backend.Triggered = { httpsTrigger: {} } + ): backend.Endpoint { + return { + id, + region, + ...triggered, + platform: "gcfv1", + project: "project", + runtime: "nodejs16", + entryPoint: "function", + environmentVariables: {}, + } as backend.Endpoint; + } + + describe("calculateUpdate", () => { + it("throws on illegal updates", () => { + const httpsFunc = func("a", "b", { httpsTrigger: {} }); + const scheduleFunc = func("a", "b", { scheduleTrigger: {} }); + expect(() => planner.calculateUpdate(httpsFunc, scheduleFunc)).to.throw(); + }); + + it("knows to delete & recreate for v2 topic changes", () => { + const original: backend.Endpoint = { + ...func("a", "b", { + eventTrigger: { + eventType: gcfv2.PUBSUB_PUBLISH_EVENT, + eventFilters: { + resource: "topic", + }, + retry: false, + }, + }), + platform: "gcfv2", + }; + const changed = JSON.parse(JSON.stringify(original)) as backend.Endpoint; + if (backend.isEventTriggered(changed)) { + changed.eventTrigger.eventFilters["resource"] = "anotherTopic"; + } + expect(planner.calculateUpdate(changed, original)).to.deep.equal({ + endpoint: changed, + deleteAndRecreate: original, + }); + }); + + it("knows to delete & recreate for v1 to v2 scheduled function upgrades", () => { + const original: backend.Endpoint = { + ...func("a", "b", { scheduleTrigger: {} }), + platform: "gcfv1", + }; + const changed: backend.Endpoint = { ...original, platform: "gcfv2" }; + + allowV2Upgrades(); + expect(planner.calculateUpdate(changed, original)).to.deep.equal({ + endpoint: changed, + deleteAndRecreate: original, + }); + }); + + it("knows to delete & recreate when trigger regions change", () => { + const original: backend.Endpoint = func("a", "b", { + eventTrigger: { + eventType: "google.cloud.storage.object.v1.finalized", + eventFilters: { + bucket: "mybucket", + }, + region: "us-west1", + retry: false, + }, + }); + original.platform = "gcfv2"; + const changed: backend.Endpoint = func("a", "b", { + eventTrigger: { + eventType: "google.cloud.storage.object.v1.finalized", + eventFilters: { + bucket: "bucket2", + }, + region: "us", + retry: false, + }, + }); + changed.platform = "gcfv2"; + allowV2Upgrades(); + expect(planner.calculateUpdate(changed, original)).to.deep.equal({ + endpoint: changed, + deleteAndRecreate: original, + }); + }); + + it("knows to upgrade in-place in the general case", () => { + const v1Function: backend.Endpoint = { + ...func("a", "b"), + platform: "gcfv1", + }; + const v2Function: backend.Endpoint = { + ...v1Function, + platform: "gcfv1", + availableMemoryMb: 512, + }; + expect(planner.calculateUpdate(v2Function, v1Function)).to.deep.equal({ + endpoint: v2Function, + }); + }); + }); + + describe("calculateRegionalChanges", () => { + it("passes a smoke test", () => { + const created = func("created", "region"); + const updated = func("updated", "region"); + const deleted = func("deleted", "region"); + deleted.labels = deploymentTool.labels(); + const pantheon = func("pantheon", "region"); + + const want = { created, updated }; + const have = { updated, deleted, pantheon }; + + // note: pantheon is not updated in any way + expect(planner.calculateRegionalChanges(want, have, {})).to.deep.equal({ + endpointsToCreate: [created], + endpointsToUpdate: [ + { + endpoint: updated, + }, + ], + endpointsToDelete: [deleted], + }); + }); + + it("can be told 
to delete all functions", () => { + const created = func("created", "region"); + const updated = func("updated", "region"); + const deleted = func("deleted", "region"); + deleted.labels = deploymentTool.labels(); + const pantheon = func("pantheon", "region"); + + const want = { created, updated }; + const have = { updated, deleted, pantheon }; + + // note: pantheon is deleted because we have deleteAll: true + expect(planner.calculateRegionalChanges(want, have, { deleteAll: true })).to.deep.equal({ + endpointsToCreate: [created], + endpointsToUpdate: [ + { + endpoint: updated, + }, + ], + endpointsToDelete: [deleted, pantheon], + }); + }); + }); + + describe("createDeploymentPlan", () => { + it("applies filters", () => { + const group1Created = func("g1-created", "region"); + const group1Updated = func("g1-updated", "region"); + const group1Deleted = func("g1-deleted", "region"); + + const group2Created = func("g2-created", "region"); + const group2Updated = func("g2-updated", "region"); + const group2Deleted = func("g2-deleted", "region"); + + group1Deleted.labels = deploymentTool.labels(); + group2Deleted.labels = deploymentTool.labels(); + + const want = backend.of(group1Updated, group1Created, group2Updated, group2Created); + const have = backend.of(group1Updated, group1Deleted, group2Updated, group2Deleted); + + expect(planner.createDeploymentPlan(want, have, { filters: [["g1"]] })).to.deep.equal({ + region: { + endpointsToCreate: [group1Created], + endpointsToUpdate: [ + { + endpoint: group1Updated, + }, + ], + endpointsToDelete: [group1Deleted], + }, + }); + }); + + it("nudges users towards concurrency settings when upgrading and not setting", () => { + const original: backend.Endpoint = func("id", "region"); + original.platform = "gcfv1"; + const upgraded: backend.Endpoint = { ...original }; + upgraded.platform = "gcfv2"; + + const have = backend.of(original); + const want = backend.of(upgraded); + + allowV2Upgrades(); + planner.createDeploymentPlan(want, have); + expect(logLabeledBullet).to.have.been.calledOnceWith( + "functions", + sinon.match(/change this with the 'concurrency' option/) + ); + }); + }); + + it("does not warn users about concurrency when inappropriate", () => { + allowV2Upgrades(); + // Concurrency isn't set but this isn't an upgrade operation, so there + // should be no warning + const v2Function: backend.Endpoint = { ...func("id", "region"), platform: "gcfv2" }; + + planner.createDeploymentPlan(backend.of(v2Function), backend.of(v2Function)); + expect(logLabeledBullet).to.not.have.been.called; + + const v1Function: backend.Endpoint = { ...func("id", "region"), platform: "gcfv1" }; + planner.createDeploymentPlan(backend.of(v1Function), backend.of(v1Function)); + expect(logLabeledBullet).to.not.have.been.called; + + // Upgraded but specified concurrency + const concurrencyUpgraded: backend.Endpoint = { + ...v1Function, + platform: "gcfv2", + concurrency: 80, + }; + planner.createDeploymentPlan(backend.of(concurrencyUpgraded), backend.of(v1Function)); + expect(logLabeledBullet).to.not.have.been.called; + }); + + describe("checkForIllegalUpdate", () => { + // TODO: delete this test once GCF supports upgrading from v1 to v2 + it("prohibits upgrades from v1 to v2", () => { + const have: backend.Endpoint = { ...func("id", "region"), platform: "gcfv1" }; + const want: backend.Endpoint = { ...func("id", "region"), platform: "gcfv2" }; + + expect(() => planner.checkForIllegalUpdate(want, have)).to.throw(); + }); + + it("should throw if an https function would be changed into 
an event triggered function", () => { + const want = func("a", "b", { + eventTrigger: { + eventType: "google.pubsub.topic.publish", + eventFilters: {}, + retry: false, + }, + }); + const have = func("a", "b", { httpsTrigger: {} }); + + expect(() => planner.checkForIllegalUpdate(want, have)).to.throw(); + }); + + it("should throw if an event triggered function would be changed into an https function", () => { + const want = func("a", "b", { httpsTrigger: {} }); + const have = func("a", "b", { + eventTrigger: { + eventType: "google.pubsub.topic.publish", + eventFilters: {}, + retry: false, + }, + }); + + expect(() => planner.checkForIllegalUpdate(want, have)).to.throw(); + }); + + it("should throw if a scheduled trigger would change into an https function", () => { + const want = func("a", "b"); + const have = func("a", "b", { scheduleTrigger: {} }); + + expect(() => planner.checkForIllegalUpdate(want, have)).to.throw(); + }); + + it("should not throw if an event triggered function keeps the same trigger", () => { + const eventTrigger: backend.EventTrigger = { + eventType: "google.pubsub.topic.publish", + eventFilters: {}, + retry: false, + }; + const want = func("a", "b", { eventTrigger }); + + expect(() => planner.checkForIllegalUpdate(want, want)).not.to.throw(); + }); + + it("should not throw if an https function stays as an https function", () => { + const want = func("a", "b"); + const have = func("a", "b"); + + expect(() => planner.checkForIllegalUpdate(want, have)).not.to.throw(); + }); + + it("should not throw if a scheduled function stays as a scheduled function", () => { + const want = func("a", "b", { scheduleTrigger: {} }); + const have = func("a", "b", { scheduleTrigger: {} }); + + expect(() => planner.checkForIllegalUpdate(want, have)).not.to.throw(); + }); + + it("should throw if a user downgrades from v2 to v1", () => { + const want: backend.Endpoint = { ...func("id", "region"), platform: "gcfv1" }; + const have: backend.Endpoint = { ...func("id", "region"), platform: "gcfv2" }; + + expect(() => planner.checkForIllegalUpdate(want, have)).to.throw(); + }); + }); + + it("detects changes to v2 pubsub topics", () => { + const eventTrigger: backend.EventTrigger = { + eventType: gcfv2.PUBSUB_PUBLISH_EVENT, + eventFilters: { + resource: "projects/p/topics/t", + }, + retry: false, + }; + + let want: backend.Endpoint = { ...func("id", "region"), platform: "gcfv1" }; + let have: backend.Endpoint = { ...func("id", "region"), platform: "gcfv1" }; + expect(planner.changedV2PubSubTopic(want, have)).to.be.false; + + want.platform = "gcfv2"; + expect(planner.changedV2PubSubTopic(want, have)).to.be.false; + + have.platform = "gcfv2"; + expect(planner.changedV2PubSubTopic(want, have)).to.be.false; + + want = { + ...func("id", "region", { eventTrigger }), + platform: "gcfv2", + }; + expect(planner.changedV2PubSubTopic(want, have)).to.be.false; + + have = { + ...func("id", "region", { eventTrigger }), + platform: "gcfv2", + }; + expect(planner.changedV2PubSubTopic(want, have)).to.be.false; + + // want has a shallow copy of eventTrigger, so we need to duplicate it + // to modify only 'want' + want = JSON.parse(JSON.stringify(want)) as backend.Endpoint; + if (backend.isEventTriggered(want)) { + want.eventTrigger.eventFilters.resource = "projects/p/topics/t2"; + } + expect(planner.changedV2PubSubTopic(want, have)).to.be.true; + }); + + it("detects upgrades to scheduled functions", () => { + const v1Https: backend.Endpoint = { ...func("id", "region"), platform: "gcfv1" };
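 + // Only an endpoint that is scheduled on both sides and moves from gcfv1 to + // gcfv2 counts as an upgraded schedule; the mixed pairings below must be false. + const v1Scheduled: 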
backend.Endpoint = { + ...func("id", "region", { scheduleTrigger: {} }), + platform: "gcfv1", + }; + const v2Https: backend.Endpoint = { ...func("id", "region"), platform: "gcfv2" }; + const v2Scheduled: backend.Endpoint = { + ...func("id", "region", { scheduleTrigger: {} }), + platform: "gcfv2", + }; + + expect(planner.upgradedScheduleFromV1ToV2(v1Https, v1Https)).to.be.false; + expect(planner.upgradedScheduleFromV1ToV2(v2Https, v1Https)).to.be.false; + expect(planner.upgradedScheduleFromV1ToV2(v1Scheduled, v1Scheduled)).to.be.false; + expect(planner.upgradedScheduleFromV1ToV2(v2Scheduled, v2Scheduled)).to.be.false; + + // Invalid case but caught elsewhere + expect(planner.upgradedScheduleFromV1ToV2(v2Scheduled, v1Https)).to.be.false; + expect(planner.upgradedScheduleFromV1ToV2(v2Https, v1Scheduled)).to.be.false; + + expect(planner.upgradedScheduleFromV1ToV2(v2Scheduled, v1Scheduled)).to.be.true; + }); +}); diff --git a/src/test/deploy/functions/release/reporter.spec.ts b/src/test/deploy/functions/release/reporter.spec.ts new file mode 100644 index 00000000000..c87886e37a4 --- /dev/null +++ b/src/test/deploy/functions/release/reporter.spec.ts @@ -0,0 +1,358 @@ +import { expect } from "chai"; +import * as sinon from "sinon"; + +import { logger } from "../../../../logger"; +import * as backend from "../../../../deploy/functions/backend"; +import * as reporter from "../../../../deploy/functions/release/reporter"; +import * as track from "../../../../track"; + +const ENDPOINT_BASE: Omit<backend.Endpoint, "httpsTrigger"> = { + platform: "gcfv1", + id: "id", + region: "region", + project: "project", + entryPoint: "id", + runtime: "nodejs16", +}; +const ENDPOINT: backend.Endpoint = { ...ENDPOINT_BASE, httpsTrigger: {} }; + +describe("reporter", () => { + describe("triggerTag", () => { + it("detects v1.https", () => { + expect( + reporter.triggerTag({ + ...ENDPOINT_BASE, + httpsTrigger: {}, + }) + ).to.equal("v1.https"); + }); + + it("detects v2.https", () => { + expect( + reporter.triggerTag({ + ...ENDPOINT_BASE, + platform: "gcfv2", + httpsTrigger: {}, + }) + ).to.equal("v2.https"); + }); + + it("detects v1.callable", () => { + expect( + reporter.triggerTag({ + ...ENDPOINT_BASE, + httpsTrigger: {}, + labels: { + "deployment-callable": "true", + }, + }) + ).to.equal("v1.callable"); + }); + + it("detects v2.callable", () => { + expect( + reporter.triggerTag({ + ...ENDPOINT_BASE, + platform: "gcfv2", + httpsTrigger: {}, + labels: { + "deployment-callable": "true", + }, + }) + ).to.equal("v2.callable"); + }); + + it("detects v1.scheduled", () => { + expect( + reporter.triggerTag({ + ...ENDPOINT_BASE, + scheduleTrigger: {}, + }) + ).to.equal("v1.scheduled"); + }); + + it("detects v2.scheduled", () => { + expect( + reporter.triggerTag({ + ...ENDPOINT_BASE, + platform: "gcfv2", + scheduleTrigger: {}, + }) + ).to.equal("v2.scheduled"); + }); + + it("detects others", () => { + expect( + reporter.triggerTag({ + ...ENDPOINT_BASE, + platform: "gcfv2", + eventTrigger: { + eventType: "google.pubsub.topic.publish", + eventFilters: {}, + retry: false, + }, + }) + ).to.equal("google.pubsub.topic.publish"); + }); + }); + + describe("logAndTrackDeployStats", () => { + let trackStub: sinon.SinonStub; + let debugStub: sinon.SinonStub; + + beforeEach(() => { + trackStub = sinon.stub(track, "track"); + debugStub = sinon.stub(logger, "debug"); + }); + + afterEach(() => { + sinon.verifyAndRestore(); + }); + + it("tracks global summaries", async () => { + const summary: reporter.Summary = { + totalTime: 2_000, + results: [
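 + // One success (2s), one failed update (1s), and one aborted result (0ms); + // the abort is excluded from the timing metrics asserted below. + { + endpoint: ENDPOINT, 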
durationMs: 2_000, + }, + { + endpoint: ENDPOINT, + durationMs: 1_000, + error: new reporter.DeploymentError(ENDPOINT, "update", undefined), + }, + { + endpoint: ENDPOINT, + durationMs: 0, + error: new reporter.AbortedDeploymentError(ENDPOINT), + }, + ], + }; + + await reporter.logAndTrackDeployStats(summary); + + expect(trackStub).to.have.been.calledWith("function_deploy_success", "v1.https", 2_000); + expect(trackStub).to.have.been.calledWith("function_deploy_failure", "v1.https", 1_000); + // Aborts aren't tracked because they would throw off timing metrics + expect(trackStub).to.not.have.been.calledWith("function_deploy_failure", "v1.https", 0); + + expect(debugStub).to.have.been.calledWith("Total Function Deployment time: 2000"); + expect(debugStub).to.have.been.calledWith("3 Functions Deployed"); + expect(debugStub).to.have.been.calledWith("1 Functions Errored"); + expect(debugStub).to.have.been.calledWith("1 Function Deployments Aborted"); + + // The 0ms for an aborted function isn't counted. + expect(debugStub).to.have.been.calledWith("Average Function Deployment time: 1500"); + }); + + it("tracks v1 vs v2 codebases", async () => { + const v1 = { ...ENDPOINT }; + const v2: backend.Endpoint = { ...ENDPOINT, platform: "gcfv2" }; + + const summary: reporter.Summary = { + totalTime: 1_000, + results: [ + { + endpoint: v1, + durationMs: 1_000, + }, + { + endpoint: v2, + durationMs: 1_000, + }, + ], + }; + + await reporter.logAndTrackDeployStats(summary); + expect(trackStub).to.have.been.calledWith("functions_codebase_deploy", "v1+v2", 2); + trackStub.resetHistory(); + + summary.results = [{ endpoint: v1, durationMs: 1_000 }]; + await reporter.logAndTrackDeployStats(summary); + expect(trackStub).to.have.been.calledWith("functions_codebase_deploy", "v1", 1); + trackStub.resetHistory(); + + summary.results = [{ endpoint: v2, durationMs: 1_000 }]; + await reporter.logAndTrackDeployStats(summary); + expect(trackStub).to.have.been.calledWith("functions_codebase_deploy", "v2", 1); + }); + + it("tracks overall success/failure", async () => { + const success: reporter.DeployResult = { + endpoint: ENDPOINT, + durationMs: 1_000, + }; + const failure: reporter.DeployResult = { + endpoint: ENDPOINT, + durationMs: 1_000, + error: new reporter.DeploymentError(ENDPOINT, "create", undefined), + }; + + const summary: reporter.Summary = { + totalTime: 1_000, + results: [success, failure], + }; + + await reporter.logAndTrackDeployStats(summary); + expect(trackStub).to.have.been.calledWith("functions_deploy_result", "partial_success", 1); + expect(trackStub).to.have.been.calledWith("functions_deploy_result", "partial_failure", 1); + expect(trackStub).to.have.been.calledWith( + "functions_deploy_result", + "partial_error_ratio", + 0.5 + ); + trackStub.resetHistory(); + + summary.results = [success]; + await reporter.logAndTrackDeployStats(summary); + expect(trackStub).to.have.been.calledWith("functions_deploy_result", "success", 1); + trackStub.resetHistory(); + + summary.results = [failure]; + await reporter.logAndTrackDeployStats(summary); + expect(trackStub).to.have.been.calledWith("functions_deploy_result", "failure", 1); + }); + }); + + describe("printErrors", () => { + let infoStub: sinon.SinonStub; + + beforeEach(() => { + infoStub = sinon.stub(logger, "info"); + }); + + afterEach(() => { + sinon.verifyAndRestore(); + }); + + it("does nothing if there are no errors", () => { + const summary: reporter.Summary = { + totalTime: 1_000, + results: [ + { + endpoint: ENDPOINT, + durationMs: 1_000, + }, + ], 
+ }; + + reporter.printErrors(summary); + + expect(infoStub).to.not.have.been.called; + }); + + it("only prints summaries for non-aborted errors", () => { + const summary: reporter.Summary = { + totalTime: 1_000, + results: [ + { + endpoint: { ...ENDPOINT, id: "failedCreate" }, + durationMs: 1_000, + error: new reporter.DeploymentError(ENDPOINT, "create", undefined), + }, + { + endpoint: { ...ENDPOINT, id: "abortedDelete" }, + durationMs: 0, + error: new reporter.AbortedDeploymentError(ENDPOINT), + }, + ], + }; + + reporter.printErrors(summary); + + // N.B. The lists of functions are printed in one call along with their header + // so that we know why a function label was printed (e.g. abortedDelete shouldn't + // show up in the main list of functions that had deployment errors but should show + // up in the list of functions that weren't deleted). To match these regexes we must + // pass the "s" modifier to regexes to make . capture newlines. + expect(infoStub).to.have.been.calledWithMatch(/Functions deploy had errors.*failedCreate/s); + expect(infoStub).to.not.have.been.calledWithMatch( + /Functions deploy had errors.*abortedDelete/s + ); + }); + + it("prints IAM errors", () => { + const explicit: backend.Endpoint = { + ...ENDPOINT, + httpsTrigger: { + invoker: ["public"], + }, + }; + + const summary: reporter.Summary = { + totalTime: 1_000, + results: [ + { + endpoint: explicit, + durationMs: 1_000, + error: new reporter.DeploymentError(explicit, "set invoker", undefined), + }, + ], + }; + + reporter.printErrors(summary); + + expect(infoStub).to.have.been.calledWithMatch("Unable to set the invoker for the IAM policy"); + expect(infoStub).to.not.have.been.calledWithMatch( + "One or more functions were being implicitly made publicly available" + ); + + infoStub.resetHistory(); + // No longer explicitly setting invoker + summary.results[0].endpoint = ENDPOINT; + reporter.printErrors(summary); + + expect(infoStub).to.have.been.calledWithMatch("Unable to set the invoker for the IAM policy"); + expect(infoStub).to.have.been.calledWithMatch( + "One or more functions were being implicitly made publicly available" + ); + }); + + it("prints quota errors", () => { + const rawError = new Error("Quota exceeded"); + (rawError as any).status = 429; + const summary: reporter.Summary = { + totalTime: 1_000, + results: [ + { + endpoint: ENDPOINT, + durationMs: 1_000, + error: new reporter.DeploymentError(ENDPOINT, "create", rawError), + }, + ], + }; + + reporter.printErrors(summary); + expect(infoStub).to.have.been.calledWithMatch( + "Exceeded maximum retries while deploying functions." 
+ ); + }); + + it("prints aborted errors", () => { + const summary: reporter.Summary = { + totalTime: 1_000, + results: [ + { + endpoint: { ...ENDPOINT, id: "failedCreate" }, + durationMs: 1_000, + error: new reporter.DeploymentError(ENDPOINT, "create", undefined), + }, + { + endpoint: { ...ENDPOINT, id: "abortedDelete" }, + durationMs: 1_000, + error: new reporter.AbortedDeploymentError(ENDPOINT), + }, + ], + }; + + reporter.printErrors(summary); + expect(infoStub).to.have.been.calledWithMatch( + /the following functions were not deleted.*abortedDelete/s + ); + expect(infoStub).to.not.have.been.calledWithMatch( + /the following functions were not deleted.*failedCreate/s + ); + }); + }); +}); diff --git a/src/test/deploy/functions/release/sourceTokenScraper.spec.ts b/src/test/deploy/functions/release/sourceTokenScraper.spec.ts new file mode 100644 index 00000000000..40927a0b25c --- /dev/null +++ b/src/test/deploy/functions/release/sourceTokenScraper.spec.ts @@ -0,0 +1,44 @@ +import { expect } from "chai"; + +import { SourceTokenScraper } from "../../../../deploy/functions/release/sourceTokenScraper"; + +describe("SourceTokenScraper", () => { + it("immediately provides the first result", async () => { + const scraper = new SourceTokenScraper(); + await expect(scraper.tokenPromise()).to.eventually.be.undefined; + }); + + it("provides results after the first operation completes", async () => { + const scraper = new SourceTokenScraper(); + // First result comes right away. + await expect(scraper.tokenPromise()).to.eventually.be.undefined; + + let gotResult = false; + const timeout = new Promise((resolve, reject) => { + setTimeout(() => reject(new Error("Timeout")), 10); + }); + const getResult = (async () => { + await scraper.tokenPromise(); + gotResult = true; + })(); + await expect(Promise.race([getResult, timeout])).to.be.rejectedWith("Timeout"); + expect(gotResult).to.be.false; + + scraper.poller({ done: true }); + await expect(getResult).to.eventually.be.undefined; + }); + + it("provides tokens from an operation", async () => { + const scraper = new SourceTokenScraper(); + // First result comes right away + await expect(scraper.tokenPromise()).to.eventually.be.undefined; + + scraper.poller({ + metadata: { + sourceToken: "magic token", + target: "projects/p/locations/l/functions/f", + }, + }); + await expect(scraper.tokenPromise()).to.eventually.equal("magic token"); + }); +}); diff --git a/src/test/deploy/functions/runtimes/discovery/index.spec.ts b/src/test/deploy/functions/runtimes/discovery/index.spec.ts index 90b0b0aa0f0..9279f990a88 100644 --- a/src/test/deploy/functions/runtimes/discovery/index.spec.ts +++ b/src/test/deploy/functions/runtimes/discovery/index.spec.ts @@ -8,15 +8,15 @@ import { FirebaseError } from "../../../../../error"; import * as discovery from "../../../../../deploy/functions/runtimes/discovery"; import * as backend from "../../../../../deploy/functions/backend"; -const MIN_FUNCTION = { - platform: "gcfv1" as backend.FunctionsPlatform, - id: "function", +const MIN_ENDPOINT = { entryPoint: "entrypoint", - trigger: {}, + httpsTrigger: {}, }; -const FUNCTION: backend.FunctionSpec = { - ...MIN_FUNCTION, +const ENDPOINT: backend.Endpoint = { + ...MIN_ENDPOINT, + id: "id", + platform: "gcfv2", project: "project", region: api.functionsDefaultRegion, runtime: "nodejs16", @@ -24,16 +24,12 @@ const FUNCTION: backend.FunctionSpec = { const YAML_OBJ = { specVersion: "v1alpha1", - ...backend.empty(), - cloudFunctions: [MIN_FUNCTION], + endpoints: { id: MIN_ENDPOINT }, };
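 +// The manifest now lists endpoints as a map keyed by endpoint ID instead of a +// cloudFunctions array. const 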
YAML_TEXT = yaml.dump(YAML_OBJ); -const BACKEND: backend.Backend = { - ...backend.empty(), - cloudFunctions: [FUNCTION], -}; +const BACKEND: backend.Backend = backend.of(ENDPOINT); describe("yamlToBackend", () => { it("Accepts a valid v1alpha1 spec", () => { @@ -47,7 +43,7 @@ describe("yamlToBackend", () => { }); it("Requires a spec version", () => { - const flawed: any = { ...YAML_OBJ }; + const flawed: Record<string, unknown> = { ...YAML_OBJ }; delete flawed.specVersion; expect(() => discovery.yamlToBackend(flawed, "project", api.functionsDefaultRegion, "nodejs16") diff --git a/src/test/deploy/functions/runtimes/discovery/v1alpha1.spec.ts b/src/test/deploy/functions/runtimes/discovery/v1alpha1.spec.ts index 2b862ccbb0b..4772edb229d 100644 --- a/src/test/deploy/functions/runtimes/discovery/v1alpha1.spec.ts +++ b/src/test/deploy/functions/runtimes/discovery/v1alpha1.spec.ts @@ -8,16 +8,13 @@ import * as v1alpha1 from "../../../../../deploy/functions/runtimes/discovery/v1 const PROJECT = "project"; const REGION = "region"; const RUNTIME: Runtime = "node14"; -const MIN_FUNC: Partial<backend.FunctionSpec> = { - platform: "gcfv1", - id: "id", +const MIN_ENDPOINT: Omit<backend.Endpoint, "httpsTrigger" | "id" | "region" | "project" | "platform" | "runtime"> = { entryPoint: "entryPoint", - trigger: {}, }; describe("backendFromV1Alpha1", () => { describe("parser errors", () => { - function assertParserError(obj: any) { + function assertParserError(obj: unknown): void { expect(() => v1alpha1.backendFromV1Alpha1(obj, PROJECT, REGION, RUNTIME)).to.throw( FirebaseError ); @@ -30,15 +27,13 @@ describe("backendFromV1Alpha1", () => { const invalidBackendTypes = { requiredAPIS: ["cloudscheduler.googleapis.com"], - cloudFunctions: {}, - topics: {}, - schedules: {}, - environmentVariables: {}, + endpoints: [], }; for (const [key, value] of Object.entries(invalidBackendTypes)) { it(`throws on invalid value for top-level key ${key}`, () => { const obj = { - functions: [MIN_FUNC], + requiredAPIs: {}, + endpoints: {}, [key]: value, }; assertParserError(obj); @@ -50,30 +45,31 @@ }); }); // top level keys - describe("CloudFunction keys", () => { + describe("Endpoint keys", () => { it("invalid keys", () => { assertParserError({ - cloudFunctions: [ - { - ...MIN_FUNC, + endpoints: { + id: { + ...MIN_ENDPOINT, + httpsTrigger: {}, invalid: "key", }, - ], + }, }); }); - for (const key of Object.keys(MIN_FUNC)) { - it(`missing CloudFunction key ${key}`, () => { - const func = { ...MIN_FUNC } as Record<string, unknown>; + for (const key of Object.keys(MIN_ENDPOINT)) { + it(`missing Endpoint key ${key}`, () => { + const func = { ...MIN_ENDPOINT, httpsTrigger: {} } as Record<string, unknown>; delete func[key]; assertParserError({ cloudFunctions: [func] }); }); } const invalidFunctionEntries = { - apiVersion: "five", + platform: 2, id: 1, - region: ["us-central1"], + region: "us-central1", project: 42, runtime: null, entryPoint: 5, @@ -90,11 +86,12 @@ }; for (const [key, value] of Object.entries(invalidFunctionEntries)) { it(`invalid value for CloudFunction key ${key}`, () => { - const func = { - ...MIN_FUNC, + const endpoint = { + ...MIN_ENDPOINT, + httpsTrigger: {}, [key]: value, }; - assertParserError({ cloudFunctions: [func] }); + assertParserError({ endpoints: { endpoint } }); }); } }); // Top level function keys describe("Event triggers", () => { const validTrigger: backend.EventTrigger = { eventType: "google.pubsub.v1.topic.publish", - eventFilters: { - resource: "projects/p/topics/t", - }, + eventFilters: { resource: 
"projects/p/topics/t" }, retry: true, region: "global", serviceAccountEmail: "root@", }; for (const key of ["eventType", "eventFilters"]) { it(`missing event trigger key ${key}`, () => { - const trigger = { ...validTrigger } as any; - delete trigger[key]; + const eventTrigger = { ...validTrigger } as Record; + delete eventTrigger[key]; assertParserError({ - cloudFunctions: [ - { - ...MIN_FUNC, - trigger, - }, - ], + endpoints: { + func: { ...MIN_ENDPOINT, eventTrigger }, + }, }); }); } @@ -133,111 +125,222 @@ describe("backendFromV1Alpha1", () => { }; for (const [key, value] of Object.entries(invalidEntries)) { it(`invalid value for event trigger key ${key}`, () => { - const trigger = { + const eventTrigger = { ...validTrigger, [key]: value, }; assertParserError({ - cloudFunctions: [ - { - ...MIN_FUNC, - trigger, - }, - ], + endpoints: { + func: { ...MIN_ENDPOINT, eventTrigger }, + }, }); }); } }); // Event triggers + + describe("httpsTriggers", () => { + it("invalid value for https trigger key invoker", () => { + assertParserError({ + endpoints: { + func: { + ...MIN_ENDPOINT, + httpsTrigger: { invoker: 42 }, + }, + }, + }); + }); + }); + + describe("scheduleTriggers", () => { + const validTrigger: backend.ScheduleTrigger = { + schedule: "every 5 minutes", + timeZone: "America/Los_Angeles", + retryConfig: { + retryCount: 42, + minBackoffDuration: "1s", + maxBackoffDuration: "20s", + maxDoublings: 20, + maxRetryDuration: "120s", + }, + }; + + const invalidEntries = { + schedule: 46, + timeZone: {}, + }; + for (const [key, value] of Object.entries(invalidEntries)) { + it(`invalid value for schedule trigger key ${key}`, () => { + const scheduleTrigger = { + ...validTrigger, + [key]: value, + }; + assertParserError({ + endpoints: { + func: { ...MIN_ENDPOINT, scheduleTrigger }, + }, + }); + }); + } + + const invalidRetryEntries = { + retryCount: "42", + minBackoffDuration: 1, + maxBackoffDuration: 20, + maxDoublings: "20", + maxRetryDuration: 120, + }; + for (const [key, value] of Object.entries(invalidRetryEntries)) { + const retryConfig = { + ...validTrigger.retryConfig, + [key]: value, + }; + const scheduleTrigger = { ...validTrigger, retryConfig }; + assertParserError({ + endpoints: { + func: { ...MIN_ENDPOINT, scheduleTrigger }, + }, + }); + } + }); + + it("detects missing triggers", () => { + assertParserError({ endpoints: MIN_ENDPOINT }); + }); }); // Parser errors; describe("allows valid backends", () => { - const DEFAULTED_FUNC = { - ...MIN_FUNC, - project: PROJECT, - region: REGION, - runtime: RUNTIME, - } as backend.FunctionSpec; - - const TARGET_SERVICE = { + const DEFAULTED_ENDPOINT: Omit = { + ...MIN_ENDPOINT, + platform: "gcfv2", id: "id", project: PROJECT, region: REGION, + runtime: RUNTIME, }; it("fills default backend and function fields", () => { - const yaml = { - cloudFunctions: [ - { - ...MIN_FUNC, - trigger: {}, + const yaml: v1alpha1.Manifest = { + specVersion: "v1alpha1", + endpoints: { + id: { + ...MIN_ENDPOINT, + httpsTrigger: {}, }, - ], + }, }; const parsed = v1alpha1.backendFromV1Alpha1(yaml, PROJECT, REGION, RUNTIME); - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...DEFAULTED_FUNC, - trigger: {}, + const expected: backend.Backend = backend.of({ ...DEFAULTED_ENDPOINT, httpsTrigger: {} }); + expect(parsed).to.deep.equal(expected); + }); + + it("copies schedules", () => { + const scheduleTrigger: backend.ScheduleTrigger = { + schedule: "every 5 minutes", + timeZone: "America/Los_Angeles", + retryConfig: { + retryCount: 20, + 
+ minBackoffDuration: "1s", + maxBackoffDuration: "20s", + maxRetryDuration: "120s", + maxDoublings: 10, + }, + }; + + const yaml: v1alpha1.Manifest = { + specVersion: "v1alpha1", + endpoints: { + id: { + ...MIN_ENDPOINT, + scheduleTrigger, }, - ], + }, }; + const expected = backend.of({ ...DEFAULTED_ENDPOINT, scheduleTrigger }); + const parsed = v1alpha1.backendFromV1Alpha1(yaml, PROJECT, REGION, RUNTIME); expect(parsed).to.deep.equal(expected); }); - it("fills defaults for pub/sub and schedules", () => { - const yaml = { - cloudFunctions: [ - { - ...MIN_FUNC, - trigger: {}, - }, - ], - topics: [ - { - id: "topic", - targetService: { - id: "id", - }, - }, - ], - schedules: [ - { - id: "schedule", - schedule: "every 5 minutes", - transport: "https", - targetService: { - id: "id", - }, - }, - ], + it("copies event triggers", () => { + const eventTrigger: backend.EventTrigger = { + eventType: "google.pubsub.topic.v1.publish", + eventFilters: { + resource: "projects/project/topics/topic", + }, + region: "us-central1", + serviceAccountEmail: "sa@", + retry: true, }; - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...DEFAULTED_FUNC, - trigger: {}, + const yaml: v1alpha1.Manifest = { + specVersion: "v1alpha1", + endpoints: { + id: { + ...MIN_ENDPOINT, + eventTrigger, }, - ], - topics: [ - { - id: "topic", - project: PROJECT, - targetService: TARGET_SERVICE, + }, + }; + const expected = backend.of({ ...DEFAULTED_ENDPOINT, eventTrigger }); + const parsed = v1alpha1.backendFromV1Alpha1(yaml, PROJECT, REGION, RUNTIME); + expect(parsed).to.deep.equal(expected); + }); + + it("copies optional fields", () => { + const fields: backend.ServiceConfiguration = { + concurrency: 42, + labels: { hello: "world" }, + environmentVariables: { foo: "bar" }, + availableMemoryMb: 256, + timeout: "60s", + maxInstances: 20, + minInstances: 1, + vpcConnector: "hello", + vpcConnectorEgressSettings: "ALL_TRAFFIC", + ingressSettings: "ALLOW_INTERNAL_ONLY", + serviceAccountEmail: "sa@", + }; + + const yaml: v1alpha1.Manifest = { + specVersion: "v1alpha1", + endpoints: { + id: { + ...MIN_ENDPOINT, + httpsTrigger: {}, + ...fields, }, - ], - schedules: [ - { - id: "schedule", - project: PROJECT, - schedule: "every 5 minutes", - transport: "https", - targetService: TARGET_SERVICE, + }, + }; + const expected = backend.of({ + ...DEFAULTED_ENDPOINT, + httpsTrigger: {}, + ...fields, + }); + const parsed = v1alpha1.backendFromV1Alpha1(yaml, PROJECT, REGION, RUNTIME); + expect(parsed).to.deep.equal(expected); + }); + + it("handles multiple regions", () => { + const yaml: v1alpha1.Manifest = { + specVersion: "v1alpha1", + endpoints: { + id: { + ...MIN_ENDPOINT, + httpsTrigger: {}, + region: ["region1", "region2"], }, - ], + }, }; + const expected = backend.of( + { + ...DEFAULTED_ENDPOINT, + httpsTrigger: {}, + region: "region1", + }, + { + ...DEFAULTED_ENDPOINT, + httpsTrigger: {}, + region: "region2", + } + ); const parsed = v1alpha1.backendFromV1Alpha1(yaml, PROJECT, REGION, RUNTIME); expect(parsed).to.deep.equal(expected); }); diff --git a/src/test/deploy/functions/runtimes/node/parseTriggers.spec.ts b/src/test/deploy/functions/runtimes/node/parseTriggers.spec.ts index 950100e46ab..c96179e2bb8 100644 --- a/src/test/deploy/functions/runtimes/node/parseTriggers.spec.ts +++ b/src/test/deploy/functions/runtimes/node/parseTriggers.spec.ts @@ -26,7 +26,7 @@ describe("addResourcesToBackend", () => { project: "project", }); - const BASIC_FUNCTION: Omit<backend.FunctionSpec, "trigger"> = Object.freeze({ + const BASIC_ENDPOINT: Omit<backend.Endpoint, "httpsTrigger"> = 
Object.freeze({ platform: "gcfv1", ...BASIC_FUNCTION_NAME, runtime: "nodejs16", @@ -61,15 +61,7 @@ describe("addResourcesToBackend", () => { const result = backend.empty(); parseTriggers.addResourcesToBackend("project", "nodejs16", trigger, result); - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...BASIC_FUNCTION, - trigger: {}, - }, - ], - }; + const expected: backend.Backend = backend.of({ ...BASIC_ENDPOINT, httpsTrigger: {} }); expect(result).to.deep.equal(expected); }); @@ -93,21 +85,14 @@ describe("addResourcesToBackend", () => { const result = backend.empty(); parseTriggers.addResourcesToBackend("project", "nodejs16", trigger, result); - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...BASIC_FUNCTION, - trigger: { - eventType: "google.pubsub.topic.publish", - eventFilters: { - resource: "projects/project/topics/topic", - }, - retry: !!failurePolicy, - }, - }, - ], + const eventTrigger: backend.EventTrigger = { + eventType: "google.pubsub.topic.publish", + eventFilters: { + resource: "projects/project/topics/topic", + }, + retry: !!failurePolicy, }; + const expected: backend.Backend = backend.of({ ...BASIC_ENDPOINT, eventTrigger }); expect(result).to.deep.equal(expected); }); } @@ -134,27 +119,25 @@ describe("addResourcesToBackend", () => { const result = backend.empty(); parseTriggers.addResourcesToBackend("project", "nodejs16", trigger, result); - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...BASIC_FUNCTION, - trigger: { - invoker: ["public"], - }, - maxInstances: 42, - minInstances: 1, - serviceAccountEmail: "inlined@google.com", - vpcConnectorEgressSettings: "PRIVATE_RANGES_ONLY", - vpcConnector: "projects/project/locations/region/connectors/connector", - ingressSettings: "ALLOW_ALL", - timeout: "60s", - labels: { - test: "testing", - }, - }, - ], + const config: backend.ServiceConfiguration = { + maxInstances: 42, + minInstances: 1, + serviceAccountEmail: "inlined@google.com", + vpcConnectorEgressSettings: "PRIVATE_RANGES_ONLY", + vpcConnector: "projects/project/locations/region/connectors/connector", + ingressSettings: "ALLOW_ALL", + timeout: "60s", + labels: { + test: "testing", + }, }; + const expected: backend.Backend = backend.of({ + ...BASIC_ENDPOINT, + httpsTrigger: { + invoker: ["public"], + }, + ...config, + }); expect(result).to.deep.equal(expected); }); @@ -171,21 +154,15 @@ describe("addResourcesToBackend", () => { const result = backend.empty(); parseTriggers.addResourcesToBackend("project", "nodejs16", trigger, result); - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...BASIC_FUNCTION, - trigger: { - eventType: "google.pubsub.topic.publish", - eventFilters: { - resource: "projects/p/topics/t", - }, - retry: false, - }, - }, - ], + const eventTrigger: backend.EventTrigger = { + eventType: "google.pubsub.topic.publish", + eventFilters: { + resource: "projects/p/topics/t", + }, + retry: false, }; + + const expected: backend.Backend = backend.of({ ...BASIC_ENDPOINT, eventTrigger }); expect(result).to.deep.equal(expected); }); @@ -199,16 +176,11 @@ describe("addResourcesToBackend", () => { const result = backend.empty(); parseTriggers.addResourcesToBackend("project", "nodejs16", trigger, result); - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...BASIC_FUNCTION, - trigger: {}, - region: "europe-west1", - }, - ], - }; + const expected: backend.Backend = backend.of({ + 
...BASIC_ENDPOINT, + region: "europe-west1", + httpsTrigger: {}, + }); expect(result).to.deep.equal(expected); }); @@ -222,24 +194,19 @@ describe("addResourcesToBackend", () => { const result = backend.empty(); parseTriggers.addResourcesToBackend("project", "nodejs16", trigger, result); - const expected: backend.Backend = { - ...backend.empty(), - cloudFunctions: [ - { - ...BASIC_FUNCTION, - trigger: {}, - region: "us-central1", - }, - { - ...BASIC_FUNCTION, - trigger: {}, - region: "europe-west1", - }, - ], - }; + const expected: backend.Backend = backend.of( + { + ...BASIC_ENDPOINT, + httpsTrigger: {}, + region: "us-central1", + }, + { + ...BASIC_ENDPOINT, + httpsTrigger: {}, + region: "europe-west1", + } + ); - result.cloudFunctions = result.cloudFunctions.sort(); - expected.cloudFunctions = expected.cloudFunctions.sort(); expect(result).to.deep.equal(expected); }); @@ -257,7 +224,11 @@ describe("addResourcesToBackend", () => { }; const trigger: parseTriggers.TriggerAnnotation = { ...BASIC_TRIGGER, - httpsTrigger: {}, + eventTrigger: { + eventType: "google.pubsub.topic.publish", + resource: "projects/project/topics", + service: "pubsub.googleapis.com", + }, regions: ["us-central1", "europe-west1"], schedule, labels: { @@ -273,69 +244,31 @@ describe("addResourcesToBackend", () => { region: "europe-west1", }; - const usFunction = { - ...BASIC_FUNCTION, - trigger: {}, - labels: { - "deployment-scheduled": "true", - test: "testing", - }, - region: "us-central1", - }; - const europeFunction = { - ...BASIC_FUNCTION, - ...europeFunctionName, - trigger: {}, - labels: { - "deployment-scheduled": "true", - test: "testing", - }, - }; const expected: backend.Backend = { - ...backend.empty(), + ...backend.of( + { + ...BASIC_ENDPOINT, + region: "us-central1", + labels: { + test: "testing", + }, + scheduleTrigger: schedule, + }, + { + ...BASIC_ENDPOINT, + region: "europe-west1", + labels: { + test: "testing", + }, + scheduleTrigger: schedule, + } + ), requiredAPIs: { pubsub: "pubsub.googleapis.com", scheduler: "cloudscheduler.googleapis.com", }, - cloudFunctions: [usFunction, europeFunction], - topics: [ - { - id: "firebase-schedule-func-us-central1", - project: "project", - labels: backend.SCHEDULED_FUNCTION_LABEL, - targetService: BASIC_FUNCTION_NAME, - }, - { - id: "firebase-schedule-func-europe-west1", - project: "project", - labels: backend.SCHEDULED_FUNCTION_LABEL, - targetService: europeFunctionName, - }, - ], - schedules: [ - { - id: "firebase-schedule-func-us-central1", - project: "project", - ...schedule, - transport: "pubsub", - targetService: BASIC_FUNCTION_NAME, - }, - { - id: "firebase-schedule-func-europe-west1", - project: "project", - ...schedule, - transport: "pubsub", - targetService: europeFunctionName, - }, - ], }; - result.cloudFunctions = result.cloudFunctions.sort(); - result.schedules = result.schedules.sort(); - result.topics = result.topics.sort(); - expected.cloudFunctions = expected.cloudFunctions.sort(); - expected.schedules = expected.schedules.sort(); - expected.topics = expected.topics.sort(); expect(result).to.deep.equal(expected); }); }); diff --git a/src/test/deploy/functions/tasks.spec.ts b/src/test/deploy/functions/tasks.spec.ts deleted file mode 100644 index 40ce800023d..00000000000 --- a/src/test/deploy/functions/tasks.spec.ts +++ /dev/null @@ -1,228 +0,0 @@ -import { expect } from "chai"; -import * as sinon from "sinon"; - -import { DeploymentTimer } from "../../../deploy/functions/deploymentTimer"; -import { ErrorHandler } from 
"../../../deploy/functions/errorHandler"; -import { FirebaseError } from "../../../error"; -import * as backend from "../../../deploy/functions/backend"; -import * as tasks from "../../../deploy/functions/tasks"; -import * as track from "../../../track"; - -describe("Function Deployment tasks", () => { - const CLOUD_FUNCTION: backend.FunctionSpec = { - platform: "gcfv1", - id: "id", - region: "region", - project: "project", - entryPoint: "function", - runtime: "nodejs16", - trigger: {}, - }; - - const SCHEDULE: backend.ScheduleSpec = { - id: "id", - project: "project", - transport: "pubsub", - targetService: { - id: "id", - region: "region", - project: "project", - }, - }; - - describe("functionsDeploymentHandler", () => { - let sandbox: sinon.SinonSandbox; - let timerStub: sinon.SinonStubbedInstance; - let errorHandlerStub: sinon.SinonStubbedInstance; - let trackStub: sinon.SinonStub; - - beforeEach(() => { - sandbox = sinon.createSandbox(); - timerStub = sandbox.createStubInstance(DeploymentTimer); - errorHandlerStub = sandbox.createStubInstance(ErrorHandler); - trackStub = sinon.stub(track, "track"); - }); - - afterEach(() => { - sandbox.restore(); - trackStub.restore(); - }); - - it("should execute the task and time it", async () => { - const duration = (Math.random() * 10) ^ 6; - timerStub.endTimer.returns(duration); - - const run = sinon.spy(); - const functionName = backend.functionName(CLOUD_FUNCTION); - const testTask: tasks.DeploymentTask = { - run, - data: CLOUD_FUNCTION, - operationType: "create", - }; - - const handler = tasks.functionsDeploymentHandler(timerStub, errorHandlerStub); - await handler(testTask); - - expect(timerStub.startTimer).to.have.been.calledWith(functionName); - expect(run).to.have.been.called; - expect(timerStub.endTimer).to.have.been.calledWith(functionName); - expect(errorHandlerStub.record).not.to.have.been.called; - expect(trackStub).to.have.been.calledWith("function_deploy_success", "v1.https", duration); - }); - - it("should throw quota errors", async () => { - const originalError = { - name: "Quota Exceeded", - message: "an error occurred", - context: { - response: { - statusCode: 429, - }, - }, - }; - const run = sinon.spy(() => { - throw new FirebaseError("an error occurred", { - original: originalError, - }); - }); - const testTask: tasks.DeploymentTask = { - run, - data: CLOUD_FUNCTION, - operationType: "create", - }; - - const handler = tasks.functionsDeploymentHandler(timerStub, errorHandlerStub); - - await expect(handler(testTask)).to.eventually.be.rejected; - - expect(run).to.have.been.called; - expect(errorHandlerStub.record).not.to.have.been.called; - expect(trackStub).to.not.have.been.called; - }); - - it("should handle other errors", async () => { - const duration = (Math.random() * 10) ^ 6; - timerStub.endTimer.returns(duration); - - const originalError = { - name: "Some Other Error", - message: "an error occurred", - context: { - response: { - statusCode: 500, - }, - }, - }; - const run = sinon.spy(() => { - throw new FirebaseError("an error occurred", { - original: originalError, - }); - }); - const functionName = backend.functionName(CLOUD_FUNCTION); - const testTask: tasks.DeploymentTask = { - run, - data: CLOUD_FUNCTION, - operationType: "create", - }; - - const handler = tasks.functionsDeploymentHandler(timerStub, errorHandlerStub); - await handler(testTask); - - expect(timerStub.startTimer).to.have.been.calledWith(functionName); - expect(run).to.have.been.called; - expect(timerStub.endTimer).to.have.been.calledWith(functionName); 
- expect(errorHandlerStub.record).to.have.been.calledWith("error", functionName, "create"); - expect(trackStub).to.have.been.calledWith("function_deploy_failure", "v1.https", duration); - }); - }); - - describe("schedulerDeploymentHandler", () => { - const sandbox = sinon.createSandbox(); - let errorHandlerStub: sinon.SinonStubbedInstance<ErrorHandler>; - - beforeEach(() => { - errorHandlerStub = sandbox.createStubInstance(ErrorHandler); - }); - - afterEach(() => { - sandbox.restore(); - }); - - it("should execute the task", async () => { - const run = sinon.spy(); - const testTask: tasks.DeploymentTask = { - run, - data: SCHEDULE, - operationType: "upsert schedule", - }; - - const handler = tasks.schedulerDeploymentHandler(errorHandlerStub); - await handler(testTask); - - expect(run).to.have.been.called; - expect(errorHandlerStub.record).not.to.have.been.called; - }); - - it("should throw quota errors", async () => { - const run = sinon.spy(() => { - throw new FirebaseError("an error occurred", { - status: 429, - }); - }); - const testTask: tasks.DeploymentTask = { - run, - data: SCHEDULE, - operationType: "upsert schedule", - }; - - const handler = tasks.schedulerDeploymentHandler(errorHandlerStub); - await expect(handler(testTask)).to.eventually.be.rejected; - - expect(run).to.have.been.called; - expect(errorHandlerStub.record).not.to.have.been.called; - }); - - it("should ignore 404 errors", async () => { - const run = sinon.spy(() => { - throw new FirebaseError("an error occurred", { - status: 404, - }); - }); - const testTask: tasks.DeploymentTask = { - run, - data: SCHEDULE, - operationType: "upsert schedule", - }; - - const handler = tasks.schedulerDeploymentHandler(errorHandlerStub); - await handler(testTask); - - expect(run).to.have.been.called; - expect(errorHandlerStub.record).not.to.have.been.called; - }); - - it("should handle other errors", async () => { - const run = sinon.spy(() => { - throw new FirebaseError("an error occurred", { - status: 500, - }); - }); - const functionName = backend.functionName(CLOUD_FUNCTION); - const testTask: tasks.DeploymentTask = { - run, - data: SCHEDULE, - operationType: "upsert schedule", - }; - - const handler = tasks.schedulerDeploymentHandler(errorHandlerStub); - await handler(testTask); - - expect(run).to.have.been.called; - expect(errorHandlerStub.record).to.have.been.calledWith( - "error", - functionName, - "upsert schedule" - ); - }); - }); -}); diff --git a/src/test/deploy/functions/triggerRegionHelper.spec.ts b/src/test/deploy/functions/triggerRegionHelper.spec.ts index 59d07cecd00..4d01c8ce127 100644 --- a/src/test/deploy/functions/triggerRegionHelper.spec.ts +++ b/src/test/deploy/functions/triggerRegionHelper.spec.ts @@ -1,6 +1,7 @@ import { expect } from "chai"; import * as sinon from "sinon"; -import { FunctionSpec } from "../../../deploy/functions/backend"; + +import * as backend from "../../../deploy/functions/backend"; import * as storage from "../../../gcp/storage"; import * as triggerRegionHelper from "../../../deploy/functions/triggerRegionHelper"; @@ -23,11 +24,11 @@ describe("TriggerRegionHelper", () => { }); it("should throw an error if we can't find the bucket region", async () => { - const fn: FunctionSpec = { + const ep: backend.Endpoint = { id: "fn", entryPoint: "fnn", platform: "gcfv2", - trigger: { + eventTrigger: { eventType: "google.cloud.storage.object.v1.finalized", eventFilters: { bucket: "my-bucket", @@ -37,89 +38,52 @@ ...SPEC, }; - await 
expect(triggerRegionHelper.setTriggerRegion([fn], [])).to.be.rejectedWith( - "Can't find the storage bucket region" - ); + await expect( + triggerRegionHelper.lookupMissingTriggerRegions(backend.of(ep)) + ).to.be.rejectedWith("Can't find the storage bucket region"); }); it("should skip v1 and callable functions", async () => { - const v1EventFn: FunctionSpec = { + const v1EventFn: backend.Endpoint = { id: "v1eventfn", entryPoint: "v1Fn", platform: "gcfv1", - trigger: { - eventType: "google.cloud.audit.log.v1.written", - eventFilters: {}, - retry: false, - }, - ...SPEC, - }; - const v2CallableFn: FunctionSpec = { - id: "v2callablefn", - entryPoint: "v2callablefn", - platform: "gcfv2", - trigger: {}, - ...SPEC, - }; - - await triggerRegionHelper.setTriggerRegion([v1EventFn, v2CallableFn], []); - - expect(v1EventFn.trigger).to.deep.eq({ - eventType: "google.cloud.audit.log.v1.written", - eventFilters: {}, - retry: false, - }); - expect(v2CallableFn.trigger).to.deep.eq({}); - }); - - it("should match trigger region from have functions", async () => { - const wantFn: FunctionSpec = { - id: "fn", - entryPoint: "fn", - platform: "gcfv2", - trigger: { - eventType: "google.cloud.storage.object.v1.finalized", + eventTrigger: { + eventType: "google.storage.object.create", eventFilters: { - bucket: "my-bucket", + resource: "projects/_/buckets/myBucket", }, retry: false, }, ...SPEC, }; - const haveFn: FunctionSpec = { - id: "fn", - entryPoint: "fn", + const v2CallableFn: backend.Endpoint = { + id: "v2callablefn", + entryPoint: "v2callablefn", platform: "gcfv2", - trigger: { - eventType: "google.cloud.storage.object.v1.finalized", - eventFilters: { - bucket: "my-bucket", - }, - retry: false, - region: "us", - }, + httpsTrigger: {}, ...SPEC, }; - await triggerRegionHelper.setTriggerRegion([wantFn], [haveFn]); + await triggerRegionHelper.lookupMissingTriggerRegions(backend.of(v1EventFn, v2CallableFn)); - expect(wantFn.trigger).to.deep.eq({ - eventType: "google.cloud.storage.object.v1.finalized", + expect(v1EventFn.eventTrigger).to.deep.eq({ + eventType: "google.storage.object.create", eventFilters: { - bucket: "my-bucket", + resource: "projects/_/buckets/myBucket", }, retry: false, - region: "us", }); + expect(v2CallableFn.httpsTrigger).to.deep.eq({}); }); it("should set trigger region from API", async () => { storageStub.resolves({ location: "US" }); - const wantFn: FunctionSpec = { + const wantFn: backend.Endpoint = { id: "wantFn", entryPoint: "wantFn", platform: "gcfv2", - trigger: { + eventTrigger: { eventType: "google.cloud.storage.object.v1.finalized", eventFilters: { bucket: "my-bucket", @@ -129,9 +93,9 @@ describe("TriggerRegionHelper", () => { ...SPEC, }; - await triggerRegionHelper.setTriggerRegion([wantFn], []); + await triggerRegionHelper.lookupMissingTriggerRegions(backend.of(wantFn)); - expect(wantFn.trigger).to.deep.eq({ + expect(wantFn.eventTrigger).to.deep.eq({ eventType: "google.cloud.storage.object.v1.finalized", eventFilters: { bucket: "my-bucket", diff --git a/src/test/deploy/functions/validate.spec.ts b/src/test/deploy/functions/validate.spec.ts index d1a3689871a..5a2d3e6a1b7 100644 --- a/src/test/deploy/functions/validate.spec.ts +++ b/src/test/deploy/functions/validate.spec.ts @@ -2,16 +2,10 @@ import { expect } from "chai"; import * as sinon from "sinon"; import { FirebaseError } from "../../../error"; -import { RUNTIME_NOT_SET } from "../../../deploy/functions/runtimes/node/parseRuntimeAndValidateSDK"; -import { FunctionSpec } from "../../../deploy/functions/backend"; import * as 
fsutils from "../../../fsutils"; import * as validate from "../../../deploy/functions/validate"; import * as projectPath from "../../../projectPath"; -// have to require this because no @types/cjson available -// eslint-disable-next-line -const cjson = require("cjson"); - describe("validate", () => { describe("functionsDirectoryExists", () => { const sandbox: sinon.SinonSandbox = sinon.createSandbox(); @@ -119,88 +113,4 @@ describe("validate", () => { }).to.throw(FirebaseError); }); }); - - describe("checkForInvalidChangeOfTrigger", () => { - const CLOUD_FUNCTION: Omit<FunctionSpec, "trigger"> = { - platform: "gcfv1", - id: "my-func", - region: "us-central1", - project: "project", - runtime: "nodejs16", - entryPoint: "function", - }; - it("should throw if a https function would be changed into an event triggered function", () => { - const fn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger: { - eventType: "google.pubsub.topic.publish", - eventFilters: {}, - retry: false, - }, - }; - const exFn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger: {}, - }; - - expect(() => { - validate.checkForInvalidChangeOfTrigger(fn, exFn); - }).to.throw(); - }); - - it("should throw if a event triggered function would be changed into an https function", () => { - const fn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger: {}, - }; - const exFn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger: { - eventType: "google.pubsub.topic.publish", - eventFilters: {}, - retry: false, - }, - }; - - expect(() => { - validate.checkForInvalidChangeOfTrigger(fn, exFn); - }).to.throw(); - }); - - it("should not throw if a event triggered function keeps the same trigger", () => { - const trigger = { - eventType: "google.pubsub.topic.publish", - eventFilters: {}, - retry: false, - }; - const fn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger, - }; - const exFn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger, - }; - - expect(() => { - validate.checkForInvalidChangeOfTrigger(fn, exFn); - }).not.to.throw(); - }); - - it("should not throw if a https function stays as a https function", () => { - const trigger = {}; - const fn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger, - }; - const exFn: FunctionSpec = { - ...CLOUD_FUNCTION, - trigger, - }; - - expect(() => { - validate.checkForInvalidChangeOfTrigger(fn, exFn); - }).not.to.throw(); - }); - }); }); diff --git a/src/test/functions/listFunctions.spec.ts b/src/test/functions/listFunctions.spec.ts deleted file mode 100644 index 89795ee76fd..00000000000 --- a/src/test/functions/listFunctions.spec.ts +++ /dev/null @@ -1,160 +0,0 @@ -import { expect } from "chai"; -import * as sinon from "sinon"; -import { listFunctions } from "../../functions/listFunctions"; -import * as backend from "../../deploy/functions/backend"; -import * as args from "../../deploy/functions/args"; -import { previews } from "../../previews"; - -describe("listFunctions", () => { - let sandbox: sinon.SinonSandbox; - let backendStub: sinon.SinonStub; - - beforeEach(() => { - sandbox = sinon.createSandbox(); - backendStub = sandbox.stub(backend, "existingBackend"); - previews.functionsv2 = false; - }); - - afterEach(() => { - sandbox.restore(); - }); - - it("should return an empty array on empty function spec list", async () => { - backendStub.returns(Promise.resolve({ cloudFunctions: [] })); - - const result = await listFunctions({ projectId: "project" } as args.Context); - - expect(result).to.deep.equal({ functions: [] }); - }); - - it("should return the v1 functions in order", async () => { - backendStub.returns( - Promise.resolve({ 
- cloudFunctions: [ - { - id: "fn2", - entryPoint: "fn2", - trigger: { - eventType: "providers/firebase.database/eventTypes/ref.create", - }, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - { - id: "fn1", - entryPoint: "fn1", - trigger: {}, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - ], - }) - ); - - const result = await listFunctions({ projectId: "project" } as args.Context); - - expect(result).to.deep.equal({ - functions: [ - { - id: "fn1", - entryPoint: "fn1", - trigger: {}, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - { - id: "fn2", - entryPoint: "fn2", - trigger: { - eventType: "providers/firebase.database/eventTypes/ref.create", - }, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - ], - }); - }); - - it("should return the v1&v2 functions in order", async () => { - previews.functionsv2 = true; - backendStub.returns( - Promise.resolve({ - cloudFunctions: [ - { - id: "fn2", - entryPoint: "fn2", - trigger: { - eventType: "providers/firebase.database/eventTypes/ref.create", - }, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - { - id: "fn3", - entryPoint: "fn3", - trigger: {}, - platform: "gcfv2", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - { - id: "fn1", - entryPoint: "fn1", - trigger: {}, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - ], - }) - ); - - const result = await listFunctions({ projectId: "project" } as args.Context); - - expect(result).to.deep.equal({ - functions: [ - { - id: "fn3", - entryPoint: "fn3", - trigger: {}, - platform: "gcfv2", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - { - id: "fn1", - entryPoint: "fn1", - trigger: {}, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - { - id: "fn2", - entryPoint: "fn2", - trigger: { - eventType: "providers/firebase.database/eventTypes/ref.create", - }, - platform: "gcfv1", - region: "us-west1", - availableMemoryMb: "256", - runtime: "nodejs14", - }, - ], - }); - }); -}); diff --git a/src/test/gcp/cloudfunctions.spec.ts b/src/test/gcp/cloudfunctions.spec.ts index 7b0c02dbc0b..be0c9269028 100644 --- a/src/test/gcp/cloudfunctions.spec.ts +++ b/src/test/gcp/cloudfunctions.spec.ts @@ -12,14 +12,6 @@ describe("cloudfunctions", () => { project: "project", }; - const FUNCTION_SPEC: backend.FunctionSpec = { - platform: "gcfv1", - ...FUNCTION_NAME, - trigger: {}, - entryPoint: "function", - runtime: "nodejs16", - }; - // Omit a random trigger to make this compile const ENDPOINT: Omit = { platform: "gcfv1", @@ -267,219 +259,6 @@ describe("cloudfunctions", () => { }); }); - describe("functionFromSpec", () => { - const UPLOAD_URL = "https://storage.googleapis.com/projects/-/buckets/sample/source.zip"; - it("should guard against version mixing", () => { - expect(() => { - cloudfunctions.functionFromSpec({ ...FUNCTION_SPEC, platform: "gcfv2" }, UPLOAD_URL); - }).to.throw; - }); - - it("should copy a minimal function", () => { - expect(cloudfunctions.functionFromSpec(FUNCTION_SPEC, UPLOAD_URL)).to.deep.equal({ - ...CLOUD_FUNCTION, - sourceUploadUrl: UPLOAD_URL, - httpsTrigger: {}, - }); - - const eventFunction = { - ...FUNCTION_SPEC, - trigger: { - eventType: "google.pubsub.topic.publish", - eventFilters: { 
-            resource: "projects/p/topics/t",
-          },
-          retry: false,
-        },
-      };
-      const eventGcfFunction = {
-        ...CLOUD_FUNCTION,
-        sourceUploadUrl: UPLOAD_URL,
-        eventTrigger: {
-          eventType: "google.pubsub.topic.publish",
-          resource: "projects/p/topics/t",
-          failurePolicy: undefined,
-        },
-      };
-      expect(cloudfunctions.functionFromSpec(eventFunction, UPLOAD_URL)).to.deep.equal(
-        eventGcfFunction
-      );
-    });
-
-    it("should copy trival fields", () => {
-      const fullFunction: backend.FunctionSpec = {
-        ...FUNCTION_SPEC,
-        availableMemoryMb: 128,
-        minInstances: 1,
-        maxInstances: 42,
-        vpcConnector: "connector",
-        vpcConnectorEgressSettings: "ALL_TRAFFIC",
-        ingressSettings: "ALLOW_ALL",
-        timeout: "15s",
-        serviceAccountEmail: "inlined@google.com",
-        labels: {
-          foo: "bar",
-        },
-        environmentVariables: {
-          FOO: "bar",
-        },
-      };
-
-      const fullGcfFunction: Omit<cloudfunctions.CloudFunction, cloudfunctions.OutputOnlyFields> = {
-        ...CLOUD_FUNCTION,
-        sourceUploadUrl: UPLOAD_URL,
-        httpsTrigger: {},
-        labels: {
-          foo: "bar",
-        },
-        environmentVariables: {
-          FOO: "bar",
-        },
-        maxInstances: 42,
-        minInstances: 1,
-        vpcConnector: "connector",
-        vpcConnectorEgressSettings: "ALL_TRAFFIC",
-        ingressSettings: "ALLOW_ALL",
-        availableMemoryMb: 128,
-        timeout: "15s",
-        serviceAccountEmail: "inlined@google.com",
-      };
-
-      expect(cloudfunctions.functionFromSpec(fullFunction, UPLOAD_URL)).to.deep.equal(
-        fullGcfFunction
-      );
-    });
-
-    it("should calculate non-trivial fields", () => {
-      const complexFunction: backend.FunctionSpec = {
-        ...FUNCTION_SPEC,
-        trigger: {
-          eventType: "google.pubsub.topic.publish",
-          eventFilters: {
-            resource: "projects/p/topics/t",
-          },
-          retry: true,
-        },
-      };
-
-      const complexGcfFunction: Omit<
-        cloudfunctions.CloudFunction,
-        cloudfunctions.OutputOnlyFields
-      > = {
-        ...CLOUD_FUNCTION,
-        sourceUploadUrl: UPLOAD_URL,
-        eventTrigger: {
-          eventType: "google.pubsub.topic.publish",
-          resource: "projects/p/topics/t",
-          failurePolicy: {
-            retry: {},
-          },
-        },
-      };
-
-      expect(cloudfunctions.functionFromSpec(complexFunction, UPLOAD_URL)).to.deep.equal(
-        complexGcfFunction
-      );
-    });
-  });
-
-  describe("specFromFunction", () => {
-    it("should copy a minimal version", () => {
-      expect(
-        cloudfunctions.specFromFunction({
-          ...HAVE_CLOUD_FUNCTION,
-          httpsTrigger: {},
-        })
-      ).to.deep.equal(FUNCTION_SPEC);
-    });
-
-    it("should translate event triggers", () => {
-      expect(
-        cloudfunctions.specFromFunction({
-          ...HAVE_CLOUD_FUNCTION,
-          eventTrigger: {
-            eventType: "google.pubsub.topic.publish",
-            resource: "projects/p/topics/t",
-            failurePolicy: {
-              retry: {},
-            },
-          },
-        })
-      ).to.deep.equal({
-        ...FUNCTION_SPEC,
-        trigger: {
-          eventType: "google.pubsub.topic.publish",
-          eventFilters: {
-            resource: "projects/p/topics/t",
-          },
-          retry: true,
-        },
-      });
-
-      // And again w/o the failure policy
-      expect(
-        cloudfunctions.specFromFunction({
-          ...HAVE_CLOUD_FUNCTION,
-          eventTrigger: {
-            eventType: "google.pubsub.topic.publish",
-            resource: "projects/p/topics/t",
-          },
-        })
-      ).to.deep.equal({
-        ...FUNCTION_SPEC,
-        trigger: {
-          eventType: "google.pubsub.topic.publish",
-          eventFilters: {
-            resource: "projects/p/topics/t",
-          },
-          retry: false,
-        },
-      });
-    });
-
-    it("should copy optional fields", () => {
-      const extraFields: Partial<backend.FunctionSpec> = {
-        availableMemoryMb: 128,
-        minInstances: 1,
-        maxInstances: 42,
-        vpcConnector: "connector",
-        vpcConnectorEgressSettings: "ALL_TRAFFIC",
-        ingressSettings: "ALLOW_ALL",
-        serviceAccountEmail: "inlined@google.com",
-        timeout: "15s",
-        labels: {
-          foo: "bar",
-        },
-        environmentVariables: {
-          FOO: "bar",
-        },
-      };
-      expect(
-        cloudfunctions.specFromFunction({
-          ...HAVE_CLOUD_FUNCTION,
-          ...extraFields,
-          httpsTrigger: {},
-        } as cloudfunctions.CloudFunction)
-      ).to.deep.equal({
-        ...FUNCTION_SPEC,
-        ...extraFields,
-        trigger: {},
-      });
-    });
-
-    it("should transform fields", () => {
-      expect(
-        cloudfunctions.specFromFunction({
-          ...HAVE_CLOUD_FUNCTION,
-          httpsTrigger: {},
-        })
-      ).to.deep.equal({
-        ...FUNCTION_SPEC,
-        trigger: {},
-      });
-    });
-  });
-
   describe("setInvokerCreate", () => {
     let sandbox: sinon.SinonSandbox;
     let apiRequestStub: sinon.SinonStub;
diff --git a/src/test/gcp/cloudfunctionsv2.spec.ts b/src/test/gcp/cloudfunctionsv2.spec.ts
index 99c37b25123..a8de4174964 100644
--- a/src/test/gcp/cloudfunctionsv2.spec.ts
+++ b/src/test/gcp/cloudfunctionsv2.spec.ts
@@ -10,14 +10,6 @@ describe("cloudfunctionsv2", () => {
     project: "project",
   };
 
-  const FUNCTION_SPEC: backend.FunctionSpec = {
-    platform: "gcfv2",
-    ...FUNCTION_NAME,
-    trigger: {},
-    entryPoint: "function",
-    runtime: "nodejs16",
-  };
-
   // Omit a random trigger to get this fragment to compile.
   const ENDPOINT: Omit<backend.Endpoint, "httpsTrigger"> = {
     platform: "gcfv2",
@@ -62,8 +54,8 @@ describe("cloudfunctionsv2", () => {
     const UPLOAD_URL = "https://storage.googleapis.com/projects/-/buckets/sample/source.zip";
     it("should guard against version mixing", () => {
       expect(() => {
-        cloudfunctionsv2.functionFromSpec(
-          { ...FUNCTION_SPEC, platform: "gcfv1" },
+        cloudfunctionsv2.functionFromEndpoint(
+          { ...ENDPOINT, httpsTrigger: {}, platform: "gcfv1" },
           CLOUD_FUNCTION_V2_SOURCE
         );
       }).to.throw;
@@ -71,19 +63,20 @@ describe("cloudfunctionsv2", () => {
     it("should copy a minimal function", () => {
       expect(
-        cloudfunctionsv2.functionFromSpec(
+        cloudfunctionsv2.functionFromEndpoint(
           {
-            ...FUNCTION_SPEC,
+            ...ENDPOINT,
             platform: "gcfv2",
+            httpsTrigger: {},
           },
           CLOUD_FUNCTION_V2_SOURCE
         )
       ).to.deep.equal(CLOUD_FUNCTION_V2);
 
-      const eventFunction: backend.FunctionSpec = {
-        ...FUNCTION_SPEC,
+      const eventEndpoint: backend.Endpoint = {
+        ...ENDPOINT,
         platform: "gcfv2",
-        trigger: {
+        eventTrigger: {
           eventType: "google.cloud.audit.log.v1.written",
           eventFilters: {
             resource: "projects/p/regions/r/instances/i",
@@ -112,13 +105,14 @@ describe("cloudfunctionsv2", () => {
         },
       };
       expect(
-        cloudfunctionsv2.functionFromSpec(eventFunction, CLOUD_FUNCTION_V2_SOURCE)
+        cloudfunctionsv2.functionFromEndpoint(eventEndpoint, CLOUD_FUNCTION_V2_SOURCE)
       ).to.deep.equal(eventGcfFunction);
     });
 
     it("should copy trival fields", () => {
-      const fullFunction: backend.FunctionSpec = {
-        ...FUNCTION_SPEC,
+      const fullEndpoint: backend.Endpoint = {
+        ...ENDPOINT,
+        httpsTrigger: {},
         platform: "gcfv2",
         availableMemoryMb: 128,
         vpcConnector: "connector",
@@ -155,15 +149,15 @@ describe("cloudfunctionsv2", () => {
       };
 
       expect(
-        cloudfunctionsv2.functionFromSpec(fullFunction, CLOUD_FUNCTION_V2_SOURCE)
+        cloudfunctionsv2.functionFromEndpoint(fullEndpoint, CLOUD_FUNCTION_V2_SOURCE)
       ).to.deep.equal(fullGcfFunction);
     });
 
     it("should calculate non-trivial fields", () => {
-      const complexFunction: backend.FunctionSpec = {
-        ...FUNCTION_SPEC,
+      const complexEndpoint: backend.Endpoint = {
+        ...ENDPOINT,
         platform: "gcfv2",
-        trigger: {
+        eventTrigger: {
           eventType: cloudfunctionsv2.PUBSUB_PUBLISH_EVENT,
           eventFilters: {
             resource: "projects/p/topics/t",
@@ -193,15 +187,16 @@ describe("cloudfunctionsv2", () => {
       };
 
       expect(
-        cloudfunctionsv2.functionFromSpec(complexFunction, CLOUD_FUNCTION_V2_SOURCE)
+        cloudfunctionsv2.functionFromEndpoint(complexEndpoint, CLOUD_FUNCTION_V2_SOURCE)
      ).to.deep.equal(complexGcfFunction);
     });
   });
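The hunks above pin down the v2 trigger translation: the backend layer models event filters as a flat map, while the GCFv2 API wants a list of attribute/value pairs, with Pub/Sub publish events special-cased to a pubsubTopic field instead of a generic "resource" filter. A minimal sketch of that mapping, using simplified stand-in types rather than the real backend.Endpoint and cloudfunctionsv2.CloudFunction shapes (the constant's value below is also an assumption, mirroring the PUBSUB_PUBLISH_EVENT identifier used in the tests):

// Simplified, hypothetical shapes; the real types carry many more fields.
interface BackendEventTrigger {
  eventType: string;
  eventFilters: Record<string, string>;
  retry: boolean;
}

interface GcfV2EventTrigger {
  eventType: string;
  eventFilters?: { attribute: string; value: string }[];
  pubsubTopic?: string;
}

// Assumed value for illustration only.
const PUBSUB_PUBLISH_EVENT = "google.cloud.pubsub.topic.v1.messagePublished";

function wireEventTrigger(trigger: BackendEventTrigger): GcfV2EventTrigger {
  if (trigger.eventType === PUBSUB_PUBLISH_EVENT) {
    // Pub/Sub publish events name the topic directly instead of
    // carrying a generic "resource" attribute filter.
    return { eventType: trigger.eventType, pubsubTopic: trigger.eventFilters.resource };
  }
  // Everything else: flatten the filter map into attribute/value pairs.
  return {
    eventType: trigger.eventType,
    eventFilters: Object.entries(trigger.eventFilters).map(([attribute, value]) => ({
      attribute,
      value,
    })),
  };
}

Run over the audit-log trigger in these tests, that sketch yields the two-element eventFilters array the assertions compare against with deep.equal.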
describe("endpointFromFunction", () => { it("should copy a minimal version", () => { - expect(cloudfunctionsv2.specFromFunction(HAVE_CLOUD_FUNCTION_V2)).to.deep.equal({ - ...FUNCTION_SPEC, + expect(cloudfunctionsv2.endpointFromFunction(HAVE_CLOUD_FUNCTION_V2)).to.deep.equal({ + ...ENDPOINT, + httpsTrigger: {}, platform: "gcfv2", uri: RUN_URI, }); @@ -209,7 +204,7 @@ describe("cloudfunctionsv2", () => { it("should translate event triggers", () => { expect( - cloudfunctionsv2.specFromFunction({ + cloudfunctionsv2.endpointFromFunction({ ...HAVE_CLOUD_FUNCTION_V2, eventTrigger: { eventType: cloudfunctionsv2.PUBSUB_PUBLISH_EVENT, @@ -217,279 +212,10 @@ describe("cloudfunctionsv2", () => { }, }) ).to.deep.equal({ - ...FUNCTION_SPEC, - platform: "gcfv2", - uri: RUN_URI, - trigger: { - eventType: cloudfunctionsv2.PUBSUB_PUBLISH_EVENT, - eventFilters: { - resource: "projects/p/topics/t", - }, - retry: false, - }, - }); - - // And again w/ a normal event trigger - expect( - cloudfunctionsv2.specFromFunction({ - ...HAVE_CLOUD_FUNCTION_V2, - eventTrigger: { - eventType: "google.cloud.audit.log.v1.written", - eventFilters: [ - { - attribute: "resource", - value: "projects/p/regions/r/instances/i", - }, - { - attribute: "serviceName", - value: "compute.googleapis.com", - }, - ], - }, - }) - ).to.deep.equal({ - ...FUNCTION_SPEC, - platform: "gcfv2", - uri: RUN_URI, - trigger: { - eventType: "google.cloud.audit.log.v1.written", - eventFilters: { - resource: "projects/p/regions/r/instances/i", - serviceName: "compute.googleapis.com", - }, - retry: false, - }, - }); - }); - - it("should copy optional fields", () => { - const extraFields: Partial = { - availableMemoryMb: 128, - vpcConnector: "connector", - vpcConnectorEgressSettings: "ALL_TRAFFIC", - ingressSettings: "ALLOW_ALL", - serviceAccountEmail: "inlined@google.com", - environmentVariables: { - FOO: "bar", - }, - }; - expect( - cloudfunctionsv2.specFromFunction({ - ...HAVE_CLOUD_FUNCTION_V2, - serviceConfig: { - ...HAVE_CLOUD_FUNCTION_V2.serviceConfig, - ...extraFields, - }, - labels: { - foo: "bar", - }, - }) - ).to.deep.equal({ - ...FUNCTION_SPEC, + ...ENDPOINT, platform: "gcfv2", uri: RUN_URI, - ...extraFields, - labels: { - foo: "bar", - }, - }); - }); - - it("should transform fields", () => { - const extraFields: Partial = { - minInstances: 1, - maxInstances: 42, - timeout: "15s", - }; - - const extraGcfFields: Partial = { - minInstanceCount: 1, - maxInstanceCount: 42, - timeoutSeconds: 15, - }; - - expect( - cloudfunctionsv2.specFromFunction({ - ...HAVE_CLOUD_FUNCTION_V2, - serviceConfig: { - ...HAVE_CLOUD_FUNCTION_V2.serviceConfig, - ...extraGcfFields, - }, - }) - ).to.deep.equal({ - ...FUNCTION_SPEC, - platform: "gcfv2", - uri: RUN_URI, - ...extraFields, - }); - }); - }); - - describe("functionFromSpec", () => { - const UPLOAD_URL = "https://storage.googleapis.com/projects/-/buckets/sample/source.zip"; - it("should guard against version mixing", () => { - expect(() => { - cloudfunctionsv2.functionFromSpec( - { ...FUNCTION_SPEC, platform: "gcfv1" }, - CLOUD_FUNCTION_V2_SOURCE - ); - }).to.throw; - }); - - it("should copy a minimal function", () => { - expect( - cloudfunctionsv2.functionFromSpec( - { - ...FUNCTION_SPEC, - platform: "gcfv2", - }, - CLOUD_FUNCTION_V2_SOURCE - ) - ).to.deep.equal(CLOUD_FUNCTION_V2); - - const eventFunction: backend.FunctionSpec = { - ...FUNCTION_SPEC, - platform: "gcfv2", - trigger: { - eventType: "google.cloud.audit.log.v1.written", - eventFilters: { - resource: "projects/p/regions/r/instances/i", - serviceName: 
"compute.googleapis.com", - }, - retry: false, - }, - }; - const eventGcfFunction: Omit< - cloudfunctionsv2.CloudFunction, - cloudfunctionsv2.OutputOnlyFields - > = { - ...CLOUD_FUNCTION_V2, - eventTrigger: { - eventType: "google.cloud.audit.log.v1.written", - eventFilters: [ - { - attribute: "resource", - value: "projects/p/regions/r/instances/i", - }, - { - attribute: "serviceName", - value: "compute.googleapis.com", - }, - ], - }, - }; - expect( - cloudfunctionsv2.functionFromSpec(eventFunction, CLOUD_FUNCTION_V2_SOURCE) - ).to.deep.equal(eventGcfFunction); - }); - - it("should copy trival fields", () => { - const fullFunction: backend.FunctionSpec = { - ...FUNCTION_SPEC, - platform: "gcfv2", - availableMemoryMb: 128, - vpcConnector: "connector", - vpcConnectorEgressSettings: "ALL_TRAFFIC", - ingressSettings: "ALLOW_ALL", - serviceAccountEmail: "inlined@google.com", - labels: { - foo: "bar", - }, - environmentVariables: { - FOO: "bar", - }, - }; - - const fullGcfFunction: Omit< - cloudfunctionsv2.CloudFunction, - cloudfunctionsv2.OutputOnlyFields - > = { - ...CLOUD_FUNCTION_V2, - labels: { - foo: "bar", - }, - serviceConfig: { - ...CLOUD_FUNCTION_V2.serviceConfig, - environmentVariables: { - FOO: "bar", - }, - vpcConnector: "connector", - vpcConnectorEgressSettings: "ALL_TRAFFIC", - ingressSettings: "ALLOW_ALL", - availableMemoryMb: 128, - serviceAccountEmail: "inlined@google.com", - }, - }; - - expect( - cloudfunctionsv2.functionFromSpec(fullFunction, CLOUD_FUNCTION_V2_SOURCE) - ).to.deep.equal(fullGcfFunction); - }); - - it("should calculate non-trivial fields", () => { - const complexFunction: backend.FunctionSpec = { - ...FUNCTION_SPEC, - platform: "gcfv2", - trigger: { - eventType: cloudfunctionsv2.PUBSUB_PUBLISH_EVENT, - eventFilters: { - resource: "projects/p/topics/t", - }, - retry: false, - region: "us", - }, - maxInstances: 42, - minInstances: 1, - timeout: "15s", - }; - - const complexGcfFunction: Omit< - cloudfunctionsv2.CloudFunction, - cloudfunctionsv2.OutputOnlyFields - > = { - ...CLOUD_FUNCTION_V2, eventTrigger: { - eventType: cloudfunctionsv2.PUBSUB_PUBLISH_EVENT, - pubsubTopic: "projects/p/topics/t", - triggerRegion: "us", - }, - serviceConfig: { - ...CLOUD_FUNCTION_V2.serviceConfig, - maxInstanceCount: 42, - minInstanceCount: 1, - timeoutSeconds: 15, - }, - }; - - expect( - cloudfunctionsv2.functionFromSpec(complexFunction, CLOUD_FUNCTION_V2_SOURCE) - ).to.deep.equal(complexGcfFunction); - }); - }); - - describe("specFromFunction", () => { - it("should copy a minimal version", () => { - expect(cloudfunctionsv2.specFromFunction(HAVE_CLOUD_FUNCTION_V2)).to.deep.equal({ - ...FUNCTION_SPEC, - platform: "gcfv2", - uri: RUN_URI, - }); - }); - - it("should translate event triggers", () => { - expect( - cloudfunctionsv2.specFromFunction({ - ...HAVE_CLOUD_FUNCTION_V2, - eventTrigger: { - eventType: cloudfunctionsv2.PUBSUB_PUBLISH_EVENT, - pubsubTopic: "projects/p/topics/t", - }, - }) - ).to.deep.equal({ - ...FUNCTION_SPEC, - platform: "gcfv2", - uri: RUN_URI, - trigger: { eventType: cloudfunctionsv2.PUBSUB_PUBLISH_EVENT, eventFilters: { resource: "projects/p/topics/t", @@ -500,7 +226,7 @@ describe("cloudfunctionsv2", () => { // And again w/ a normal event trigger expect( - cloudfunctionsv2.specFromFunction({ + cloudfunctionsv2.endpointFromFunction({ ...HAVE_CLOUD_FUNCTION_V2, eventTrigger: { eventType: "google.cloud.audit.log.v1.written", @@ -514,27 +240,25 @@ describe("cloudfunctionsv2", () => { value: "compute.googleapis.com", }, ], - triggerRegion: "us", }, }) 
     ).to.deep.equal({
-      ...FUNCTION_SPEC,
+      ...ENDPOINT,
       platform: "gcfv2",
       uri: RUN_URI,
-      trigger: {
+      eventTrigger: {
         eventType: "google.cloud.audit.log.v1.written",
         eventFilters: {
           resource: "projects/p/regions/r/instances/i",
           serviceName: "compute.googleapis.com",
         },
         retry: false,
-        region: "us",
       },
     });
   });
 
   it("should copy optional fields", () => {
-    const extraFields: Partial<backend.FunctionSpec> = {
+    const extraFields: backend.ServiceConfiguration = {
       availableMemoryMb: 128,
       vpcConnector: "connector",
       vpcConnectorEgressSettings: "ALL_TRAFFIC",
@@ -545,7 +269,7 @@ describe("cloudfunctionsv2", () => {
       },
     };
     expect(
-      cloudfunctionsv2.specFromFunction({
+      cloudfunctionsv2.endpointFromFunction({
         ...HAVE_CLOUD_FUNCTION_V2,
         serviceConfig: {
           ...HAVE_CLOUD_FUNCTION_V2.serviceConfig,
@@ -556,8 +280,9 @@ describe("cloudfunctionsv2", () => {
       },
     })
   ).to.deep.equal({
-    ...FUNCTION_SPEC,
+    ...ENDPOINT,
     platform: "gcfv2",
+    httpsTrigger: {},
     uri: RUN_URI,
     ...extraFields,
     labels: {
@@ -567,7 +292,7 @@ describe("cloudfunctionsv2", () => {
   });
 
   it("should transform fields", () => {
-    const extraFields: Partial<backend.FunctionSpec> = {
+    const extraFields: backend.ServiceConfiguration = {
       minInstances: 1,
       maxInstances: 42,
       timeout: "15s",
@@ -580,7 +305,7 @@ describe("cloudfunctionsv2", () => {
     };
 
     expect(
-      cloudfunctionsv2.specFromFunction({
+      cloudfunctionsv2.endpointFromFunction({
         ...HAVE_CLOUD_FUNCTION_V2,
         serviceConfig: {
           ...HAVE_CLOUD_FUNCTION_V2.serviceConfig,
@@ -588,9 +313,10 @@ describe("cloudfunctionsv2", () => {
       },
     })
   ).to.deep.equal({
-    ...FUNCTION_SPEC,
+    ...ENDPOINT,
     platform: "gcfv2",
     uri: RUN_URI,
+    httpsTrigger: {},
     ...extraFields,
   });
 });
diff --git a/src/test/gcp/cloudscheduler.spec.ts b/src/test/gcp/cloudscheduler.spec.ts
index 4821a6f500d..a4626445b2a 100644
--- a/src/test/gcp/cloudscheduler.spec.ts
+++ b/src/test/gcp/cloudscheduler.spec.ts
@@ -2,9 +2,10 @@ import { expect } from "chai";
 import * as _ from "lodash";
 import * as nock from "nock";
 
-import * as cloudscheduler from "../../gcp/cloudscheduler";
 import { FirebaseError } from "../../error";
 import * as api from "../../api";
+import * as backend from "../../deploy/functions/backend";
+import * as cloudscheduler from "../../gcp/cloudscheduler";
 
 const VERSION = "v1beta1";
 
@@ -119,20 +120,20 @@ describe("cloudscheduler", () => {
     });
   });
 
-  describe("toJob", () => {
-    const SCHEDULE = {
-      id: "firebase-schedule-id-region",
+  describe("jobFromEndpoint", () => {
+    const ENDPOINT: backend.Endpoint = {
+      platform: "gcfv1",
+      id: "id",
+      region: "region",
       project: "project",
-      schedule: "every 1 minutes",
-      transport: "pubsub" as any,
-      targetService: {
-        id: "id",
-        region: "region",
-        project: "project",
+      entryPoint: "id",
+      runtime: "nodejs16",
+      scheduleTrigger: {
+        schedule: "every 1 minutes",
       },
     };
     it("should copy minimal fields", () => {
-      expect(cloudscheduler.jobFromSpec(SCHEDULE, "appEngineLocation")).to.deep.equal({
+      expect(cloudscheduler.jobFromEndpoint(ENDPOINT, "appEngineLocation")).to.deep.equal({
         name: "projects/project/locations/appEngineLocation/jobs/firebase-schedule-id-region",
         schedule: "every 1 minutes",
         pubsubTarget: {
@@ -146,15 +147,18 @@ describe("cloudscheduler", () => {
     it("should copy optional fields", () => {
       expect(
-        cloudscheduler.jobFromSpec(
+        cloudscheduler.jobFromEndpoint(
           {
-            ...SCHEDULE,
-            timeZone: "America/Los_Angeles",
-            retryConfig: {
-              maxDoublings: 2,
-              maxBackoffDuration: "20s",
-              minBackoffDuration: "1s",
-              maxRetryDuration: "60s",
+            ...ENDPOINT,
+            scheduleTrigger: {
+              schedule: "every 1 minutes",
+              timeZone: "America/Los_Angeles",
+              retryConfig: {
+                maxDoublings: 2,
+                maxBackoffDuration: "20s",
+                minBackoffDuration: "1s",
+                maxRetryDuration: "60s",
+              },
            },
          },
          "appEngineLocation"