diff --git a/ava.config.mjs b/ava.config.mjs index c7da146f4..c4b841c21 100644 --- a/ava.config.mjs +++ b/ava.config.mjs @@ -11,9 +11,13 @@ const rewritePaths = Object.fromEntries( export default { files: ["packages/*/test/**/*.spec.ts"], nodeArguments: ["--no-warnings", "--experimental-vm-modules"], + require: ["./packages/miniflare/test/setup.mjs"], workerThreads: inspector.url() === undefined, typescript: { compile: false, rewritePaths, }, + environmentVariables: { + MINIFLARE_ASSERT_BODIES_CONSUMED: "true", + }, }; diff --git a/package-lock.json b/package-lock.json index 5555f00e9..63033f590 100644 --- a/package-lock.json +++ b/package-lock.json @@ -4610,11 +4610,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/set-cookie-parser": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz", - "integrity": "sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==" - }, "node_modules/shebang-command": { "version": "2.0.0", "dev": true, @@ -5089,13 +5084,14 @@ } }, "node_modules/undici": { - "version": "5.13.0", - "license": "MIT", + "version": "5.23.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.23.0.tgz", + "integrity": "sha512-1D7w+fvRsqlQ9GscLBwcAJinqcZGHUKjbOmXdlE/v8BvEGXjeWAax+341q44EuTcHXXnfyKNbKRq4Lg7OzhMmg==", "dependencies": { "busboy": "^1.6.0" }, "engines": { - "node": ">=12.18" + "node": ">=14.0" } }, "node_modules/universalify": { @@ -5460,10 +5456,9 @@ "glob-to-regexp": "^0.4.1", "http-cache-semantics": "^4.1.0", "kleur": "^4.1.5", - "set-cookie-parser": "^2.6.0", "source-map-support": "0.5.21", "stoppable": "^1.1.0", - "undici": "^5.13.0", + "undici": "^5.22.1", "workerd": "1.20230814.1", "ws": "^8.11.0", "youch": "^3.2.2", @@ -7719,10 +7714,9 @@ "http-cache-semantics": "^4.1.0", "kleur": "^4.1.5", "semiver": "^1.1.0", - "set-cookie-parser": "^2.6.0", "source-map-support": "0.5.21", "stoppable": "^1.1.0", - "undici": "^5.13.0", + "undici": "^5.22.1", "workerd": "1.20230814.1", "ws": "^8.11.0", "youch": "^3.2.2", @@ -8275,11 +8269,6 @@ } } }, - "set-cookie-parser": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz", - "integrity": "sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==" - }, "shebang-command": { "version": "2.0.0", "dev": true, @@ -8563,7 +8552,9 @@ } }, "undici": { - "version": "5.13.0", + "version": "5.23.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.23.0.tgz", + "integrity": "sha512-1D7w+fvRsqlQ9GscLBwcAJinqcZGHUKjbOmXdlE/v8BvEGXjeWAax+341q44EuTcHXXnfyKNbKRq4Lg7OzhMmg==", "requires": { "busboy": "^1.6.0" } diff --git a/package.json b/package.json index 69e2f62d3..4ad95323c 100644 --- a/package.json +++ b/package.json @@ -27,7 +27,7 @@ "lint:fix": "npm run lint -- --fix", "prepublishOnly": "npm run lint && npm run clean && npm run build && npm run types:bundle && npm run test", "release": "./scripts/release.sh", - "test": "npm run build && ava && rimraf ./.tmp", + "test": "npm run build && ava --serial && rimraf ./.tmp", "types:build": "tsc && tsc -p packages/miniflare/src/workers/tsconfig.json", "types:bundle": "npm run types:build && node scripts/types.mjs" }, diff --git a/packages/miniflare/README.md b/packages/miniflare/README.md index 2d1147f97..3ddebeb6c 100644 --- a/packages/miniflare/README.md +++ b/packages/miniflare/README.md @@ -577,7 +577,9 @@ defined at the top-level. 
  Updates the configuration for this Miniflare instance and restarts the
  `workerd` server. Note unlike Miniflare 2, this does _not_ merge the new
-  configuration with the old configuration.
+  configuration with the old configuration. Note that calling this function will
+  invalidate any existing values returned by the `Miniflare#get*()` methods,
+  preventing them from being used.

- `ready: Promise<URL>`

@@ -592,7 +594,61 @@ defined at the top-level.
  no need to do that yourself first. Additionally, the host of the request's
  URL is always ignored and replaced with the `workerd` server's.

+- `getBindings<Env extends Record<string, unknown> = Record<string, unknown>>(workerName?: string): Promise<Env>`
+
+  Returns a `Promise` that resolves with a record mapping binding names to
+  bindings, for all bindings in the Worker with the specified `workerName`. If
+  `workerName` is not specified, defaults to the entrypoint Worker.
+
+- `getCaches(): Promise<CacheStorage>`
+
+  Returns a `Promise` that resolves with the
+  [`CacheStorage`](https://developers.cloudflare.com/workers/runtime-apis/cache/)
+  instance of the entrypoint Worker. This means if `cache: false` is set on the
+  entrypoint, calling methods on the resolved value won't do anything.
+
+- `getD1Database(bindingName: string, workerName?: string): Promise<D1Database>`
+
+  Returns a `Promise` that resolves with the
+  [`D1Database`](https://developers.cloudflare.com/d1/platform/client-api/)
+  instance corresponding to the specified `bindingName` of `workerName`. Note
+  `bindingName` must not begin with `__D1_BETA__`. If `workerName` is not
+  specified, defaults to the entrypoint Worker.
+
+- `getDurableObjectNamespace(bindingName: string, workerName?: string): Promise<DurableObjectNamespace>`
+
+  Returns a `Promise` that resolves with the
+  [`DurableObjectNamespace`](https://developers.cloudflare.com/workers/runtime-apis/durable-objects/#access-a-durable-object-from-a-worker)
+  instance corresponding to the specified `bindingName` of `workerName`. If
+  `workerName` is not specified, defaults to the entrypoint Worker.
+
+- `getKVNamespace(bindingName: string, workerName?: string): Promise<KVNamespace>`
+
+  Returns a `Promise` that resolves with the
+  [`KVNamespace`](https://developers.cloudflare.com/workers/runtime-apis/kv/)
+  instance corresponding to the specified `bindingName` of `workerName`. If
+  `workerName` is not specified, defaults to the entrypoint Worker.
+
+- `getQueueProducer<Body = unknown>(bindingName: string, workerName?: string): Promise<Queue<Body>>`
+
+  Returns a `Promise` that resolves with the
+  [`Queue`](https://developers.cloudflare.com/queues/platform/javascript-apis/)
+  producer instance corresponding to the specified `bindingName` of
+  `workerName`. If `workerName` is not specified, defaults to the entrypoint
+  Worker.
+
+- `getR2Bucket(bindingName: string, workerName?: string): Promise<R2Bucket>`
+
+  Returns a `Promise` that resolves with the
+  [`R2Bucket`](https://developers.cloudflare.com/r2/api/workers/workers-api-reference/)
+  instance corresponding to the specified `bindingName` of `workerName`. If
+  `workerName` is not specified, defaults to the entrypoint Worker.
+
- `dispose(): Promise<void>`

  Cleans up the Miniflare instance, and shuts down the `workerd` server. Note
-  that after this is called, `setOptions` and `dispatchFetch` cannot be called.
+  that after this is called, `Miniflare#setOptions()` and
+  `Miniflare#dispatchFetch()` cannot be called. Additionally, calling this
+  function will invalidate any values returned by the `Miniflare#get*()`
+  methods, preventing them from being used.
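To make the new accessor methods above concrete, here is a minimal usage sketch from Node.js (the inline script and the `NAMESPACE` binding name are hypothetical):

```ts
import { Miniflare } from "miniflare";

// Hypothetical single-worker instance with one KV namespace binding
const mf = new Miniflare({
  modules: true,
  script: `export default { fetch: () => new Response("ok") }`,
  kvNamespaces: ["NAMESPACE"],
});

// Values resolved from `get*()` methods are usable directly in Node.js
const kv = await mf.getKVNamespace("NAMESPACE");
await kv.put("key", "value");
console.log(await kv.get("key")); // "value"

// ...or fetch every binding of the entrypoint Worker at once
const bindings = await mf.getBindings();
console.log(Object.keys(bindings)); // ["NAMESPACE"]

// Calling `setOptions()`/`dispose()` invalidates `kv` and `bindings`
await mf.dispose();
```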
diff --git a/packages/miniflare/package.json b/packages/miniflare/package.json index 4456025cf..9f395ea3c 100644 --- a/packages/miniflare/package.json +++ b/packages/miniflare/package.json @@ -35,10 +35,9 @@ "glob-to-regexp": "^0.4.1", "http-cache-semantics": "^4.1.0", "kleur": "^4.1.5", - "set-cookie-parser": "^2.6.0", "source-map-support": "0.5.21", "stoppable": "^1.1.0", - "undici": "^5.13.0", + "undici": "^5.22.1", "workerd": "1.20230814.1", "ws": "^8.11.0", "youch": "^3.2.2", diff --git a/packages/miniflare/src/http/request.ts b/packages/miniflare/src/http/request.ts index 972849a34..759371742 100644 --- a/packages/miniflare/src/http/request.ts +++ b/packages/miniflare/src/http/request.ts @@ -28,9 +28,11 @@ export class Request< // error. [kCf]?: CfType; - constructor(input: RequestInfo, init?: RequestInit) { + constructor(input: RequestInfo | Request, init?: RequestInit) { super(input, init); this[kCf] = init?.cf; + // Prefer `cf` from `init`, but if it's set on `input`, use that + if (input instanceof Request) this[kCf] ??= input.cf; } get cf() { diff --git a/packages/miniflare/src/index.ts b/packages/miniflare/src/index.ts index fd33c3289..77bd7cb7d 100644 --- a/packages/miniflare/src/index.ts +++ b/packages/miniflare/src/index.ts @@ -8,9 +8,16 @@ import path from "path"; import { Duplex, Transform, Writable } from "stream"; import { ReadableStream } from "stream/web"; import zlib from "zlib"; -import type { RequestInitCfProperties } from "@cloudflare/workers-types/experimental"; +import type { + CacheStorage, + D1Database, + DurableObjectNamespace, + KVNamespace, + Queue, + R2Bucket, + RequestInitCfProperties, +} from "@cloudflare/workers-types/experimental"; import exitHook from "exit-hook"; -import { splitCookiesString } from "set-cookie-parser"; import stoppable from "stoppable"; import { WebSocketServer } from "ws"; import { z } from "zod"; @@ -26,21 +33,29 @@ import { fetch, } from "./http"; import { + D1_PLUGIN_NAME, + DURABLE_OBJECTS_PLUGIN_NAME, DispatchFetch, DurableObjectClassNames, GatewayConstructor, GatewayFactory, HEADER_CF_BLOB, + KV_PLUGIN_NAME, PLUGIN_ENTRIES, Persistence, PluginServicesOptions, Plugins, + ProxyClient, + QUEUES_PLUGIN_NAME, QueueConsumers, QueuesError, + R2_PLUGIN_NAME, + ReplaceWorkersTypes, SharedOptions, SourceMapRegistry, WorkerOptions, getGlobalServices, + kProxyNodeBinding, maybeGetSitesManifestModule, normaliseDurableObject, } from "./plugins"; @@ -56,6 +71,7 @@ import { } from "./plugins/core"; import { Config, + Extension, Runtime, RuntimeOptions, Service, @@ -78,7 +94,7 @@ import { maybeApply, } from "./shared"; import { Storage } from "./storage"; -import { CoreHeaders } from "./workers"; +import { CoreBindings, CoreHeaders } from "./workers"; // ===== `Miniflare` User Options ===== export type MiniflareOptions = SharedOptions & @@ -251,6 +267,47 @@ function getWorkerRoutes( return allRoutes; } +// Get the name of a binding in the `ProxyServer`'s `env` +function getProxyBindingName(plugin: string, worker: string, binding: string) { + return [ + CoreBindings.DURABLE_OBJECT_NAMESPACE_PROXY, + plugin, + worker, + binding, + ].join(":"); +} +// Get whether a binding will need a proxy to be supported in Node (i.e. is +// the implementation of this binding in `workerd`?). If this returns `false`, +// there's no need to bind the binding to the `ProxyServer`. 
+function isNativeTargetBinding(binding: Worker_Binding) {
+  return !(
+    "json" in binding ||
+    "wasmModule" in binding ||
+    "text" in binding ||
+    "data" in binding
+  );
+}
+// Converts a regular worker binding to a binding suitable for the `ProxyServer`.
+function buildProxyBinding(
+  plugin: string,
+  worker: string,
+  binding: Worker_Binding
+): Worker_Binding {
+  assert(binding.name !== undefined);
+  const name = getProxyBindingName(plugin, worker, binding.name);
+  const proxyBinding = { ...binding, name };
+  // If this is a Durable Object namespace binding to the current worker,
+  // make sure it continues to point to that worker when bound elsewhere
+  if (
+    "durableObjectNamespace" in proxyBinding &&
+    proxyBinding.durableObjectNamespace !== undefined
+  ) {
+    proxyBinding.durableObjectNamespace.serviceName ??=
+      getUserServiceName(worker);
+  }
+  return proxyBinding;
+}
+
 // ===== `Miniflare` Internal Storage & Routing =====
 type OptionalGatewayFactoryType<
   Gateway extends GatewayConstructor | undefined
@@ -315,7 +372,7 @@ async function writeResponse(response: Response, res: http.ServerResponse) {
     const key = entry[0].toLowerCase();
     const value = entry[1];
     if (key === "set-cookie") {
-      headers[key] = splitCookiesString(value);
+      headers[key] = response.headers.getSetCookie();
     } else {
       headers[key] = value;
     }
@@ -391,6 +448,15 @@ function safeReadableStreamFrom(iterable: AsyncIterable<Uint8Array>) {
   );
 }

+// Maps `Miniflare` instances to stack traces for their construction. Used to
+// identify un-`dispose()`d instances.
+let maybeInstanceRegistry:
+  | Map<Miniflare, string>
+  | undefined;
+/** @internal */
+export function _initialiseInstanceRegistry() {
+  return (maybeInstanceRegistry = new Map());
+}
+
 export class Miniflare {
   readonly #gatewayFactories: PluginGatewayFactories;
   readonly #routers: PluginRouters;
@@ -404,6 +470,7 @@ export class Miniflare {
   #runtime?: Runtime;
   #removeRuntimeExitHook?: () => void;
   #runtimeEntryURL?: URL;
+  #proxyClient?: ProxyClient;
   #sourceMapRegistry?: SourceMapRegistry;

   // Path to temporary directory for use as scratch space/"in-memory" Durable
@@ -438,6 +505,15 @@ export class Miniflare {
     const [sharedOpts, workerOpts] = validateOptions(opts);
     this.#sharedOpts = sharedOpts;
     this.#workerOpts = workerOpts;
+
+    // Add to registry after initial options validation, before any servers/
+    // child processes are started
+    if (maybeInstanceRegistry !== undefined) {
+      const object = { name: "Miniflare", stack: "" };
+      Error.captureStackTrace(object, Miniflare);
+      maybeInstanceRegistry.set(this, object.stack);
+    }
+
     this.#log = this.#sharedOpts.core.log ?? new NoOpLog();
     this.#timers = this.#sharedOpts.core.timers ?? defaultTimers;
     this.#host = this.#sharedOpts.core.host ?? "127.0.0.1";
@@ -782,23 +858,14 @@ export class Miniflare {
     // Use Map to dedupe services by name
     const services = new Map<string, Service>();
-    const globalServices = getGlobalServices({
-      sharedOptions: sharedOpts.core,
-      allWorkerRoutes,
-      fallbackWorkerName: this.#workerOpts[0].core.name,
-      loopbackPort,
-      log: this.#log,
-    });
-    for (const service of globalServices) {
-      // Global services should all have unique names
-      assert(service.name !== undefined && !services.has(service.name));
-      services.set(service.name, service);
-    }
     const sockets: Socket[] = [await configureEntrySocket(sharedOpts.core)];

+    // Bindings for `ProxyServer` Durable Object
+    const proxyBindings: Worker_Binding[] = [];
     for (let i = 0; i < allWorkerOpts.length; i++) {
       const workerOpts = allWorkerOpts[i];
+      const workerName = workerOpts.core.name ??
""; // Collect all bindings from this worker const workerBindings: Worker_Binding[] = []; @@ -808,7 +875,14 @@ export class Miniflare { // missing in other plugins' options. const pluginBindings = await plugin.getBindings(workerOpts[key], i); if (pluginBindings !== undefined) { - workerBindings.push(...pluginBindings); + for (const binding of pluginBindings) { + workerBindings.push(binding); + // Only `workerd` native bindings need to be proxied, the rest are + // already supported by Node.js (e.g. json, text/data blob, wasm) + if (isNativeTargetBinding(binding)) { + proxyBindings.push(buildProxyBinding(key, workerName, binding)); + } + } if (key === "kv") { // Add "__STATIC_CONTENT_MANIFEST" module if sites enabled @@ -851,11 +925,57 @@ export class Miniflare { } } + // For testing proxy client serialisation, add an API that just returns its + // arguments. Note without the `.pipeThrough(new TransformStream())` below, + // we'll see `TypeError: Inter-TransformStream ReadableStream.pipeTo() is + // not implemented.`. `IdentityTransformStream` doesn't work here. + // TODO(soon): add support for wrapped bindings and remove this. The API + // will probably look something like `{ wrappedBindings: { A: "a" } }` + // where `"a"` is the name of a "worker" in `workers`. + const extensions: Extension[] = [ + { + modules: [ + { + name: "miniflare-internal:identity", + internal: true, // Not accessible to user code + esModule: ` + class Identity { + async asyncIdentity(...args) { + const i = args.findIndex((arg) => arg instanceof ReadableStream); + if (i !== -1) args[i] = args[i].pipeThrough(new TransformStream()); + return args; + } + } + export default function() { return new Identity(); } + `, + }, + ], + }, + ]; + proxyBindings.push({ + name: "IDENTITY", + wrapped: { moduleName: "miniflare-internal:identity" }, + }); + + const globalServices = getGlobalServices({ + sharedOptions: sharedOpts.core, + allWorkerRoutes, + fallbackWorkerName: this.#workerOpts[0].core.name, + loopbackPort, + log: this.#log, + proxyBindings, + }); + for (const service of globalServices) { + // Global services should all have unique names + assert(service.name !== undefined && !services.has(service.name)); + services.set(service.name, service); + } + // Once we've assembled the config, and are about to restart the runtime, // update the source map registry. this.#sourceMapRegistry = sourceMapRegistry; - return { services: Array.from(services.values()), sockets }; + return { services: Array.from(services.values()), sockets, extensions }; } async #assembleAndUpdateConfig() { @@ -883,6 +1003,17 @@ export class Miniflare { this.#runtimeEntryURL = new URL( `${secure ? "https" : "http"}://${this.#accessibleHost}:${maybePort}` ); + if (this.#proxyClient === undefined) { + this.#proxyClient = new ProxyClient( + this.#runtimeEntryURL, + this.dispatchFetch + ); + } else { + // The `ProxyServer` "heap" will have been destroyed when `workerd` was + // restarted, invalidating all existing native target references. Mark + // all proxies as invalid, noting the new runtime URL to send requests to. 
+ this.#proxyClient.poisonProxies(this.#runtimeEntryURL); + } if (!this.#runtimeMutex.hasWaiting) { // Only log and trigger reload if there aren't pending updates @@ -978,6 +1109,25 @@ export class Miniflare { throw reviveError(this.#workerSrcOpts, caught); } + if ( + process.env.MINIFLARE_ASSERT_BODIES_CONSUMED !== undefined && + response.body !== null + ) { + // Throw an uncaught exception if the body from this response isn't + // consumed "immediately". `undici` may hang or throw socket errors if we + // don't remember to do this: + // https://github.com/nodejs/undici/issues/583#issuecomment-1577468249 + const originalLimit = Error.stackTraceLimit; + Error.stackTraceLimit = Infinity; + const error = new Error( + "`body` returned from `Miniflare#dispatchFetch()` not consumed immediately" + ); + Error.stackTraceLimit = originalLimit; + setImmediate(() => { + if (!response.bodyUsed) throw error; + }); + } + return response; }; @@ -992,6 +1142,114 @@ export class Miniflare { return factory.getStorage(namespace, persist); } + /** @internal */ + async _getProxyClient(): Promise { + this.#checkDisposed(); + await this.ready; + assert(this.#proxyClient !== undefined); + return this.#proxyClient; + } + + async getBindings>( + workerName?: string + ): Promise { + const bindings: Record = {}; + const proxyClient = await this._getProxyClient(); + + // Find worker by name, defaulting to entrypoint worker if none specified + let workerOpts: PluginWorkerOptions | undefined; + if (workerName === undefined) { + workerOpts = this.#workerOpts[0]; + } else { + workerOpts = this.#workerOpts.find( + ({ core }) => (core.name ?? "") === workerName + ); + if (workerOpts === undefined) { + throw new TypeError(`${JSON.stringify(workerName)} worker not found`); + } + } + workerName = workerOpts.core.name ?? ""; + + // Populate bindings from each plugin + for (const [key, plugin] of PLUGIN_ENTRIES) { + // @ts-expect-error `CoreOptionsSchema` has required options which are + // missing in other plugins' options. + const pluginBindings = await plugin.getNodeBindings(workerOpts[key]); + for (const [name, binding] of Object.entries(pluginBindings)) { + if (binding === kProxyNodeBinding) { + const proxyBindingName = getProxyBindingName(key, workerName, name); + const proxy = proxyClient.env[proxyBindingName]; + assert( + proxy !== undefined, + `Expected ${proxyBindingName} to be bound` + ); + bindings[name] = proxy; + } else { + bindings[name] = binding; + } + } + } + + return bindings as Env; + } + + async #getProxy( + pluginName: string, + bindingName: string, + workerName?: string + ): Promise { + const proxyClient = await this._getProxyClient(); + const proxyBindingName = getProxyBindingName( + pluginName, + // Default to entrypoint worker if none specified + workerName ?? this.#workerOpts[0].core.name ?? "", + bindingName + ); + const proxy = proxyClient.env[proxyBindingName]; + if (proxy === undefined) { + // If the user specified an invalid binding/worker name, throw + const friendlyWorkerName = + workerName === undefined ? 
"entrypoint" : JSON.stringify(workerName); + throw new TypeError( + `${JSON.stringify(bindingName)} unbound in ${friendlyWorkerName} worker` + ); + } + return proxy as T; + } + // TODO(someday): would be nice to define these in plugins + async getCaches(): Promise> { + const proxyClient = await this._getProxyClient(); + return proxyClient.global + .caches as unknown as ReplaceWorkersTypes; + } + getD1Database(bindingName: string, workerName?: string): Promise { + return this.#getProxy(D1_PLUGIN_NAME, bindingName, workerName); + } + getDurableObjectNamespace( + bindingName: string, + workerName?: string + ): Promise> { + return this.#getProxy(DURABLE_OBJECTS_PLUGIN_NAME, bindingName, workerName); + } + getKVNamespace( + bindingName: string, + workerName?: string + ): Promise> { + return this.#getProxy(KV_PLUGIN_NAME, bindingName, workerName); + } + getQueueProducer( + bindingName: string, + workerName?: string + ): Promise> { + return this.#getProxy(QUEUES_PLUGIN_NAME, bindingName, workerName); + } + getR2Bucket( + bindingName: string, + workerName?: string + ): Promise> { + return this.#getProxy(R2_PLUGIN_NAME, bindingName, workerName); + } + async dispose(): Promise { this.#disposeController.abort(); try { @@ -1002,10 +1260,15 @@ export class Miniflare { this.#removeRuntimeExitHook?.(); // Cleanup as much as possible even if `#init()` threw + await this.#proxyClient?.dispose(); await this.#runtime?.dispose(); await this.#stopLoopbackServer(); // `rm -rf ${#tmpPath}`, this won't throw if `#tmpPath` doesn't exist await fs.promises.rm(this.#tmpPath, { force: true, recursive: true }); + + // Remove from instance registry as last step in `finally`, to make sure + // all dispose steps complete + maybeInstanceRegistry?.delete(this); } } } diff --git a/packages/miniflare/src/plugins/cache/index.ts b/packages/miniflare/src/plugins/cache/index.ts index 6383515aa..0b543a81e 100644 --- a/packages/miniflare/src/plugins/cache/index.ts +++ b/packages/miniflare/src/plugins/cache/index.ts @@ -67,6 +67,9 @@ export const CACHE_PLUGIN: Plugin< getBindings() { return []; }, + getNodeBindings() { + return {}; + }, getServices({ sharedOptions, options, workerIndex }) { const persistBinding = encodePersist(sharedOptions.cachePersist); return [ diff --git a/packages/miniflare/src/plugins/core/index.ts b/packages/miniflare/src/plugins/core/index.ts index 5ccb8fde8..68959024a 100644 --- a/packages/miniflare/src/plugins/core/index.ts +++ b/packages/miniflare/src/plugins/core/index.ts @@ -22,6 +22,7 @@ import { Log, MiniflareCoreError, Timers, + viewToBuffer, } from "../../shared"; import { CoreBindings, CoreHeaders } from "../../workers"; import { getCacheServiceName } from "../cache"; @@ -32,6 +33,7 @@ import { SERVICE_LOOPBACK, SourceMapRegistry, WORKER_BINDING_SERVICE_LOOPBACK, + kProxyNodeBinding, parseRoutes, } from "../shared"; import { @@ -314,6 +316,51 @@ export const CORE_PLUGIN: Plugin< return Promise.all(bindings); }, + async getNodeBindings(options) { + const bindingEntries: Awaitable[] = []; + + if (options.bindings !== undefined) { + bindingEntries.push( + ...Object.entries(options.bindings).map(([name, value]) => [ + name, + JSON.parse(JSON.stringify(value)), + ]) + ); + } + if (options.wasmBindings !== undefined) { + bindingEntries.push( + ...Object.entries(options.wasmBindings).map(([name, path]) => + fs + .readFile(path) + .then((buffer) => [name, new WebAssembly.Module(buffer)]) + ) + ); + } + if (options.textBlobBindings !== undefined) { + bindingEntries.push( + 
...Object.entries(options.textBlobBindings).map(([name, path]) => + fs.readFile(path, "utf8").then((text) => [name, text]) + ) + ); + } + if (options.dataBlobBindings !== undefined) { + bindingEntries.push( + ...Object.entries(options.dataBlobBindings).map(([name, path]) => + fs.readFile(path).then((buffer) => [name, viewToBuffer(buffer)]) + ) + ); + } + if (options.serviceBindings !== undefined) { + bindingEntries.push( + ...Object.keys(options.serviceBindings).map((name) => [ + name, + kProxyNodeBinding, + ]) + ); + } + + return Object.fromEntries(await Promise.all(bindingEntries)); + }, async getServices({ log, options, @@ -415,6 +462,7 @@ export interface GlobalServicesOptions { fallbackWorkerName: string | undefined; loopbackPort: number; log: Log; + proxyBindings: Worker_Binding[]; } export function getGlobalServices({ sharedOptions, @@ -422,6 +470,7 @@ export function getGlobalServices({ fallbackWorkerName, loopbackPort, log, + proxyBindings, }: GlobalServicesOptions): Service[] { // Collect list of workers we could route to, then parse and sort all routes const workerNames = [...allWorkerRoutes.keys()]; @@ -441,6 +490,15 @@ export function getGlobalServices({ name: CoreBindings.SERVICE_USER_ROUTE_PREFIX + name, service: { name: getUserServiceName(name) }, })), + { + name: CoreBindings.DURABLE_OBJECT_NAMESPACE_PROXY, + durableObjectNamespace: { className: "ProxyServer" }, + }, + // Add `proxyBindings` here, they'll be added to the `ProxyServer` `env`. + // It would be nice if we didn't add all these bindings to the entry worker, + // but the entry worker shares lots of `devalue` code with the proxy, and + // we'd rather not duplicate that. + ...proxyBindings, ]; if (sharedOptions.upstream !== undefined) { serviceEntryBindings.push({ @@ -467,6 +525,18 @@ export function getGlobalServices({ compatibilityDate: "2023-04-04", compatibilityFlags: ["nodejs_compat", "service_binding_extra_handlers"], bindings: serviceEntryBindings, + durableObjectNamespaces: [ + { + className: "ProxyServer", + uniqueKey: `${SERVICE_ENTRY}-ProxyServer`, + }, + ], + // `ProxyServer` doesn't make use of Durable Object storage + durableObjectStorage: { inMemory: kVoid }, + // Always use the entrypoints cache implementation for proxying. This + // means if the entrypoint disables caching, proxied cache operations + // will be no-ops. Note we always require at least one worker to be set. + cacheApiOutbound: { name: "cache:0" }, }, }, { @@ -535,6 +605,7 @@ function getWorkerScript( } export * from "./errors"; +export * from "./proxy"; export * from "./constants"; export * from "./modules"; export * from "./services"; diff --git a/packages/miniflare/src/plugins/core/proxy/README.md b/packages/miniflare/src/plugins/core/proxy/README.md new file mode 100644 index 000000000..b50f8b02d --- /dev/null +++ b/packages/miniflare/src/plugins/core/proxy/README.md @@ -0,0 +1,49 @@ +# Miniflare API Proxy + +Miniflare provides methods for accessing bindings from Node.js, outside a +`workerd` process. These are implemented by an API proxy that proxies accesses +and calls to properties and methods over HTTP calls. The directory implements +the client for this. The server is implemented as a Durable Object in +[`src/workers/core/proxy.worker.ts`](../../../workers/core/proxy.worker.ts). + +Using a Durable Object allows us to share references containing I/O objects like +streams across requests. The Durable Object contains a "heap", mapping addresses +to references. The "heap" is initialised with `globalThis` and `env`. 
+
+The proxy client builds proxies for each object on the heap. Accessing a
+property on a proxy will result in a synchronous `GET` operation to the proxy
+server. If the property is not a method, the value will be serialised back to
+the client. Note this may involve putting more references on the "heap".
+
+If the property is a method, the client will return a function from the access.
+Future accesses to the same property key will return the same function without
+the synchronous `GET` operation. Calling this function will serialise all the
+arguments, then perform a synchronous `CALL` operation on the target, and the
+return value will be serialised back to the client. If this function returns a
+`Promise`, it will be stored on the "heap", and a reference returned. An
+asynchronous `GET` operation will then be performed to resolve the `Promise`,
+and serialise the result. If a function returns a `Promise` once, all future
+calls will send asynchronous `CALL` operations instead, which resolve the
+`Promise` without an additional round trip.
+
+If the function call had `ReadableStream` arguments, the first will be sent
+unbuffered after the rest of the arguments. All function calls with
+`ReadableStream` or `Blob` arguments are assumed to be asynchronous. This
+assumption is required as synchronous operations block the main thread and
+prevent chunks from `ReadableStream`s being read.
+
+If the function call threw, or returned a `Promise` that rejected, the error
+will be serialised and re-thrown/rejected in the client. Note that the stack
+trace will be updated to reflect the calling location in the client, not the
+server.
+
+To prevent unbounded growth of the "heap", all proxies are registered with a
+`FinalizationRegistry`. When a proxy is garbage collected, a `FREE` operation
+will remove the corresponding "heap" entry, allowing it to be garbage collected
+on the server too.
+
+When `workerd` is restarted with `Miniflare#setOptions()` or stopped with
+`Miniflare#dispose()`, all proxies are _poisoned_. Once a proxy is poisoned, it
+cannot be used, and must be recreated. Poisoned proxies are unregistered from
+the `FinalizationRegistry` too, as the addresses they point to will be invalid
+and shouldn't be freed again.
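The freeing and poisoning scheme described above is essentially a versioned `FinalizationRegistry` pattern. A minimal self-contained sketch of the idea (simplified, with a hypothetical `free()` callback standing in for the real `FREE` HTTP operation):

```ts
type HeldValue = { address: number; version: number };

class Bridge {
  #version = 0;
  readonly #registry = new FinalizationRegistry<HeldValue>((held) => {
    // Ignore stale entries registered before the last poisonProxies() call
    if (held.version !== this.#version) return;
    this.free(held.address); // fire-and-forget FREE operation to the server
  });

  constructor(private free: (address: number) => void) {}

  getProxy(address: number): object {
    const proxy = new Proxy({}, {}); // real handler forwards gets/calls over HTTP
    // Registering with `this` as the unregister token lets poisonProxies()
    // drop every live entry in one call
    this.#registry.register(proxy, { address, version: this.#version }, this);
    return proxy;
  }

  poisonProxies(): void {
    this.#version++; // existing proxies now fail their version check
    this.#registry.unregister(this); // their addresses must never be freed
  }
}
```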
diff --git a/packages/miniflare/src/plugins/core/proxy/client.ts b/packages/miniflare/src/plugins/core/proxy/client.ts new file mode 100644 index 000000000..3d1a5d390 --- /dev/null +++ b/packages/miniflare/src/plugins/core/proxy/client.ts @@ -0,0 +1,503 @@ +/* eslint-disable @typescript-eslint/ban-types */ +import assert from "assert"; +import { ReadableStream, TransformStream } from "stream/web"; +import util from "util"; +import type { ServiceWorkerGlobalScope } from "@cloudflare/workers-types/experimental"; +import { stringify } from "devalue"; +import { Headers } from "undici"; +import { Request, Response } from "../../../http"; +import { Awaitable, prefixStream, readPrefix } from "../../../shared"; +import { + CoreHeaders, + ProxyAddresses, + ProxyOps, + ReducersRevivers, + StringifiedWithStream, + createHTTPReducers, + createHTTPRevivers, + isFetcherFetch, + isR2ObjectWriteHttpMetadata, + parseWithReadableStreams, + stringifyWithStreams, + structuredSerializableReducers, + structuredSerializableRevivers, +} from "../../../workers"; +import { DispatchFetch } from "../../shared"; +import { DECODER, SynchronousFetcher, SynchronousResponse } from "./fetch-sync"; +import { NODE_PLATFORM_IMPL } from "./types"; + +const kAddress = Symbol("kAddress"); +const kName = Symbol("kName"); +interface NativeTarget { + // `kAddress` is used as a brand for `NativeTarget`. Pointer to the "heap" + // map in the `ProxyServer` Durable Object. + [kAddress]: number; + // Use `Symbol` for name too, so we can use it as a unique property key in + // `ProxyClientHandler`. Usually the `.constructor.name` of the object. + [kName]: string; +} +function isNativeTarget(value: unknown): value is NativeTarget { + return typeof value === "object" && value !== null && kAddress in value; +} + +// Special targets for objects automatically added to the `ProxyServer` "heap" +const TARGET_GLOBAL: NativeTarget = { + [kAddress]: ProxyAddresses.GLOBAL, + [kName]: "global", +}; +const TARGET_ENV: NativeTarget = { + [kAddress]: ProxyAddresses.ENV, + [kName]: "env", +}; + +const reducers: ReducersRevivers = { + ...structuredSerializableReducers, + ...createHTTPReducers(NODE_PLATFORM_IMPL), + Native(value) { + if (isNativeTarget(value)) return [value[kAddress], value[kName]]; + }, +}; +const revivers: ReducersRevivers = { + ...structuredSerializableRevivers, + ...createHTTPRevivers(NODE_PLATFORM_IMPL), + // `Native` reviver depends on `ProxyStubHandler` methods +}; + +// Exported public API of the proxy system +export class ProxyClient { + #bridge: ProxyClientBridge; + + constructor(runtimeEntryURL: URL, dispatchFetch: DispatchFetch) { + this.#bridge = new ProxyClientBridge(runtimeEntryURL, dispatchFetch); + } + + // Lazily initialise proxies as required + #globalProxy?: ServiceWorkerGlobalScope; + #envProxy?: Record; + get global(): ServiceWorkerGlobalScope { + return (this.#globalProxy ??= this.#bridge.getProxy(TARGET_GLOBAL)); + } + get env(): Record { + return (this.#envProxy ??= this.#bridge.getProxy(TARGET_ENV)); + } + + poisonProxies(runtimeEntryURL?: URL): void { + this.#bridge.poisonProxies(runtimeEntryURL); + // Reset `#{global,env}Proxy` so they aren't poisoned on next access + this.#globalProxy = undefined; + this.#envProxy = undefined; + } + + dispose(): Promise { + // Intentionally not resetting `#{global,env}Proxy` to keep them poisoned. + // `workerd` won't be started again by this `Miniflare` instance after + // `dispose()` is called. 
+ return this.#bridge.dispose(); + } +} + +interface NativeTargetHeldValue { + address: number; + version: number; +} + +// Class containing functions that should accessible by both `ProxyClient` and +// `ProxyStubHandler`, but not exported to consumers of `ProxyClient` +class ProxyClientBridge { + // Each proxy stub is initialised with the version stored here. Whenever + // `poisonProxies()` is called, this version is incremented. Before the + // proxy makes any request to `workerd`, it checks the version number here + // matches its own internal version, and throws if not. + #version = 0; + // Whenever the `ProxyServer` returns a native target, it adds a strong + // reference to the "heap" in the singleton object. This prevents the object + // being garbage collected. To solve this, we register the native target + // proxies on the client in a `FinalizationRegistry`. When the proxies get + // garbage collected, we let the `ProxyServer` know it can release the strong + // "heap" reference, as we'll never be able to access it again. Importantly, + // we need to unregister all proxies from the registry when we poison them, + // as the references will be invalid, and a new object with the same address + // may be added to the "heap". + readonly #finalizationRegistry: FinalizationRegistry; + readonly sync = new SynchronousFetcher(); + + constructor(public url: URL, readonly dispatchFetch: DispatchFetch) { + this.#finalizationRegistry = new FinalizationRegistry(this.#finalizeProxy); + } + + get version(): number { + return this.#version; + } + + #finalizeProxy = async (held: NativeTargetHeldValue) => { + // Sanity check: make sure the proxy hasn't been poisoned. We should + // unregister all proxies from the finalisation registry when poisoning, + // but it doesn't hurt to be careful. + if (held.version !== this.#version) return; + + // Called when the `Proxy` with address `targetAddress` gets garbage + // collected. This removes the target from the `ProxyServer` "heap". + try { + await this.dispatchFetch(this.url, { + method: "DELETE", + headers: { + [CoreHeaders.OP]: ProxyOps.FREE, + [CoreHeaders.OP_TARGET]: held.address.toString(), + }, + }); + } catch { + // Ignore network errors when freeing. If this `dispatchFetch()` throws, + // it's likely the runtime has shutdown, so the entire "heap" has been + // destroyed anyway. + } + }; + + getProxy(target: NativeTarget): T { + const handler = new ProxyStubHandler(this, target); + const proxy = new Proxy( + { [util.inspect.custom]: handler.inspect } as T, + handler + ); + const held: NativeTargetHeldValue = { + address: target[kAddress], + version: this.#version, + }; + this.#finalizationRegistry.register(proxy, held, this); + return proxy; + } + + poisonProxies(url?: URL): void { + this.#version++; + // This function will be called whenever the runtime restarts. The URL may + // be different if the port has changed. We must also unregister all + // finalizers as the heap will be reset, and we don't want a new object + // added with the same address to be freed when it's still accessible. 
+    if (url !== undefined) this.url = url;
+    this.#finalizationRegistry.unregister(this);
+  }
+
+  dispose(): Promise<void> {
+    this.poisonProxies();
+    return this.sync.dispose();
+  }
+}
+
+class ProxyStubHandler<T extends object> implements ProxyHandler<T> {
+  readonly #version: number;
+  readonly #stringifiedTarget: string;
+  readonly #known = new Map<string | symbol, unknown>();
+
+  revivers: ReducersRevivers = {
+    ...revivers,
+    Native: (value) => {
+      assert(Array.isArray(value));
+      const [address, name] = value as unknown[];
+      assert(typeof address === "number");
+      assert(typeof name === "string");
+      const target: NativeTarget = { [kAddress]: address, [kName]: name };
+      if (name === "Promise") {
+        // We'll only see `Promise`s here if we're parsing from
+        // `#parseSyncResponse`. In that case, we'll want to make an async fetch
+        // to actually resolve the `Promise` and get the value.
+        const resPromise = this.bridge.dispatchFetch(this.bridge.url, {
+          method: "POST",
+          headers: {
+            [CoreHeaders.OP]: ProxyOps.GET, // GET without key just gets target
+            [CoreHeaders.OP_TARGET]: stringify(target, reducers),
+          },
+        });
+        return this.#parseAsyncResponse(resPromise);
+      } else {
+        // Otherwise, return a `Proxy` for this target
+        return this.bridge.getProxy(target);
+      }
+    },
+  };
+
+  constructor(
+    readonly bridge: ProxyClientBridge,
+    readonly target: NativeTarget
+  ) {
+    this.#version = bridge.version;
+    this.#stringifiedTarget = stringify(this.target, reducers);
+  }
+
+  get #poisoned() {
+    return this.#version !== this.bridge.version;
+  }
+  #assertSafe() {
+    if (this.#poisoned) {
+      throw new Error(
+        "Attempted to use poisoned stub. Stubs to runtime objects must be " +
+          "re-created after calling `Miniflare#setOptions()` or `Miniflare#dispose()`."
+      );
+    }
+  }
+
+  inspect = (depth: number, options: util.InspectOptions) => {
+    const details = { name: this.target[kName], poisoned: this.#poisoned };
+    return `ProxyStub ${util.inspect(details, options)}`;
+  };
+
+  #maybeThrow(
+    res: { status: number },
+    result: unknown,
+    caller: Function
+  ): unknown {
+    if (res.status === 500) {
+      if (typeof result === "object" && result !== null) {
+        // Update the stack trace to include the calling location in Node
+        // (as opposed to inside the proxy server), which is much more useful
+        // for debugging. Specifying the original `caller` here hides our
+        // internal implementation functions from the stack.
+        Error.captureStackTrace(result, caller);
+      }
+      throw result;
+    } else {
+      // Returning a non-200/500 is an internal error. Note we special case
+      // `Fetcher#fetch()` calls, so users can still return any status code.
+ assert(res.status === 200); + return result; + } + } + async #parseAsyncResponse(resPromise: Promise): Promise { + const res = await resPromise; + + const typeHeader = res.headers.get(CoreHeaders.OP_RESULT_TYPE); + if (typeHeader === "Promise, ReadableStream") return res.body; + assert(typeHeader === "Promise"); // Must be async + + let stringifiedResult: string; + let unbufferedStream: ReadableStream | undefined; + const stringifiedSizeHeader = res.headers.get( + CoreHeaders.OP_STRINGIFIED_SIZE + ); + if (stringifiedSizeHeader === null) { + // No unbuffered stream + stringifiedResult = await res.text(); + } else { + // Response contains unbuffered `ReadableStream` + const stringifiedSize = parseInt(stringifiedSizeHeader); + assert(!Number.isNaN(stringifiedSize)); + assert(res.body !== null); + const [buffer, rest] = await readPrefix(res.body, stringifiedSize); + stringifiedResult = buffer.toString(); + // Need to `.pipeThrough()` here otherwise we'll get + // `TypeError: Response body object should not be disturbed or locked` + // when trying to construct a `Response` with the stream. + // TODO(soon): add support for MINIFLARE_ASSERT_BODIES_CONSUMED here + unbufferedStream = rest.pipeThrough(new TransformStream()); + } + + const result = parseWithReadableStreams( + NODE_PLATFORM_IMPL, + { value: stringifiedResult, unbufferedStream }, + this.revivers + ); + // We get an empty stack trace if we thread the caller through here, + // specifying `this.#parseAsyncResponse` is good enough though, we just + // get an extra `processTicksAndRejections` entry + return this.#maybeThrow(res, result, this.#parseAsyncResponse); + } + #parseSyncResponse(syncRes: SynchronousResponse, caller: Function): unknown { + assert(syncRes.body !== null); + // Unbuffered streams should only be sent as part of async responses + assert(syncRes.headers.get(CoreHeaders.OP_STRINGIFIED_SIZE) === null); + if (syncRes.body instanceof ReadableStream) return syncRes.body; + + const stringifiedResult = DECODER.decode(syncRes.body); + const result = parseWithReadableStreams( + NODE_PLATFORM_IMPL, + { value: stringifiedResult }, + this.revivers + ); + return this.#maybeThrow(syncRes, result, caller); + } + + get(_target: T, key: string | symbol, _receiver: unknown) { + this.#assertSafe(); + + // When `devalue` `stringify`ing `Proxy`, treat it as a `NativeTarget` + // (allows native proxies to be used as arguments, e.g. `DurableObjectId`s) + if (key === kAddress) return this.target[kAddress]; + if (key === kName) return this.target[kName]; + // Ignore all other symbol properties, or `then()`s. We should never return + // `Promise`s or thenables as native targets, and want to avoid the extra + // network call when `await`ing the proxy. 
+ if (typeof key === "symbol" || key === "then") return undefined; + + // See optimisation comments below for cases where this will be set + const maybeKnown = this.#known.get(key); + if (maybeKnown !== undefined) return maybeKnown; + + // Always perform a synchronous GET, if this returns a `Promise`, we'll + // do an asynchronous GET in the reviver + const syncRes = this.bridge.sync.fetch(this.bridge.url, { + method: "POST", + headers: { + [CoreHeaders.OP]: ProxyOps.GET, + [CoreHeaders.OP_TARGET]: this.#stringifiedTarget, + [CoreHeaders.OP_KEY]: key, + }, + }); + let result: unknown; + if (syncRes.headers.get(CoreHeaders.OP_RESULT_TYPE) === "Function") { + result = this.#createFunction(key); + } else { + result = this.#parseSyncResponse(syncRes, this.get); + } + + if ( + // Optimisation: if this property is a function, we assume constant + // prototypes of proxied objects, so it's never going to change + typeof result === "function" || + // Optimisation: if this property is a reference, we assume it's never + // going to change. This allows us to reuse the known cache of nested + // objects on multiple access (e.g. reusing `env["..."]` proxy if + // `getR2Bucket()` is called on the same bucket multiple times). + isNativeTarget(result) || + // Once a `ReadableStream` sent across proxy, we won't be able to read it + // again in the server, so reuse the same stream for future accesses + // (e.g. accessing `R2ObjectBody#body` multiple times) + result instanceof ReadableStream + ) { + this.#known.set(key, result); + } + return result; + } + + has(target: T, key: string | symbol) { + // Not technically correct, but a close enough approximation for `in` + return this.get(target, key, undefined) !== undefined; + } + + #createFunction(key: string) { + // Optimisation: if the function returns a `Promise`, we know it must be + // async (assuming all async functions always return `Promise`s). When + // combined with the optimisation to cache known methods, this allows us to + // perform a single async network call per invocation as opposed to three: + // 1) Synchronously get method + // 2) Synchronously call method returning `Promise` + // 3) Asynchronously resolve returned `Promise` + let knownAsync = false; + // `{ [key]: () => {} }[key]` evaluates to a function named `key` as opposed + // to `(anonymous)`. This is useful for debugging, as logging the function + // will include the name. + const func = { + [key]: (...args: unknown[]) => { + const result = this.#call(key, knownAsync, args, func); + if (!knownAsync && result instanceof Promise) knownAsync = true; + return result; + }, + }[key]; + return func; + } + #call( + key: string, + knownAsync: boolean, + args: unknown[], + caller: Function + ): unknown { + this.#assertSafe(); + + const targetName = this.target[kName]; + // See `isFetcherFetch()` comment for why this special + if (isFetcherFetch(targetName, key)) return this.#fetcherFetchCall(args); + + const stringified = stringifyWithStreams( + NODE_PLATFORM_IMPL, + args, + reducers, + /* allowUnbufferedStream */ true + ); + if ( + knownAsync || + // We assume every call with `ReadableStream`/`Blob` arguments is async. + // Note that you can't consume `ReadableStream`/`Blob` synchronously: if + // you tried a similar trick to `SynchronousFetcher`, blocking the main + // thread with `Atomics.wait()` would prevent chunks being read. This + // assumption doesn't hold for `Blob`s and `FormData#{append,set}()`, but + // we should never expose proxies for those APIs to users. 
+ stringified instanceof Promise || // (instanceof Promise if buffered `ReadableStream`/`Blob`s) + stringified.unbufferedStream !== undefined // (if at least one `ReadableStream` passed) + ) { + return this.#asyncCall(key, stringified); + } else { + const result = this.#syncCall(key, stringified.value, caller); + // See `isR2ObjectWriteHttpMetadata()` comment for why this special + if (isR2ObjectWriteHttpMetadata(targetName, key)) { + const arg = args[0]; + assert(arg instanceof Headers); + assert(result instanceof Headers); + for (const [key, value] of result) arg.set(key, value); + return; // void + } + return result; + } + } + #syncCall(key: string, stringifiedValue: string, caller: Function): unknown { + const argsSize = Buffer.byteLength(stringifiedValue).toString(); + const syncRes = this.bridge.sync.fetch(this.bridge.url, { + method: "POST", + headers: { + [CoreHeaders.OP]: ProxyOps.CALL, + [CoreHeaders.OP_TARGET]: this.#stringifiedTarget, + [CoreHeaders.OP_KEY]: key, + [CoreHeaders.OP_STRINGIFIED_SIZE]: argsSize, + "Content-Length": argsSize, + }, + body: stringifiedValue, + }); + return this.#parseSyncResponse(syncRes, caller); + } + async #asyncCall( + key: string, + stringifiedAwaitable: Awaitable> + ): Promise { + const stringified = await stringifiedAwaitable; + + let resPromise: Promise; + if (stringified.unbufferedStream === undefined) { + const argsSize = Buffer.byteLength(stringified.value).toString(); + resPromise = this.bridge.dispatchFetch(this.bridge.url, { + method: "POST", + headers: { + [CoreHeaders.OP]: ProxyOps.CALL, + [CoreHeaders.OP_TARGET]: this.#stringifiedTarget, + [CoreHeaders.OP_KEY]: key, + [CoreHeaders.OP_STRINGIFIED_SIZE]: argsSize, + "Content-Length": argsSize, + }, + body: stringified.value, + }); + } else { + const encodedArgs = Buffer.from(stringified.value); + const argsSize = encodedArgs.byteLength.toString(); + const body = prefixStream(encodedArgs, stringified.unbufferedStream); + resPromise = this.bridge.dispatchFetch(this.bridge.url, { + method: "POST", + headers: { + [CoreHeaders.OP]: ProxyOps.CALL, + [CoreHeaders.OP_TARGET]: this.#stringifiedTarget, + [CoreHeaders.OP_KEY]: key, + [CoreHeaders.OP_STRINGIFIED_SIZE]: argsSize, + }, + duplex: "half", + body, + }); + } + + return this.#parseAsyncResponse(resPromise); + } + #fetcherFetchCall(args: unknown[]) { + // @ts-expect-error `...args` isn't type-safe here, but `undici` should + // validate types at runtime, and throw appropriate errors + const request = new Request(...args); + // If adding new headers here, remember to `delete()` them in `ProxyServer` + // before calling `fetch()`. 
+ request.headers.set(CoreHeaders.OP, ProxyOps.CALL); + request.headers.set(CoreHeaders.OP_TARGET, this.#stringifiedTarget); + request.headers.set(CoreHeaders.OP_KEY, "fetch"); + return this.bridge.dispatchFetch(request); + } +} diff --git a/packages/miniflare/src/plugins/core/proxy/fetch-sync.ts b/packages/miniflare/src/plugins/core/proxy/fetch-sync.ts new file mode 100644 index 000000000..bdb0d012f --- /dev/null +++ b/packages/miniflare/src/plugins/core/proxy/fetch-sync.ts @@ -0,0 +1,144 @@ +import assert from "assert"; +import { ReadableStream } from "stream/web"; +import { MessageChannel, Worker, receiveMessageOnPort } from "worker_threads"; +import { Headers } from "../../../http"; +import { CoreHeaders } from "../../../workers"; +import { JsonErrorSchema, reviveError } from "../errors"; + +export const DECODER = new TextDecoder(); + +export interface SynchronousRequestInit { + method?: string; + headers?: Record; + // `body` cannot be a `ReadableStream`, as we're blocking the main thread, so + // chunks could never be read until after the response was received, leading + // to deadlock + body?: ArrayBuffer | NodeJS.ArrayBufferView | string | null; +} +export interface SynchronousResponse { + status: number; + headers: H; + // `ReadableStream` returned if `CoreHeaders.OP_RESULT_TYPE` header is + // `ReadableStream`. In that case, we'll return the `ReadableStream` directly. + body: ReadableStream | ArrayBuffer | null; +} + +type WorkerResponse = { id: number } & ( + | { response: SynchronousResponse> } + | { error: unknown } +); + +const WORKER_SCRIPT = /* javascript */ ` +const { workerData } = require("worker_threads"); +const { fetch } = require("undici"); + +// Not using parentPort here so we can call receiveMessageOnPort() in host +const { notifyHandle, port } = workerData; + +port.addEventListener("message", async (event) => { + const { id, method, url, headers, body } = event.data; + headers["${CoreHeaders.OP_SYNC}"] = "true"; + try { + // body cannot be a ReadableStream, so no need to specify duplex + const response = await fetch(url, { method, headers, body }); + const responseBody = response.headers.get("${CoreHeaders.OP_RESULT_TYPE}") === "ReadableStream" + ? response.body + : await response.arrayBuffer(); + const transferList = responseBody === null ? undefined : [responseBody]; + port.postMessage( + { + id, + response: { + status: response.status, + headers: Object.fromEntries(response.headers), + body: responseBody, + } + }, + transferList + ); + } catch (error) { + try { + port.postMessage({ id, error }); + } catch { + // If error failed to serialise, post simplified version + port.postMessage({ id, error: new Error(String(error)) }); + } + } + Atomics.store(notifyHandle, /* index */ 0, /* value */ 1); + Atomics.notify(notifyHandle, /* index */ 0); +}); +`; + +// Ideally we would just have a single, shared `unref()`ed `Worker`, and an +// exported `fetchSync()` method. However, if a `ReadableStream` is transferred +// from the worker, and not consumed, it will prevent the process from exiting. +// Since we'll pass some of these `ReadableStream`s directly to users (e.g. +// `R2ObjectBody#body`), we can't guarantee they'll all be consumed. Therefore, +// we create a new `SynchronousFetcher` instance per `Miniflare` instance, and +// clean it up on `Miniflare#dispose()`, allowing the process to exit cleanly. 
+export class SynchronousFetcher { + readonly #channel: MessageChannel; + readonly #notifyHandle: Int32Array; + #worker?: Worker; + #nextId = 0; + + constructor() { + this.#channel = new MessageChannel(); + this.#notifyHandle = new Int32Array(new SharedArrayBuffer(4)); + } + + #ensureWorker() { + if (this.#worker !== undefined) return; + this.#worker = new Worker(WORKER_SCRIPT, { + eval: true, + workerData: { + notifyHandle: this.#notifyHandle, + port: this.#channel.port2, + }, + transferList: [this.#channel.port2], + }); + } + + fetch(url: URL | string, init: SynchronousRequestInit): SynchronousResponse { + this.#ensureWorker(); + Atomics.store(this.#notifyHandle, /* index */ 0, /* value */ 0); + const id = this.#nextId++; + this.#channel.port1.postMessage({ + id, + method: init.method, + url: url.toString(), + headers: init.headers, + body: init.body, + }); + // If index 0 contains value 0, block until wake-up notification + Atomics.wait(this.#notifyHandle, /* index */ 0, /* value */ 0); + // Never yielded to the event loop here, and we're the only ones with access + // to port1, so know this message is for this request + const message: WorkerResponse | undefined = receiveMessageOnPort( + this.#channel.port1 + )?.message; + assert(message?.id === id); + if ("response" in message) { + const { status, headers: rawHeaders, body } = message.response; + const headers = new Headers(rawHeaders); + const stack = headers.get(CoreHeaders.ERROR_STACK); + if (status === 500 && stack !== null && body !== null) { + // `CoreHeaders.ERROR_STACK` header should never be set with + // `CoreHeaders.OP_RESULT_TYPE: ReadableStream` + assert(!(body instanceof ReadableStream)); + const caught = JsonErrorSchema.parse(JSON.parse(DECODER.decode(body))); + // No need to specify `workerSrcOpts` here assuming we only + // synchronously fetch from internal Miniflare code (e.g. 
proxy server) + throw reviveError([], caught); + } + // TODO(soon): add support for MINIFLARE_ASSERT_BODIES_CONSUMED here + return { status, headers, body }; + } else { + throw message.error; + } + } + + async dispose() { + await this.#worker?.terminate(); + } +} diff --git a/packages/miniflare/src/plugins/core/proxy/index.ts b/packages/miniflare/src/plugins/core/proxy/index.ts new file mode 100644 index 000000000..d2ec2302c --- /dev/null +++ b/packages/miniflare/src/plugins/core/proxy/index.ts @@ -0,0 +1,2 @@ +export * from "./client"; +export * from "./types"; diff --git a/packages/miniflare/src/plugins/core/proxy/types.ts b/packages/miniflare/src/plugins/core/proxy/types.ts new file mode 100644 index 000000000..bd8c6f51f --- /dev/null +++ b/packages/miniflare/src/plugins/core/proxy/types.ts @@ -0,0 +1,191 @@ +import { Blob } from "buffer"; +import { arrayBuffer } from "stream/consumers"; +import { ReadableStream } from "stream/web"; +import type { + AbortSignal as WorkerAbortSignal, + Blob as WorkerBlob, + File as WorkerFile, + Headers as WorkerHeaders, + ReadableStream as WorkerReadableStream, + Request as WorkerRequest, + RequestInit as WorkerRequestInit, + Response as WorkerResponse, +} from "@cloudflare/workers-types/experimental"; +import { File, Headers } from "undici"; +import { Request, RequestInit, Response } from "../../../http"; +import { PlatformImpl } from "../../../workers"; + +export const NODE_PLATFORM_IMPL: PlatformImpl = { + // Node's implementation of these classes don't quite match Workers', + // but they're close enough for us + Blob: Blob as unknown as typeof WorkerBlob, + File: File as unknown as typeof WorkerFile, + Headers: Headers as unknown as typeof WorkerHeaders, + Request: Request as unknown as typeof WorkerRequest, + Response: Response as unknown as typeof WorkerResponse, + + isReadableStream(value): value is ReadableStream { + return value instanceof ReadableStream; + }, + bufferReadableStream(stream) { + return arrayBuffer(stream); + }, + unbufferReadableStream(buffer) { + return new Blob([new Uint8Array(buffer)]).stream(); + }, +}; + +// Substitutes workers types with the corresponding Node implementations. +// prettier-ignore +export type ReplaceWorkersTypes = + T extends WorkerRequest ? Request : + T extends WorkerResponse ? Response : + T extends WorkerReadableStream ? ReadableStream : + Required extends Required ? RequestInit : + T extends WorkerHeaders ? Headers : + T extends WorkerBlob ? Blob : + T extends WorkerAbortSignal ? AbortSignal : + T extends Promise ? Promise> : + T extends (...args: infer P) => infer R ? (...args: ReplaceWorkersTypes

) => ReplaceWorkersTypes : + T extends object ? { [K in keyof T]: OverloadReplaceWorkersTypes } : + T; + +export type OverloadReplaceWorkersTypes = T extends (...args: any[]) => any + ? UnionToIntersection>> + : ReplaceWorkersTypes; + +export type UnionToIntersection = ( + U extends any ? (k: U) => void : never +) extends (k: infer I) => void + ? I + : never; + +export type OverloadUnion2 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; +} + ? ((...args: P1) => R1) | ((...args: P2) => R2) + : T; + +export type OverloadUnion3 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; + (...args: infer P3): infer R3; +} + ? ((...args: P1) => R1) | ((...args: P2) => R2) | ((...args: P3) => R3) + : OverloadUnion2; + +export type OverloadUnion4 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; + (...args: infer P3): infer R3; + (...args: infer P4): infer R4; +} + ? + | ((...args: P1) => R1) + | ((...args: P2) => R2) + | ((...args: P3) => R3) + | ((...args: P4) => R4) + : OverloadUnion3; + +export type OverloadUnion5 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; + (...args: infer P3): infer R3; + (...args: infer P4): infer R4; + (...args: infer P5): infer R5; +} + ? + | ((...args: P1) => R1) + | ((...args: P2) => R2) + | ((...args: P3) => R3) + | ((...args: P4) => R4) + | ((...args: P5) => R5) + : OverloadUnion4; + +export type OverloadUnion6 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; + (...args: infer P3): infer R3; + (...args: infer P4): infer R4; + (...args: infer P5): infer R5; + (...args: infer P6): infer R6; +} + ? + | ((...args: P1) => R1) + | ((...args: P2) => R2) + | ((...args: P3) => R3) + | ((...args: P4) => R4) + | ((...args: P5) => R5) + | ((...args: P6) => R6) + : OverloadUnion5; + +export type OverloadUnion7 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; + (...args: infer P3): infer R3; + (...args: infer P4): infer R4; + (...args: infer P5): infer R5; + (...args: infer P6): infer R6; + (...args: infer P7): infer R7; +} + ? + | ((...args: P1) => R1) + | ((...args: P2) => R2) + | ((...args: P3) => R3) + | ((...args: P4) => R4) + | ((...args: P5) => R5) + | ((...args: P6) => R6) + | ((...args: P7) => R7) + : OverloadUnion6; + +export type OverloadUnion8 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; + (...args: infer P3): infer R3; + (...args: infer P4): infer R4; + (...args: infer P5): infer R5; + (...args: infer P6): infer R6; + (...args: infer P7): infer R7; + (...args: infer P8): infer R8; +} + ? + | ((...args: P1) => R1) + | ((...args: P2) => R2) + | ((...args: P3) => R3) + | ((...args: P4) => R4) + | ((...args: P5) => R5) + | ((...args: P6) => R6) + | ((...args: P7) => R7) + | ((...args: P8) => R8) + : OverloadUnion7; + +// `KVNamespace#{get,getWithMetadata}()` each have 9 overloads :D +export type OverloadUnion9 = T extends { + (...args: infer P1): infer R1; + (...args: infer P2): infer R2; + (...args: infer P3): infer R3; + (...args: infer P4): infer R4; + (...args: infer P5): infer R5; + (...args: infer P6): infer R6; + (...args: infer P7): infer R7; + (...args: infer P8): infer R8; + (...args: infer P9): infer R9; +} + ? 
+
+export type OverloadUnion2<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+}
+  ? ((...args: P1) => R1) | ((...args: P2) => R2)
+  : T;
+
+export type OverloadUnion3<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+  (...args: infer P3): infer R3;
+}
+  ? ((...args: P1) => R1) | ((...args: P2) => R2) | ((...args: P3) => R3)
+  : OverloadUnion2<T>;
+
+export type OverloadUnion4<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+  (...args: infer P3): infer R3;
+  (...args: infer P4): infer R4;
+}
+  ?
+      | ((...args: P1) => R1)
+      | ((...args: P2) => R2)
+      | ((...args: P3) => R3)
+      | ((...args: P4) => R4)
+  : OverloadUnion3<T>;
+
+export type OverloadUnion5<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+  (...args: infer P3): infer R3;
+  (...args: infer P4): infer R4;
+  (...args: infer P5): infer R5;
+}
+  ?
+      | ((...args: P1) => R1)
+      | ((...args: P2) => R2)
+      | ((...args: P3) => R3)
+      | ((...args: P4) => R4)
+      | ((...args: P5) => R5)
+  : OverloadUnion4<T>;
+
+export type OverloadUnion6<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+  (...args: infer P3): infer R3;
+  (...args: infer P4): infer R4;
+  (...args: infer P5): infer R5;
+  (...args: infer P6): infer R6;
+}
+  ?
+      | ((...args: P1) => R1)
+      | ((...args: P2) => R2)
+      | ((...args: P3) => R3)
+      | ((...args: P4) => R4)
+      | ((...args: P5) => R5)
+      | ((...args: P6) => R6)
+  : OverloadUnion5<T>;
+
+export type OverloadUnion7<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+  (...args: infer P3): infer R3;
+  (...args: infer P4): infer R4;
+  (...args: infer P5): infer R5;
+  (...args: infer P6): infer R6;
+  (...args: infer P7): infer R7;
+}
+  ?
+      | ((...args: P1) => R1)
+      | ((...args: P2) => R2)
+      | ((...args: P3) => R3)
+      | ((...args: P4) => R4)
+      | ((...args: P5) => R5)
+      | ((...args: P6) => R6)
+      | ((...args: P7) => R7)
+  : OverloadUnion6<T>;
+
+export type OverloadUnion8<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+  (...args: infer P3): infer R3;
+  (...args: infer P4): infer R4;
+  (...args: infer P5): infer R5;
+  (...args: infer P6): infer R6;
+  (...args: infer P7): infer R7;
+  (...args: infer P8): infer R8;
+}
+  ?
+      | ((...args: P1) => R1)
+      | ((...args: P2) => R2)
+      | ((...args: P3) => R3)
+      | ((...args: P4) => R4)
+      | ((...args: P5) => R5)
+      | ((...args: P6) => R6)
+      | ((...args: P7) => R7)
+      | ((...args: P8) => R8)
+  : OverloadUnion7<T>;
+
+// `KVNamespace#{get,getWithMetadata}()` each have 9 overloads :D
+export type OverloadUnion9<T> = T extends {
+  (...args: infer P1): infer R1;
+  (...args: infer P2): infer R2;
+  (...args: infer P3): infer R3;
+  (...args: infer P4): infer R4;
+  (...args: infer P5): infer R5;
+  (...args: infer P6): infer R6;
+  (...args: infer P7): infer R7;
+  (...args: infer P8): infer R8;
+  (...args: infer P9): infer R9;
+}
+  ?
+      | ((...args: P1) => R1)
+      | ((...args: P2) => R2)
+      | ((...args: P3) => R3)
+      | ((...args: P4) => R4)
+      | ((...args: P5) => R5)
+      | ((...args: P6) => R6)
+      | ((...args: P7) => R7)
+      | ((...args: P8) => R8)
+      | ((...args: P9) => R9)
+  : OverloadUnion8<T>;
+
+export type OverloadUnion<T extends (...args: any[]) => any> =
+  // Functions with no parameters pass the `extends` checks in the
+  // `OverloadUnionN` types with `(...args: unknown[]) => unknown` for the
+  // other overloads. Therefore, filter them out early.
+  Parameters<T> extends [] ? T : OverloadUnion9<T>;
diff --git a/packages/miniflare/src/plugins/d1/index.ts b/packages/miniflare/src/plugins/d1/index.ts
index 280776df4..329b0b494 100644
--- a/packages/miniflare/src/plugins/d1/index.ts
+++ b/packages/miniflare/src/plugins/d1/index.ts
@@ -3,7 +3,9 @@ import { Service, Worker_Binding } from "../../runtime";
 import {
   PersistenceSchema,
   Plugin,
+  kProxyNodeBinding,
   namespaceEntries,
+  namespaceKeys,
   pluginNamespacePersistWorker,
 } from "../shared";
 import { D1Gateway } from "./gateway";
@@ -52,6 +54,12 @@ export const D1_PLUGIN: Plugin<
       return { name, ...binding };
     });
   },
+  getNodeBindings(options) {
+    const databases = namespaceKeys(options.d1Databases);
+    return Object.fromEntries(
+      databases.map((name) => [name, kProxyNodeBinding])
+    );
+  },
   getServices({ options, sharedOptions }) {
     const persist = sharedOptions.d1Persist;
     const databases = namespaceEntries(options.d1Databases);
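Why the `OverloadUnionN` ladder above is needed: conditional types only infer from the *last* overload of an overloaded signature, so each arity has to be matched explicitly and split into a union before mapping. A minimal type-level illustration of the problem (hypothetical signature):

```ts
type Overloaded = {
  (key: string): Promise<string | null>;
  (key: string, type: "json"): Promise<unknown>;
};

// Naive inference only sees the last overload:
type Naive = Overloaded extends (...args: infer P) => infer R
  ? (...args: P) => R
  : never;
// => (key: string, type: "json") => Promise<unknown>
```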
diff --git a/packages/miniflare/src/plugins/do/index.ts b/packages/miniflare/src/plugins/do/index.ts
index 08fb00306..0d71c8eac 100644
--- a/packages/miniflare/src/plugins/do/index.ts
+++ b/packages/miniflare/src/plugins/do/index.ts
@@ -10,6 +10,7 @@ import {
   Persistence,
   PersistenceSchema,
   Plugin,
+  kProxyNodeBinding,
   maybeParseURL,
 } from "../shared";
@@ -111,6 +112,10 @@ export const DURABLE_OBJECTS_PLUGIN: Plugin<
       }
     );
   },
+  getNodeBindings(options) {
+    const objects = Object.keys(options.durableObjects ?? {});
+    return Object.fromEntries(objects.map((name) => [name, kProxyNodeBinding]));
+  },
   async getServices({ sharedOptions, tmpPath, durableObjectClassNames }) {
     // Check if we even have any Durable Object bindings, if we don't, we can
     // skip creating the storage directory
diff --git a/packages/miniflare/src/plugins/index.ts b/packages/miniflare/src/plugins/index.ts
index e08b2d523..ec0545d13 100644
--- a/packages/miniflare/src/plugins/index.ts
+++ b/packages/miniflare/src/plugins/index.ts
@@ -89,6 +89,7 @@ export {
   ModuleRuleSchema,
   ModuleDefinitionSchema,
   SourceOptionsSchema,
+  ProxyClient,
 } from "./core";
 export type {
   ModuleRuleType,
@@ -97,6 +98,7 @@ export type {
   GlobalServicesOptions,
   SourceOptions,
 } from "./core";
+export type * from "./core/proxy/types";
 export * from "./d1";
 export * from "./do";
 export * from "./kv";
diff --git a/packages/miniflare/src/plugins/kv/index.ts b/packages/miniflare/src/plugins/kv/index.ts
index f7e275aaa..78bc3f760 100644
--- a/packages/miniflare/src/plugins/kv/index.ts
+++ b/packages/miniflare/src/plugins/kv/index.ts
@@ -3,13 +3,20 @@ import { Service, Worker_Binding } from "../../runtime";
 import {
   PersistenceSchema,
   Plugin,
+  kProxyNodeBinding,
   namespaceEntries,
+  namespaceKeys,
   pluginNamespacePersistWorker,
 } from "../shared";
 import { KV_PLUGIN_NAME } from "./constants";
 import { KVGateway } from "./gateway";
 import { KVRouter } from "./router";
-import { SitesOptions, getSitesBindings, getSitesService } from "./sites";
+import {
+  SitesOptions,
+  getSitesBindings,
+  getSitesNodeBindings,
+  getSitesService,
+} from "./sites";
 
 export const KVOptionsSchema = z.object({
   kvNamespaces: z.union([z.record(z.string()), z.string().array()]).optional(),
@@ -53,6 +60,16 @@ export const KV_PLUGIN: Plugin<
     return bindings;
   },
+  async getNodeBindings(options) {
+    const namespaces = namespaceKeys(options.kvNamespaces);
+    const bindings = Object.fromEntries(
+      namespaces.map((name) => [name, kProxyNodeBinding])
+    );
+    if (isWorkersSitesEnabled(options)) {
+      Object.assign(bindings, await getSitesNodeBindings(options));
+    }
+    return bindings;
+  },
   getServices({ options, sharedOptions }) {
     const persist = sharedOptions.kvPersist;
     const namespaces = namespaceEntries(options.kvNamespaces);
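Every plugin's `getNodeBindings()` follows the same shape: each configured binding name maps to the `kProxyNodeBinding` sentinel (defined in `plugins/shared` later in this diff), which Miniflare then substitutes with a live proxy into `workerd`. A self-contained sketch of the pattern, with the sentinel stubbed out:

```ts
// Sketch only: the real sentinel is `kProxyNodeBinding` from plugins/shared.
const kProxyNodeBindingDemo = Symbol("kProxyNodeBinding");

function getNodeBindingsDemo(
  namespaces?: string[] | Record<string, string>
): Record<string, unknown> {
  // Mirrors `namespaceKeys()`: accept either an array of names or a record
  const names = Array.isArray(namespaces)
    ? namespaces
    : Object.keys(namespaces ?? {});
  return Object.fromEntries(names.map((name) => [name, kProxyNodeBindingDemo]));
}

// getNodeBindingsDemo(["KV", "CACHE"]) => { KV: Symbol(...), CACHE: Symbol(...) }
```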
diff --git a/packages/miniflare/src/plugins/kv/sites.ts b/packages/miniflare/src/plugins/kv/sites.ts
index a3ea355b9..e6d82a1dd 100644
--- a/packages/miniflare/src/plugins/kv/sites.ts
+++ b/packages/miniflare/src/plugins/kv/sites.ts
@@ -20,6 +20,7 @@ import {
   HEADER_PERSIST,
   Persistence,
   WORKER_BINDING_SERVICE_LOOPBACK,
+  kProxyNodeBinding,
 } from "../shared";
 import {
   HEADER_SITES,
@@ -179,6 +180,20 @@ async function handleRequest(request) {
 addEventListener("fetch", (event) => event.respondWith(handleRequest(event.request)));
 `;
 
+async function buildStaticContentManifest(
+  sitePath: string,
+  siteRegExps: SiteMatcherRegExps
+) {
+  // Build __STATIC_CONTENT_MANIFEST contents
+  const staticContentManifest: Record<string, string> = {};
+  for await (const key of listKeysInDirectory(sitePath)) {
+    if (testSiteRegExps(siteRegExps, key)) {
+      staticContentManifest[key] = encodeSitesKey(key);
+    }
+  }
+  return staticContentManifest;
+}
+
 export async function getSitesBindings(
   options: SitesOptions
 ): Promise<Worker_Binding[]> {
@@ -189,14 +204,10 @@ export async function getSitesBindings(
   };
   sitesRegExpsCache.set(options, siteRegExps);
 
-  // Build __STATIC_CONTENT_MANIFEST contents
-  const staticContentManifest: Record<string, string> = {};
-  for await (const key of listKeysInDirectory(options.sitePath)) {
-    if (testSiteRegExps(siteRegExps, key)) {
-      staticContentManifest[key] = encodeSitesKey(key);
-    }
-  }
-  const __STATIC_CONTENT_MANIFEST = JSON.stringify(staticContentManifest);
+  const __STATIC_CONTENT_MANIFEST = await buildStaticContentManifest(
+    options.sitePath,
+    siteRegExps
+  );
 
   return [
     {
@@ -205,10 +216,24 @@
     },
     {
       name: BINDING_JSON_SITE_MANIFEST,
-      json: __STATIC_CONTENT_MANIFEST,
+      json: JSON.stringify(__STATIC_CONTENT_MANIFEST),
     },
   ];
 }
+export async function getSitesNodeBindings(
+  options: SitesOptions
+): Promise<Record<string, unknown>> {
+  const siteRegExps = sitesRegExpsCache.get(options);
+  assert(siteRegExps !== undefined);
+  const __STATIC_CONTENT_MANIFEST = await buildStaticContentManifest(
+    options.sitePath,
+    siteRegExps
+  );
+  return {
+    [BINDING_KV_NAMESPACE_SITE]: kProxyNodeBinding,
+    [BINDING_JSON_SITE_MANIFEST]: __STATIC_CONTENT_MANIFEST,
+  };
+}
 
 export function maybeGetSitesManifestModule(
   bindings: Worker_Binding[]
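Note the manifest is now exposed twice: `workerd` receives it as a JSON *string* binding, while Node callers of `getBindings()` receive the parsed object directly. A sketch of the difference (hypothetical manifest contents):

```ts
// Hypothetical manifest for a site with a single asset:
const manifest: Record<string, string> = { "index.html": "index.abc123.html" };

const workerBinding = JSON.stringify(manifest); // string, for the `json` binding
const nodeBinding = manifest; // plain object, returned from getBindings()
```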
diff --git a/packages/miniflare/src/plugins/queues/index.ts b/packages/miniflare/src/plugins/queues/index.ts
index adc5ad1c4..12d5d134e 100644
--- a/packages/miniflare/src/plugins/queues/index.ts
+++ b/packages/miniflare/src/plugins/queues/index.ts
@@ -6,7 +6,9 @@ import { maybeApply } from "../../shared";
 import {
   Plugin,
   QueueConsumerOptionsSchema,
+  kProxyNodeBinding,
   namespaceEntries,
+  namespaceKeys,
   pluginNamespacePersistWorker,
 } from "../shared";
 import { QueuesError } from "./errors";
@@ -68,10 +70,14 @@ export const QUEUES_PLUGIN: Plugin<
       queue: { name: `${QUEUES_PLUGIN_NAME}:${id}` },
     }));
   },
+  getNodeBindings(options) {
+    const queues = namespaceKeys(options.queueProducers);
+    return Object.fromEntries(queues.map((name) => [name, kProxyNodeBinding]));
+  },
   async getServices({ options, queueConsumers: allQueueConsumers }) {
-    const buckets = namespaceEntries(options.queueProducers);
-    if (buckets.length === 0) return [];
-    return buckets.map(([_, id]) => {
+    const queues = namespaceEntries(options.queueProducers);
+    if (queues.length === 0) return [];
+    return queues.map(([_, id]) => {
       // Abusing persistence to store queue consumer. We don't support
       // persisting queued data yet, but we are essentially persisting messages
       // to a consumer. We'll unwrap this in the router as usual. Note we're
diff --git a/packages/miniflare/src/plugins/r2/errors.ts b/packages/miniflare/src/plugins/r2/errors.ts
index 71bfb19c8..69889f55e 100644
--- a/packages/miniflare/src/plugins/r2/errors.ts
+++ b/packages/miniflare/src/plugins/r2/errors.ts
@@ -1,7 +1,7 @@
 import { Response } from "../../http";
 import { HttpError } from "../../shared";
 import { CfHeader } from "../shared/constants";
-import { R2Object } from "./r2Object";
+import { InternalR2Object } from "./r2Object";
 
 enum Status {
   BadRequest = 400,
@@ -28,7 +28,7 @@ enum CfCode {
 }
 
 export class R2Error extends HttpError {
-  object?: R2Object;
+  object?: InternalR2Object;
 
   constructor(code: number, message: string, readonly v4Code: number) {
     super(code, message);
@@ -69,7 +69,7 @@ export class R2Error extends HttpError {
     return this;
   }
 
-  attach(object: R2Object) {
+  attach(object: InternalR2Object) {
     this.object = object;
     return this;
   }
diff --git a/packages/miniflare/src/plugins/r2/gateway.ts b/packages/miniflare/src/plugins/r2/gateway.ts
index 4f03fa21c..89f5a8fdb 100644
--- a/packages/miniflare/src/plugins/r2/gateway.ts
+++ b/packages/miniflare/src/plugins/r2/gateway.ts
@@ -27,19 +27,23 @@ import {
   NoSuchUpload,
   PreconditionFailed,
 } from "./errors";
-import { R2Object, R2ObjectBody, R2Objects } from "./r2Object";
 import {
+  InternalR2Object,
+  InternalR2ObjectBody,
+  InternalR2Objects,
+} from "./r2Object";
+import {
+  InternalR2CreateMultipartUploadOptions,
+  InternalR2GetOptions,
+  InternalR2ListOptions,
+  InternalR2PutOptions,
   MultipartPartRow,
   MultipartUploadRow,
   MultipartUploadState,
   ObjectRow,
   R2Conditional,
-  R2CreateMultipartUploadOptions,
   R2CreateMultipartUploadResponse,
-  R2GetOptions,
-  R2ListOptions,
   R2PublishedPart,
-  R2PutOptions,
   R2Range,
   R2UploadPartResponse,
   SQL_SCHEMA,
@@ -641,20 +645,20 @@ export class R2Gateway {
     return identity.readable;
   }
 
-  async head(key: string): Promise<R2Object> {
+  async head(key: string): Promise<InternalR2Object> {
     validate.key(key);
 
     const row = this.#stmts.getByKey.get({ key });
     if (row === undefined) throw new NoSuchKey();
 
     const range: R2Range = { offset: 0, length: row.size };
-    return new R2Object(row, range);
+    return new InternalR2Object(row, range);
   }
 
   async get(
     key: string,
-    options: R2GetOptions = {}
-  ): Promise<R2ObjectBody | R2Object> {
+    options: InternalR2GetOptions = {}
+  ): Promise<InternalR2ObjectBody | InternalR2Object> {
     validate.key(key);
 
     // Try to get this key, including multipart parts if it's multipart
@@ -668,7 +672,7 @@
       validate.condition(row, options.onlyIf);
     } catch (e) {
       if (e instanceof PreconditionFailed) {
-        e.attach(new R2Object(row, defaultR2Range));
+        e.attach(new InternalR2Object(row, defaultR2Range));
       }
       throw e;
     }
@@ -700,15 +704,15 @@
       if (value === null) throw new NoSuchKey();
     }
 
-    return new R2ObjectBody(row, value, r2Range);
+    return new InternalR2ObjectBody(row, value, r2Range);
   }
 
   async put(
     key: string,
     value: ReadableStream,
     valueSize: number,
-    options: R2PutOptions
-  ): Promise<R2Object> {
+    options: InternalR2PutOptions
+  ): Promise<InternalR2Object> {
     // Store value in the blob store, computing required digests as we go
     // (this means we don't have to buffer the entire stream to compute them)
     const algorithms: (keyof R2Hashes)[] = [];
@@ -751,7 +755,7 @@
     if (oldBlobIds !== undefined) {
       for (const blobId of oldBlobIds) this.#backgroundDelete(blobId);
     }
-    return new R2Object(row);
+    return new InternalR2Object(row);
   }
 
   async delete(keys: string | string[]) {
@@ -768,7 +772,7 @@
     return this.#stmts.listHttpCustomMetadataWithoutDelimiter;
   }
 
-  async list(opts: R2ListOptions = {}): Promise<R2Objects> {
+  async list(opts: InternalR2ListOptions = {}): Promise<InternalR2Objects> {
     const prefix = opts.prefix ?? "";
 
     let limit = opts.limit ?? MAX_LIST_KEYS;
@@ -793,7 +797,7 @@
       if (row.custom_metadata === undefined || excludeCustom) {
         row.custom_metadata = "{}";
       }
-      return new R2Object(row as Omit<ObjectRow, "blob_id">);
+      return new InternalR2Object(row as Omit<ObjectRow, "blob_id">);
     };
 
     // If cursor set, and lexicographically after `startAfter`, use that for
@@ -819,7 +823,7 @@
       limit: limit + 1,
     };
 
-    let objects: R2Object[];
+    let objects: InternalR2Object[];
     const delimitedPrefixes: string[] = [];
     let nextCursorStartAfter: string | undefined;
 
@@ -872,7 +876,7 @@
 
   async createMultipartUpload(
     key: string,
-    opts: R2CreateMultipartUploadOptions
+    opts: InternalR2CreateMultipartUploadOptions
   ): Promise<R2CreateMultipartUploadResponse> {
     validate.key(key);
 
@@ -932,7 +936,7 @@
     key: string,
     uploadId: string,
     parts: R2PublishedPart[]
-  ): Promise<R2Object> {
+  ): Promise<InternalR2Object> {
     validate.key(key);
     const { newRow, oldBlobIds } = this.#stmts.completeMultipartUpload(
       key,
@@ -940,7 +944,7 @@
       parts
     );
     for (const blobId of oldBlobIds) this.#backgroundDelete(blobId);
-    return new R2Object(newRow);
+    return new InternalR2Object(newRow);
   }
 
   async abortMultipartUpload(key: string, uploadId: string): Promise<void> {
diff --git a/packages/miniflare/src/plugins/r2/index.ts b/packages/miniflare/src/plugins/r2/index.ts
index 8950e2ed9..570a07099 100644
--- a/packages/miniflare/src/plugins/r2/index.ts
+++ b/packages/miniflare/src/plugins/r2/index.ts
@@ -3,7 +3,9 @@ import { Service, Worker_Binding } from "../../runtime";
 import {
   PersistenceSchema,
   Plugin,
+  kProxyNodeBinding,
   namespaceEntries,
+  namespaceKeys,
   pluginNamespacePersistWorker,
 } from "../shared";
 import { R2Gateway } from "./gateway";
@@ -33,6 +35,10 @@ export const R2_PLUGIN: Plugin<
       r2Bucket: { name: `${R2_PLUGIN_NAME}:${id}` },
     }));
   },
+  getNodeBindings(options) {
+    const buckets = namespaceKeys(options.r2Buckets);
+    return Object.fromEntries(buckets.map((name) => [name, kProxyNodeBinding]));
+  },
   getServices({ options, sharedOptions }) {
     const persist = sharedOptions.r2Persist;
     const buckets = namespaceEntries(options.r2Buckets);
diff --git a/packages/miniflare/src/plugins/r2/r2Object.ts b/packages/miniflare/src/plugins/r2/r2Object.ts
index 886dcd306..fbc59c681 100644
--- a/packages/miniflare/src/plugins/r2/r2Object.ts
+++ b/packages/miniflare/src/plugins/r2/r2Object.ts
@@ -9,7 +9,7 @@ export interface EncodedMetadata {
   value: ReadableStream;
 }
 
-export class R2Object {
+export class InternalR2Object {
   readonly key: string;
   readonly version: string;
   readonly size: number;
@@ -69,7 +69,7 @@ export class R2Object {
     return { metadataSize: blob.size, value: blob.stream() };
   }
 
-  static encodeMultiple(objects: R2Objects): EncodedMetadata {
+  static encodeMultiple(objects: InternalR2Objects): EncodedMetadata {
     const json = JSON.stringify({
       ...objects,
       objects: objects.objects.map((o) => o.#rawProperties()),
@@ -79,7 +79,7 @@
   }
 }
 
-export class R2ObjectBody extends R2Object {
+export class InternalR2ObjectBody extends InternalR2Object {
   constructor(
     metadata: Omit,
     readonly body: ReadableStream,
@@ -101,9 +101,9 @@
   }
 }
 
-export interface R2Objects {
+export interface InternalR2Objects {
   // An array of objects matching the list request.
- objects: R2Object[]; + objects: InternalR2Object[]; // If true, indicates there are more results to be retrieved for the current // list request. truncated: boolean; diff --git a/packages/miniflare/src/plugins/r2/router.ts b/packages/miniflare/src/plugins/r2/router.ts index ed6cbd2ab..74f70d196 100644 --- a/packages/miniflare/src/plugins/r2/router.ts +++ b/packages/miniflare/src/plugins/r2/router.ts @@ -1,6 +1,7 @@ import assert from "assert"; -import { ReadableStream, TransformStream } from "stream/web"; +import { ReadableStream } from "stream/web"; import { Request, Response } from "../../http"; +import { readPrefix } from "../../shared"; import { CfHeader, GET, @@ -11,7 +12,12 @@ import { } from "../shared"; import { InternalError, InvalidMetadata } from "./errors"; import { R2Gateway } from "./gateway"; -import { EncodedMetadata, R2Object, R2ObjectBody, R2Objects } from "./r2Object"; +import { + EncodedMetadata, + InternalR2Object, + InternalR2ObjectBody, + InternalR2Objects, +} from "./r2Object"; import { R2BindingRequestSchema } from "./schemas"; async function decodeMetadata(req: Request) { @@ -21,44 +27,11 @@ async function decodeMetadata(req: Request) { assert(req.body !== null); const body = req.body as ReadableStream; - // Read just metadata from body stream (NOTE: we can't use a `TransformStream` - // and buffer the first N chunks as we need this metadata to determine what - // to do with the rest of the body. We have to *pull* the data as opposed to - // passively transforming it as it's piped somewhere else. If `body` were - // a byte stream, we could use BYOB reads to read just enough. Even better, if - // this were running in the Workers runtime, we could use `readAtLeast()` to - // read everything at once.) - const chunks: Uint8Array[] = []; - let chunksLength = 0; - for await (const chunk of body.values({ preventCancel: true })) { - chunks.push(chunk); - chunksLength += chunk.byteLength; - // Once we've read enough bytes, stop - if (chunksLength >= metadataSize) break; - } - // If we read the entire stream without enough bytes for metadata, throw - if (chunksLength < metadataSize) throw new InvalidMetadata(); - const atLeastMetadata = Buffer.concat(chunks, chunksLength); - const metadataJson = atLeastMetadata.subarray(0, metadataSize).toString(); + // Read just metadata from body stream + const [metadataBuffer, value] = await readPrefix(body, metadataSize); + const metadataJson = metadataBuffer.toString(); const metadata = R2BindingRequestSchema.parse(JSON.parse(metadataJson)); - let value = body; - // If we read some value when reading metadata (quite likely), create a new - // stream, write the bit we read, then write the rest of the body stream - if (chunksLength > metadataSize) { - const identity = new TransformStream(); - const writer = identity.writable.getWriter(); - // The promise returned by `writer.write()` will only resolve once the chunk - // is read, which won't be until after this function returns, so we can't - // use `await` here - void writer.write(atLeastMetadata.subarray(metadataSize)).then(() => { - // Release the writer without closing the stream - writer.releaseLock(); - return body.pipeTo(identity.writable); - }); - value = identity.readable; - } - return { metadata, metadataSize, value }; } function decodeHeaderMetadata(req: Request) { @@ -67,12 +40,14 @@ function decodeHeaderMetadata(req: Request) { return R2BindingRequestSchema.parse(JSON.parse(header)); } -function encodeResult(result: R2Object | R2ObjectBody | R2Objects) { +function 
encodeResult(
+  result: InternalR2Object | InternalR2ObjectBody | InternalR2Objects
+) {
   let encoded: EncodedMetadata;
-  if (result instanceof R2Object) {
+  if (result instanceof InternalR2Object) {
     encoded = result.encode();
   } else {
-    encoded = R2Object.encodeMultiple(result);
+    encoded = InternalR2Object.encodeMultiple(result);
   }
 
   return new Response(encoded.value, {
@@ -105,7 +80,7 @@ export class R2Router extends Router<R2Gateway> {
     const bucket = decodeURIComponent(params.bucket);
     const gateway = this.gatewayFactory.get(bucket, persist);
 
-    let result: R2Object | R2ObjectBody | R2Objects;
+    let result: InternalR2Object | InternalR2ObjectBody | InternalR2Objects;
     if (metadata.method === "head") {
       result = await gateway.head(metadata.object);
     } else if (metadata.method === "get") {
diff --git a/packages/miniflare/src/plugins/r2/schemas.ts b/packages/miniflare/src/plugins/r2/schemas.ts
index 78f9b9534..3166945a4 100644
--- a/packages/miniflare/src/plugins/r2/schemas.ts
+++ b/packages/miniflare/src/plugins/r2/schemas.ts
@@ -267,10 +267,16 @@ export const R2BindingRequestSchema = z.union([
 ]);
 
 export type OmitRequest<T> = Omit<T, "method">;
-export type R2GetOptions = OmitRequest<z.infer<typeof R2GetRequestSchema>>;
-export type R2PutOptions = OmitRequest<z.infer<typeof R2PutRequestSchema>>;
-export type R2ListOptions = OmitRequest<z.infer<typeof R2ListRequestSchema>>;
-export type R2CreateMultipartUploadOptions = OmitRequest<
+export type InternalR2GetOptions = OmitRequest<
+  z.infer<typeof R2GetRequestSchema>
+>;
+export type InternalR2PutOptions = OmitRequest<
+  z.infer<typeof R2PutRequestSchema>
+>;
+export type InternalR2ListOptions = OmitRequest<
+  z.infer<typeof R2ListRequestSchema>
+>;
+export type InternalR2CreateMultipartUploadOptions = OmitRequest<
   z.infer<typeof R2CreateMultipartUploadRequestSchema>
 >;
diff --git a/packages/miniflare/src/plugins/r2/validator.ts b/packages/miniflare/src/plugins/r2/validator.ts
index fcffcb1ca..31578e91c 100644
--- a/packages/miniflare/src/plugins/r2/validator.ts
+++ b/packages/miniflare/src/plugins/r2/validator.ts
@@ -11,8 +11,8 @@ import {
   MetadataTooLarge,
   PreconditionFailed,
 } from "./errors";
-import { R2Object } from "./r2Object";
-import { R2Conditional, R2Etag, R2GetOptions } from "./schemas";
+import { InternalR2Object } from "./r2Object";
+import { InternalR2GetOptions, R2Conditional, R2Etag } from "./schemas";
 
 export const MAX_LIST_KEYS = 1_000;
 const MAX_KEY_SIZE = 1024;
@@ -46,7 +46,7 @@ function includesEtag(
 /** @internal */
 export function _testR2Conditional(
   cond: R2Conditional,
-  metadata?: Pick<R2Object, "etag" | "uploaded">
+  metadata?: Pick<InternalR2Object, "etag" | "uploaded">
 ): boolean {
   // Adapted from internal R2 gateway implementation.
   // See also https://datatracker.ietf.org/doc/html/rfc7232#section-6.
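For orientation, a much-simplified sketch of the precondition logic `_testR2Conditional` implements (illustrative only — the real code also handles wildcard etags, etag arrays, and second-resolution date comparisons per RFC 7232):

```ts
// Assumed, simplified shapes for illustration:
interface CondDemo {
  etagMatches?: string;
  uploadedBefore?: Date;
}

function testCondDemo(
  cond: CondDemo,
  meta?: { etag: string; uploaded: number }
): boolean {
  // Missing object: only unconditional requests pass
  if (meta === undefined) return cond.etagMatches === undefined;
  if (cond.etagMatches !== undefined && cond.etagMatches !== meta.etag) {
    return false;
  }
  if (
    cond.uploadedBefore !== undefined &&
    meta.uploaded >= cond.uploadedBefore.getTime()
  ) {
    return false;
  }
  return true;
}
```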
@@ -120,7 +120,7 @@ export class Validator {
   }
 
   condition(
-    meta?: Pick<R2Object, "etag" | "uploaded">,
+    meta?: Pick<InternalR2Object, "etag" | "uploaded">,
     onlyIf?: R2Conditional
   ): Validator {
     if (onlyIf !== undefined && !_testR2Conditional(onlyIf, meta)) {
@@ -130,7 +130,7 @@
   }
 
   range(
-    options: Pick<R2GetOptions, "rangeHeader" | "range">,
+    options: Pick<InternalR2GetOptions, "rangeHeader" | "range">,
     size: number
   ): InclusiveRange | undefined {
     if (options.rangeHeader !== undefined) {
diff --git a/packages/miniflare/src/plugins/shared/index.ts b/packages/miniflare/src/plugins/shared/index.ts
index 3ba476eb1..446fdacce 100644
--- a/packages/miniflare/src/plugins/shared/index.ts
+++ b/packages/miniflare/src/plugins/shared/index.ts
@@ -58,6 +58,9 @@ export interface PluginBase<
     options: z.infer<Options>,
     workerIndex: number
   ): Awaitable<Worker_Binding[] | void>;
+  getNodeBindings(
+    options: z.infer<Options>
+  ): Awaitable<Record<string, unknown>>;
   getServices(
     options: PluginServicesOptions<Options, SharedOptions>
   ): Awaitable<Service[] | void>;
@@ -78,6 +81,22 @@ export type Plugin<
     router: RouterConstructor;
   });
 
+// When this is returned as the binding from `PluginBase#getNodeBindings()`,
+// Miniflare will replace it with a proxy to the binding in `workerd`
+export const kProxyNodeBinding = Symbol("kProxyNodeBinding");
+
+export function namespaceKeys(
+  namespaces?: Record<string, string> | string[]
+): string[] {
+  if (Array.isArray(namespaces)) {
+    return namespaces;
+  } else if (namespaces !== undefined) {
+    return Object.keys(namespaces);
+  } else {
+    return [];
+  }
+}
+
 export function namespaceEntries(
   namespaces?: Record<string, string> | string[]
 ): [bindingName: string, id: string][] {
diff --git a/packages/miniflare/src/runtime/config/workerd.ts b/packages/miniflare/src/runtime/config/workerd.ts
index a6ed15d83..29b6245c4 100644
--- a/packages/miniflare/src/runtime/config/workerd.ts
+++ b/packages/miniflare/src/runtime/config/workerd.ts
@@ -18,7 +18,7 @@ export interface Config {
   services?: Service[];
   sockets?: Socket[];
   v8Flags?: string[];
-  extension?: Extension[];
+  extensions?: Extension[];
 }
 
 export type Socket = {
diff --git a/packages/miniflare/src/shared/index.ts b/packages/miniflare/src/shared/index.ts
index 533a20493..df14b2802 100644
--- a/packages/miniflare/src/shared/index.ts
+++ b/packages/miniflare/src/shared/index.ts
@@ -4,5 +4,6 @@ export * from "./error";
 export * from "./event";
 export * from "./log";
 export * from "./matcher";
+export * from "./streams";
 export * from "./sync";
 export * from "./types";
diff --git a/packages/miniflare/src/shared/streams.ts b/packages/miniflare/src/shared/streams.ts
new file mode 100644
index 000000000..3c52adac5
--- /dev/null
+++ b/packages/miniflare/src/shared/streams.ts
@@ -0,0 +1,60 @@
+import { ReadableStream, TransformStream } from "stream/web";
+
+export function prefixStream(
+  prefix: Uint8Array,
+  stream: ReadableStream<Uint8Array>
+): ReadableStream<Uint8Array> {
+  const identity = new TransformStream();
+  const writer = identity.writable.getWriter();
+  // The promise returned by `writer.write()` will only resolve once the chunk
+  // is read, which won't be until after this function returns, so we can't
+  // use `await` here
+  void writer
+    .write(prefix)
+    .then(() => {
+      // Release the writer without closing the stream
+      writer.releaseLock();
+      return stream.pipeTo(identity.writable);
+    })
+    .catch((error) => {
+      return writer.abort(error);
+    });
+  return identity.readable;
+}
+
+export async function readPrefix(
+  stream: ReadableStream<Uint8Array>,
+  prefixLength: number
+): Promise<[prefix: Buffer, rest: ReadableStream<Uint8Array>]> {
+  // NOTE: we can't use a `TransformStream` and buffer the first N chunks as we
+  // need this metadata to determine what to do with the rest of the body.
We + // have to *pull* the data as opposed to passively transforming it as it's + // piped somewhere else. If `body` were a byte stream, we could use BYOB reads + // to read just enough. Even better, if this were running in the Workers + // runtime, we could use `readAtLeast()` to read everything at once. + const chunks: Uint8Array[] = []; + let chunksLength = 0; + for await (const chunk of stream.values({ preventCancel: true })) { + chunks.push(chunk); + chunksLength += chunk.byteLength; + // Once we've read enough bytes, stop + if (chunksLength >= prefixLength) break; + } + // If we read the entire stream without enough bytes for prefix, throw + if (chunksLength < prefixLength) { + throw new RangeError( + `Expected ${prefixLength} byte prefix, but received ${chunksLength} byte stream` + ); + } + const atLeastPrefix = Buffer.concat(chunks, chunksLength); + const prefix = atLeastPrefix.subarray(0, prefixLength); + + let rest = stream; + // If we read over when reading prefix (quite likely), create a new stream, + // write the bit we read, then write the rest of the stream + if (chunksLength > prefixLength) { + rest = prefixStream(atLeastPrefix.subarray(prefixLength), stream); + } + + return [prefix, rest]; +} diff --git a/packages/miniflare/src/workers/README.md b/packages/miniflare/src/workers/README.md index 199ad119c..440ce95bf 100644 --- a/packages/miniflare/src/workers/README.md +++ b/packages/miniflare/src/workers/README.md @@ -1,9 +1,8 @@ # Miniflare 3 Workers This directory contains code for Workers used internally by Miniflare 3. Files -ending in `*.worker.ts` will be bundled as additional ES Module entrypoints, and -type-checked under `@cloudflare/workers-types/experimental`, instead of -`@types/node`. +ending in `*.worker.ts` will be type-checked under +`@cloudflare/workers-types/experimental`, instead of `@types/node`. It also contains dependencies (i.e. header/binding names, other constants and types) shared between Workers and the Node.js components of Miniflare. These diff --git a/packages/miniflare/src/workers/core/constants.ts b/packages/miniflare/src/workers/core/constants.ts index 46d5f4daa..803652e6c 100644 --- a/packages/miniflare/src/workers/core/constants.ts +++ b/packages/miniflare/src/workers/core/constants.ts @@ -4,6 +4,14 @@ export const CoreHeaders = { ERROR_STACK: "MF-Experimental-Error-Stack", ROUTE_OVERRIDE: "MF-Route-Override", CUSTOM_EVENT: "MF-Custom-Event", + + // API Proxy + OP: "MF-Op", + OP_TARGET: "MF-Op-Target", + OP_KEY: "MF-Op-Key", + OP_SYNC: "MF-Op-Sync", + OP_STRINGIFIED_SIZE: "MF-Op-Stringified-Size", + OP_RESULT_TYPE: "MF-Op-Result-Type", } as const; export const CoreBindings = { @@ -16,6 +24,7 @@ export const CoreBindings = { JSON_ROUTES: "MINIFLARE_ROUTES", JSON_LOG_LEVEL: "MINIFLARE_LOG_LEVEL", DATA_LIVE_RELOAD_SCRIPT: "MINIFLARE_LIVE_RELOAD_SCRIPT", + DURABLE_OBJECT_NAMESPACE_PROXY: "MINIFLARE_PROXY", } as const; export enum LogLevel { @@ -26,3 +35,48 @@ export enum LogLevel { DEBUG, VERBOSE, } + +export const ProxyOps = { + // Get the target or a property of the target + GET: "GET", + // Call a method on the target + CALL: "CALL", + // Remove the strong reference to the target on the "heap", allowing it to be + // garbage collected + FREE: "FREE", +} as const; +export const ProxyAddresses = { + GLOBAL: 0, // globalThis + ENV: 1, // env + USER_START: 2, +} as const; + +// ### Proxy Special Cases +// The proxy supports serialising `Request`/`Response`s for the Cache API. It +// doesn't support serialising `WebSocket`s though. 
Rather than attempting this, +// we call `Fetcher#fetch()` using `dispatchFetch()` directly, using the passed +// request. This gives us WebSocket support, and is much more efficient, since +// there's no need to serialise the `Request`/`Response`: we just pass +// everything to `dispatchFetch()` and return what that gives us. +export function isFetcherFetch(targetName: string, key: string) { + // `DurableObject` is the internal name of `DurableObjectStub`: + // https://github.com/cloudflare/workerd/blob/34e3f96dae6a4ba799fe0ab9ad9f3f5a88633fc7/src/workerd/api/actor.h#L86 + return ( + (targetName === "Fetcher" || targetName === "DurableObject") && + key === "fetch" + ); +} +// `R2Object#writeHttpMetadata()` is one of the few functions that mutates its +// arguments. This would be proxied correctly if the argument were a native +// target proxy itself, but `Headers` can be constructed in Node. Instead, we +// respond with the updated headers in the proxy server, then copy them to the +// original argument on the client. +export function isR2ObjectWriteHttpMetadata(targetName: string, key: string) { + // `HeadResult` and `GetResult` are the internal names of `R2Object` and `R2ObjectBody` respectively: + // https://github.com/cloudflare/workerd/blob/ae612f0563d864c82adbfa4c2e5ed78b547aa0a1/src/workerd/api/r2-bucket.h#L210 + // https://github.com/cloudflare/workerd/blob/ae612f0563d864c82adbfa4c2e5ed78b547aa0a1/src/workerd/api/r2-bucket.h#L263-L264 + return ( + (targetName === "HeadResult" || targetName === "GetResult") && + key === "writeHttpMetadata" + ); +} diff --git a/packages/miniflare/src/workers/core/devalue.ts b/packages/miniflare/src/workers/core/devalue.ts index cc196471b..d4c6e84e9 100644 --- a/packages/miniflare/src/workers/core/devalue.ts +++ b/packages/miniflare/src/workers/core/devalue.ts @@ -1,11 +1,23 @@ import assert from "node:assert"; import { Buffer } from "node:buffer"; +import type { + Blob as WorkerBlob, + BlobOptions as WorkerBlobOptions, + File as WorkerFile, + FileOptions as WorkerFileOptions, + Headers as WorkerHeaders, + ReadableStream as WorkerReadableStream, + Request as WorkerRequest, + Response as WorkerResponse, +} from "@cloudflare/workers-types/experimental"; +import { parse, stringify } from "devalue"; // This file implements `devalue` reducers and revivers for structured- // serialisable types not supported by default. 
See serialisable types here:
 // https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Structured_clone_algorithm#supported_types
 
-export type ReducersRevivers = Record<string, (value: unknown) => unknown>;
+export type ReducerReviver = (value: unknown) => unknown;
+export type ReducersRevivers = Record<string, ReducerReviver>;
 
 const ALLOWED_ARRAY_BUFFER_VIEW_CONSTRUCTORS = [
   DataView,
@@ -34,7 +46,8 @@ const ALLOWED_ERROR_CONSTRUCTORS = [
 export const structuredSerializableReducers: ReducersRevivers = {
   ArrayBuffer(value) {
     if (value instanceof ArrayBuffer) {
-      return Buffer.from(value).toString("base64");
+      // Return single element array so empty `ArrayBuffer` serialised as truthy
+      return [Buffer.from(value).toString("base64")];
     }
   },
   ArrayBufferView(value) {
@@ -60,8 +73,10 @@
 };
 export const structuredSerializableRevivers: ReducersRevivers = {
   ArrayBuffer(value) {
-    assert(typeof value === "string");
-    const view = Buffer.from(value, "base64");
+    assert(Array.isArray(value));
+    const [encoded] = value as unknown[];
+    assert(typeof encoded === "string");
+    const view = Buffer.from(encoded, "base64");
     return view.buffer.slice(
       view.byteOffset,
       view.byteOffset + view.byteLength
@@ -97,3 +112,202 @@
     return error;
   },
 };
+
+// This file gets imported both by Node and workers. These platforms have
+// different ways of accessing/performing operations required by this code.
+// This interface should be implemented by both platforms to provide this
+// functionality. `RS` is the type of `ReadableStream`.
+export interface PlatformImpl<RS> {
+  Blob: typeof WorkerBlob;
+  File: typeof WorkerFile;
+  Headers: typeof WorkerHeaders;
+  Request: typeof WorkerRequest;
+  Response: typeof WorkerResponse;
+
+  isReadableStream(value: unknown): value is RS;
+  bufferReadableStream(stream: RS): Promise<ArrayBuffer>;
+  unbufferReadableStream(buffer: ArrayBuffer): RS;
+}
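The two `PlatformImpl` implementations differ mainly in how they buffer a stream; a side-by-side sketch (Node shown runnable, the workers variant appears in `proxy.worker.ts` later in this diff):

```ts
import { arrayBuffer } from "stream/consumers";
import { ReadableStream } from "stream/web";

// Node: `stream/consumers` can buffer a web stream directly
const bufferNode = (stream: ReadableStream): Promise<ArrayBuffer> =>
  arrayBuffer(stream);

// Workers (sketch): there is no `stream/consumers`, so wrap the stream in a
// `Response` and buffer via `arrayBuffer()` instead:
//   const bufferWorkers = (stream) => new Response(stream).arrayBuffer();
```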
+
+export function createHTTPReducers<RS>(
+  impl: PlatformImpl<RS>
+): ReducersRevivers {
+  return {
+    Headers(val) {
+      if (val instanceof impl.Headers) return Object.fromEntries(val);
+    },
+    Request(val) {
+      if (val instanceof impl.Request) {
+        return [val.method, val.url, val.headers, val.cf, val.body];
+      }
+    },
+    Response(val) {
+      if (val instanceof impl.Response) {
+        return [val.status, val.statusText, val.headers, val.cf, val.body];
+      }
+    },
+  };
+}
+export function createHTTPRevivers<RS>(
+  impl: PlatformImpl<RS>
+): ReducersRevivers {
+  return {
+    Headers(value) {
+      assert(typeof value === "object" && value !== null);
+      return new impl.Headers(value as Record<string, string>);
+    },
+    Request(value) {
+      assert(Array.isArray(value));
+      const [method, url, headers, cf, body] = value as unknown[];
+      assert(typeof method === "string");
+      assert(typeof url === "string");
+      assert(headers instanceof impl.Headers);
+      assert(body === null || impl.isReadableStream(body));
+      return new impl.Request(url, {
+        method,
+        headers,
+        cf,
+        // @ts-expect-error `duplex` is not required by `workerd` yet
+        duplex: body === null ? undefined : "half",
+        body: body as WorkerReadableStream | null,
+      });
+    },
+    Response(value) {
+      assert(Array.isArray(value));
+      const [status, statusText, headers, cf, body] = value as unknown[];
+      assert(typeof status === "number");
+      assert(typeof statusText === "string");
+      assert(headers instanceof impl.Headers);
+      assert(body === null || impl.isReadableStream(body));
+      return new impl.Response(body as WorkerReadableStream | null, {
+        status,
+        statusText,
+        headers,
+        cf,
+      });
+    },
+  };
+}
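How the reducer/reviver pair is intended to compose — a sketch assuming `NODE_PLATFORM_IMPL` from `plugins/core/proxy/types.ts` earlier in this diff is importable (it is internal), and a bodiless response; streaming bodies go through `stringifyWithStreams()` below instead:

```ts
import { parse, stringify } from "devalue";
import { Response } from "miniflare";

function demoRoundTrip(): Response {
  const reducers = createHTTPReducers(NODE_PLATFORM_IMPL);
  const revivers = createHTTPRevivers(NODE_PLATFORM_IMPL);
  const res = new Response(null, { status: 204, headers: { "X-Demo": "1" } });
  // Structurally-equivalent `Response` reconstructed on the other side
  return parse(stringify(res, reducers), revivers) as Response;
}
```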
+
+export interface StringifiedWithStream<RS> {
+  value: string;
+  unbufferedStream?: RS;
+}
+// `devalue` `stringify()` that allows a single stream to be "unbuffered", and
+// sent separately. Other streams will be buffered.
+export function stringifyWithStreams<RS>(
+  impl: PlatformImpl<RS>,
+  value: unknown,
+  reducers: ReducersRevivers,
+  allowUnbufferedStream: boolean
+): StringifiedWithStream<RS> | Promise<StringifiedWithStream<RS>> {
+  let unbufferedStream: RS | undefined;
+  // The tricky thing here is that `devalue` `stringify()` is synchronous, and
+  // doesn't support asynchronous reducers. Assuming we visit values in the same
+  // order each time, we can use an array to store buffer promises.
+  const bufferPromises: Promise<ArrayBuffer>[] = [];
+  const streamReducers: ReducersRevivers = {
+    ReadableStream(value) {
+      if (impl.isReadableStream(value)) {
+        if (allowUnbufferedStream && unbufferedStream === undefined) {
+          unbufferedStream = value;
+        } else {
+          bufferPromises.push(impl.bufferReadableStream(value));
+        }
+        // Using `true` to signify unbuffered stream, buffered streams will
+        // have this replaced with an `ArrayBuffer` on the 2nd `stringify()`
+        // If we don't have any buffer promises, this will encode to the correct
+        // value, so we don't need to re-`stringify()`.
+        return true;
+      }
+    },
+    Blob(value) {
+      if (value instanceof impl.Blob) {
+        // `Blob`s are always buffered. We can't just serialise with a stream
+        // here (and recursively use the reducer above), because `workerd`
+        // doesn't allow us to synchronously reconstruct a `Blob` from a stream:
+        // its `new Blob([...])` doesn't support `ReadableStream` blob bits.
+        bufferPromises.push(value.arrayBuffer());
+        return true;
+      }
+    },
+
+    ...reducers,
+  };
+  const stringifiedValue = stringify(value, streamReducers);
+  // If we didn't need to buffer anything, we've just encoded correctly. Note
+  // `unbufferedStream` may be undefined if the `value` didn't contain streams.
+  // Note also in this case we're returning synchronously, so we can use this
+  // for synchronous methods too.
+  if (bufferPromises.length === 0) {
+    return { value: stringifiedValue, unbufferedStream };
+  }
+
+  // Otherwise, wait for buffering to complete, and `stringify()` again with
+  // a reducer that expects buffers.
+  return Promise.all(bufferPromises).then((streamBuffers) => {
+    // Again, we're assuming values are visited in the same order, so `shift()`
+    // will give us the next correct buffer
+    streamReducers.ReadableStream = function (value) {
+      if (impl.isReadableStream(value)) {
+        if (value === unbufferedStream) {
+          return true;
+        } else {
+          return streamBuffers.shift();
+        }
+      }
+    };
+    streamReducers.Blob = function (value) {
+      if (value instanceof impl.Blob) {
+        const array: unknown[] = [streamBuffers.shift(), value.type];
+        if (value instanceof impl.File) {
+          array.push(value.name, value.lastModified);
+        }
+        return array;
+      }
+    };
+    const stringifiedValue = stringify(value, streamReducers);
+    return { value: stringifiedValue, unbufferedStream };
+  });
+}
+export function parseWithReadableStreams<RS>(
+  impl: PlatformImpl<RS>,
+  stringified: StringifiedWithStream<RS>,
+  revivers: ReducersRevivers
+): unknown {
+  const streamRevivers: ReducersRevivers = {
+    ReadableStream(value) {
+      if (value === true) {
+        assert(stringified.unbufferedStream !== undefined);
+        return stringified.unbufferedStream;
+      }
+      assert(value instanceof ArrayBuffer);
+      return impl.unbufferReadableStream(value);
+    },
+    Blob(value) {
+      assert(Array.isArray(value));
+      if (value.length === 2) {
+        // Blob
+        const [buffer, type] = value as unknown[];
+        assert(buffer instanceof ArrayBuffer);
+        assert(typeof type === "string");
+        const opts: WorkerBlobOptions = {};
+        if (type !== "") opts.type = type;
+        return new impl.Blob([buffer], opts);
+      } else {
+        // File
+        assert(value.length === 4);
+        const [buffer, type, name, lastModified] = value as unknown[];
+        assert(buffer instanceof ArrayBuffer);
+        assert(typeof type === "string");
+        assert(typeof name === "string");
+        assert(typeof lastModified === "number");
+        const opts: WorkerFileOptions = { lastModified };
+        if (type !== "") opts.type = type;
+        return new impl.File([buffer], name, opts);
+      }
+    },
+    ...revivers,
+  };
+  return parse(stringified.value, streamRevivers);
+}
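The intended composition of the two halves above (a sketch; Node platform assumed, and transporting `value`/`unbufferedStream` between the two sides is the proxy client/server's job):

```ts
async function demoStreamRoundTrip(value: unknown): Promise<unknown> {
  const stringified = await stringifyWithStreams(
    NODE_PLATFORM_IMPL,
    value,
    structuredSerializableReducers,
    /* allowUnbufferedStream */ true // at most one stream stays unbuffered
  );
  // `stringified.value` travels as text; `stringified.unbufferedStream`, if
  // present, travels as a raw HTTP body alongside it. The receiver undoes it:
  return parseWithReadableStreams(
    NODE_PLATFORM_IMPL,
    stringified,
    structuredSerializableRevivers
  );
}
```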
diff --git a/packages/miniflare/src/workers/core/entry.worker.ts b/packages/miniflare/src/workers/core/entry.worker.ts
index 47789c261..b6623d19a 100644
--- a/packages/miniflare/src/workers/core/entry.worker.ts
+++ b/packages/miniflare/src/workers/core/entry.worker.ts
@@ -12,6 +12,7 @@ type Env = {
   [CoreBindings.JSON_ROUTES]: WorkerRoute[];
   [CoreBindings.JSON_LOG_LEVEL]: LogLevel;
   [CoreBindings.DATA_LIVE_RELOAD_SCRIPT]: ArrayBuffer;
+  [CoreBindings.DURABLE_OBJECT_NAMESPACE_PROXY]: DurableObjectNamespace;
 } & {
   [K in `${typeof CoreBindings.SERVICE_USER_ROUTE_PREFIX}${string}`]:
     | Fetcher
@@ -149,6 +150,15 @@ function maybeLogRequest(
   );
 }
 
+function handleProxy(request: Request, env: Env) {
+  const ns = env[CoreBindings.DURABLE_OBJECT_NAMESPACE_PROXY];
+  // Always use the same singleton Durable Object instance, so we always have
+  // access to the same "heap"
+  const id = ns.idFromName("");
+  const stub = ns.get(id);
+  return stub.fetch(request);
+}
+
 async function handleQueue(
   request: Request,
   url: URL,
@@ -186,6 +196,10 @@ export default <ExportedHandler<Env>>{
   async fetch(request, env, ctx) {
     const startTime = Date.now();
 
+    // The proxy client will always specify an operation
+    const isProxy = request.headers.get(CoreHeaders.OP) !== null;
+    if (isProxy) return handleProxy(request, env);
+
     // `dispatchFetch()` will always inject the passed URL as a header. When
     // calling this function, we never want to display the pretty-error page.
     // Instead, we propagate the error and reject the returned `Promise`.
@@ -221,3 +235,5 @@ export default <ExportedHandler<Env>>{
     }
   },
 };
+
+export { ProxyServer } from "./proxy.worker";
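For orientation, the rough wire shape of a proxy operation that the client issues and `ProxyServer` (below) handles — illustrative values only; the header names come from `workers/core/constants.ts` above, and the real `ProxyClient` manages heap addresses and devalue encoding itself:

```ts
// CALL op: roughly `env.KV.get("key")` expressed over HTTP
const demoOpRequest = new Request("http://placeholder", {
  method: "POST",
  headers: {
    "MF-Op": "CALL",       // ProxyOps.CALL
    "MF-Op-Target": "...", // devalue-encoded reference to a heap address
    "MF-Op-Key": "get",    // property/method name on the target
  },
  body: '["key"]',         // devalue-stringified argument list (sketch)
});
```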
diff --git a/packages/miniflare/src/workers/core/proxy.worker.ts b/packages/miniflare/src/workers/core/proxy.worker.ts
new file mode 100644
index 000000000..3889b6aa7
--- /dev/null
+++ b/packages/miniflare/src/workers/core/proxy.worker.ts
@@ -0,0 +1,321 @@
+import assert from "node:assert";
+import { parse } from "devalue";
+import {
+  CoreHeaders,
+  ProxyAddresses,
+  ProxyOps,
+  isFetcherFetch,
+  isR2ObjectWriteHttpMetadata,
+} from "./constants";
+import {
+  PlatformImpl,
+  ReducersRevivers,
+  createHTTPReducers,
+  createHTTPRevivers,
+  parseWithReadableStreams,
+  stringifyWithStreams,
+  structuredSerializableReducers,
+  structuredSerializableRevivers,
+} from "./devalue";
+
+const ENCODER = new TextEncoder();
+const DECODER = new TextDecoder();
+
+const WORKERS_PLATFORM_IMPL: PlatformImpl<ReadableStream> = {
+  Blob,
+  File,
+  Headers,
+  Request,
+  Response,
+
+  isReadableStream(value): value is ReadableStream {
+    return value instanceof ReadableStream;
+  },
+  bufferReadableStream(stream) {
+    return new Response(stream).arrayBuffer();
+  },
+  unbufferReadableStream(buffer) {
+    const body = new Response(buffer).body;
+    assert(body !== null);
+    return body;
+  },
+};
+
+interface JsonError {
+  message?: string;
+  name?: string;
+  stack?: string;
+  cause?: JsonError;
+}
+
+function reduceError(e: any): JsonError {
+  return {
+    name: e?.name,
+    message: e?.message ?? String(e),
+    stack: e?.stack,
+    cause: e?.cause === undefined ? undefined : reduceError(e.cause),
+  };
+}
+
+async function readPrefix(
+  stream: ReadableStream<Uint8Array>,
+  prefixLength: number
+): Promise<[prefix: Uint8Array, rest: ReadableStream]> {
+  const reader = await stream.getReader({ mode: "byob" });
+  const result = await reader.readAtLeast(
+    prefixLength,
+    new Uint8Array(prefixLength)
+  );
+  assert(result.value !== undefined);
+  reader.releaseLock();
+  // TODO(cleanup): once https://github.com/cloudflare/workerd/issues/892 fixed,
+  // should just be able to use `stream` here
+  const rest = stream.pipeThrough(new IdentityTransformStream());
+  return [result.value, rest];
+}
+
+// Helpers taken from `devalue` (unfortunately not exported):
+// https://github.com/Rich-Harris/devalue/blob/50af63e2b2c648f6e6ea29904a14faac25a581fc/src/utils.js#L31-L51
+const objectProtoNames = Object.getOwnPropertyNames(Object.prototype)
+  .sort()
+  .join("\0");
+function isPlainObject(value: unknown) {
+  const proto = Object.getPrototypeOf(value);
+  return (
+    proto === Object.prototype ||
+    proto === null ||
+    Object.getOwnPropertyNames(proto).sort().join("\0") === objectProtoNames
+  );
+}
+function getType(value: unknown) {
+  return Object.prototype.toString.call(value).slice(8, -1); // `[object <type>]`
+}
+
+// TODO(someday): extract `ProxyServer` into component that could be used by
+// other (user) Durable Objects
+export class ProxyServer implements DurableObject {
+  // On the first `fetch()`, start a `setInterval()` to keep this Durable Object
+  // and its heap alive. This is required to ensure heap references stay valid
+  // for the lifetime of this `workerd` process (except it isn't since `workerd`
+  // doesn't evict Durable Objects yet :P, but it probably will soon).
+  anchorInterval?: number;
+  nextHeapAddress = ProxyAddresses.USER_START;
+  readonly heap = new Map<number, unknown>();
+
+  reducers: ReducersRevivers = {
+    ...structuredSerializableReducers,
+    ...createHTTPReducers(WORKERS_PLATFORM_IMPL),
+    // Corresponding revivers in `ProxyClient`
+    // `Native` reducer *MUST* be applied last
+    Native: (value) => {
+      // For instances of runtime API classes implemented in C++, `getType()`
+      // should only ever return `Object`, as none override `Symbol.toStringTag`
+      // https://tc39.es/ecma262/multipage/fundamental-objects.html#sec-object.prototype.tostring
+      const type = getType(value);
+      if ((type === "Object" && !isPlainObject(value)) || type === "Promise") {
+        const address = this.nextHeapAddress++;
+        this.heap.set(address, value);
+        assert(typeof value === "object" && value !== null);
+        return [address, value.constructor.name];
+      }
+    },
+  };
+  revivers: ReducersRevivers = {
+    ...structuredSerializableRevivers,
+    ...createHTTPRevivers(WORKERS_PLATFORM_IMPL),
+    // Corresponding reducers in `ProxyClient`
+    Native: (value) => {
+      assert(Array.isArray(value));
+      const [address] = value as unknown[];
+      assert(typeof address === "number");
+      const heapValue = this.heap.get(address);
+      assert(heapValue !== undefined);
+      // We should only store `Promise`s on the heap if we attempted to make a
+      // synchronous GET/CALL that then returned a `Promise`. In that case,
+      // we'll immediately make an asynchronous GET to resolve the `Promise`.
+      // Rather than worrying about cleaning up `Promise`s some other way, we
+      // just remove them from the heap immediately, since we should never make
+      // a request to resolve them again.
+      if (heapValue instanceof Promise) this.heap.delete(address);
+      return heapValue;
+    },
+  };
+  nativeReviver: ReducersRevivers = { Native: this.revivers.Native };
+
+  constructor(_state: DurableObjectState, env: Record<string, unknown>) {
+    this.heap.set(ProxyAddresses.GLOBAL, globalThis);
+    this.heap.set(ProxyAddresses.ENV, env);
+  }
+
+  async fetch(request: Request) {
+    // Make sure this instance is kept alive
+    this.anchorInterval ??= setInterval(() => {}, 10_000);
+    try {
+      return await this.#fetch(request);
+    } catch (e) {
+      const error = reduceError(e);
+      return Response.json(error, {
+        status: 500,
+        headers: { [CoreHeaders.ERROR_STACK]: "true" },
+      });
+    }
+  }
+
+  async #fetch(request: Request) {
+    const opHeader = request.headers.get(CoreHeaders.OP);
+    const targetHeader = request.headers.get(CoreHeaders.OP_TARGET);
+    const keyHeader = request.headers.get(CoreHeaders.OP_KEY);
+    const allowAsync = request.headers.get(CoreHeaders.OP_SYNC) === null;
+    const argsSizeHeader = request.headers.get(CoreHeaders.OP_STRINGIFIED_SIZE);
+    const contentLengthHeader = request.headers.get("Content-Length");
+
+    // Get target to perform operations on
+    if (targetHeader === null) return new Response(null, { status: 400 });
+
+    // If this is a FREE operation, remove the target from the heap
+    if (opHeader === ProxyOps.FREE) {
+      const targetAddress = parseInt(targetHeader);
+      assert(!Number.isNaN(targetAddress));
+      this.heap.delete(targetAddress);
+      return new Response(null, { status: 204 });
+    }
+
+    // Revive the target from the heap
+    const target: Record<string, unknown> = parse(
+      targetHeader,
+      this.nativeReviver
+    );
+    const targetName = target.constructor.name;
+
+    let status = 200;
+    let result;
+    let unbufferedRest: ReadableStream | undefined;
+    if (opHeader === ProxyOps.GET) {
+      // If no key header is specified, just return the target
+      result = keyHeader === null ?
target : target[keyHeader]; + if (typeof result === "function") { + // Calling functions-which-return-functions not yet supported + return new Response(null, { + status: 204, + headers: { [CoreHeaders.OP_RESULT_TYPE]: "Function" }, + }); + } + } else if (opHeader === ProxyOps.CALL) { + // We don't allow callable targets yet (could be useful to implement if + // we ever need to proxy functions that return functions) + if (keyHeader === null) return new Response(null, { status: 400 }); + const func = target[keyHeader]; + assert(typeof func === "function"); + + // See `isFetcherFetch()` comment for why this special + if (isFetcherFetch(targetName, keyHeader)) { + // Create a new request to allow header mutation + request = new Request(request); + request.headers.delete(CoreHeaders.OP); + request.headers.delete(CoreHeaders.OP_TARGET); + request.headers.delete(CoreHeaders.OP_KEY); + return func.call(target, request); + } + + let args: unknown; + if (argsSizeHeader === null || argsSizeHeader === contentLengthHeader) { + // No unbuffered `ReadableStream` + args = parseWithReadableStreams( + WORKERS_PLATFORM_IMPL, + { value: await request.text() }, + this.revivers + ); + } else { + // Unbuffered `ReadableStream` argument + const argsSize = parseInt(argsSizeHeader); + assert(!Number.isNaN(argsSize)); + assert(request.body !== null); + const [encodedArgs, rest] = await readPrefix(request.body, argsSize); + unbufferedRest = rest; + const stringifiedArgs = DECODER.decode(encodedArgs); + args = parseWithReadableStreams( + WORKERS_PLATFORM_IMPL, + { value: stringifiedArgs, unbufferedStream: rest }, + this.revivers + ); + } + assert(Array.isArray(args)); + try { + result = func.apply(target, args); + // See `isR2ObjectWriteHttpMetadata()` comment for why this special + if (isR2ObjectWriteHttpMetadata(targetName, keyHeader)) { + result = args[0]; + } + } catch (e) { + status = 500; + result = e; + } + } else { + return new Response(null, { status: 404 }); + } + + const headers = new Headers(); + if (allowAsync && result instanceof Promise) { + // Note we only resolve `Promise`s if we're allowing async operations. + // Otherwise, we'll treat the `Promise` as a native target. This allows + // us to use regular HTTP status/headers to indicate whether the `Promise` + // resolved/rejected and whether the body should be interpreted as a raw + // `ReadableStream`. Otherwise, we'd need to devise an encoding scheme for + // this in the body. + try { + result = await result; + } catch (e) { + status = 500; + result = e; + } + headers.append(CoreHeaders.OP_RESULT_TYPE, "Promise"); + } + // Make sure we fully-consume the request body if it wasn't used (e.g. key + // validation failed). 
Without this, we'll get a `TypeError: Can't read from + // request stream after response has been sent.` + // TODO(soon): remove once https://github.com/cloudflare/workerd/issues/918 fixed + if (unbufferedRest !== undefined && !unbufferedRest.locked) { + try { + await unbufferedRest.pipeTo(new WritableStream()); + } catch {} + } + if (result instanceof ReadableStream) { + // If this was also a resolve `Promise`, the result type header will end + // up as "Promise, ReadableStream" + headers.append(CoreHeaders.OP_RESULT_TYPE, "ReadableStream"); + return new Response(result, { status, headers }); + } else { + const stringified = await stringifyWithStreams( + WORKERS_PLATFORM_IMPL, + result, + this.reducers, + /* allowUnbufferedStream */ allowAsync + ); + if (stringified.unbufferedStream === undefined) { + return new Response(stringified.value, { status, headers }); + } else { + const body = new IdentityTransformStream(); + const encodedValue = ENCODER.encode(stringified.value); + const encodedSize = encodedValue.byteLength.toString(); + headers.set(CoreHeaders.OP_STRINGIFIED_SIZE, encodedSize); + void this.#writeWithUnbufferedStream( + body.writable, + encodedValue, + stringified.unbufferedStream + ); + return new Response(body.readable, { status, headers }); + } + } + } + + async #writeWithUnbufferedStream( + writable: WritableStream, + encodedValue: Uint8Array, + unbufferedStream: ReadableStream + ) { + const writer = writable.getWriter(); + await writer.write(encodedValue); + writer.releaseLock(); + await unbufferedStream.pipeTo(writable); + } +} diff --git a/packages/miniflare/test/index.spec.ts b/packages/miniflare/test/index.spec.ts index 9d89fc967..a86288369 100644 --- a/packages/miniflare/test/index.spec.ts +++ b/packages/miniflare/test/index.spec.ts @@ -1,25 +1,43 @@ +// noinspection TypeScriptValidateJSTypes + import assert from "assert"; +import fs from "fs/promises"; import http from "http"; import { AddressInfo } from "net"; +import path from "path"; import { Writable } from "stream"; -import test from "ava"; +import util from "util"; +import { + D1Database, + DurableObjectNamespace, + Fetcher, + KVNamespace, + Queue, + R2Bucket, +} from "@cloudflare/workers-types/experimental"; +import test, { ThrowsExpectation } from "ava"; import { DeferredPromise, MessageEvent, Miniflare, MiniflareCoreError, MiniflareOptions, + ReplaceWorkersTypes, Response, + _QUEUES_COMPATIBLE_V8_VERSION, _transformsForContentEncoding, createFetchMock, fetch, + viewToBuffer, } from "miniflare"; import { CloseEvent as StandardCloseEvent, MessageEvent as StandardMessageEvent, WebSocketServer, } from "ws"; -import { TestLog, useServer } from "./test-shared"; +import { TestLog, useServer, useTmp, utf8Encode } from "./test-shared"; + +const queuesTest = _QUEUES_COMPATIBLE_V8_VERSION ? 
test : test.skip; test("Miniflare: validates options", async (t) => { // Check empty workers array rejected @@ -67,6 +85,7 @@ test("Miniflare: keeps port between updates", async (t) => { })`, }; const mf = new Miniflare(opts); + t.teardown(() => mf.dispose()); const initialURL = await mf.ready; await mf.setOptions(opts); @@ -96,6 +115,7 @@ test("Miniflare: routes to multiple workers with fallback", async (t) => { ], }; const mf = new Miniflare(opts); + t.teardown(() => mf.dispose()); // Check "a"'s more specific route checked first let res = await mf.dispatchFetch("http://localhost/api"); @@ -363,6 +383,7 @@ test("Miniflare: custom outbound service", async (t) => { }, ], }); + t.teardown(() => mf.dispose()); const res = await mf.dispatchFetch("http://localhost"); t.deepEqual(await res.json(), { res1: "one", @@ -385,6 +406,7 @@ test("Miniflare: fetch mocking", async (t) => { }`, fetchMock, }); + t.teardown(() => mf.dispose()); const res = await mf.dispatchFetch("http://localhost"); t.is(await res.text(), "Mocked response!"); @@ -417,6 +439,7 @@ test("Miniflare: custom upstream as origin", async (t) => { } }`, }); + t.teardown(() => mf.dispose()); // Check rewrites protocol, hostname, and port, but keeps pathname and query const res = await mf.dispatchFetch("https://random:0/path?a=1"); t.is(await res.text(), "upstream: http://upstream/extra/path?a=1"); @@ -438,6 +461,7 @@ test("Miniflare: `node:` and `cloudflare:` modules", async (t) => { } `, }); + t.teardown(() => mf.dispose()); const res = await mf.dispatchFetch("http://localhost"); t.is(await res.text(), "dGVzdA=="); }); @@ -462,6 +486,7 @@ test("Miniflare: modules in sub-directories", async (t) => { }, ], }); + t.teardown(() => mf.dispose()); const res = await mf.dispatchFetch("http://localhost"); t.is(await res.text(), "123"); }); @@ -475,11 +500,13 @@ test("Miniflare: HTTPS fetches using browser CA certificates", async (t) => { } }`, }); + t.teardown(() => mf.dispose()); const res = await mf.dispatchFetch("http://localhost"); t.true(res.ok); + await res.arrayBuffer(); // (drain) }); -test("Miniflare: Accepts https requests", async (t) => { +test("Miniflare: accepts https requests", async (t) => { const log = new TestLog(t); const mf = new Miniflare({ @@ -492,14 +519,16 @@ test("Miniflare: Accepts https requests", async (t) => { } }`, }); + t.teardown(() => mf.dispose()); const res = await mf.dispatchFetch("https://localhost"); t.true(res.ok); + await res.arrayBuffer(); // (drain) t.assert(log.logs[0][1].startsWith("Ready on https://")); }); -test("Miniflare: Manually triggered scheduled events", async (t) => { +test("Miniflare: manually triggered scheduled events", async (t) => { const log = new TestLog(t); const mf = new Miniflare({ @@ -516,6 +545,7 @@ test("Miniflare: Manually triggered scheduled events", async (t) => { } }`, }); + t.teardown(() => mf.dispose()); let res = await mf.dispatchFetch("http://localhost"); t.is(await res.text(), "false"); @@ -526,3 +556,169 @@ test("Miniflare: Manually triggered scheduled events", async (t) => { res = await mf.dispatchFetch("http://localhost"); t.is(await res.text(), "true"); }); + +queuesTest("Miniflare: getBindings() returns all bindings", async (t) => { + const tmp = await useTmp(t); + const blobPath = path.join(tmp, "blob.txt"); + await fs.writeFile(blobPath, "blob"); + const mf = new Miniflare({ + modules: true, + script: ` + export class DurableObject {} + export default { fetch() { return new Response(null, { status: 404 }); } } + `, + bindings: { STRING: "hello", OBJECT: { a: 1, 
b: { c: 2 } } },
+    textBlobBindings: { TEXT: blobPath },
+    dataBlobBindings: { DATA: blobPath },
+    serviceBindings: { SELF: "" },
+    d1Databases: ["DB"],
+    durableObjects: { DO: "DurableObject" },
+    kvNamespaces: ["KV"],
+    queueProducers: ["QUEUE"],
+    r2Buckets: ["BUCKET"],
+  });
+  let disposed = false;
+  t.teardown(() => {
+    if (!disposed) return mf.dispose();
+  });
+
+  interface Env {
+    STRING: string;
+    OBJECT: unknown;
+    TEXT: string;
+    DATA: ArrayBuffer;
+    SELF: ReplaceWorkersTypes<Fetcher>;
+    DB: D1Database;
+    DO: ReplaceWorkersTypes<DurableObjectNamespace>;
+    KV: ReplaceWorkersTypes<KVNamespace>;
+    QUEUE: Queue;
+    BUCKET: ReplaceWorkersTypes<R2Bucket>;
+  }
+  const bindings = await mf.getBindings<Env>();
+
+  t.like(bindings, {
+    STRING: "hello",
+    OBJECT: { a: 1, b: { c: 2 } },
+    TEXT: "blob",
+  });
+  t.deepEqual(bindings.DATA, viewToBuffer(utf8Encode("blob")));
+
+  const opts: util.InspectOptions = { colors: false };
+  t.regex(util.inspect(bindings.SELF, opts), /name: 'Fetcher'/);
+  t.regex(util.inspect(bindings.DB, opts), /name: 'D1Database'/);
+  t.regex(util.inspect(bindings.DO, opts), /name: 'DurableObjectNamespace'/);
+  t.regex(util.inspect(bindings.KV, opts), /name: 'KvNamespace'/);
+  t.regex(util.inspect(bindings.QUEUE, opts), /name: 'WorkerQueue'/);
+  t.regex(util.inspect(bindings.BUCKET, opts), /name: 'R2Bucket'/);
+
+  // Check with WebAssembly binding (aren't supported by modules workers)
+  // (base64 encoded module containing a single `add(i32, i32): i32` export)
+  const addWasmModule =
+    "AGFzbQEAAAABBwFgAn9/AX8DAgEABwcBA2FkZAAACgkBBwAgACABagsACgRuYW1lAgMBAAA=";
+  const addWasmPath = path.join(tmp, "add.wasm");
+  await fs.writeFile(addWasmPath, Buffer.from(addWasmModule, "base64"));
+  await mf.setOptions({
+    script:
+      'addEventListener("fetch", (event) => event.respondWith(new Response(null, { status: 404 })));',
+    wasmBindings: { ADD: addWasmPath },
+  });
+  const { ADD } = await mf.getBindings<{ ADD: WebAssembly.Module }>();
+  const instance = new WebAssembly.Instance(ADD);
+  assert(typeof instance.exports.add === "function");
+  t.is(instance.exports.add(1, 2), 3);
+
+  // Check bindings poisoned after dispose
+  await mf.dispose();
+  disposed = true;
+  const expectations: ThrowsExpectation = {
+    message:
+      "Attempted to use poisoned stub. 
Stubs to runtime objects must be re-created after calling `Miniflare#setOptions()` or `Miniflare#dispose()`.", + }; + t.throws(() => bindings.KV.get("key"), expectations); +}); +queuesTest( + "Miniflare: getBindings() and friends return bindings for different workers", + async (t) => { + const mf = new Miniflare({ + workers: [ + { + name: "a", + modules: true, + script: ` + export class DurableObject {} + export default { fetch() { return new Response(null, { status: 404 }); } } + `, + d1Databases: ["DB"], + durableObjects: { DO: "DurableObject" }, + }, + { + // 2nd worker unnamed, to validate that not specifying a name when + // getting bindings gives the entrypoint, not the unnamed worker + script: + 'addEventListener("fetch", (event) => event.respondWith(new Response(null, { status: 404 })));', + kvNamespaces: ["KV"], + queueProducers: ["QUEUE"], + }, + { + name: "b", + script: + 'addEventListener("fetch", (event) => event.respondWith(new Response(null, { status: 404 })));', + r2Buckets: ["BUCKET"], + }, + ], + }); + t.teardown(() => mf.dispose()); + + // Check `getBindings()` + let bindings = await mf.getBindings(); + t.deepEqual(Object.keys(bindings), ["DB", "DO"]); + bindings = await mf.getBindings(""); + t.deepEqual(Object.keys(bindings), ["KV", "QUEUE"]); + bindings = await mf.getBindings("b"); + t.deepEqual(Object.keys(bindings), ["BUCKET"]); + await t.throwsAsync(() => mf.getBindings("c"), { + instanceOf: TypeError, + message: '"c" worker not found', + }); + + const unboundExpectations = ( + name: string + ): ThrowsExpectation => ({ + instanceOf: TypeError, + message: `"${name}" unbound in "c" worker`, + }); + + // Check `getD1Database()` + let binding: unknown = await mf.getD1Database("DB"); + t.not(binding, undefined); + let expectations = unboundExpectations("DB"); + await t.throwsAsync(() => mf.getD1Database("DB", "c"), expectations); + + // Check `getDurableObjectNamespace()` + binding = await mf.getDurableObjectNamespace("DO"); + t.not(binding, undefined); + expectations = unboundExpectations("DO"); + await t.throwsAsync( + () => mf.getDurableObjectNamespace("DO", "c"), + expectations + ); + + // Check `getKVNamespace()` + binding = await mf.getKVNamespace("KV", ""); + t.not(binding, undefined); + expectations = unboundExpectations("KV"); + await t.throwsAsync(() => mf.getKVNamespace("KV", "c"), expectations); + + // Check `getQueueProducer()` + binding = await mf.getQueueProducer("QUEUE", ""); + t.not(binding, undefined); + expectations = unboundExpectations("QUEUE"); + await t.throwsAsync(() => mf.getQueueProducer("QUEUE", "c"), expectations); + + // Check `getR2Bucket()` + binding = await mf.getR2Bucket("BUCKET", "b"); + t.not(binding, undefined); + expectations = unboundExpectations("BUCKET"); + await t.throwsAsync(() => mf.getQueueProducer("BUCKET", "c"), expectations); + } +); diff --git a/packages/miniflare/test/plugins/cache/index.spec.ts b/packages/miniflare/test/plugins/cache/index.spec.ts index 7dffb288e..83fc888d4 100644 --- a/packages/miniflare/test/plugins/cache/index.spec.ts +++ b/packages/miniflare/test/plugins/cache/index.spec.ts @@ -2,74 +2,52 @@ import assert from "assert"; import crypto from "crypto"; import path from "path"; import { text } from "stream/consumers"; +import type { CacheStorage } from "@cloudflare/workers-types/experimental"; import { HeadersInit, KeyValueStorage, LogLevel, + ReplaceWorkersTypes, + Request, + RequestInit, + Response, createFileStorage, } from "miniflare"; -import { miniflareTest, useTmp } from "../../test-shared"; - 
-const test = miniflareTest({}, async (global, req) => {
-	// Partition headers
-	let name: string | undefined;
-	let cfCacheKey: string | undefined;
-	let bufferPut = false;
-	const reqHeaders = new global.Headers();
-	const resHeaders = new global.Headers();
-	for (const [key, value] of req.headers) {
-		const lowerKey = key.toLowerCase();
-		if (lowerKey === "test-cache-name") {
-			name = value;
-		} else if (lowerKey === "test-cf-cache-key") {
-			cfCacheKey = value;
-		} else if (lowerKey === "test-buffer") {
-			bufferPut = true;
-		} else if (lowerKey.startsWith("test-response-")) {
-			resHeaders.set(lowerKey.substring("test-response-".length), value);
-		} else {
-			reqHeaders.set(lowerKey, value);
-		}
-	}
-
-	// Get cache and cache key
-	const cache =
-		name === undefined ? global.caches.default : await global.caches.open(name);
-	const key = new global.Request(req.url, {
-		headers: reqHeaders,
-		cf: cfCacheKey === undefined ? undefined : { cacheKey: cfCacheKey },
-	});
-
-	// Perform cache operation
-	if (req.method === "GET") {
-		const cachedRes = await cache.match(key);
-		return cachedRes ?? new global.Response("", { status: 404 });
-	} else if (req.method === "PUT") {
-		const body = bufferPut ? await req.arrayBuffer() : req.body;
-		const res = new global.Response(body, { headers: resHeaders });
-		await cache.put(key, res);
+import { MiniflareTestContext, miniflareTest, useTmp } from "../../test-shared";
+
+interface Context extends MiniflareTestContext {
+	caches: ReplaceWorkersTypes<CacheStorage>;
+}
+
+const test = miniflareTest<unknown, Context>({}, async (global, req) => {
+	const { pathname } = new global.URL(req.url);
+	// The API proxy doesn't support putting buffered bodies, so register a
+	// special endpoint for testing
+	if (pathname === "/put-buffered") {
+		const resToCache = new global.Response("buffered", {
+			headers: { "Cache-Control": "max-age=3600" },
+		});
+		await global.caches.default.put("http://localhost/cache-hit", resToCache);
 		return new global.Response(null, { status: 204 });
-	} else if (req.method === "DELETE") {
-		const deleted = await cache.delete(key);
-		return new global.Response(null, { status: deleted ?
204 : 404 });
-	} else {
-		return new global.Response(null, { status: 405 });
-	}
+	return new global.Response(null, { status: 404 });
+});
+
+test.beforeEach(async (t) => {
+	t.context.caches = await t.context.mf.getCaches();
+});

 test("match returns cached responses", async (t) => {
+	const cache = t.context.caches.default;
 	const key = "http://localhost/cache-hit";

 	// Check caching stream body
-	await t.context.mf.dispatchFetch(key, {
-		method: "PUT",
-		headers: {
-			"Test-Response-Cache-Control": "max-age=3600",
-			"Test-Response-X-Key": "value",
-		},
-		body: "body",
+	let resToCache = new Response("body", {
+		headers: { "Cache-Control": "max-age=3600", "X-Key": "value" },
 	});
-	let res = await t.context.mf.dispatchFetch(key);
+	await cache.put(key, resToCache);
+	let res = await cache.match(key);
+	assert(res !== undefined);
 	t.is(res.status, 200);
 	t.is(res.headers.get("Cache-Control"), "max-age=3600");
 	t.is(res.headers.get("CF-Cache-Status"), "HIT");
@@ -78,108 +56,101 @@ test("match returns cached responses", async (t) => {

 	// Check caching binary streamed body
 	const array = new Uint8Array([1, 2, 3]);
-	await t.context.mf.dispatchFetch(key, {
-		method: "PUT",
-		headers: { "Test-Response-Cache-Control": "max-age=3600" },
-		body: array,
+	resToCache = new Response(array, {
+		headers: { "Cache-Control": "max-age=3600" },
 	});
-	res = await t.context.mf.dispatchFetch(key);
+	await cache.put(key, resToCache);
+	res = await cache.match(key);
+	assert(res !== undefined);
 	t.is(res.status, 200);
 	t.deepEqual(new Uint8Array(await res.arrayBuffer()), array);

 	// Check caching buffered body
-	await t.context.mf.dispatchFetch(key, {
+	await t.context.mf.dispatchFetch("http://localhost/put-buffered", {
 		method: "PUT",
-		headers: {
-			"Test-Buffer": "1",
-			"Test-Response-Cache-Control": "max-age=3600",
-		},
-		body: "body",
 	});
-	res = await t.context.mf.dispatchFetch(key);
+	res = await cache.match(key);
+	assert(res !== undefined);
 	t.is(res.status, 200);
-	t.is(await res.text(), "body");
+	t.is(await res.text(), "buffered");
 });

 test("match returns nothing on cache miss", async (t) => {
+	const cache = t.context.caches.default;
 	const key = "http://localhost/cache-miss";
-	const res = await t.context.mf.dispatchFetch(key);
-	t.is(res.status, 404);
-	t.is(await res.text(), "");
+	const res = await cache.match(key);
+	t.is(res, undefined);
 });

 test("match respects If-None-Match header", async (t) => {
+	const cache = t.context.caches.default;
 	const key = "http://localhost/cache-if-none-match";
-	await t.context.mf.dispatchFetch(key, {
-		method: "PUT",
-		headers: {
-			"Test-Response-ETag": '"thing"',
-			"Test-Response-Cache-Control": "max-age=3600",
-		},
-		body: "body",
+	const resToCache = new Response("body", {
+		headers: { ETag: '"thing"', "Cache-Control": "max-age=3600" },
 	});
+	await cache.put(key, resToCache);

 	const ifNoneMatch = (value: string) =>
-		t.context.mf.dispatchFetch(key, { headers: { "If-None-Match": value } });
+		cache.match(new Request(key, { headers: { "If-None-Match": value } }));

 	// Check returns 304 only if an ETag in `If-None-Match` matches
 	let res = await ifNoneMatch('"thing"');
-	t.is(res.status, 304);
+	t.is(res?.status, 304);
 	res = await ifNoneMatch(' W/"thing" ');
-	t.is(res.status, 304);
+	t.is(res?.status, 304);
 	res = await ifNoneMatch('"not the thing"');
-	t.is(res.status, 200);
+	t.is(res?.status, 200);
 	res = await ifNoneMatch(
 		'"not the thing", "thing" , W/"still not the thing"'
 	);
-	t.is(res.status, 304);
+	t.is(res?.status, 304);
 	res = await ifNoneMatch("*");
-
t.is(res.status, 304); + t.is(res?.status, 304); res = await ifNoneMatch(" * "); - t.is(res.status, 304); + t.is(res?.status, 304); }); test("match respects If-Modified-Since header", async (t) => { + const cache = t.context.caches.default; const key = "http://localhost/cache-if-modified-since"; - await t.context.mf.dispatchFetch(key, { - method: "PUT", + const resToCache = new Response("body", { headers: { - "Test-Response-Last-Modified": "Tue, 13 Sep 2022 12:00:00 GMT", - "Test-Response-Cache-Control": "max-age=3600", + "Last-Modified": "Tue, 13 Sep 2022 12:00:00 GMT", + "Cache-Control": "max-age=3600", }, - body: "body", }); + await cache.put(key, resToCache); const ifModifiedSince = (value: string) => - t.context.mf.dispatchFetch(key, { - headers: { "If-Modified-Since": value }, - }); + cache.match(new Request(key, { headers: { "If-Modified-Since": value } })); // Check returns 200 if modified after `If-Modified-Since` let res = await ifModifiedSince("Tue, 13 Sep 2022 11:00:00 GMT"); - t.is(res.status, 200); + t.is(res?.status, 200); // Check returns 304 if modified on `If-Modified-Since` res = await ifModifiedSince("Tue, 13 Sep 2022 12:00:00 GMT"); - t.is(res.status, 304); + t.is(res?.status, 304); // Check returns 304 if modified before `If-Modified-Since` res = await ifModifiedSince("Tue, 13 Sep 2022 13:00:00 GMT"); - t.is(res.status, 304); + t.is(res?.status, 304); // Check returns 200 if `If-Modified-Since` is not a "valid" UTC date res = await ifModifiedSince("13 Sep 2022 13:00:00 GMT"); - t.is(res.status, 200); + t.is(res?.status, 200); }); test("match respects Range header", async (t) => { + const cache = t.context.caches.default; const key = "http://localhost/cache-range"; - await t.context.mf.dispatchFetch(key, { - method: "PUT", + const resToCache = new Response("0123456789", { headers: { - "Test-Response-Content-Length": "10", - "Test-Response-Cache-Control": "max-age=3600", + "Content-Length": "10", + "Content-Type": "text/plain", + "Cache-Control": "max-age=3600", }, - body: "0123456789", }); + await cache.put(key, resToCache); // Check with single range - let res = await t.context.mf.dispatchFetch(key, { - headers: { Range: "bytes=2-4" }, - }); + let res = await cache.match( + new Request(key, { headers: { Range: "bytes=2-4" } }) + ); + assert(res !== undefined); t.is(res.status, 206); t.is(res.headers.get("Content-Length"), "3"); t.is(res.headers.get("Cache-Control"), "max-age=3600"); @@ -187,9 +158,10 @@ test("match respects Range header", async (t) => { t.is(await res.text(), "234"); // Check with multiple ranges - res = await t.context.mf.dispatchFetch(key, { - headers: { Range: "bytes=1-3,5-6" }, - }); + res = await cache.match( + new Request(key, { headers: { Range: "bytes=1-3,5-6" } }) + ); + assert(res !== undefined); t.is(res.status, 206); t.is(res.headers.get("Cache-Control"), "max-age=3600"); t.is(res.headers.get("CF-Cache-Status"), "HIT"); @@ -201,10 +173,12 @@ test("match respects Range header", async (t) => { await res.text(), [ `--${boundary}`, + "Content-Type: text/plain", "Content-Range: bytes 1-3/10", "", "123", `--${boundary}`, + "Content-Type: text/plain", "Content-Range: bytes 5-6/10", "", "56", @@ -213,9 +187,10 @@ test("match respects Range header", async (t) => { ); // Check with unsatisfiable range - res = await t.context.mf.dispatchFetch(key, { - headers: { Range: "bytes=15-" }, - }); + res = await cache.match( + new Request(key, { headers: { Range: "bytes=15-" } }) + ); + assert(res !== undefined); t.is(res.status, 416); }); @@ -224,6 +199,8 @@ 
const expireMacro = test.macro({ return `expires after ${providedTitle}`; }, async exec(t, opts: { headers: HeadersInit; expectedTtl: number }) { + const cache = t.context.caches.default; + // Reset clock to known time, restoring afterwards. // Note this macro must be used with `test.serial` to avoid races. const originalTimestamp = t.context.timers.timestamp; @@ -231,36 +208,32 @@ const expireMacro = test.macro({ t.context.timers.timestamp = 1_000_000; // 1000s const key = "http://localhost/cache-expire"; - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: opts.headers, - body: "body", - }); + await cache.put(key, new Response("body", { headers: opts.headers })); - let res = await t.context.mf.dispatchFetch(key); - t.is(res.status, 200); + let res = await cache.match(key); + t.is(res?.status, 200); t.context.timers.timestamp += opts.expectedTtl / 2; - res = await t.context.mf.dispatchFetch(key); - t.is(res.status, 200); + res = await cache.match(key); + t.is(res?.status, 200); t.context.timers.timestamp += opts.expectedTtl / 2; - res = await t.context.mf.dispatchFetch(key); - t.is(res.status, 404); + res = await cache.match(key); + t.is(res, undefined); }, }); test.serial("Expires", expireMacro, { headers: { - "Test-Response-Expires": new Date(1000000 + 2000).toUTCString(), + Expires: new Date(1000000 + 2000).toUTCString(), }, expectedTtl: 2000, }); test.serial("Cache-Control's max-age", expireMacro, { - headers: { "Test-Response-Cache-Control": "max-age=1" }, + headers: { "Cache-Control": "max-age=1" }, expectedTtl: 1000, }); test.serial("Cache-Control's s-maxage", expireMacro, { - headers: { "Test-Response-Cache-Control": "s-maxage=1, max-age=10" }, + headers: { "Cache-Control": "s-maxage=1, max-age=10" }, expectedTtl: 1000, }); @@ -269,6 +242,8 @@ const isCachedMacro = test.macro({ return `put ${providedTitle}`; }, async exec(t, opts: { headers: Record; cached: boolean }) { + const cache = t.context.caches.default; + // Use different key for each invocation of this macro const headersHash = crypto .createHash("sha1") @@ -277,32 +252,28 @@ const isCachedMacro = test.macro({ const key = `http://localhost/cache-is-cached-${headersHash}`; const expires = new Date(t.context.timers.timestamp + 2000).toUTCString(); - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { - ...opts.headers, - "Test-Response-Expires": expires, - }, - body: "body", + const resToCache = new Response("body", { + headers: { ...opts.headers, Expires: expires }, }); - const res = await t.context.mf.dispatchFetch(key); - t.is(res.status, opts.cached ? 200 : 404); + await cache.put(key, resToCache); + const res = await cache.match(key); + t.is(res?.status, opts.cached ? 
200 : undefined); }, }); test("does not cache with private Cache-Control", isCachedMacro, { - headers: { "Test-Response-Cache-Control": "private" }, + headers: { "Cache-Control": "private" }, cached: false, }); test("does not cache with no-store Cache-Control", isCachedMacro, { - headers: { "Test-Response-Cache-Control": "no-store" }, + headers: { "Cache-Control": "no-store" }, cached: false, }); test("does not cache with no-cache Cache-Control", isCachedMacro, { - headers: { "Test-Response-Cache-Control": "no-cache" }, + headers: { "Cache-Control": "no-cache" }, cached: false, }); test("does not cache with Set-Cookie", isCachedMacro, { - headers: { "Test-Response-Set-Cookie": "key=value" }, + headers: { "Set-Cookie": "key=value" }, cached: false, }); test( @@ -310,97 +281,79 @@ test( isCachedMacro, { headers: { - "Test-Response-Cache-Control": "private=set-cookie", - "Test-Response-Set-Cookie": "key=value", + "Cache-Control": "private=set-cookie", + "Set-Cookie": "key=value", }, cached: true, } ); test("delete returns if deleted", async (t) => { + const cache = t.context.caches.default; const key = "http://localhost/cache-delete"; - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { "Test-Response-Cache-Control": "max-age=3600" }, - body: "body", + const resToCache = new Response("body", { + headers: { "Cache-Control": "max-age=3600" }, }); + await cache.put(key, resToCache); // Check first delete deletes - let res = await t.context.mf.dispatchFetch(key, { method: "DELETE" }); - t.is(res.status, 204); + let deleted = await cache.delete(key); + t.true(deleted); // Check subsequent deletes don't match - res = await t.context.mf.dispatchFetch(key, { method: "DELETE" }); - t.is(res.status, 404); + deleted = await cache.delete(key); + t.false(deleted); }); test("operations respect cf.cacheKey", async (t) => { + const cache = t.context.caches.default; const key = "http://localhost/cache-cf-key-unused"; // Check put respects `cf.cacheKey` - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { - "Test-CF-Cache-Key": "1", - "Test-Response-Cache-Control": "max-age=3600", - }, - body: "body1", + const key1 = new Request(key, { cf: { cacheKey: "1" } }); + const key2 = new Request(key, { cf: { cacheKey: "2" } }); + const resToCache1 = new Response("body1", { + headers: { "Cache-Control": "max-age=3600" }, }); - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { - "Test-CF-Cache-Key": "2", - "Test-Response-Cache-Control": "max-age=3600", - }, - body: "body2", + const resToCache2 = new Response("body2", { + headers: { "Cache-Control": "max-age=3600" }, }); + await cache.put(key1, resToCache1); + await cache.put(key2, resToCache2); // Check match respects `cf.cacheKey` - let res1 = await t.context.mf.dispatchFetch(key, { - headers: { "Test-CF-Cache-Key": "1" }, - }); - let res2 = await t.context.mf.dispatchFetch(key, { - headers: { "Test-CF-Cache-Key": "2" }, - }); - t.is(await res1.text(), "body1"); - t.is(await res2.text(), "body2"); + const res1 = await cache.match(key1); + t.is(await res1?.text(), "body1"); + const res2 = await cache.match(key2); + t.is(await res2?.text(), "body2"); // Check delete respects `cf.cacheKey` - res1 = await t.context.mf.dispatchFetch(key, { - method: "DELETE", - headers: { "Test-CF-Cache-Key": "1" }, - }); - res2 = await t.context.mf.dispatchFetch(key, { - method: "DELETE", - headers: { "Test-CF-Cache-Key": "2" }, - }); - t.is(res1.status, 204); - t.is(res2.status, 204); + const deleted1 = await cache.delete(key1); + 
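// `Cache#delete()` resolves with `true` only if a matching cached response
+	// existed and was removed, so each `cf.cacheKey` variant deletes independently
+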
t.true(deleted1); + const deleted2 = await cache.delete(key2); + t.true(deleted2); }); test.serial("operations log warning on workers.dev subdomain", async (t) => { // Set option, then reset after test await t.context.setOptions({ cacheWarnUsage: true }); t.teardown(() => t.context.setOptions({})); + t.context.caches = await t.context.mf.getCaches(); + const cache = t.context.caches.default; const key = "http://localhost/cache-workers-dev-warning"; t.context.log.logs = []; - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { "Test-Response-Cache-Control": "max-age=3600" }, - body: "body", + const resToCache = new Response("body", { + headers: { "Cache-Control": "max-age=3600" }, }); + await cache.put(key, resToCache.clone()); t.deepEqual(t.context.log.logsAtLevel(LogLevel.WARN), [ "Cache operations will have no impact if you deploy to a workers.dev subdomain!", ]); // Check only warns once t.context.log.logs = []; - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { "Test-Response-Cache-Control": "max-age=3600" }, - body: "body", - }); + await cache.put(key, resToCache); t.deepEqual(t.context.log.logsAtLevel(LogLevel.WARN), []); }); test.serial("operations persist cached data", async (t) => { @@ -412,27 +365,28 @@ test.serial("operations persist cached data", async (t) => { // Set option, then reset after test await t.context.setOptions({ cachePersist: tmp }); t.teardown(() => t.context.setOptions({})); + t.context.caches = await t.context.mf.getCaches(); + const cache = t.context.caches.default; const key = "http://localhost/cache-persist"; // Check put respects persist - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { "Test-Response-Cache-Control": "max-age=3600" }, - body: "body", + const resToCache = new Response("body", { + headers: { "Cache-Control": "max-age=3600" }, }); + await cache.put(key, resToCache); let stored = await kvStorage.get(key); assert(stored?.value !== undefined); t.deepEqual(await text(stored.value), "body"); // Check match respects persist - let res = await t.context.mf.dispatchFetch(key); - t.is(res.status, 200); - t.is(await res.text(), "body"); + const res = await cache.match(key); + t.is(res?.status, 200); + t.is(await res?.text(), "body"); // Check delete respects persist - res = await t.context.mf.dispatchFetch(key, { method: "DELETE" }); - t.is(res.status, 204); + const deleted = await cache.delete(key); + t.true(deleted); stored = await kvStorage.get(key); t.is(stored, null); }); @@ -440,77 +394,51 @@ test.serial("operations are no-ops when caching disabled", async (t) => { // Set option, then reset after test await t.context.setOptions({ cache: false }); t.teardown(() => t.context.setOptions({})); + t.context.caches = await t.context.mf.getCaches(); + const cache = t.context.caches.default; const key = "http://localhost/cache-disabled"; // Check match never matches - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { "Test-Response-Cache-Control": "max-age=3600" }, - body: "body", + const resToCache = new Response("body", { + headers: { "Cache-Control": "max-age=3600" }, }); - let res = await t.context.mf.dispatchFetch(key); - t.is(res.status, 404); + await cache.put(key, resToCache.clone()); + const res = await cache.match(key); + t.is(res, undefined); // Check delete never deletes - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { "Test-Response-Cache-Control": "max-age=3600" }, - body: "body", - }); - res = await t.context.mf.dispatchFetch(key, { method: 
"DELETE" }); - t.is(res.status, 404); + await cache.put(key, resToCache); + const deleted = await cache.delete(key); + t.false(deleted); }); test("default and named caches are disjoint", async (t) => { const key = "http://localhost/cache-disjoint"; + const defaultCache = t.context.caches.default; + const namedCache1 = await t.context.caches.open("1"); + const namedCache2 = await t.context.caches.open("2"); // Check put respects cache name - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { "Test-Response-Cache-Control": "max-age=3600" }, - body: "bodyDefault", - }); - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { - "Test-Cache-Name": "1", - "Test-Response-Cache-Control": "max-age=3600", - }, - body: "body1", - }); - await t.context.mf.dispatchFetch(key, { - method: "PUT", - headers: { - "Test-Cache-Name": "2", - "Test-Response-Cache-Control": "max-age=3600", - }, - body: "body2", - }); + const init: RequestInit = { headers: { "Cache-Control": "max-age=3600" } }; + await defaultCache.put(key, new Response("bodyDefault", init)); + await namedCache1.put(key, new Response("body1", init)); + await namedCache2.put(key, new Response("body2", init)); // Check match respects cache name - let resDefault = await t.context.mf.dispatchFetch(key); - let res1 = await t.context.mf.dispatchFetch(key, { - headers: { "Test-Cache-Name": "1" }, - }); - let res2 = await t.context.mf.dispatchFetch(key, { - headers: { "Test-Cache-Name": "2" }, - }); - t.is(await resDefault.text(), "bodyDefault"); - t.is(await res1.text(), "body1"); - t.is(await res2.text(), "body2"); + const resDefault = await defaultCache.match(key); + const res1 = await namedCache1.match(key); + const res2 = await namedCache2.match(key); + + t.is(await resDefault?.text(), "bodyDefault"); + t.is(await res1?.text(), "body1"); + t.is(await res2?.text(), "body2"); // Check delete respects cache name - resDefault = await t.context.mf.dispatchFetch(key, { method: "DELETE" }); - res1 = await t.context.mf.dispatchFetch(key, { - method: "DELETE", - headers: { "Test-Cache-Name": "1" }, - }); - res2 = await t.context.mf.dispatchFetch(key, { - method: "DELETE", - headers: { "Test-Cache-Name": "2" }, - }); - t.is(resDefault.status, 204); - t.is(res1.status, 204); - t.is(res2.status, 204); + const deletedDefault = await defaultCache.delete(key); + const deleted1 = await namedCache1.delete(key); + const deleted2 = await namedCache2.delete(key); + t.true(deletedDefault); + t.true(deleted1); + t.true(deleted2); }); diff --git a/packages/miniflare/test/plugins/core/errors/index.spec.ts b/packages/miniflare/test/plugins/core/errors/index.spec.ts index fccc3ac29..db8271c1d 100644 --- a/packages/miniflare/test/plugins/core/errors/index.spec.ts +++ b/packages/miniflare/test/plugins/core/errors/index.spec.ts @@ -133,6 +133,7 @@ addEventListener("fetch", (event) => { }, ], }); + t.teardown(() => mf.dispose()); // Check service-workers source mapped let error = await t.throwsAsync(mf.dispatchFetch("http://localhost"), { diff --git a/packages/miniflare/test/plugins/core/proxy/client.spec.ts b/packages/miniflare/test/plugins/core/proxy/client.spec.ts new file mode 100644 index 000000000..e610450ae --- /dev/null +++ b/packages/miniflare/test/plugins/core/proxy/client.spec.ts @@ -0,0 +1,217 @@ +import assert from "assert"; +import { Blob } from "buffer"; +import { text } from "stream/consumers"; +import { ReadableStream } from "stream/web"; +import util from "util"; +import type { Fetcher } from 
"@cloudflare/workers-types/experimental"; +import test, { ThrowsExpectation } from "ava"; +import { + DeferredPromise, + File, + MessageEvent, + Miniflare, + ReplaceWorkersTypes, + Response, + WebSocketPair, +} from "miniflare"; + +// This file tests API proxy edge cases. Cache, D1, Durable Object and R2 tests +// make extensive use of the API proxy, testing their specific special cases. + +const nullScript = + 'addEventListener("fetch", (event) => event.respondWith(new Response(null, { status: 404 })));'; + +test("ProxyClient: supports service bindings with WebSockets", async (t) => { + const mf = new Miniflare({ + script: nullScript, + serviceBindings: { + CUSTOM() { + const { 0: webSocket1, 1: webSocket2 } = new WebSocketPair(); + webSocket1.accept(); + webSocket1.addEventListener("message", (event) => { + webSocket1.send(`echo:${event.data}`); + }); + return new Response(null, { status: 101, webSocket: webSocket2 }); + }, + }, + }); + t.teardown(() => mf.dispose()); + + const { CUSTOM } = await mf.getBindings<{ + CUSTOM: ReplaceWorkersTypes; + }>(); + + const res = await CUSTOM.fetch("http://placeholder/", { + headers: { Upgrade: "websocket" }, + }); + assert(res.webSocket !== null); + const eventPromise = new DeferredPromise(); + res.webSocket.addEventListener("message", eventPromise.resolve); + res.webSocket.accept(); + res.webSocket.send("hello"); + const event = await eventPromise; + t.is(event.data, "echo:hello"); +}); + +test("ProxyClient: supports serialising multiple ReadableStreams, Blobs and Files", async (t) => { + const mf = new Miniflare({ script: nullScript }); + t.teardown(() => mf.dispose()); + + const client = await mf._getProxyClient(); + const IDENTITY = client.env.IDENTITY as { + asyncIdentity(...args: Args): Promise; + }; + + // Test serialising multiple ReadableStreams + const streamResult = await IDENTITY.asyncIdentity( + new Blob(["hello"]).stream(), + new Blob(["abc"]).stream(), + new Blob(["123"]).stream() + ); + const streamTexts = await Promise.all(streamResult.map(text)); + t.deepEqual(streamTexts, ["hello", "abc", "123"]); + + // Test serialising single Blob + const [blobResult] = await IDENTITY.asyncIdentity( + new Blob(["xyz"], { type: "text/plain" }) + ); + t.is(blobResult.type, "text/plain"); + t.is(await blobResult.text(), "xyz"); + + // Test serialising ReadableStream, Blob and File + const allResult = await IDENTITY.asyncIdentity( + new Blob(["no type"]), + new Blob(["stream"]).stream(), + new File(["text file"], "text.txt", { + type: "text/plain", + lastModified: 1000, + }) + ); + t.false(allResult[0] instanceof File); + t.true(allResult[0] instanceof Blob); + t.is(await allResult[0].text(), "no type"); + t.true(allResult[1] instanceof ReadableStream); + t.is(await text(allResult[1]), "stream"); + t.true(allResult[2] instanceof File); + t.is(allResult[2].type, "text/plain"); + t.is(allResult[2].lastModified, 1000); + t.is(await allResult[2].text(), "text file"); +}); +test("ProxyClient: poisons dependent proxies after setOptions()/dispose()", async (t) => { + const mf = new Miniflare({ script: nullScript }); + let disposed = false; + t.teardown(() => { + if (!disposed) return mf.dispose(); + }); + let caches = await mf.getCaches(); + let defaultCache = caches.default; + let namedCache = await caches.open("name"); + + const key = "http://localhost"; + await defaultCache.match(key); + + await mf.setOptions({ script: nullScript }); + + const expectations: ThrowsExpectation = { + message: + "Attempted to use poisoned stub. 
Stubs to runtime objects must be re-created after calling `Miniflare#setOptions()` or `Miniflare#dispose()`.", + }; + t.throws(() => caches.default, expectations); + t.throws(() => defaultCache.match(key), expectations); + t.throws(() => namedCache.match(key), expectations); + + caches = await mf.getCaches(); + defaultCache = caches.default; + namedCache = await caches.open("name"); + + await defaultCache.match(key); + + await mf.dispose(); + disposed = true; + t.throws(() => caches.default, expectations); + t.throws(() => defaultCache.match(key), expectations); + t.throws(() => namedCache.match(key), expectations); +}); +test("ProxyClient: logging proxies provides useful information", async (t) => { + const mf = new Miniflare({ script: nullScript }); + t.teardown(() => mf.dispose()); + + const caches = await mf.getCaches(); + const inspectOpts: util.InspectOptions = { colors: false }; + t.is( + util.inspect(caches, inspectOpts), + "ProxyStub { name: 'CacheStorage', poisoned: false }" + ); + t.is(util.inspect(caches.open, inspectOpts), "[Function: open]"); +}); + +test("ProxyClient: stack traces don't include internal implementation", async (t) => { + function hasStack(value: unknown): value is { stack: string } { + return ( + typeof value === "object" && + value !== null && + "stack" in value && + typeof value.stack === "string" + ); + } + + const mf = new Miniflare({ + modules: true, + script: `export class DurableObject {} + export default { + fetch() { return new Response(null, { status: 404 }); } + }`, + durableObjects: { OBJECT: "DurableObject" }, + // Make sure asynchronous functions are rejecting, not throwing: + // https://developers.cloudflare.com/workers/configuration/compatibility-dates/#do-not-throw-from-async-functions + compatibilityFlags: ["capture_async_api_throws"], + }); + t.teardown(() => mf.dispose()); + + const ns = await mf.getDurableObjectNamespace("OBJECT"); + const caches = await mf.getCaches(); + + function syncUserFunction() { + try { + ns.idFromString("bad id"); + } catch (e) { + assert(hasStack(e)); + t.regex(e.stack, /syncUserFunction/); + t.notRegex(e.stack, /ProxyStubHandler/); + } + } + syncUserFunction(); + + async function asyncUserFunction() { + try { + await caches.default.match("bad url"); + t.fail(); + } catch (e) { + assert(hasStack(e)); + t.regex(e.stack, /asyncUserFunction/); + t.notRegex(e.stack, /ProxyStubHandler/); + } + } + await asyncUserFunction(); +}); +test("ProxyClient: can access ReadableStream property multiple times", async (t) => { + const mf = new Miniflare({ script: nullScript, r2Buckets: ["BUCKET"] }); + t.teardown(() => mf.dispose()); + + const bucket = await mf.getR2Bucket("BUCKET"); + await bucket.put("key", "value"); + const objectBody = await bucket.get("key"); + assert(objectBody != null); + t.not(objectBody.body, null); // 1st access + t.is(await text(objectBody.body), "value"); // 2nd access +}); +test("ProxyClient: returns empty ReadableStream synchronously", async (t) => { + const mf = new Miniflare({ script: nullScript, r2Buckets: ["BUCKET"] }); + t.teardown(() => mf.dispose()); + + const bucket = await mf.getR2Bucket("BUCKET"); + await bucket.put("key", ""); + const objectBody = await bucket.get("key"); + assert(objectBody != null); + t.is(await text(objectBody.body), ""); // Synchronous empty stream access +}); diff --git a/packages/miniflare/test/plugins/d1/index.spec.ts b/packages/miniflare/test/plugins/d1/index.spec.ts index 39a5973f3..f0d8da2ef 100644 --- a/packages/miniflare/test/plugins/d1/index.spec.ts +++ 
b/packages/miniflare/test/plugins/d1/index.spec.ts @@ -1,7 +1,6 @@ -import path from "path"; -import { FIXTURES_PATH } from "./shared"; -import suite from "./suite"; +import { setupTest } from "./test"; // Post-wrangler 3.3, D1 bindings work directly, so use the input file -// from the fixture, and no prefixc on the binding name -suite("DB", path.join(FIXTURES_PATH, "d1", "worker.mjs")); +// from the fixture, and no prefix on the binding name +setupTest("DB", "worker.mjs", (mf) => mf.getD1Database("DB")); +require("./suite"); diff --git a/packages/miniflare/test/plugins/d1/shared.ts b/packages/miniflare/test/plugins/d1/index.with-wrangler-shim.spec.ts similarity index 54% rename from packages/miniflare/test/plugins/d1/shared.ts rename to packages/miniflare/test/plugins/d1/index.with-wrangler-shim.spec.ts index 873643f57..1fa9f760d 100644 --- a/packages/miniflare/test/plugins/d1/shared.ts +++ b/packages/miniflare/test/plugins/d1/index.with-wrangler-shim.spec.ts @@ -1,31 +1,16 @@ -import path from "path"; -import { +import type { D1Database, + D1ExecResult, D1PreparedStatement, D1Result, -} from "@cloudflare/workers-types/experimental/index"; -import { Miniflare } from "miniflare"; -import { MiniflareTestContext } from "../../test-shared"; - -export const FIXTURES_PATH = path.resolve( - __dirname, - "..", - "..", - "..", - "..", - "test", - "fixtures" -); - -interface D1ExecResult { - count: number; - duration: number; -} +} from "@cloudflare/workers-types/experimental"; +import type { Miniflare } from "miniflare"; +import { setupTest } from "./test"; const kSend = Symbol("kSend"); -// D1-like API for sending requests to the fixture worker. These tests were -// ported from Miniflare 2, which provided this API natively. +// D1-like API for sending requests to the fixture worker. Note we can't use the +// API proxy here, as without the wrapped binding we only get a `Fetcher`. export class TestD1Database implements D1Database { constructor(private readonly mf: Miniflare) {} @@ -74,7 +59,6 @@ class TestD1PreparedStatement implements D1PreparedStatement { return new TestD1PreparedStatement(this.db, this.sql, params); } - // TODO(now): fix, this may also return null first(colName?: string): Promise { return this.db[kSend](`/prepare/first/${colName ?? 
""}`, this); } @@ -92,30 +76,11 @@ class TestD1PreparedStatement implements D1PreparedStatement { } } -export const SCHEMA = (tableColours: string, tableKitchenSink: string) => ` -CREATE TABLE ${tableColours} (id INTEGER PRIMARY KEY, name TEXT NOT NULL, rgb INTEGER NOT NULL); -CREATE TABLE ${tableKitchenSink} (id INTEGER PRIMARY KEY, int INTEGER, real REAL, text TEXT, blob BLOB); -INSERT INTO ${tableColours} (id, name, rgb) VALUES (1, 'red', 0xff0000); -INSERT INTO ${tableColours} (id, name, rgb) VALUES (2, 'green', 0x00ff00); -INSERT INTO ${tableColours} (id, name, rgb) VALUES (3, 'blue', 0x0000ff); -`; - -export interface ColourRow { - id: number; - name: string; - rgb: number; -} - -export interface KitchenSinkRow { - id: number; - int: number | null; - real: number | null; - text: string | null; - blob: number[] | null; -} - -export interface Context extends MiniflareTestContext { - db: TestD1Database; // TODO(now): swap this back to `D1Database` once types fixed - tableColours: string; - tableKitchenSink: string; -} +// Pre-wrangler 3.3, D1 bindings needed a local compilation step, so use +// the output version of the fixture, and the appropriately prefixed binding name +setupTest( + "__D1_BETA__DB", + "worker.dist.mjs", + async (mf) => new TestD1Database(mf) +); +require("./suite"); diff --git a/packages/miniflare/test/plugins/d1/suite.ts b/packages/miniflare/test/plugins/d1/suite.ts index 950c98351..0bedb02cf 100644 --- a/packages/miniflare/test/plugins/d1/suite.ts +++ b/packages/miniflare/test/plugins/d1/suite.ts @@ -2,444 +2,445 @@ import assert from "assert"; import fs from "fs/promises"; import path from "path"; import Database from "better-sqlite3"; -import { MiniflareOptions, createFileStorage } from "miniflare"; -import { miniflareTest, useTmp, utf8Encode } from "../../test-shared"; -import { - ColourRow, - Context, - KitchenSinkRow, - SCHEMA, - TestD1Database, -} from "./shared"; - -export default (binding: string, WORKER_PATH: string) => { - const opts: MiniflareOptions = { - modules: true, - scriptPath: WORKER_PATH, - d1Databases: { [binding]: "db" }, - }; - const test = miniflareTest(opts); - test.beforeEach(async (t) => { - const ns = `${Date.now()}_${Math.floor( - Math.random() * Number.MAX_SAFE_INTEGER - )}`; - const tableColours = `colours_${ns}`; - const tableKitchenSink = `kitchen_sink_${ns}`; - - const db = new TestD1Database(t.context.mf); - await db.exec(SCHEMA(tableColours, tableKitchenSink)); - - t.context.db = db; - t.context.tableColours = tableColours; - t.context.tableKitchenSink = tableKitchenSink; +import { createFileStorage } from "miniflare"; +import { useTmp, utf8Encode } from "../../test-shared"; +import { binding, getDatabase, opts, test } from "./test"; + +export const SCHEMA = (tableColours: string, tableKitchenSink: string) => ` +CREATE TABLE ${tableColours} (id INTEGER PRIMARY KEY, name TEXT NOT NULL, rgb INTEGER NOT NULL); +CREATE TABLE ${tableKitchenSink} (id INTEGER PRIMARY KEY, int INTEGER, real REAL, text TEXT, blob BLOB); +INSERT INTO ${tableColours} (id, name, rgb) VALUES (1, 'red', 0xff0000); +INSERT INTO ${tableColours} (id, name, rgb) VALUES (2, 'green', 0x00ff00); +INSERT INTO ${tableColours} (id, name, rgb) VALUES (3, 'blue', 0x0000ff); +`; + +export interface ColourRow { + id: number; + name: string; + rgb: number; +} + +export interface KitchenSinkRow { + id: number; + int: number | null; + real: number | null; + text: string | null; + blob: number[] | null; +} + +test.beforeEach(async (t) => { + const ns = 
`${Date.now()}_${Math.floor( + Math.random() * Number.MAX_SAFE_INTEGER + )}`; + const tableColours = `colours_${ns}`; + const tableKitchenSink = `kitchen_sink_${ns}`; + + const db = await getDatabase(t.context.mf); + await db.exec(SCHEMA(tableColours, tableKitchenSink)); + + t.context.db = db; + t.context.tableColours = tableColours; + t.context.tableKitchenSink = tableKitchenSink; +}); + +function throwCause(promise: Promise): Promise { + return promise.catch((error) => { + assert.strictEqual(error.message, "D1_ERROR"); + assert.notStrictEqual(error.cause, undefined); + throw error.cause; }); - - function throwCause(promise: Promise): Promise { - return promise.catch((error) => { - assert.strictEqual(error.message, "D1_ERROR"); - assert.notStrictEqual(error.cause, undefined); - throw error.cause; - }); - } - - test("D1Database: dump", async (t) => { - const { db, tableColours } = t.context; - const tmp = await useTmp(t); - const buffer = await db.dump(); - - // Load the dumped data as an SQLite database and try query it - const tmpPath = path.join(tmp, "db.sqlite3"); - await fs.writeFile(tmpPath, new Uint8Array(buffer)); - const sqliteDb = new Database(tmpPath); - const results = sqliteDb.prepare(`SELECT name FROM ${tableColours}`).all(); - t.deepEqual(results, [ - { name: "red" }, - { name: "green" }, - { name: "blue" }, - ]); - }); - test("D1Database: batch", async (t) => { - const { db, tableColours } = t.context; - - const insert = db.prepare( - `INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)` - ); - const batchResults = await db.batch>([ - insert.bind(4, "yellow", 0xffff00), - db.prepare(`SELECT name FROM ${tableColours}`), - ]); - t.is(batchResults.length, 2); - t.true(batchResults[0].success); - t.deepEqual(batchResults[0].results, []); - t.true(batchResults[1].success); - const expectedResults = [ - { name: "red" }, - { name: "green" }, - { name: "blue" }, - { name: "yellow" }, - ]; - t.deepEqual(batchResults[1].results, expectedResults); - - // Check error mid-batch rolls-back entire batch - const badInsert = db.prepare( - `PUT IN ${tableColours} (id, name, rgb) VALUES (?, ?, ?)` - ); - await t.throwsAsync( - throwCause( - db.batch([ - insert.bind(5, "purple", 0xff00ff), - badInsert.bind(6, "blurple", 0x5865f2), - insert.bind(7, "cyan", 0x00ffff), - ]) - ), - { message: /syntax error/ } - ); - const result = await db - .prepare(`SELECT name FROM ${tableColours}`) - .all>(); - t.deepEqual(result.results, expectedResults); - }); - test("D1Database: exec", async (t) => { - const { db, tableColours } = t.context; - - // Check with single statement - let execResult = await db.exec( - `UPDATE ${tableColours} SET name = 'Red' WHERE name = 'red'` - ); - t.is(execResult.count, 1); - t.true(execResult.duration > 0); - let result = await db - .prepare(`SELECT name FROM ${tableColours} WHERE name = 'Red'`) - .all>(); - t.deepEqual(result.results, [{ name: "Red" }]); - - // Check with multiple statements - const statements = [ - `UPDATE ${tableColours} SET name = 'Green' WHERE name = 'green'`, - `UPDATE ${tableColours} SET name = 'Blue' WHERE name = 'blue'`, - ].join("\n"); - execResult = await db.exec(statements); - t.is(execResult.count, 2); - t.true(execResult.duration > 0); - result = await db.prepare(`SELECT name FROM ${tableColours}`).all(); - t.deepEqual(result.results, [ - { name: "Red" }, - { name: "Green" }, - { name: "Blue" }, - ]); - }); - - test("D1PreparedStatement: bind", async (t) => { - const { db, tableColours, tableKitchenSink } = t.context; - - // Check with 
all parameter types - const blob = utf8Encode("Walshy"); - const blobArray = Array.from(blob); - await db - .prepare( - `INSERT INTO ${tableKitchenSink} (id, int, real, text, blob) VALUES (?, ?, ?, ?, ?)` - ) - // Preserve `Uint8Array` type through JSON serialisation - .bind(1, 42, 3.141, "🙈", { $type: "Uint8Array", contents: blobArray }) - .run(); - let result = await db - .prepare(`SELECT * FROM ${tableKitchenSink}`) - .all(); - t.deepEqual(result.results, [ - { id: 1, int: 42, real: 3.141, text: "🙈", blob: blobArray }, - ]); - - // Check with null values - await db - .prepare(`UPDATE ${tableKitchenSink} SET blob = ?`) - .bind(null) - .run(); - result = await db.prepare(`SELECT * FROM ${tableKitchenSink}`).all(); - t.deepEqual(result.results, [ - { id: 1, int: 42, real: 3.141, text: "🙈", blob: null }, - ]); - - // Check with multiple statements (should only bind first) - const colourResults = await db - .prepare( - `SELECT * FROM ${tableColours} WHERE name = ?; SELECT * FROM ${tableColours} WHERE id = ?;` - ) - .bind("green") - .all(); - t.is(colourResults.results?.length, 1); - - // Check with numbered parameters (execute and query) - // https://github.com/cloudflare/miniflare/issues/504 - await db - .prepare( - `INSERT INTO ${tableColours} (id, name, rgb) VALUES (?3, ?1, ?2)` - ) - .bind("yellow", 0xffff00, 4) - .run(); - const colourResult = await db - .prepare(`SELECT * FROM ${tableColours} WHERE id = ?1`) - .bind(4) - .first(); - t.deepEqual(colourResult, { id: 4, name: "yellow", rgb: 0xffff00 }); - }); - - // Lots of strange edge cases here... - - test("D1PreparedStatement: first", async (t) => { - const { db, tableColours } = t.context; - - // Check with read statement - const select = await db.prepare(`SELECT * FROM ${tableColours}`); - let result: ColourRow | null = await select.first(); - t.deepEqual(result, { id: 1, name: "red", rgb: 0xff0000 }); - let id: number | null = await select.first("id"); - t.is(id, 1); - - // Check with multiple statements (should only match on first statement) - result = await db - .prepare( - `SELECT * FROM ${tableColours} WHERE name = 'none'; SELECT * FROM ${tableColours} WHERE id = 1;` - ) - .first(); - t.is(result, null); - - // Check with write statement (should actually execute statement) - result = await db - .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`) - .bind(4, "yellow", 0xffff00) - .first(); - t.is(result, null); - id = await db - .prepare(`SELECT id FROM ${tableColours} WHERE name = ?`) - .bind("yellow") - .first("id"); - t.is(id, 4); - }); - test("D1PreparedStatement: run", async (t) => { - const { db, tableColours, tableKitchenSink } = t.context; - - // Check with read statement - await t.throwsAsync( - throwCause(db.prepare(`SELECT * FROM ${tableColours}`).run()), - { message: /Execute returned results - did you mean to call query\?/ } - ); - // Check with read/write statement - await t.throwsAsync( - throwCause( - db - .prepare( - `INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?) 
RETURNING *` - ) - .bind(4, "yellow", 0xffff00) - .run() - ), - { message: /Execute returned results - did you mean to call query\?/ } - ); - - // Check with multiple statements (should only execute first statement) - let result = await db - .prepare( - `INSERT INTO ${tableKitchenSink} (id) VALUES (1); INSERT INTO ${tableKitchenSink} (id) VALUES (2);` - ) - .run(); - t.true(result.success); - const results = await db - .prepare(`SELECT id FROM ${tableKitchenSink}`) - .all>(); - t.deepEqual(results.results, [{ id: 1 }]); - - // Check with write statement - result = await db - .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`) - .bind(4, "yellow", 0xffff00) - .run(); - t.true(result.meta.duration > 0); - t.deepEqual(result, { - results: [], - success: true, - meta: { - // Don't know duration, so just match on returned value asserted > 0 - duration: result.meta.duration, - last_row_id: 4, - changes: 1, - served_by: "miniflare.db", - internal_stats: null, - }, - }); - }); - test("D1PreparedStatement: all", async (t) => { - const { db, tableColours } = t.context; - - // Check with read statement - let result = await db - .prepare(`SELECT * FROM ${tableColours}`) - .all(); - t.true(result.meta.duration > 0); - t.deepEqual(result, { - results: [ - { id: 1, name: "red", rgb: 0xff0000 }, - { id: 2, name: "green", rgb: 0x00ff00 }, - { id: 3, name: "blue", rgb: 0x0000ff }, - ], - success: true, - meta: { - // Don't know duration, so just match on returned value asserted > 0 - duration: result.meta.duration, - last_row_id: 0, - changes: 0, - served_by: "miniflare.db", - internal_stats: null, - }, - }); - - // Check with multiple statements (should only return first statement results) - result = await db - .prepare( - `SELECT * FROM ${tableColours} WHERE id = 1; SELECT * FROM ${tableColours} WHERE id = 3;` - ) - .all(); - t.deepEqual(result.results, [{ id: 1, name: "red", rgb: 0xff0000 }]); - - // Check with write statement (should actually execute, but return nothing) - result = await db - .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`) - .bind(4, "yellow", 0xffff00) - .all(); - t.deepEqual(result.results, []); - t.is(result.meta.last_row_id, 4); - t.is(result.meta.changes, 1); - const id = await db - .prepare(`SELECT id FROM ${tableColours} WHERE name = ?`) - .bind("yellow") - .first("id"); - t.is(id, 4); - - // Check with write statement that returns data - result = await db - .prepare( - `INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?) 
RETURNING id` - ) - .bind(5, "orange", 0xff8000) - .all(); - t.deepEqual(result.results, [{ id: 5 }]); - t.is(result.meta.last_row_id, 5); - t.is(result.meta.changes, 1); +} + +test("D1Database: dump", async (t) => { + const { db, tableColours } = t.context; + const tmp = await useTmp(t); + const buffer = await db.dump(); + + // Load the dumped data as an SQLite database and try query it + const tmpPath = path.join(tmp, "db.sqlite3"); + await fs.writeFile(tmpPath, new Uint8Array(buffer)); + const sqliteDb = new Database(tmpPath); + const results = sqliteDb.prepare(`SELECT name FROM ${tableColours}`).all(); + t.deepEqual(results, [{ name: "red" }, { name: "green" }, { name: "blue" }]); +}); +test("D1Database: batch", async (t) => { + const { db, tableColours } = t.context; + + const insert = db.prepare( + `INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)` + ); + const batchResults = await db.batch>([ + insert.bind(4, "yellow", 0xffff00), + db.prepare(`SELECT name FROM ${tableColours}`), + ]); + t.is(batchResults.length, 2); + t.true(batchResults[0].success); + t.deepEqual(batchResults[0].results, []); + t.true(batchResults[1].success); + const expectedResults = [ + { name: "red" }, + { name: "green" }, + { name: "blue" }, + { name: "yellow" }, + ]; + t.deepEqual(batchResults[1].results, expectedResults); + + // Check error mid-batch rolls-back entire batch + const badInsert = db.prepare( + `PUT IN ${tableColours} (id, name, rgb) VALUES (?, ?, ?)` + ); + await t.throwsAsync( + throwCause( + db.batch([ + insert.bind(5, "purple", 0xff00ff), + badInsert.bind(6, "blurple", 0x5865f2), + insert.bind(7, "cyan", 0x00ffff), + ]) + ), + { message: /syntax error/ } + ); + const result = await db + .prepare(`SELECT name FROM ${tableColours}`) + .all>(); + t.deepEqual(result.results, expectedResults); +}); +test("D1Database: exec", async (t) => { + const { db, tableColours } = t.context; + + // Check with single statement + let execResult = await db.exec( + `UPDATE ${tableColours} SET name = 'Red' WHERE name = 'red'` + ); + t.is(execResult.count, 1); + t.true(execResult.duration > 0); + let result = await db + .prepare(`SELECT name FROM ${tableColours} WHERE name = 'Red'`) + .all>(); + t.deepEqual(result.results, [{ name: "Red" }]); + + // Check with multiple statements + const statements = [ + `UPDATE ${tableColours} SET name = 'Green' WHERE name = 'green'`, + `UPDATE ${tableColours} SET name = 'Blue' WHERE name = 'blue'`, + ].join("\n"); + execResult = await db.exec(statements); + t.is(execResult.count, 2); + t.true(execResult.duration > 0); + result = await db.prepare(`SELECT name FROM ${tableColours}`).all(); + t.deepEqual(result.results, [ + { name: "Red" }, + { name: "Green" }, + { name: "Blue" }, + ]); +}); + +test("D1PreparedStatement: bind", async (t) => { + const { db, tableColours, tableKitchenSink } = t.context; + + // Check with all parameter types + const blob = utf8Encode("Walshy"); + const blobArray = Array.from(blob); + await db + .prepare( + `INSERT INTO ${tableKitchenSink} (id, int, real, text, blob) VALUES (?, ?, ?, ?, ?)` + ) + // Preserve `Uint8Array` type through JSON serialisation + .bind(1, 42, 3.141, "🙈", blobArray) + .run(); + let result = await db + .prepare(`SELECT * FROM ${tableKitchenSink}`) + .all(); + t.deepEqual(result.results, [ + { id: 1, int: 42, real: 3.141, text: "🙈", blob: blobArray }, + ]); + + // Check with null values + await db.prepare(`UPDATE ${tableKitchenSink} SET blob = ?`).bind(null).run(); + result = await db.prepare(`SELECT * FROM 
${tableKitchenSink}`).all(); + t.deepEqual(result.results, [ + { id: 1, int: 42, real: 3.141, text: "🙈", blob: null }, + ]); + + // Check with multiple statements (should only bind first) + const colourResults = await db + .prepare( + `SELECT * FROM ${tableColours} WHERE name = ?; SELECT * FROM ${tableColours} WHERE id = ?;` + ) + .bind("green") + .all(); + t.is(colourResults.results?.length, 1); + + // Check with numbered parameters (execute and query) + // https://github.com/cloudflare/miniflare/issues/504 + await db + .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?3, ?1, ?2)`) + .bind("yellow", 0xffff00, 4) + .run(); + const colourResult = await db + .prepare(`SELECT * FROM ${tableColours} WHERE id = ?1`) + .bind(4) + .first(); + t.deepEqual(colourResult, { id: 4, name: "yellow", rgb: 0xffff00 }); +}); + +// Lots of strange edge cases here... + +test("D1PreparedStatement: first", async (t) => { + const { db, tableColours } = t.context; + + // Check with read statement + const select = await db.prepare(`SELECT * FROM ${tableColours}`); + let result: ColourRow | null = await select.first(); + t.deepEqual(result, { id: 1, name: "red", rgb: 0xff0000 }); + let id: number | null = await select.first("id"); + t.is(id, 1); + + // Check with multiple statements (should only match on first statement) + result = await db + .prepare( + `SELECT * FROM ${tableColours} WHERE name = 'none'; SELECT * FROM ${tableColours} WHERE id = 1;` + ) + .first(); + t.is(result, null); + + // Check with write statement (should actually execute statement) + result = await db + .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`) + .bind(4, "yellow", 0xffff00) + .first(); + t.is(result, null); + id = await db + .prepare(`SELECT id FROM ${tableColours} WHERE name = ?`) + .bind("yellow") + .first("id"); + t.is(id, 4); +}); +test("D1PreparedStatement: run", async (t) => { + const { db, tableColours, tableKitchenSink } = t.context; + + // Check with read statement + await t.throwsAsync( + throwCause(db.prepare(`SELECT * FROM ${tableColours}`).run()), + { message: /Execute returned results - did you mean to call query\?/ } + ); + // Check with read/write statement + await t.throwsAsync( + throwCause( + db + .prepare( + `INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?) 
+          RETURNING *`
+        )
+        .bind(4, "yellow", 0xffff00)
+        .run()
+    ),
+    { message: /Execute returned results - did you mean to call query\?/ }
+  );
+
+  // Check with multiple statements (should only execute first statement)
+  let result = await db
+    .prepare(
+      `INSERT INTO ${tableKitchenSink} (id) VALUES (1); INSERT INTO ${tableKitchenSink} (id) VALUES (2);`
+    )
+    .run();
+  t.true(result.success);
+  const results = await db
+    .prepare(`SELECT id FROM ${tableKitchenSink}`)
+    .all<Pick<KitchenSinkRow, "id">>();
+  t.deepEqual(results.results, [{ id: 1 }]);
+
+  // Check with write statement
+  result = await db
+    .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`)
+    .bind(4, "yellow", 0xffff00)
+    .run();
+  t.true(result.meta.duration > 0);
+  t.deepEqual(result, {
+    results: [],
+    success: true,
+    meta: {
+      // Don't know duration, so just match on returned value asserted > 0
+      duration: result.meta.duration,
+      last_row_id: 4,
+      changes: 1,
+      served_by: "miniflare.db",
+      internal_stats: null,
+    },
+  });
-  test("D1PreparedStatement: raw", async (t) => {
-    const { db, tableColours } = t.context;
-
-    // Check with read statement
-    type RawColourRow = [/* id */ number, /* name */ string, /* rgb*/ number];
-    let results = await db
-      .prepare(`SELECT * FROM ${tableColours}`)
-      .raw<RawColourRow>();
-    t.deepEqual(results, [
-      [1, "red", 0xff0000],
-      [2, "green", 0x00ff00],
-      [3, "blue", 0x0000ff],
-    ]);
-
-    // Check with multiple statements (should only return first statement results)
-    results = await db
-      .prepare(
-        `SELECT * FROM ${tableColours} WHERE id = 1; SELECT * FROM ${tableColours} WHERE id = 3;`
-      )
-      .raw<RawColourRow>();
-    t.deepEqual(results, [[1, "red", 0xff0000]]);
-
-    // Check with write statement (should actually execute, but return nothing)
-    results = await db
-      .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`)
-      .bind(4, "yellow", 0xffff00)
-      .raw<RawColourRow>();
-    t.deepEqual(results, []);
-    const id = await db
-      .prepare(`SELECT id FROM ${tableColours} WHERE name = ?`)
-      .bind("yellow")
-      .first("id");
-    t.is(id, 4);
+});
+test("D1PreparedStatement: all", async (t) => {
+  const { db, tableColours } = t.context;
+
+  // Check with read statement
+  let result = await db
+    .prepare(`SELECT * FROM ${tableColours}`)
+    .all();
+  t.true(result.meta.duration > 0);
+  t.deepEqual(result, {
+    results: [
+      { id: 1, name: "red", rgb: 0xff0000 },
+      { id: 2, name: "green", rgb: 0x00ff00 },
+      { id: 3, name: "blue", rgb: 0x0000ff },
+    ],
+    success: true,
+    meta: {
+      // Don't know duration, so just match on returned value asserted > 0
+      duration: result.meta.duration,
+      last_row_id: 0,
+      changes: 0,
+      served_by: "miniflare.db",
+      internal_stats: null,
+    },
+  });
-  test.serial("operations persist D1 data", async (t) => {
-    const { db, tableColours, tableKitchenSink } = t.context;
-
-    // Create new temporary file-system persistence directory
-    const tmp = await useTmp(t);
-    const storage = createFileStorage(path.join(tmp, "db"));
-    const sqliteDb = storage.db;
-
-    // Set option, then reset after test
-    await t.context.setOptions({ ...opts, d1Persist: tmp });
-    t.teardown(() => t.context.setOptions(opts));
-
-    // Check execute respects persist
-    await db.exec(SCHEMA(tableColours, tableKitchenSink));
-    await db
-      .prepare(
-        `INSERT INTO ${tableColours} (id, name, rgb) VALUES (4, 'purple', 0xff00ff);`
-      )
-      .run();
-    const result = sqliteDb
-      .prepare(`SELECT name FROM ${tableColours} WHERE id = 4`)
-      .get();
-    t.deepEqual(result, { name: "purple" });
-
-    // Check query respects persist
-    await sqliteDb
-      .prepare(
-        // Is white a colour? ¯\_(ツ)_/¯
-        `INSERT INTO ${tableColours} (id, name, rgb) VALUES (5, 'white', 0xffffff);`
-      )
-      .run();
-    const name = await db
-      .prepare(`SELECT name FROM ${tableColours} WHERE id = 5`)
-      .first("name");
-    t.is(name, "white");
-
-    // Check dump respects persist
-    const buffer = await db.dump();
-    const tmpPath = path.join(tmp, "db-dump.sqlite3");
-    await fs.writeFile(tmpPath, new Uint8Array(buffer));
-    const sqliteDbDump = new Database(tmpPath);
-    const results = sqliteDbDump
-      .prepare(`SELECT name FROM ${tableColours} WHERE id >= 4`)
-      .all();
-    t.deepEqual(results, [{ name: "purple" }, { name: "white" }]);
-  });
-
-  test.serial("operations permit strange database names", async (t) => {
-    const { db, tableColours, tableKitchenSink } = t.context;
-    const tmp = await useTmp(t);
-
-    // Set option, then reset after test
-    const id = "my/ Database";
-    await t.context.setOptions({ ...opts, d1Databases: { [binding]: id } });
-    t.teardown(() => t.context.setOptions(opts));
-
-    // Check basic operations work
-    // a) execute
-    await db.exec(SCHEMA(tableColours, tableKitchenSink));
-    // b) query
-    await db
-      .prepare(
-        `INSERT INTO ${tableColours} (id, name, rgb) VALUES (4, 'pink', 0xff00ff);`
-      )
-      .run();
-    // c) dump
-    const buffer = await db.dump();
-    const tmpPath = path.join(tmp, "db-dump.sqlite3");
-    await fs.writeFile(tmpPath, new Uint8Array(buffer));
-    const sqliteDbDump = new Database(tmpPath);
-    let result = sqliteDbDump
-      .prepare(`SELECT name FROM ${tableColours} WHERE id = 4`)
-      .get();
-    t.deepEqual(result, { name: "pink" });
-
-    // Check stored with correct ID
-    const storage = t.context.mf._getPluginStorage("d1", id);
-    result = storage.db
-      .prepare(`SELECT name FROM ${tableColours} WHERE id = 4`)
-      .get();
-    t.deepEqual(result, { name: "pink" });
-  });
-};
+  // Check with multiple statements (should only return first statement results)
+  result = await db
+    .prepare(
+      `SELECT * FROM ${tableColours} WHERE id = 1; SELECT * FROM ${tableColours} WHERE id = 3;`
+    )
+    .all();
+  t.deepEqual(result.results, [{ id: 1, name: "red", rgb: 0xff0000 }]);
+
+  // Check with write statement (should actually execute, but return nothing)
+  result = await db
+    .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`)
+    .bind(4, "yellow", 0xffff00)
+    .all();
+  t.deepEqual(result.results, []);
+  t.is(result.meta.last_row_id, 4);
+  t.is(result.meta.changes, 1);
+  const id = await db
+    .prepare(`SELECT id FROM ${tableColours} WHERE name = ?`)
+    .bind("yellow")
+    .first("id");
+  t.is(id, 4);
+
+  // Check with write statement that returns data
+  result = await db
+    .prepare(
+      `INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)
+      RETURNING id`
+    )
+    .bind(5, "orange", 0xff8000)
+    .all();
+  t.deepEqual(result.results, [{ id: 5 }]);
+  t.is(result.meta.last_row_id, 5);
+  t.is(result.meta.changes, 1);
+});
+test("D1PreparedStatement: raw", async (t) => {
+  const { db, tableColours } = t.context;
+
+  // Check with read statement
+  type RawColourRow = [/* id */ number, /* name */ string, /* rgb */ number];
+  let results = await db
+    .prepare(`SELECT * FROM ${tableColours}`)
+    .raw<RawColourRow>();
+  t.deepEqual(results, [
+    [1, "red", 0xff0000],
+    [2, "green", 0x00ff00],
+    [3, "blue", 0x0000ff],
+  ]);
+
+  // Check with multiple statements (should only return first statement results)
+  results = await db
+    .prepare(
+      `SELECT * FROM ${tableColours} WHERE id = 1; SELECT * FROM ${tableColours} WHERE id = 3;`
+    )
+    .raw<RawColourRow>();
+  t.deepEqual(results, [[1, "red", 0xff0000]]);
+
+  // Check with write statement (should actually execute, but return nothing)
+  results = await db
+    .prepare(`INSERT INTO ${tableColours} (id, name, rgb) VALUES (?, ?, ?)`)
+    .bind(4, "yellow", 0xffff00)
+    .raw<RawColourRow>();
+  t.deepEqual(results, []);
+  const id = await db
+    .prepare(`SELECT id FROM ${tableColours} WHERE name = ?`)
+    .bind("yellow")
+    .first("id");
+  t.is(id, 4);
+});
+
+test.serial("operations persist D1 data", async (t) => {
+  const { tableColours, tableKitchenSink } = t.context;
+
+  // Create new temporary file-system persistence directory
+  const tmp = await useTmp(t);
+  const storage = createFileStorage(path.join(tmp, "db"));
+  const sqliteDb = storage.db;
+
+  // Set option, then reset after test
+  await t.context.setOptions({ ...opts, d1Persist: tmp });
+  t.teardown(() => t.context.setOptions(opts));
+  const db = await getDatabase(t.context.mf);
+
+  // Check execute respects persist
+  await db.exec(SCHEMA(tableColours, tableKitchenSink));
+  await db
+    .prepare(
+      `INSERT INTO ${tableColours} (id, name, rgb) VALUES (4, 'purple', 0xff00ff);`
+    )
+    .run();
+  const result = sqliteDb
+    .prepare(`SELECT name FROM ${tableColours} WHERE id = 4`)
+    .get();
+  t.deepEqual(result, { name: "purple" });
+
+  // Check query respects persist
+  await sqliteDb
+    .prepare(
+      // Is white a colour? ¯\_(ツ)_/¯
+      `INSERT INTO ${tableColours} (id, name, rgb) VALUES (5, 'white', 0xffffff);`
+    )
+    .run();
+  const name = await db
+    .prepare(`SELECT name FROM ${tableColours} WHERE id = 5`)
+    .first("name");
+  t.is(name, "white");
+
+  // Check dump respects persist
+  const buffer = await db.dump();
+  const tmpPath = path.join(tmp, "db-dump.sqlite3");
+  await fs.writeFile(tmpPath, new Uint8Array(buffer));
+  const sqliteDbDump = new Database(tmpPath);
+  const results = sqliteDbDump
+    .prepare(`SELECT name FROM ${tableColours} WHERE id >= 4`)
+    .all();
+  t.deepEqual(results, [{ name: "purple" }, { name: "white" }]);
+});
+
+test.serial("operations permit strange database names", async (t) => {
+  const { tableColours, tableKitchenSink } = t.context;
+  const tmp = await useTmp(t);
+
+  // Set option, then reset after test
+  const id = "my/ Database";
+  await t.context.setOptions({ ...opts, d1Databases: { [binding]: id } });
+  t.teardown(() => t.context.setOptions(opts));
+  const db = await getDatabase(t.context.mf);
+
+  // Check basic operations work
+  // a) execute
+  await db.exec(SCHEMA(tableColours, tableKitchenSink));
+  // b) query
+  await db
+    .prepare(
+      `INSERT INTO ${tableColours} (id, name, rgb) VALUES (4, 'pink', 0xff00ff);`
+    )
+    .run();
+  // c) dump
+  const buffer = await db.dump();
+  const tmpPath = path.join(tmp, "db-dump.sqlite3");
+  await fs.writeFile(tmpPath, new Uint8Array(buffer));
+  const sqliteDbDump = new Database(tmpPath);
+  let result = sqliteDbDump
+    .prepare(`SELECT name FROM ${tableColours} WHERE id = 4`)
+    .get();
+  t.deepEqual(result, { name: "pink" });
+
+  // Check stored with correct ID
+  const storage = t.context.mf._getPluginStorage("d1", id);
+  result = storage.db
+    .prepare(`SELECT name FROM ${tableColours} WHERE id = 4`)
+    .get();
+  t.deepEqual(result, { name: "pink" });
+});
diff --git a/packages/miniflare/test/plugins/d1/test.ts b/packages/miniflare/test/plugins/d1/test.ts
new file mode 100644
index 000000000..bc429a87e
--- /dev/null
+++ b/packages/miniflare/test/plugins/d1/test.ts
@@ -0,0 +1,41 @@
+import path from "path";
+import type { D1Database } from "@cloudflare/workers-types/experimental";
+import { TestFn } from "ava";
+import type { Miniflare, MiniflareOptions } from "miniflare";
+import { MiniflareTestContext, miniflareTest } from "../../test-shared";
+
+const FIXTURES_PATH = path.resolve(
+  __dirname,
+  "..",
+  "..",
+  "..",
+  "..",
+  "test",
+  "fixtures"
+);
+
+export interface Context extends MiniflareTestContext {
+  db: D1Database;
+  tableColours: string;
+  tableKitchenSink: string;
+}
+
+export let binding: string;
+export let opts: MiniflareOptions;
+export let test: TestFn<Context>;
+export let getDatabase: (mf: Miniflare) => Promise<D1Database>;
+
+export function setupTest(
+  newBinding: string,
+  newScriptName: string,
+  newGetDatabase: (mf: Miniflare) => Promise<D1Database>
+) {
+  binding = newBinding;
+  opts = {
+    modules: true,
+    scriptPath: path.join(FIXTURES_PATH, "d1", newScriptName),
+    d1Databases: { [newBinding]: "db" },
+  };
+  test = miniflareTest<unknown, Context>(opts);
+  getDatabase = newGetDatabase;
+}
diff --git a/packages/miniflare/test/plugins/d1/with-wrangler-shim.spec.ts b/packages/miniflare/test/plugins/d1/with-wrangler-shim.spec.ts
deleted file mode 100644
index 322a18006..000000000
--- a/packages/miniflare/test/plugins/d1/with-wrangler-shim.spec.ts
+++ /dev/null
@@ -1,7 +0,0 @@
-import path from "path";
-import { FIXTURES_PATH } from "./shared";
-import suite from "./suite";
-
-// Pre-wrangler 3.3, D1 bindings needed a local compilation step, so use
-// the output version of the fixture, and the appropriately prefixed binding name
-suite("__D1_BETA__DB", path.join(FIXTURES_PATH, "d1", "worker.dist.mjs"));
diff --git a/packages/miniflare/test/plugins/do/index.spec.ts b/packages/miniflare/test/plugins/do/index.spec.ts
index 04555c413..fb3b86e45 100644
--- a/packages/miniflare/test/plugins/do/index.spec.ts
+++ b/packages/miniflare/test/plugins/do/index.spec.ts
@@ -1,14 +1,23 @@
+import assert from "assert";
 import fs from "fs/promises";
 import path from "path";
 import test from "ava";
-import { Miniflare, MiniflareOptions } from "miniflare";
+import {
+  DeferredPromise,
+  MessageEvent,
+  Miniflare,
+  MiniflareOptions,
+  RequestInit,
+} from "miniflare";
 import { useTmp } from "../../test-shared";
 
 const COUNTER_SCRIPT = (responsePrefix = "") => `export class Counter {
+  instanceId = crypto.randomUUID();
   constructor(state) {
     this.storage = state.storage;
   }
   async fetch(request) {
+    if (request.cf?.instanceId) return new Response(this.instanceId);
     const count = ((await this.storage.get("count")) ?? 0) + 1;
     void this.storage.put("count", count);
     return new Response(${JSON.stringify(responsePrefix)} + count);
@@ -235,3 +244,68 @@ test("can use Durable Object ID from one object in another", async (t) => {
   const res = await mf2.dispatchFetch(`http://localhost/${id}`);
   t.is(await res.text(), `id:${id}`);
 });
+
+test("proxies Durable Object methods", async (t) => {
+  const mf = new Miniflare({
+    verbose: true,
+    modules: true,
+    script: COUNTER_SCRIPT(""),
+    durableObjects: { COUNTER: "Counter" },
+  });
+  t.teardown(() => mf.dispose());
+
+  // Check can call synchronous ID creation methods
+  let ns = await mf.getDurableObjectNamespace("COUNTER");
+  let id = ns.idFromName("/a");
+  t.regex(String(id), /[0-9a-f]{64}/i);
+
+  // Check using result of proxied method in another
+  let stub = ns.get(id);
+  let res = await stub.fetch("http://placeholder/");
+  t.is(await res.text(), "1");
+
+  // Check reuses exact same instance with un-proxied access
+  res = await mf.dispatchFetch("http://localhost/a");
+  t.is(await res.text(), "2");
+  const requestInit: RequestInit = { cf: { instanceId: true } };
+  const proxyIdRes = await stub.fetch("http://placeholder/", requestInit);
+  const proxyId = await proxyIdRes.text();
+  const regularIdRes = await mf.dispatchFetch("http://localhost/a", requestInit);
+  const regularId = await regularIdRes.text();
+  t.is(proxyId, regularId);
+
+  // Check with WebSocket
+  await mf.setOptions({
+    verbose: true,
+    modules: true,
+    script: `
+    export class WebSocketObject {
+      fetch() {
+        const [webSocket1, webSocket2] = Object.values(new WebSocketPair());
+        webSocket1.accept();
+        webSocket1.addEventListener("message", (event) => {
+          webSocket1.send("echo:" + event.data);
+        });
+        return new Response(null, { status: 101, webSocket: webSocket2 });
+      }
+    }
+    export default {
+      fetch(request, env) { return new Response(null, { status: 404 }); }
+    }
+    `,
+    durableObjects: { WEBSOCKET: "WebSocketObject" },
+  });
+  ns = await mf.getDurableObjectNamespace("WEBSOCKET");
+  id = ns.newUniqueId();
+  stub = ns.get(id);
+  res = await stub.fetch("http://placeholder/", {
+    headers: { Upgrade: "websocket" },
+  });
+  assert(res.webSocket !== null);
+  const eventPromise = new DeferredPromise<MessageEvent>();
+  res.webSocket.addEventListener("message", eventPromise.resolve);
+  res.webSocket.accept();
+  res.webSocket.send("hello");
+  const event = await eventPromise;
+  t.is(event.data, "echo:hello");
+});
diff --git a/packages/miniflare/test/plugins/queues/index.spec.ts b/packages/miniflare/test/plugins/queues/index.spec.ts
index 62181479d..970cdfe5b 100644
--- a/packages/miniflare/test/plugins/queues/index.spec.ts
+++ b/packages/miniflare/test/plugins/queues/index.spec.ts
@@ -41,7 +41,7 @@ test("flushes partial and full batches", async (t) => {
         } else if (url.pathname === "/batch") {
           await env.QUEUE.sendBatch(body);
         }
-        return new Response();
+        return new Response(null, { status: 204 });
       }
     }`,
     },
@@ -66,6 +66,8 @@
       },
     ],
   });
+  t.teardown(() => mf.dispose());
+
   async function send(message: unknown) {
     await mf.dispatchFetch("http://localhost/send", {
       method: "POST",
@@ -232,7 +234,7 @@ test("sends all structured cloneable types", async (t) => {
         await env.QUEUE.sendBatch(Object.entries(VALUES).map(
          ([key, value]) => ({ body: { name: key, value } })
        ));
-        return new Response();
+        return new Response(null, { status: 204 });
      },
      async queue(batch, env, ctx) {
        let error;
@@ -255,6 +257,7 @@
      },
    ],
  });
+  t.teardown(() => mf.dispose());
 
  await mf.dispatchFetch("http://localhost");
  timers.timestamp += 1000;
@@ -305,7 +308,7 @@ test("retries messages", async (t) => {
        const url = new URL(request.url);
        const body = await request.json();
        await env.QUEUE.sendBatch(body);
-        return new Response();
+        return new Response(null, { status: 204 });
      },
      async queue(batch, env, ctx) {
        const res = await env.RETRY_FILTER.fetch("http://localhost", {
@@ -326,6 +329,8 @@
      }
    }`,
  });
+  t.teardown(() => mf.dispose());
+
  async function sendBatch(...messages: string[]) {
    await mf.dispatchFetch("http://localhost", {
      method: "POST",
@@ -532,7 +537,7 @@ test("moves to dead letter queue", async (t) => {
        const url = new URL(request.url);
        const body = await request.json();
        await env.BAD_QUEUE.sendBatch(body);
-        return new Response();
+        return new Response(null, { status: 204 });
      },
      async queue(batch, env, ctx) {
        const res = await env.RETRY_FILTER.fetch("http://localhost", {
@@ -546,6 +551,8 @@
      }
    }`,
  });
+  t.teardown(() => mf.dispose());
+
  async function sendBatch(...messages: string[]) {
    await mf.dispatchFetch("http://localhost", {
      method: "POST",
@@ -638,7 +645,7 @@ test("operations permit strange queue names", async (t) => {
      async fetch(request, env, ctx) {
        await env.QUEUE.send("msg1");
        await env.QUEUE.sendBatch([{ body: "msg2" }]);
-        return new Response();
+        return new Response(null, { status: 204 });
      },
      async queue(batch, env, ctx) {
        await env.REPORTER.fetch("http://localhost", {
@@ -648,6 +655,8 @@
      }
    }`,
  });
+  t.teardown(() => mf.dispose());
+
  await mf.dispatchFetch("http://localhost");
  timers.timestamp += 1000;
  await timers.waitForTasks();
@@ -718,7 +727,10 @@ test("supports message contentTypes", async (t) => {
    },
  };`,
  });
-  await mf.dispatchFetch("http://localhost");
+  t.teardown(() => mf.dispose());
+
+  const res = await mf.dispatchFetch("http://localhost");
+  await res.arrayBuffer(); // (drain)
  timers.timestamp += 1000;
  await timers.waitForTasks();
  const batch = await promise;
diff --git a/packages/miniflare/test/plugins/r2/index.spec.ts b/packages/miniflare/test/plugins/r2/index.spec.ts
index 5c29b6292..812aef9e4 100644
--- a/packages/miniflare/test/plugins/r2/index.spec.ts
+++ b/packages/miniflare/test/plugins/r2/index.spec.ts
@@ -1,49 +1,28 @@
 // noinspection TypeScriptValidateJSTypes
 import assert from "assert";
-import { Blob } from "buffer";
 import crypto from "crypto";
 import path from "path";
-import { blob, text } from "stream/consumers";
-import { ReadableStream } from "stream/web";
+import { text } from "stream/consumers";
 import type {
   R2Bucket,
-  R2Checksums,
   R2Conditional,
-  R2GetOptions,
-  R2HTTPMetadata,
   R2ListOptions,
-  R2MultipartOptions,
-  R2MultipartUpload,
   R2Object,
-  R2ObjectBody,
   R2Objects,
-  R2PutOptions,
-  R2Range,
-  R2StringChecksums,
-  R2UploadedPart,
-  Blob as WorkerBlob,
-  Headers as WorkerHeaders,
-  Response as WorkerResponse,
 } from "@cloudflare/workers-types/experimental";
 import { Macro, ThrowsExpectation } from "ava";
 import {
-  File,
-  FormData,
   Headers,
-  Miniflare,
   MiniflareOptions,
   MultipartPartRow,
   ObjectRow,
   R2Gateway,
-  Response,
+  ReplaceWorkersTypes,
   Storage,
   TypedDatabase,
   createFileStorage,
-  viewToArray,
-  viewToBuffer,
 } from "miniflare";
-import { z } from "zod";
 import {
   MiniflareTestContext,
   isWithin,
@@ -68,329 +47,40 @@ function hash(value: string, algorithm = "md5") {
   return crypto.createHash(algorithm).update(value).digest("hex");
 }
 
-// R2-like API for sending requests to the test worker. These tests were
-// ported from Miniflare 2, which provided this API natively.
-
-type ReducedR2Object = Omit<
-  R2Object,
-  "checksums" | "uploaded" | "writeHttpMetadata"
-> & { checksums: R2StringChecksums; uploaded: string };
-type ReducedR2ObjectBody = ReducedR2Object & { body: number };
-
-async function deconstructResponse(res: Response): Promise<any> {
-  const formData = await res.formData();
-  const payload = formData.get("payload");
-  assert(typeof payload === "string");
-  return JSON.parse(payload, (key, value) => {
-    if (typeof value === "object" && value !== null && "$type" in value) {
-      if (value.$type === "R2Object") {
-        const object = value as ReducedR2Object;
-        return new TestR2Object(object);
-      } else if (value.$type === "R2ObjectBody") {
-        const objectBody = value as ReducedR2ObjectBody;
-        const body = formData.get(objectBody.body.toString());
-        // noinspection SuspiciousTypeOfGuard
-        assert(body instanceof File);
-        return new TestR2ObjectBody(objectBody, body);
-      } else if (value.$type === "Date") {
-        return new Date(value.value);
+type NamespacedR2Bucket = ReplaceWorkersTypes<R2Bucket> & { ns: string };
+
+// Automatically prefix all keys with the specified namespace
+function nsBucket(
+  ns: string,
+  bucket: ReplaceWorkersTypes<R2Bucket>
+): NamespacedR2Bucket {
+  return new Proxy(bucket as NamespacedR2Bucket, {
+    get(target, key, receiver) {
+      if (key === "ns") return ns;
+      const value = Reflect.get(target, key, receiver);
+      if (typeof value === "function" && key !== "list") {
+        return (keys: string | string[], ...args: unknown[]) => {
+          if (typeof keys === "string") keys = ns + keys;
+          if (Array.isArray(keys)) keys = keys.map((key) => ns + key);
+          return value(keys, ...args);
+        };
       }
-    }
-    return value;
-  });
 }
-
-function maybeJsonStringify(value: unknown): string {
-  if (value == null) return "";
-  return JSON.stringify(value, (key, value) => {
-    const dateResult = z.string().datetime().safeParse(value);
-    if (dateResult.success) {
-      return { $type: "Date", value: new Date(dateResult.data).getTime() };
-    }
-    if (value instanceof Headers) {
-      return { $type: "Headers", entries: [...value] };
-    }
-    return value;
+      return value;
+    },
+    set(target, key, newValue, receiver) {
+      if (key === "ns") {
+        ns = newValue;
+        return true;
+      } else {
+        return Reflect.set(target, key, newValue, receiver);
+      }
+    },
   });
 }
-
-async function blobify(
-  value: ReadableStream | ArrayBuffer | ArrayBufferView | string | null | Blob
-) {
-  if (value === null) {
-    return new Blob([]);
-  } else if (value instanceof ArrayBuffer) {
-    return new Blob([new Uint8Array(value)]);
-  } else if (ArrayBuffer.isView(value)) {
-    return new Blob([viewToArray(value)]);
-  } else if (value instanceof ReadableStream) {
-    return await blob(value);
-  } else {
-    return new Blob([value]);
-  }
-}
-
-class TestR2Bucket implements R2Bucket {
-  constructor(private readonly mf: Miniflare, public ns = "") {}
-
-  async head(key: string): Promise<R2Object | null> {
-    const url = new URL(this.ns + key, "http://localhost");
-    const res = await this.mf.dispatchFetch(url, {
-      method: "GET",
-      headers: {
-        Accept: "multipart/form-data",
-        "Test-Method": "HEAD",
-      },
-    });
-    return deconstructResponse(res);
-  }
-
-  get(
-    key: string,
-    options: R2GetOptions & {
-      onlyIf: R2Conditional | Headers;
-    }
-  ): Promise<R2ObjectBody | R2Object | null>;
-  get(key: string, options?: R2GetOptions): Promise<R2ObjectBody | null>;
-  async get(
-    key: string,
-    options?: R2GetOptions
-  ): Promise<R2ObjectBody | R2Object | null> {
-    const url = `http://localhost/${encodeURIComponent(this.ns + key)}`;
-    const res = await this.mf.dispatchFetch(url, {
-      method: "GET",
-      headers: {
-        Accept: "multipart/form-data",
-        "Test-Options": maybeJsonStringify(options),
-      },
-    });
-    return deconstructResponse(res);
-  }
-
-  // @ts-expect-error `@cloudflare/workers-type`'s `ReadableStream` type is
-  // incompatible with Node's
-  async put(
-    key: string,
-    value:
-      | ReadableStream
-      | ArrayBuffer
-      | ArrayBufferView
-      | string
-      | null
-      | Blob,
-    options?: R2PutOptions
-  ): Promise<R2Object> {
-    const url = `http://localhost/${encodeURIComponent(this.ns + key)}`;
-
-    // We can't store options in headers as some put() tests include extended
-    // characters in them, and `undici` validates all headers are byte strings,
-    // so use a form data body instead
-    const formData = new FormData();
-    formData.set("options", maybeJsonStringify(options));
-    formData.set("value", await blobify(value));
-    const res = await this.mf.dispatchFetch(url, {
-      method: "PUT",
-      headers: { Accept: "multipart/form-data" },
-      body: formData,
-    });
-    return deconstructResponse(res);
-  }
-
-  async delete(keys: string | string[]): Promise<void> {
-    if (Array.isArray(keys)) keys = keys.map((key) => this.ns + key);
-    else keys = this.ns + keys;
-    await this.mf.dispatchFetch("http://localhost", {
-      method: "DELETE",
-      body: JSON.stringify(keys),
-      headers: { Accept: "multipart/form-data" },
-    });
-  }
-
-  async list(options?: R2ListOptions): Promise<R2Objects> {
-    const res = await this.mf.dispatchFetch("http://localhost", {
-      method: "GET",
-      headers: {
-        Accept: "multipart/form-data",
-        "Test-Method": "LIST",
-        "Test-Options": maybeJsonStringify(options),
-      },
-    });
-    return deconstructResponse(res);
-  }
-
-  async createMultipartUpload(
-    key: string,
-    options?: R2MultipartOptions
-  ): Promise<R2MultipartUpload> {
-    const nsKey = this.ns + key;
-    const url = `http://localhost/${encodeURIComponent(nsKey)}`;
-    const res = await this.mf.dispatchFetch(url, {
-      method: "POST",
-      headers: {
-        Accept: "text/plain",
-        "Test-Method": "MULTIPART-CREATE",
-        "Test-Options": maybeJsonStringify(options),
-      },
-    });
-    const uploadId = await res.text();
-    // @ts-expect-error `@cloudflare/workers-type`'s `ReadableStream` type is
-    // incompatible with Node's
-    return new TestR2MultipartUpload(this.mf, nsKey, uploadId);
-  }
-
-  resumeMultipartUpload(key: string, uploadId: string): R2MultipartUpload {
-    // @ts-expect-error `@cloudflare/workers-type`'s `ReadableStream` type is
-    // incompatible with Node's
-    return new TestR2MultipartUpload(this.mf, this.ns + key, uploadId);
-  }
-}
-
-class TestR2Checksums implements R2Checksums {
-  readonly md5?: ArrayBuffer;
-  readonly sha1?: ArrayBuffer;
-  readonly sha256?: ArrayBuffer;
-  readonly sha384?: ArrayBuffer;
-  readonly sha512?: ArrayBuffer;
-
-  constructor(private readonly checksums: R2StringChecksums) {
-    this.md5 = this.#decode(checksums.md5);
-    this.sha1 = this.#decode(checksums.sha1);
-    this.sha256 = this.#decode(checksums.sha256);
-    this.sha384 = this.#decode(checksums.sha384);
-    this.sha512 = this.#decode(checksums.sha512);
-  }
-
-  #decode(checksum?: string) {
-    return checksum === undefined
-      ? undefined
-      : viewToBuffer(Buffer.from(checksum, "hex"));
-  }
-
-  toJSON(): R2StringChecksums {
-    return this.checksums;
-  }
-}
-
-class TestR2Object implements R2Object {
-  readonly key: string;
-  readonly version: string;
-  readonly size: number;
-  readonly etag: string;
-  readonly httpEtag: string;
-  readonly checksums: R2Checksums;
-  readonly uploaded: Date;
-  readonly httpMetadata?: R2HTTPMetadata;
-  readonly customMetadata?: Record<string, string>;
-  readonly range?: R2Range;
-
-  constructor(object: ReducedR2Object) {
-    this.key = object.key;
-    this.version = object.version;
-    this.size = object.size;
-    this.etag = object.etag;
-    this.httpEtag = object.httpEtag;
-    this.checksums = new TestR2Checksums(object.checksums);
-    this.uploaded = new Date(object.uploaded);
-    this.httpMetadata = object.httpMetadata;
-    this.customMetadata = object.customMetadata;
-    this.range = object.range;
-  }
-
-  writeHttpMetadata(_headers: Headers): void {
-    // Fully-implemented by `workerd`
-    assert.fail("TestR2Object#writeHttpMetadata() not implemented");
-  }
-}
-
-class TestR2ObjectBody extends TestR2Object implements R2ObjectBody {
-  constructor(object: ReducedR2Object, readonly body: Blob) {
-    super(object);
-  }
-
-  get bodyUsed(): boolean {
-    // Fully-implemented by `workerd`
-    assert.fail("TestR2ObjectBody#bodyUsed() not implemented");
-    return false; // TypeScript requires `get` accessors return
-  }
-
-  arrayBuffer(): Promise<ArrayBuffer> {
-    return this.body.arrayBuffer();
-  }
-  text(): Promise<string> {
-    return this.body.text();
-  }
-  async json<T>(): Promise<T> {
-    return JSON.parse(await this.body.text());
-  }
-  // @ts-expect-error `@cloudflare/workers-type`'s `Blob` type is incompatible
-  // with Node's
-  blob(): Promise<WorkerBlob> {
-    return Promise.resolve(this.body);
-  }
-}
-
-class TestR2MultipartUpload implements R2MultipartUpload {
-  constructor(
-    private readonly mf: Miniflare,
-    readonly key: string,
-    readonly uploadId: string
-  ) {}
-
-  // @ts-expect-error `@cloudflare/workers-type`'s `ReadableStream` type is
-  // incompatible with Node's
-  async uploadPart(
-    partNumber: number,
-    value: ReadableStream | ArrayBuffer | ArrayBufferView | string | null | Blob
-  ): Promise<R2UploadedPart> {
-    const url = `http://localhost/${encodeURIComponent(this.key)}`;
-    const body = await blobify(value);
-    const res = await this.mf.dispatchFetch(url, {
-      method: "PUT",
-      headers: {
-        Accept: "application/json",
-        "Content-Length": String(body.size),
-        "Test-Method": "MULTIPART-UPLOAD",
-        "Test-Options": JSON.stringify({ uploadId: this.uploadId, partNumber }),
-      },
-      // Could stream this, but R2 requires known-length streams, so have to
-      // buffer anyway
-      body,
-    });
-    return (await res.json()) as R2UploadedPart;
-  }
-
-  async abort(): Promise<void> {
-    const url = `http://localhost/${encodeURIComponent(this.key)}`;
-    await this.mf.dispatchFetch(url, {
-      method: "DELETE",
-      headers: {
-        Accept: "text/plain",
-        "Test-Method": "MULTIPART-ABORT",
- "Test-Options": JSON.stringify({ uploadId: this.uploadId }), - }, - }); - } - - async complete(uploadedParts: R2UploadedPart[]): Promise { - const url = `http://localhost/${encodeURIComponent(this.key)}`; - const res = await this.mf.dispatchFetch(url, { - method: "PUT", - headers: { - Accept: "multipart/form-data", - "Test-Method": "MULTIPART-COMPLETE", - "Test-Options": JSON.stringify({ - uploadId: this.uploadId, - uploadedParts, - }), - }, - }); - return deconstructResponse(res); - } -} - interface Context extends MiniflareTestContext { ns: string; - r2: TestR2Bucket; + r2: NamespacedR2Bucket; storage: Storage; } @@ -400,145 +90,19 @@ const opts: Partial = { }; const test = miniflareTest<{ BUCKET: R2Bucket }, Context>( opts, - async (global, request, env) => { - function maybeJsonParse(value: string | null): any { - if (value === null || value === "") return; - return JSON.parse(value, (key, value) => { - if (typeof value === "object" && value !== null && "$type" in value) { - if (value.$type === "Date") { - return new Date(value.value); - } - if (value.$type === "Headers") { - return new global.Headers(value.entries); - } - } - return value; - }); - } - - function reduceR2Object( - value: R2Object - ): ReducedR2Object & { $type: "R2Object" } { - return { - $type: "R2Object", - key: value.key, - version: value.version, - size: value.size, - etag: value.etag, - httpEtag: value.httpEtag, - checksums: value.checksums.toJSON(), - uploaded: value.uploaded.toISOString(), - httpMetadata: value.httpMetadata, - customMetadata: value.customMetadata, - range: value.range, - }; - } - async function constructResponse(thing: any): Promise { - // Stringify `thing` as JSON, replacing `R2Object(Body)`s with a - // plain-object representation. Reading bodies is asynchronous, but - // `JSON.stringify`-replacers must be synchronous, so record body - // reading `Promise`s, and attach the bodies in `FormData`. - const bodyPromises: Promise[] = []; - const payload = JSON.stringify(thing, (key, value) => { - if (typeof value === "object" && value !== null) { - // https://github.com/cloudflare/workerd/blob/c336d404a5fbe2c779b28a6ca54c338f89e2fea1/src/workerd/api/r2-bucket.h#L202 - if (value.constructor?.name === "HeadResult" /* R2Object */) { - const object = value as R2Object; - return reduceR2Object(object); - } - // https://github.com/cloudflare/workerd/blob/c336d404a5fbe2c779b28a6ca54c338f89e2fea1/src/workerd/api/r2-bucket.h#L255 - if (value.constructor?.name === "GetResult" /* R2ObjectBody */) { - const objectBody = value as R2ObjectBody; - const object = reduceR2Object(objectBody); - const bodyId = bodyPromises.length; - // Test bodies shouldn't be too big, so buffering them is fine - bodyPromises.push(objectBody.blob()); - return { ...object, $type: "R2ObjectBody", body: bodyId }; - } - } - - if ( - typeof value === "string" && - // https://github.com/colinhacks/zod/blob/981af6503ee1be530fe525ac77ba95e1904ce24a/src/types.ts#L562 - /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z$/.test(value) - ) { - return { $type: "Date", value: new Date(value).getTime() }; - } - - return value; - }); - - // Construct `FormData` containing JSON-payload and all bodies - const formData = new global.FormData(); - formData.set("payload", payload); - const bodies = await Promise.all(bodyPromises); - bodies.forEach((body, i) => formData.set(i.toString(), body)); - - return new global.Response(formData); - } - - // Actual `HEAD` requests can't return bodies, but we'd like them to. - // Also, `LIST` is not a valid HTTP method. 
-    const method = request.headers.get("Test-Method") ?? request.method;
-    const { pathname } = new URL(request.url);
-    const key = decodeURIComponent(pathname.substring(1));
-    if (method === "HEAD") {
-      return constructResponse(await env.BUCKET.head(key));
-    } else if (method === "GET") {
-      const optionsHeader = request.headers.get("Test-Options");
-      const options = maybeJsonParse(optionsHeader);
-      return constructResponse(await env.BUCKET.get(key, options));
-    } else if (method === "PUT") {
-      const formData = await request.formData();
-      const optionsData = formData.get("options");
-      if (typeof optionsData !== "string") throw new TypeError();
-      const options = maybeJsonParse(optionsData);
-      const value = formData.get("value");
-      return constructResponse(await env.BUCKET.put(key, value, options));
-    } else if (method === "DELETE") {
-      const keys = await request.json();
-      await env.BUCKET.delete(keys);
-      return new global.Response(null, { status: 204 });
-    } else if (method === "LIST") {
-      const optionsHeader = request.headers.get("Test-Options");
-      const options = maybeJsonParse(optionsHeader);
-      return constructResponse(await env.BUCKET.list(options));
-    } else if (method === "MULTIPART-CREATE") {
-      const optionsHeader = request.headers.get("Test-Options");
-      const options = maybeJsonParse(optionsHeader);
-      const upload = await env.BUCKET.createMultipartUpload(key, options);
-      return new global.Response(upload.uploadId);
-    } else if (method === "MULTIPART-UPLOAD") {
-      const optionsHeader = request.headers.get("Test-Options");
-      const options = maybeJsonParse(optionsHeader);
-      const upload = env.BUCKET.resumeMultipartUpload(key, options.uploadId);
-      const value = request.body ?? "";
-      const part = await upload.uploadPart(options.partNumber, value);
-      return global.Response.json(part);
-    } else if (method === "MULTIPART-ABORT") {
-      const optionsHeader = request.headers.get("Test-Options");
-      const options = maybeJsonParse(optionsHeader);
-      const upload = env.BUCKET.resumeMultipartUpload(key, options.uploadId);
-      await upload.abort();
-      return new global.Response(null, { status: 204 });
-    } else if (method === "MULTIPART-COMPLETE") {
-      const optionsHeader = request.headers.get("Test-Options");
-      const options = maybeJsonParse(optionsHeader);
-      const upload = env.BUCKET.resumeMultipartUpload(key, options.uploadId);
-      return constructResponse(await upload.complete(options.uploadedParts));
-    }
-
-    return new global.Response(null, { status: 405 });
+  async (global) => {
+    return new global.Response(null, { status: 404 });
   }
 );
-test.beforeEach((t) => {
+
+test.beforeEach(async (t) => {
   // Namespace keys so tests which are accessing the same Miniflare instance
   // and bucket don't have races from key collisions
   const ns = `${Date.now()}_${Math.floor(
     Math.random() * Number.MAX_SAFE_INTEGER
   )}`;
   t.context.ns = ns;
-  t.context.r2 = new TestR2Bucket(t.context.mf, ns);
+  t.context.r2 = nsBucket(ns, await t.context.mf.getR2Bucket("BUCKET"));
   t.context.storage = t.context.mf._getPluginStorage("r2", "bucket");
 });
@@ -546,7 +110,7 @@ const validatesKeyMacro: Macro<
   [
     {
       method: string;
-      f: (r2: TestR2Bucket, key?: any) => Promise<unknown>;
+      f: (r2: NamespacedR2Bucket, key?: any) => Promise<unknown>;
     }
   ],
   Context
@@ -602,6 +166,13 @@ test("head: returns metadata for existing keys", async (t) => {
   t.deepEqual(object.customMetadata, { key: "value" });
   t.deepEqual(object.range, { offset: 0, length: 5 });
   isWithin(t, WITHIN_EPSILON, object.uploaded.getTime(), start);
+
+  // Test proxying of `writeHttpMetadata()`
+  const headers = new Headers({ "X-Key": "value" });
+  // noinspection JSVoidFunctionReturnValueUsed
+  t.is(object.writeHttpMetadata(headers), undefined);
+  t.is(headers.get("Content-Type"), "text/plain");
+  t.is(headers.get("X-Key"), "value");
 });
 test(validatesKeyMacro, { method: "head", f: (r2, key) => r2.head(key) });
@@ -644,6 +215,13 @@ test("get: returns metadata and body for existing keys", async (t) => {
   t.deepEqual(body.customMetadata, { key: "value" });
   t.deepEqual(body.range, { offset: 0, length: 5 });
   isWithin(t, WITHIN_EPSILON, body.uploaded.getTime(), start);
+
+  // Test proxying of `writeHttpMetadata()`
+  const headers = new Headers({ "X-Key": "value" });
+  // noinspection JSVoidFunctionReturnValueUsed
+  t.is(body.writeHttpMetadata(headers), undefined);
+  t.is(headers.get("Content-Type"), "text/plain");
+  t.is(headers.get("X-Key"), "value");
 });
 test(validatesKeyMacro, { method: "get", f: (r2, key) => r2.get(key) });
 test("get: range using object", async (t) => {
@@ -700,7 +278,7 @@ test('get: range using "Range" header', async (t) => {
   const { r2 } = t.context;
   const value = "abcdefghijklmnopqrstuvwxyz";
   await r2.put("key", value);
-  const range = new Headers() as WorkerHeaders;
+  const range = new Headers();
 
   // Check missing "Range" header returns full response
   let body = await r2.get("key", { range });
@@ -750,15 +328,14 @@ test("get: returns body only if passes onlyIf", async (t) => {
   const pass = async (cond: R2Conditional) => {
     const object = await r2.get("key", { onlyIf: cond });
-    t.not(object, null);
-    t.true(object instanceof TestR2ObjectBody);
+    // R2ObjectBody
+    t.true(object !== null && "body" in object && object?.body !== undefined);
   };
   const fail = async (cond: R2Conditional) => {
     const object = await r2.get("key", { onlyIf: cond });
     t.not(object, null);
-    // Can't test if `object instanceof TestR2Object` as
-    // `TestR2ObjectBody extends TestR2Object`
-    t.false(object instanceof TestR2ObjectBody);
+    // R2Object
+    t.true(object !== null && !("body" in object));
   };
 
   await pass({ etagMatches: etag });
@@ -917,7 +494,7 @@ test("put: stores only if passes onlyIf", async (t) => {
   };
   const fail = async (cond: R2Conditional) => {
     const object = await r2.put("key", "2", { onlyIf: cond });
-    t.is(object as R2Object | null, null);
+    t.is(object as ReplaceWorkersTypes<R2Object> | null, null);
     t.is(await (await r2.get("key"))?.text(), "1");
     // No `reset()` as we've just checked we didn't update anything
   };
@@ -1268,9 +845,9 @@ test("list: returns correct delimitedPrefixes for delimiter and prefix", async (
   const allKeys = Object.keys(values);
   for (const [key, value] of Object.entries(values)) await r2.put(key, value);
 
-  const keys = (result: R2Objects) =>
+  const keys = (result: ReplaceWorkersTypes<R2Objects>) =>
     result.objects.map(({ key }) => key.substring(ns.length));
-  const delimitedPrefixes = (result: R2Objects) =>
+  const delimitedPrefixes = (result: ReplaceWorkersTypes<R2Objects>) =>
     result.delimitedPrefixes.map((prefix) => prefix.substring(ns.length));
   const allKeysWithout = (...exclude: string[]) =>
     allKeys.filter((value) => !exclude.includes(value));
@@ -1346,7 +923,7 @@ test.serial("operations permit empty key", async (t) => {
 });
 
 test.serial("operations persist stored data", async (t) => {
-  const { r2, ns } = t.context;
+  const { mf, ns } = t.context;
 
   // Create new temporary file-system persistence directory
   const tmp = await useTmp(t);
@@ -1355,6 +932,7 @@
   // Set option, then reset after test
   await t.context.setOptions({ ...opts, r2Persist: tmp });
   t.teardown(() => t.context.setOptions(opts));
+  const r2 = nsBucket(ns, await mf.getR2Bucket("BUCKET"));
 
   // Check put respects persist
   await r2.put("key", "value");
@@ -1392,12 +970,13 @@
 });
 
 test.serial("operations permit strange bucket names", async (t) => {
-  const { r2, ns } = t.context;
+  const { mf, ns } = t.context;
 
   // Set option, then reset after test
   const id = "my/ Bucket";
   await t.context.setOptions({ ...opts, r2Buckets: { BUCKET: id } });
   t.teardown(() => t.context.setOptions(opts));
+  const r2 = nsBucket(ns, await mf.getR2Bucket("BUCKET"));
 
   // Check basic operations work
   await r2.put("key", "value");
@@ -1816,18 +1395,18 @@ test("get: is multipart aware", async (t) => {
   // Check ranged get accessing single part
   const halfPartSize = Math.floor(PART_SIZE / 2);
   const quarterPartSize = Math.floor(PART_SIZE / 4);
-  object = (await r2.get("key", {
+  object = await r2.get("key", {
     range: { offset: halfPartSize, length: quarterPartSize },
-  })) as R2ObjectBody | null;
+  });
   t.is(await object?.text(), "a".repeat(quarterPartSize));
 
   // Check ranged get accessing multiple parts
-  object = (await r2.get("key", {
+  object = await r2.get("key", {
     range: {
       offset: halfPartSize,
       length: halfPartSize + PART_SIZE + quarterPartSize,
     },
-  })) as R2ObjectBody | null;
+  });
   t.is(
     await object?.text(),
     `${"a".repeat(halfPartSize)}${"b".repeat(PART_SIZE)}${"c".repeat(
       quarterPartSize
     )}`
   );
 
   // Check ranged get of suffix
-  object = (await r2.get("key", {
+  object = await r2.get("key", {
     range: { suffix: quarterPartSize + PART_SIZE },
-  })) as R2ObjectBody | null;
+  });
   t.is(
     await object?.text(),
     `${"b".repeat(quarterPartSize)}${"c".repeat(PART_SIZE)}`
diff --git a/packages/miniflare/test/plugins/r2/validator.spec.ts b/packages/miniflare/test/plugins/r2/validator.spec.ts
index 176fe7213..da1e881b4 100644
--- a/packages/miniflare/test/plugins/r2/validator.spec.ts
+++ b/packages/miniflare/test/plugins/r2/validator.spec.ts
@@ -1,5 +1,5 @@
 import test from "ava";
-import { R2Conditional, R2Object, _testR2Conditional } from "miniflare";
+import { InternalR2Object, R2Conditional, _testR2Conditional } from "miniflare";
 
 test("testR2Conditional: matches various conditions", (t) => {
   // Adapted from internal R2 gateway tests
@@ -10,7 +10,7 @@
   const pastDate = new Date(uploadedDate.getTime() - 30_000);
   const futureDate = new Date(uploadedDate.getTime() + 30_000);
 
-  const metadata: Pick<R2Object, "etag" | "uploaded"> = {
+  const metadata: Pick<InternalR2Object, "etag" | "uploaded"> = {
     etag,
     uploaded: uploadedDate.getTime(),
   };
diff --git a/packages/miniflare/test/setup.mjs b/packages/miniflare/test/setup.mjs
new file mode 100644
index 000000000..72677b9c2
--- /dev/null
+++ b/packages/miniflare/test/setup.mjs
@@ -0,0 +1,23 @@
+import { _initialiseInstanceRegistry } from "miniflare";
+
+const registry = _initialiseInstanceRegistry();
+const bigSeparator = "=".repeat(80);
+const separator = "-".repeat(80);
+
+// `process.on("exit")` is more like `worker_thread.on("exit")` here. It will
+// be called once AVA's finished running tests and `after` hooks. Note we can't
+// use an `after` hook here, as that would run before the `after` hooks
+// registered by `miniflareTest` have disposed their `Miniflare` instances.
+process.on("exit", () => {
+  if (registry.size === 0) return;
+
+  // If there are Miniflare instances that weren't disposed, throw
+  const s = registry.size === 1 ? "" : "s";
+  const was = registry.size === 1 ? "was" : "were";
+  const message = `Found ${registry.size} Miniflare instance${s} that ${was} not dispose()d`;
+  const stacks = Array.from(registry.values()).join(`\n${separator}\n`);
+  console.log(
+    [bigSeparator, message, separator, stacks, bigSeparator].join("\n")
+  );
+  throw new Error(message);
+});
diff --git a/packages/miniflare/test/test-shared/miniflare.ts b/packages/miniflare/test/test-shared/miniflare.ts
index d15572826..c3449d3b2 100644
--- a/packages/miniflare/test/test-shared/miniflare.ts
+++ b/packages/miniflare/test/test-shared/miniflare.ts
@@ -149,6 +149,6 @@ export function miniflareTest<
     t.context.mf.setOptions({ ...userOpts, ...opts } as MiniflareOptions);
     t.context.url = await t.context.mf.ready;
   });
-  test.after((t) => t.context.mf.dispose());
+  test.after.always((t) => t.context.mf.dispose());
   return test;
 }
diff --git a/patches/ava+5.2.0.patch b/patches/ava+5.2.0.patch
new file mode 100644
index 000000000..f168208ec
--- /dev/null
+++ b/patches/ava+5.2.0.patch
@@ -0,0 +1,17 @@
+diff --git a/node_modules/ava/lib/worker/base.js b/node_modules/ava/lib/worker/base.js
+index cdd3c4a..b03152b 100644
+--- a/node_modules/ava/lib/worker/base.js
++++ b/node_modules/ava/lib/worker/base.js
+@@ -126,7 +126,11 @@ const run = async options => {
+ 
+ 	nowAndTimers.setImmediate(() => {
+ 		for (const rejection of currentlyUnhandled()) {
+-			channel.send({type: 'unhandled-rejection', err: serializeError('Unhandled rejection', true, rejection.reason, runner.file)});
++			channel.send({type: 'unhandled-rejection', err: serializeError('Unhandled rejection', true, rejection.reason, runner.file)});
++			// The `cause` is often much more useful than the error itself, especially for `TypeError: fetch failed`s from `undici`
++			if (rejection.reason?.cause !== undefined) {
++				channel.send({type: 'unhandled-rejection', err: serializeError('Unhandled rejection', true, rejection.reason.cause, runner.file)});
++			}
+ 		}
+ 
+ 		exit(0);
diff --git a/types/env.d.ts b/types/env.d.ts
index 413236632..ddf2f9ca2 100644
--- a/types/env.d.ts
+++ b/types/env.d.ts
@@ -2,5 +2,6 @@ declare namespace NodeJS {
   export interface ProcessEnv {
     NODE_ENV?: string;
     NODE_EXTRA_CA_CERTS?: string;
+    MINIFLARE_ASSERT_BODIES_CONSUMED?: string;
   }
 }