diff --git a/lib/fetch/body.js b/lib/fetch/body.js
index 14af1251785..3fa70297948 100644
--- a/lib/fetch/body.js
+++ b/lib/fetch/body.js
@@ -258,6 +258,29 @@ function cloneBody (body) {
   }
 }
 
+async function * consumeBody (body) {
+  if (body) {
+    if (isUint8Array(body)) {
+      yield body
+    } else {
+      const stream = body.stream
+
+      if (util.isDisturbed(stream)) {
+        throw new TypeError('disturbed')
+      }
+
+      if (stream.locked) {
+        throw new TypeError('locked')
+      }
+
+      // Compat.
+      stream[kBodyUsed] = true
+
+      yield * stream
+    }
+  }
+}
+
 function bodyMixinMethods (instance) {
   const methods = {
     async blob () {
@@ -267,27 +290,10 @@ function bodyMixinMethods (instance) {
 
       const chunks = []
 
-      if (this[kState].body) {
-        if (isUint8Array(this[kState].body)) {
-          chunks.push(this[kState].body)
-        } else {
-          const stream = this[kState].body.stream
-
-          if (util.isDisturbed(stream)) {
-            throw new TypeError('disturbed')
-          }
-
-          if (stream.locked) {
-            throw new TypeError('locked')
-          }
-
-          // Compat.
-          stream[kBodyUsed] = true
-
-          for await (const chunk of stream) {
-            chunks.push(chunk)
-          }
-        }
+      for await (const chunk of consumeBody(this[kState].body)) {
+        // Assembling one final large Blob from raw Uint8Arrays can exhaust
+        // memory, so we create multiple small Blobs and keep references to them.
+        chunks.push(new Blob([chunk]))
       }
 
       return new Blob(chunks, { type: this.headers.get('Content-Type') || '' })
@@ -298,8 +304,46 @@ function bodyMixinMethods (instance) {
         throw new TypeError('Illegal invocation')
       }
 
-      const blob = await this.blob()
-      return await blob.arrayBuffer()
+      const contentLength = this.headers.get('content-length')
+      const encoded = this.headers.has('content-encoding')
+
+      // If we have a content length and no content encoding, we can
+      // pre-allocate the buffer and read the data directly into it.
+      if (!encoded && contentLength) {
+        const buffer = new Uint8Array(contentLength)
+        let offset = 0
+
+        for await (const chunk of consumeBody(this[kState].body)) {
+          buffer.set(chunk, offset)
+          offset += chunk.length
+        }
+
+        return buffer.buffer
+      }
+
+      // If we don't have a content length, we have to allocate 2x the
+      // size of the body: once for the consumed chunks, once for the final buffer.
+
+      // This could be optimized with a resizable ArrayBuffer, but that's not
+      // implemented yet. https://github.com/tc39/proposal-resizablearraybuffer
+
+      const chunks = []
+      let size = 0
+
+      for await (const chunk of consumeBody(this[kState].body)) {
+        chunks.push(chunk)
+        size += chunk.byteLength
+      }
+
+      const buffer = new Uint8Array(size)
+      let offset = 0
+
+      for (const chunk of chunks) {
+        buffer.set(chunk, offset)
+        offset += chunk.byteLength
+      }
+
+      return buffer.buffer
     },
 
     async text () {
@@ -307,8 +351,17 @@ function bodyMixinMethods (instance) {
         throw new TypeError('Illegal invocation')
      }
 
-      const blob = await this.blob()
-      return toUSVString(await blob.text())
+      let result = ''
+      const textDecoder = new TextDecoder()
+
+      for await (const chunk of consumeBody(this[kState].body)) {
+        result += textDecoder.decode(chunk, { stream: true })
+      }
+
+      // flush
+      result += textDecoder.decode()
+
+      return result
     },
 
     async json () {
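
Note on `consumeBody`: it centralizes the disturbed/locked checks and the `kBodyUsed` compat flag that `blob()` previously inlined, so every mixin method now consumes the body through the same async generator. A minimal standalone sketch of the pattern, run as an ES module on Node 18+; the `consume` name, the `instanceof Uint8Array` check, and the local `kBodyUsed` symbol are stand-ins for undici's internals, not its actual exports:

```js
const kBodyUsed = Symbol('kBodyUsed') // stand-in for undici's internal symbol

async function * consume (body) {
  if (!body) return

  if (body instanceof Uint8Array) {
    yield body // in-memory body: a single chunk
    return
  }

  if (body.locked) {
    throw new TypeError('locked')
  }

  body[kBodyUsed] = true // compat flag, as in the diff
  yield * body // Node's web ReadableStream is async-iterable
}

// Both body shapes go through the same loop:
const stream = new ReadableStream({
  start (controller) {
    controller.enqueue(new TextEncoder().encode('hello'))
    controller.close()
  }
})

for await (const chunk of consume(stream)) {
  console.log(chunk.byteLength) // 5
}
```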
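
On the `blob()` change: `Blob` parts can themselves be `Blob`s, so wrapping each chunk lets the final `Blob` hold references to many small blobs rather than requiring one huge contiguous copy up front (per the comment in the diff). A quick illustration, assuming the Node 18+ `Blob` global:

```js
// Each chunk becomes its own small Blob; the outer Blob only references them.
const parts = []
for (const chunk of [new Uint8Array(1024), new Uint8Array(2048)]) {
  parts.push(new Blob([chunk]))
}

const body = new Blob(parts, { type: 'application/octet-stream' })
console.log(body.size) // 3072
```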
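
The new `arrayBuffer()` has two paths: when there is a usable `Content-Length` and no `Content-Encoding` (which would change the decoded size), it allocates the result once and copies chunks straight into place; otherwise it must buffer every chunk and copy at the end, at roughly 2x peak memory. A self-contained sketch of the same two strategies; `toArrayBuffer` is a made-up helper name, not undici's API:

```js
// Sketch: single-allocation fast path vs. collect-then-copy fallback.
async function toArrayBuffer (chunks, contentLength) {
  if (contentLength != null) {
    // Fast path: one allocation sized from the header, filled in place.
    const buffer = new Uint8Array(Number(contentLength))
    let offset = 0
    for await (const chunk of chunks) {
      buffer.set(chunk, offset)
      offset += chunk.byteLength
    }
    return buffer.buffer
  }

  // Fallback: total size is unknown, so hold every chunk (~2x peak memory),
  // then copy into the final buffer once the size is known.
  const collected = []
  let size = 0
  for await (const chunk of chunks) {
    collected.push(chunk)
    size += chunk.byteLength
  }

  const buffer = new Uint8Array(size)
  let offset = 0
  for (const chunk of collected) {
    buffer.set(chunk, offset)
    offset += chunk.byteLength
  }
  return buffer.buffer
}

// Usage with any async iterable of Uint8Array chunks (ES module, Node 18+):
async function * source () {
  yield new Uint8Array([1, 2, 3])
  yield new Uint8Array([4, 5])
}

console.log(new Uint8Array(await toArrayBuffer(source(), 5))) // 1 2 3 4 5
```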
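
On `text()`: the `{ stream: true }` option matters because a multi-byte UTF-8 sequence can straddle a chunk boundary; streaming mode buffers the partial sequence, and the final argument-less `decode()` flushes whatever is left. For example:

```js
// '€' is encoded as 0xe2 0x82 0xac; here it is split across two chunks.
const decoder = new TextDecoder()
const chunks = [new Uint8Array([0xe2, 0x82]), new Uint8Array([0xac])]

let result = ''
for (const chunk of chunks) {
  result += decoder.decode(chunk, { stream: true }) // buffers partial bytes
}
result += decoder.decode() // flush

console.log(result) // '€'
```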