diff --git a/test/fixtures/wpt/README.md b/test/fixtures/wpt/README.md index e20877de174d43..ae82134a616e56 100644 --- a/test/fixtures/wpt/README.md +++ b/test/fixtures/wpt/README.md @@ -24,7 +24,7 @@ Last update: - interfaces: https://github.com/web-platform-tests/wpt/tree/fc086c82d5/interfaces - performance-timeline: https://github.com/web-platform-tests/wpt/tree/17ebc3aea0/performance-timeline - resources: https://github.com/web-platform-tests/wpt/tree/c5b428f15a/resources -- streams: https://github.com/web-platform-tests/wpt/tree/8f60d94439/streams +- streams: https://github.com/web-platform-tests/wpt/tree/9e5ef42bd3/streams - url: https://github.com/web-platform-tests/wpt/tree/0e5b126cd0/url - user-timing: https://github.com/web-platform-tests/wpt/tree/df24fb604e/user-timing - wasm/jsapi: https://github.com/web-platform-tests/wpt/tree/d8dbe6990b/wasm/jsapi diff --git a/test/fixtures/wpt/streams/idlharness-shadowrealm.window.js b/test/fixtures/wpt/streams/idlharness-shadowrealm.window.js new file mode 100644 index 00000000000000..099b2475ca7e87 --- /dev/null +++ b/test/fixtures/wpt/streams/idlharness-shadowrealm.window.js @@ -0,0 +1,2 @@ +// META: script=/resources/idlharness-shadowrealm.js +idl_test_shadowrealm(["streams"], ["dom"]); diff --git a/test/fixtures/wpt/streams/piping/abort.any.js b/test/fixtures/wpt/streams/piping/abort.any.js index 3fe029de95a1b8..503de9dcaf0893 100644 --- a/test/fixtures/wpt/streams/piping/abort.any.js +++ b/test/fixtures/wpt/streams/piping/abort.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/recording-streams.js // META: script=../resources/test-utils.js 'use strict'; @@ -53,29 +53,28 @@ promise_test(t => { }); }, 'an aborted signal should cause the writable stream to reject with an AbortError'); -promise_test(() => { - let error; - const rs = recordingReadableStream(errorOnPull, hwm0); - const ws = new WritableStream(); - const abortController = new AbortController(); - const signal = abortController.signal; - abortController.abort(); - return rs.pipeTo(ws, { signal }) - .catch(e => { - error = e; - }) - .then(() => Promise.all([ - rs.getReader().closed, - ws.getWriter().closed.catch(e => { - assert_equals(e, error, 'the writable should be errored with the same object'); - }) - ])) - .then(() => { +for (const reason of [null, undefined, error1]) { + promise_test(async t => { + const rs = recordingReadableStream(errorOnPull, hwm0); + const ws = new WritableStream(); + const abortController = new AbortController(); + const signal = abortController.signal; + abortController.abort(reason); + const pipeToPromise = rs.pipeTo(ws, { signal }); + if (reason !== undefined) { + await promise_rejects_exactly(t, reason, pipeToPromise, 'pipeTo rejects with abort reason'); + } else { + await promise_rejects_dom(t, 'AbortError', pipeToPromise, 'pipeTo rejects with AbortError'); + } + const error = await pipeToPromise.catch(e => e); + await rs.getReader().closed; + await promise_rejects_exactly(t, error, ws.getWriter().closed, 'the writable should be errored with the same object'); + assert_equals(signal.reason, error, 'signal.reason should be error'), assert_equals(rs.events.length, 2, 'cancel should have been called'); assert_equals(rs.events[0], 'cancel', 'first event should be cancel'); assert_equals(rs.events[1], error, 'the readable should be canceled with the same object'); - }); -}, 'all the AbortError objects should be the same object'); + }, `(reason: '${reason}') all the error objects 
should be the same object`); +} promise_test(t => { const rs = recordingReadableStream(errorOnPull, hwm0); @@ -115,61 +114,74 @@ promise_test(t => { }); }, 'preventCancel and preventAbort should prevent canceling the readable and aborting the readable'); -promise_test(t => { - const rs = new ReadableStream({ - start(controller) { - controller.enqueue('a'); - controller.enqueue('b'); - controller.close(); - } - }); - const abortController = new AbortController(); - const signal = abortController.signal; - const ws = recordingWritableStream({ - write() { - abortController.abort(); +for (const reason of [null, undefined, error1]) { + promise_test(async t => { + const rs = new ReadableStream({ + start(controller) { + controller.enqueue('a'); + controller.enqueue('b'); + controller.close(); + } + }); + const abortController = new AbortController(); + const signal = abortController.signal; + const ws = recordingWritableStream({ + write() { + abortController.abort(reason); + } + }); + const pipeToPromise = rs.pipeTo(ws, { signal }); + if (reason !== undefined) { + await promise_rejects_exactly(t, reason, pipeToPromise, 'pipeTo rejects with abort reason'); + } else { + await promise_rejects_dom(t, 'AbortError', pipeToPromise, 'pipeTo rejects with AbortError'); } - }); - return promise_rejects_dom(t, 'AbortError', rs.pipeTo(ws, { signal }), 'pipeTo should reject') - .then(() => { - assert_equals(ws.events.length, 4, 'only chunk "a" should have been written'); - assert_array_equals(ws.events.slice(0, 3), ['write', 'a', 'abort'], 'events should match'); - assert_equals(ws.events[3].name, 'AbortError', 'abort reason should be an AbortError'); - }); -}, 'abort should prevent further reads'); + const error = await pipeToPromise.catch(e => e); + assert_equals(signal.reason, error, 'signal.reason should be error'); + assert_equals(ws.events.length, 4, 'only chunk "a" should have been written'); + assert_array_equals(ws.events.slice(0, 3), ['write', 'a', 'abort'], 'events should match'); + assert_equals(ws.events[3], error, 'abort reason should be error'); + }, `(reason: '${reason}') abort should prevent further reads`); +} -promise_test(t => { - let readController; - const rs = new ReadableStream({ - start(c) { - readController = c; - c.enqueue('a'); - c.enqueue('b'); - } - }); - const abortController = new AbortController(); - const signal = abortController.signal; - let resolveWrite; - const writePromise = new Promise(resolve => { - resolveWrite = resolve; - }); - const ws = recordingWritableStream({ - write() { - return writePromise; +for (const reason of [null, undefined, error1]) { + promise_test(async t => { + let readController; + const rs = new ReadableStream({ + start(c) { + readController = c; + c.enqueue('a'); + c.enqueue('b'); + } + }); + const abortController = new AbortController(); + const signal = abortController.signal; + let resolveWrite; + const writePromise = new Promise(resolve => { + resolveWrite = resolve; + }); + const ws = recordingWritableStream({ + write() { + return writePromise; + } + }, new CountQueuingStrategy({ highWaterMark: Infinity })); + const pipeToPromise = rs.pipeTo(ws, { signal }); + await delay(0); + await abortController.abort(reason); + await readController.close(); // Make sure the test terminates when signal is not implemented. 
+ await resolveWrite(); + if (reason !== undefined) { + await promise_rejects_exactly(t, reason, pipeToPromise, 'pipeTo rejects with abort reason'); + } else { + await promise_rejects_dom(t, 'AbortError', pipeToPromise, 'pipeTo rejects with AbortError'); } - }, new CountQueuingStrategy({ highWaterMark: Infinity })); - const pipeToPromise = rs.pipeTo(ws, { signal }); - return delay(0).then(() => { - abortController.abort(); - readController.close(); // Make sure the test terminates when signal is not implemented. - resolveWrite(); - return promise_rejects_dom(t, 'AbortError', pipeToPromise, 'pipeTo should reject'); - }).then(() => { + const error = await pipeToPromise.catch(e => e); + assert_equals(signal.reason, error, 'signal.reason should be error'); assert_equals(ws.events.length, 6, 'chunks "a" and "b" should have been written'); assert_array_equals(ws.events.slice(0, 5), ['write', 'a', 'write', 'b', 'abort'], 'events should match'); - assert_equals(ws.events[5].name, 'AbortError', 'abort reason should be an AbortError'); - }); -}, 'all pending writes should complete on abort'); + assert_equals(ws.events[5], error, 'abort reason should be error'); + }, `(reason: '${reason}') all pending writes should complete on abort`); +} promise_test(t => { const rs = new ReadableStream({ @@ -373,3 +385,24 @@ promise_test(t => { assert_array_equals(rs.events, ['pull'], 'cancel should not have been called'); }); }, 'abort should do nothing after the writable is errored'); + +promise_test(async t => { + const rs = new ReadableStream({ + pull(c) { + c.enqueue(new Uint8Array([])); + }, + type: "bytes", + }); + const ws = new WritableStream(); + const [first, second] = rs.tee(); + + let aborted = false; + first.pipeTo(ws, { signal: AbortSignal.abort() }).catch(() => { + aborted = true; + }); + await delay(0); + assert_true(!aborted, "pipeTo should not resolve yet"); + await second.cancel(); + await delay(0); + assert_true(aborted, "pipeTo should be aborted now"); +}, "pipeTo on a teed readable byte stream should only be aborted when both branches are aborted"); diff --git a/test/fixtures/wpt/streams/piping/close-propagation-backward.any.js b/test/fixtures/wpt/streams/piping/close-propagation-backward.any.js index bd1e9cb92657b1..5ea47ab85c0c1f 100644 --- a/test/fixtures/wpt/streams/piping/close-propagation-backward.any.js +++ b/test/fixtures/wpt/streams/piping/close-propagation-backward.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/piping/close-propagation-forward.any.js b/test/fixtures/wpt/streams/piping/close-propagation-forward.any.js index fc3282eea74b40..71b6e262840090 100644 --- a/test/fixtures/wpt/streams/piping/close-propagation-forward.any.js +++ b/test/fixtures/wpt/streams/piping/close-propagation-forward.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/piping/error-propagation-backward.any.js b/test/fixtures/wpt/streams/piping/error-propagation-backward.any.js index 6dc203066e3d7e..ec74592f86effe 100644 --- a/test/fixtures/wpt/streams/piping/error-propagation-backward.any.js +++ b/test/fixtures/wpt/streams/piping/error-propagation-backward.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: 
script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/piping/error-propagation-forward.any.js b/test/fixtures/wpt/streams/piping/error-propagation-forward.any.js index f35ec665eec22f..482da2f8a88e18 100644 --- a/test/fixtures/wpt/streams/piping/error-propagation-forward.any.js +++ b/test/fixtures/wpt/streams/piping/error-propagation-forward.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/piping/flow-control.any.js b/test/fixtures/wpt/streams/piping/flow-control.any.js index db83c011f4a718..09c4420f872adc 100644 --- a/test/fixtures/wpt/streams/piping/flow-control.any.js +++ b/test/fixtures/wpt/streams/piping/flow-control.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/rs-utils.js // META: script=../resources/recording-streams.js diff --git a/test/fixtures/wpt/streams/piping/general.any.js b/test/fixtures/wpt/streams/piping/general.any.js index 2e02dfad78a0fa..bec3480f653944 100644 --- a/test/fixtures/wpt/streams/piping/general.any.js +++ b/test/fixtures/wpt/streams/piping/general.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/piping/multiple-propagation.any.js b/test/fixtures/wpt/streams/piping/multiple-propagation.any.js index c9a486f3f9ac2f..a78652fc06795e 100644 --- a/test/fixtures/wpt/streams/piping/multiple-propagation.any.js +++ b/test/fixtures/wpt/streams/piping/multiple-propagation.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/piping/pipe-through.any.js b/test/fixtures/wpt/streams/piping/pipe-through.any.js index 35dbb456b3e2c6..26b1cd26a3c82f 100644 --- a/test/fixtures/wpt/streams/piping/pipe-through.any.js +++ b/test/fixtures/wpt/streams/piping/pipe-through.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/rs-utils.js // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js @@ -266,3 +266,66 @@ test(() => { } }), 'pipeThrough should throw'); }, 'pipeThrough() should throw if an option getter grabs a writer'); + +test(() => { + const rs = new ReadableStream(); + const readable = new ReadableStream(); + const writable = new WritableStream(); + rs.pipeThrough({readable, writable}, null); +}, 'pipeThrough() should not throw if option is null'); + +test(() => { + const rs = new ReadableStream(); + const readable = new ReadableStream(); + const writable = new WritableStream(); + rs.pipeThrough({readable, writable}, {signal:undefined}); +}, 'pipeThrough() should not throw if signal is undefined'); + +function tryPipeThrough(pair, options) +{ + const rs = new ReadableStream(); + if (!pair) + pair = {readable:new ReadableStream(), writable:new WritableStream()}; + try { + rs.pipeThrough(pair, options) + } catch (e) { + return e; + } +} + +test(() => { + let result = tryPipeThrough({ + get readable() { + return new 
ReadableStream(); + }, + get writable() { + throw "writable threw"; + } + }, { }); + assert_equals(result, "writable threw"); + + result = tryPipeThrough({ + get readable() { + throw "readable threw"; + }, + get writable() { + throw "writable threw"; + } + }, { }); + assert_equals(result, "readable threw"); + + result = tryPipeThrough({ + get readable() { + throw "readable threw"; + }, + get writable() { + throw "writable threw"; + } + }, { + get preventAbort() { + throw "preventAbort threw"; + } + }); + assert_equals(result, "readable threw"); + +}, 'pipeThrough() should throw if readable/writable getters throw'); diff --git a/test/fixtures/wpt/streams/piping/then-interception.any.js b/test/fixtures/wpt/streams/piping/then-interception.any.js index 9f772ea5841d8f..543f916d940d9a 100644 --- a/test/fixtures/wpt/streams/piping/then-interception.any.js +++ b/test/fixtures/wpt/streams/piping/then-interception.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/piping/throwing-options.any.js b/test/fixtures/wpt/streams/piping/throwing-options.any.js index bc1cf328da61e6..b9f906778f632b 100644 --- a/test/fixtures/wpt/streams/piping/throwing-options.any.js +++ b/test/fixtures/wpt/streams/piping/throwing-options.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; class ThrowingOptions { diff --git a/test/fixtures/wpt/streams/piping/transform-streams.any.js b/test/fixtures/wpt/streams/piping/transform-streams.any.js index a368fecd6f00d6..caae9fbad8848a 100644 --- a/test/fixtures/wpt/streams/piping/transform-streams.any.js +++ b/test/fixtures/wpt/streams/piping/transform-streams.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; promise_test(() => { diff --git a/test/fixtures/wpt/streams/queuing-strategies.any.js b/test/fixtures/wpt/streams/queuing-strategies.any.js index 1846ea63e35459..fa959ebba28338 100644 --- a/test/fixtures/wpt/streams/queuing-strategies.any.js +++ b/test/fixtures/wpt/streams/queuing-strategies.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; const highWaterMarkConversions = new Map([ @@ -75,8 +75,23 @@ for (const QueuingStrategy of [CountQueuingStrategy, ByteLengthQueuingStrategy]) assert_equals(sc.size(), 2, 'size() on the subclass should override the parent'); assert_true(sc.subClassMethod(), 'subClassMethod() should work'); }, `${QueuingStrategy.name}: subclassing should work correctly`); + + test(() => { + const size = new QueuingStrategy({ highWaterMark: 5 }).size; + assert_false('prototype' in size); + }, `${QueuingStrategy.name}: size should not have a prototype property`); } +test(() => { + const size = new CountQueuingStrategy({ highWaterMark: 5 }).size; + assert_throws_js(TypeError, () => new size()); +}, `CountQueuingStrategy: size should not be a constructor`); + +test(() => { + const size = new ByteLengthQueuingStrategy({ highWaterMark: 5 }).size; + assert_throws_js(TypeError, () => new size({ byteLength: 1024 })); +}, `ByteLengthQueuingStrategy: size should not be a constructor`); + test(() => { const size = (new CountQueuingStrategy({ highWaterMark: 5 })).size; assert_equals(size.length, 0); diff --git a/test/fixtures/wpt/streams/readable-byte-streams/bad-buffers-and-views.any.js 
b/test/fixtures/wpt/streams/readable-byte-streams/bad-buffers-and-views.any.js index eed3a5ed4f8d02..3322116b191840 100644 --- a/test/fixtures/wpt/streams/readable-byte-streams/bad-buffers-and-views.any.js +++ b/test/fixtures/wpt/streams/readable-byte-streams/bad-buffers-and-views.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; promise_test(() => { @@ -203,6 +203,38 @@ async_test(t => { }, 'ReadableStream with byte source: respondWithNewView() throws if the supplied view is zero-length on a ' + 'non-zero-length buffer (in the readable state)'); +async_test(t => { + const stream = new ReadableStream({ + pull: t.step_func_done(c => { + const view = c.byobRequest.view.subarray(1, 2); + + assert_throws_js(RangeError, () => c.byobRequest.respondWithNewView(view)); + }), + type: 'bytes' + }); + const reader = stream.getReader({ mode: 'byob' }); + + reader.read(new Uint8Array([4, 5, 6])); +}, 'ReadableStream with byte source: respondWithNewView() throws if the supplied view has a different offset ' + + '(in the readable state)'); + +async_test(t => { + const stream = new ReadableStream({ + pull: t.step_func_done(c => { + c.close(); + + const view = c.byobRequest.view.subarray(1, 1); + + assert_throws_js(RangeError, () => c.byobRequest.respondWithNewView(view)); + }), + type: 'bytes' + }); + const reader = stream.getReader({ mode: 'byob' }); + + reader.read(new Uint8Array([4, 5, 6])); +}, 'ReadableStream with byte source: respondWithNewView() throws if the supplied view has a different offset ' + + '(in the closed state)'); + async_test(t => { const stream = new ReadableStream({ pull: t.step_func_done(c => { @@ -218,6 +250,23 @@ async_test(t => { }, 'ReadableStream with byte source: respondWithNewView() throws if the supplied view\'s buffer has a ' + 'different length (in the readable state)'); +async_test(t => { + // Tests https://github.com/nodejs/node/issues/41886 + const stream = new ReadableStream({ + pull: t.step_func_done(c => { + const view = new Uint8Array(new ArrayBuffer(11), 0, 3); + + assert_throws_js(RangeError, () => c.byobRequest.respondWithNewView(view)); + }), + type: 'bytes', + autoAllocateChunkSize: 10 + }); + const reader = stream.getReader(); + + reader.read(); +}, 'ReadableStream with byte source: respondWithNewView() throws if the supplied view\'s buffer has a ' + + 'different length (autoAllocateChunkSize)'); + async_test(t => { const stream = new ReadableStream({ pull: t.step_func_done(c => { diff --git a/test/fixtures/wpt/streams/readable-byte-streams/construct-byob-request.any.js b/test/fixtures/wpt/streams/readable-byte-streams/construct-byob-request.any.js index 1386d84599a4cd..8d460a1c81b7bd 100644 --- a/test/fixtures/wpt/streams/readable-byte-streams/construct-byob-request.any.js +++ b/test/fixtures/wpt/streams/readable-byte-streams/construct-byob-request.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/rs-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/readable-byte-streams/general.any.js b/test/fixtures/wpt/streams/readable-byte-streams/general.any.js index 9aa508225865c8..dd4fdc855786f2 100644 --- a/test/fixtures/wpt/streams/readable-byte-streams/general.any.js +++ b/test/fixtures/wpt/streams/readable-byte-streams/general.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/rs-utils.js // META: script=../resources/test-utils.js 'use strict'; @@ -236,15 
+236,27 @@ promise_test(t => { }); }, 'ReadableStream with byte source: Test that erroring a stream does not release a BYOB reader automatically'); -test(() => { +promise_test(async t => { const stream = new ReadableStream({ type: 'bytes' }); const reader = stream.getReader(); - reader.read(); - assert_throws_js(TypeError, () => reader.releaseLock(), 'reader.releaseLock() must throw'); -}, 'ReadableStream with byte source: releaseLock() on ReadableStreamDefaultReader with pending read() must throw'); + const read = reader.read(); + reader.releaseLock(); + await promise_rejects_js(t, TypeError, read, 'pending read must reject'); +}, 'ReadableStream with byte source: releaseLock() on ReadableStreamDefaultReader must reject pending read()'); + +promise_test(async t => { + const stream = new ReadableStream({ + type: 'bytes' + }); + + const reader = stream.getReader({ mode: 'byob' }); + const read = reader.read(new Uint8Array(1)); + reader.releaseLock(); + await promise_rejects_js(t, TypeError, read, 'pending read must reject'); +}, 'ReadableStream with byte source: releaseLock() on ReadableStreamBYOBReader must reject pending read()'); promise_test(() => { let pullCount = 0; @@ -2111,7 +2123,7 @@ promise_test(() => { }); }, 'calling respond() should throw when canceled'); -promise_test(() => { +promise_test(async t => { let resolvePullCalledPromise; const pullCalledPromise = new Promise(resolve => { resolvePullCalledPromise = resolve; @@ -2127,14 +2139,13 @@ promise_test(() => { type: 'bytes' }); const reader = rs.getReader({ mode: 'byob' }); - reader.read(new Uint8Array(16)); - return pullCalledPromise.then(() => { - resolvePull(); - return delay(0).then(() => { - assert_throws_js(TypeError, () => reader.releaseLock(), 'releaseLock() should throw'); - }); - }); -}, 'pull() resolving should not make releaseLock() possible'); + const read = reader.read(new Uint8Array(16)); + await pullCalledPromise; + resolvePull(); + await delay(0); + reader.releaseLock(); + await promise_rejects_js(t, TypeError, read, 'pending read should reject'); +}, 'pull() resolving should not resolve read()'); promise_test(() => { // Tests https://github.com/whatwg/streams/issues/686 @@ -2334,3 +2345,557 @@ promise_test(async t => { }, 'ReadableStream with byte source: respondWithNewView() with a transferred zero-length view ' + '(in the closed state)'); + +promise_test(async t => { + let controller; + let pullCount = 0; + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 10, + start: t.step_func((c) => { + controller = c; + }), + pull: t.step_func(() => { + ++pullCount; + }) + }); + + await flushAsyncEvents(); + assert_equals(pullCount, 0, 'pull() must not have been invoked yet'); + + const reader1 = rs.getReader(); + const read1 = reader1.read(); + assert_equals(pullCount, 1, 'pull() must have been invoked once'); + const byobRequest1 = controller.byobRequest; + assert_equals(byobRequest1.view.byteLength, 10, 'first byobRequest.view.byteLength'); + + // enqueue() must discard the auto-allocated BYOB request + controller.enqueue(new Uint8Array([1, 2, 3])); + assert_equals(byobRequest1.view, null, 'first byobRequest must be invalidated after enqueue()'); + + const result1 = await read1; + assert_false(result1.done, 'first result.done'); + const view1 = result1.value; + assert_equals(view1.byteOffset, 0, 'first result.value.byteOffset'); + assert_equals(view1.byteLength, 3, 'first result.value.byteLength'); + assert_array_equals([...new Uint8Array(view1.buffer)], [1, 2, 3], 'first 
result.value.buffer'); + + reader1.releaseLock(); + + // read(view) should work after discarding the auto-allocated BYOB request + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5, 6])); + assert_equals(pullCount, 2, 'pull() must have been invoked twice'); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2.view.byteOffset, 0, 'second byobRequest.view.byteOffset'); + assert_equals(byobRequest2.view.byteLength, 3, 'second byobRequest.view.byteLength'); + assert_array_equals([...new Uint8Array(byobRequest2.view.buffer)], [4, 5, 6], 'second byobRequest.view.buffer'); + + byobRequest2.respond(3); + assert_equals(byobRequest2.view, null, 'second byobRequest must be invalidated after respond()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + const view2 = result2.value; + assert_equals(view2.byteOffset, 0, 'second result.value.byteOffset'); + assert_equals(view2.byteLength, 3, 'second result.value.byteLength'); + assert_array_equals([...new Uint8Array(view2.buffer)], [4, 5, 6], 'second result.value.buffer'); + + reader2.releaseLock(); + assert_equals(pullCount, 2, 'pull() must only have been invoked twice'); +}, 'ReadableStream with byte source: enqueue() discards auto-allocated BYOB request'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint8Array([1, 2, 3])); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([1, 2, 3]), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5, 6])); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + assert_equals(controller.byobRequest, byobRequest1, 'byobRequest should be unchanged'); + assert_array_equals([...new Uint8Array(byobRequest1.view.buffer)], [1, 2, 3], 'byobRequest.view.buffer should be unchanged'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // respond() should fulfill the *second* read() request + byobRequest1.view[0] = 11; + byobRequest1.respond(1); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after respond()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([11, 5, 6]).subarray(0, 1), 'second result.value'); + +}, 'ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, respond()'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint8Array([1, 2, 3])); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([1, 2, 3]), 'first 
byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint16Array(1)); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + assert_equals(controller.byobRequest, byobRequest1, 'byobRequest should be unchanged'); + assert_array_equals([...new Uint8Array(byobRequest1.view.buffer)], [1, 2, 3], 'byobRequest.view.buffer should be unchanged'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // respond(1) should partially fill the second read(), but not yet fulfill it + byobRequest1.view[0] = 0x11; + byobRequest1.respond(1); + + // second BYOB request should use remaining buffer from the second read() + const byobRequest2 = controller.byobRequest; + assert_not_equals(byobRequest2, null, 'second byobRequest should exist'); + assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11, 0]).subarray(1, 2), 'second byobRequest.view'); + + // second respond(1) should fill the read request and fulfill it + byobRequest2.view[0] = 0x22; + byobRequest2.respond(1); + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + const view2 = result2.value; + assert_equals(view2.byteOffset, 0, 'second result.value.byteOffset'); + assert_equals(view2.byteLength, 2, 'second result.value.byteLength'); + const dataView2 = new DataView(view2.buffer, view2.byteOffset, view2.byteLength); + assert_equals(dataView2.getUint16(0), 0x1122, 'second result.value[0]'); + +}, 'ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader with ' + + '1 element Uint16Array, respond(1)'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint8Array([1, 2, 3])); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([1, 2, 3]), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5])); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + assert_equals(controller.byobRequest, byobRequest1, 'byobRequest should be unchanged'); + assert_array_equals([...new Uint8Array(byobRequest1.view.buffer)], [1, 2, 3], 'byobRequest.view.buffer should be unchanged'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // respond(3) should fulfill the second read(), and put 1 remaining byte in the queue + byobRequest1.view[0] = 6; + byobRequest1.view[1] = 7; + byobRequest1.view[2] = 8; + byobRequest1.respond(3); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after respond()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([6, 7]), 'second result.value'); + + // third read() should fulfill with the remaining byte + 
const result3 = await reader2.read(new Uint8Array([0, 0, 0])); + assert_false(result3.done, 'third result.done'); + assert_typed_array_equals(result3.value, new Uint8Array([8, 0, 0]).subarray(0, 1), 'third result.value'); + +}, 'ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader with ' + + '2 element Uint8Array, respond(3)'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint8Array([1, 2, 3])); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([1, 2, 3]), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5, 6])); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // respondWithNewView() should fulfill the *second* read() request + byobRequest1.view[0] = 11; + byobRequest1.view[1] = 12; + byobRequest1.respondWithNewView(byobRequest1.view.subarray(0, 2)); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after respondWithNewView()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([11, 12, 6]).subarray(0, 2), 'second result.value'); + +}, 'ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, respondWithNewView()'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint8Array([1, 2, 3])); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([1, 2, 3]), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5, 6])); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // enqueue() should fulfill the *second* read() request + controller.enqueue(new Uint8Array([11, 12])); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after enqueue()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([11, 12, 6]).subarray(0, 2), 'second result.value'); + +}, 'ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, enqueue()'); + +promise_test(async t => { + let controller; + const rs 
= new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint8Array([1, 2, 3])); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([1, 2, 3]), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5, 6])); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // close() followed by respond(0) should fulfill the second read() + controller.close(); + byobRequest1.respond(0); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after respond()'); + + const result2 = await read2; + assert_true(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([4, 5, 6]).subarray(0, 0), 'second result.value'); +}, 'ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, ' + + 'close(), respond(0)'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 4, + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader(); + const read1 = reader1.read(); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array(4), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader(); + const read2 = reader2.read(); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // respond() should fulfill the *second* read() request + byobRequest1.view[0] = 11; + byobRequest1.respond(1); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after respond()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([11, 0, 0, 0]).subarray(0, 1), 'second result.value'); + +}, 'ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), read() on second reader, respond()'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 4, + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader(); + const read1 = reader1.read(); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array(4), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + 
const reader2 = rs.getReader(); + const read2 = reader2.read(); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // enqueue() should fulfill the *second* read() request + controller.enqueue(new Uint8Array([11])); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after enqueue()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([11]), 'second result.value'); + +}, 'ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), read() on second reader, enqueue()'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 4, + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader(); + const read1 = reader1.read(); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array(4), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5, 6])); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // respond() should fulfill the *second* read() request + byobRequest1.view[0] = 11; + byobRequest1.respond(1); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after respond()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([11, 5, 6]).subarray(0, 1), 'second result.value'); + +}, 'ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), read(view) on second reader, respond()'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 4, + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader(); + const read1 = reader1.read(); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array(4), 'first byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint8Array([4, 5, 6])); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // enqueue() should fulfill the *second* read() request + controller.enqueue(new Uint8Array([11])); + const byobRequest2 = controller.byobRequest; + assert_equals(byobRequest2, null, 'byobRequest should be null after enqueue()'); + + const result2 = await read2; + assert_false(result2.done, 
'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([11, 5, 6]).subarray(0, 1), 'second result.value'); + +}, 'ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), read(view) on second reader, enqueue()'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint16Array(1)); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([0, 0]), 'first byobRequest.view'); + + // respond(1) should partially fill the first read(), but not yet fulfill it + byobRequest1.view[0] = 0x11; + byobRequest1.respond(1); + const byobRequest2 = controller.byobRequest; + assert_not_equals(byobRequest2, null, 'second byobRequest should exist'); + assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11, 0]).subarray(1, 2), 'second byobRequest.view'); + + // releaseLock() should reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader({ mode: 'byob' }); + const read2 = reader2.read(new Uint16Array(1)); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + assert_equals(controller.byobRequest, byobRequest2, 'byobRequest should be unchanged'); + assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11, 0]).subarray(1, 2), 'byobRequest.view should be unchanged'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // second respond(1) should fill the read request and fulfill it + byobRequest2.view[0] = 0x22; + byobRequest2.respond(1); + assert_equals(controller.byobRequest, null, 'byobRequest should be invalidated after second respond()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + const view2 = result2.value; + assert_equals(view2.byteOffset, 0, 'second result.value.byteOffset'); + assert_equals(view2.byteLength, 2, 'second result.value.byteLength'); + const dataView2 = new DataView(view2.buffer, view2.byteOffset, view2.byteLength); + assert_equals(dataView2.getUint16(0), 0x1122, 'second result.value[0]'); + +}, 'ReadableStream with byte source: read(view) with 1 element Uint16Array, respond(1), releaseLock(), read(view) on ' + + 'second reader with 1 element Uint16Array, respond(1)'); + +promise_test(async t => { + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start: t.step_func((c) => { + controller = c; + }) + }); + await flushAsyncEvents(); + + const reader1 = rs.getReader({ mode: 'byob' }); + const read1 = reader1.read(new Uint16Array(1)); + const byobRequest1 = controller.byobRequest; + assert_not_equals(byobRequest1, null, 'first byobRequest should exist'); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([0, 0]), 'first byobRequest.view'); + + // respond(1) should partially fill the first read(), but not yet fulfill it + byobRequest1.view[0] = 0x11; + byobRequest1.respond(1); + const byobRequest2 = controller.byobRequest; + assert_not_equals(byobRequest2, null, 'second byobRequest should exist'); + assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11, 0]).subarray(1, 2), 'second byobRequest.view'); + + // releaseLock() should 
reject the pending read, but *not* invalidate the BYOB request + reader1.releaseLock(); + const reader2 = rs.getReader(); + const read2 = reader2.read(); + assert_not_equals(controller.byobRequest, null, 'byobRequest should not be invalidated after releaseLock()'); + assert_equals(controller.byobRequest, byobRequest2, 'byobRequest should be unchanged'); + assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11, 0]).subarray(1, 2), 'byobRequest.view should be unchanged'); + await promise_rejects_js(t, TypeError, read1, 'pending read must reject after releaseLock()'); + + // enqueue() should fulfill the read request and put remaining byte in the queue + controller.enqueue(new Uint8Array([0x22])); + assert_equals(controller.byobRequest, null, 'byobRequest should be invalidated after second respond()'); + + const result2 = await read2; + assert_false(result2.done, 'second result.done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x11]), 'second result.value'); + + const result3 = await reader2.read(); + assert_false(result3.done, 'third result.done'); + assert_typed_array_equals(result3.value, new Uint8Array([0x22]), 'third result.value'); + +}, 'ReadableStream with byte source: read(view) with 1 element Uint16Array, respond(1), releaseLock(), read() on ' + + 'second reader, enqueue()'); + +promise_test(async t => { + // Tests https://github.com/nodejs/node/issues/41886 + const stream = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 10, + pull: t.step_func((c) => { + const newView = new Uint8Array(c.byobRequest.view.buffer, 0, 3); + newView.set([20, 21, 22]); + c.byobRequest.respondWithNewView(newView); + }) + }); + + const reader = stream.getReader(); + const result = await reader.read(); + assert_false(result.done, 'result.done'); + + const view = result.value; + assert_equals(view.byteOffset, 0, 'result.value.byteOffset'); + assert_equals(view.byteLength, 3, 'result.value.byteLength'); + assert_equals(view.buffer.byteLength, 10, 'result.value.buffer.byteLength'); + assert_array_equals([...new Uint8Array(view)], [20, 21, 22], 'result.value'); +}, 'ReadableStream with byte source: autoAllocateChunkSize, read(), respondWithNewView()'); diff --git a/test/fixtures/wpt/streams/readable-byte-streams/non-transferable-buffers.any.js b/test/fixtures/wpt/streams/readable-byte-streams/non-transferable-buffers.any.js index 7c0bffb78710fe..e8ea3c4f966763 100644 --- a/test/fixtures/wpt/streams/readable-byte-streams/non-transferable-buffers.any.js +++ b/test/fixtures/wpt/streams/readable-byte-streams/non-transferable-buffers.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; promise_test(async t => { diff --git a/test/fixtures/wpt/streams/readable-byte-streams/respond-after-enqueue.any.js b/test/fixtures/wpt/streams/readable-byte-streams/respond-after-enqueue.any.js new file mode 100644 index 00000000000000..b93cec97391e13 --- /dev/null +++ b/test/fixtures/wpt/streams/readable-byte-streams/respond-after-enqueue.any.js @@ -0,0 +1,55 @@ +// META: global=window,worker + +'use strict'; + +// Repro for Blink bug https://crbug.com/1255762. 
+promise_test(async () => { + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 10, + pull(controller) { + controller.enqueue(new Uint8Array([1, 2, 3])); + controller.byobRequest.respond(10); + } + }); + + const reader = rs.getReader(); + const {value, done} = await reader.read(); + assert_false(done, 'done should not be true'); + assert_array_equals(value, [1, 2, 3], 'value should be 3 bytes'); +}, 'byobRequest.respond() after enqueue() should not crash'); + +promise_test(async () => { + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 10, + pull(controller) { + const byobRequest = controller.byobRequest; + controller.enqueue(new Uint8Array([1, 2, 3])); + byobRequest.respond(10); + } + }); + + const reader = rs.getReader(); + const {value, done} = await reader.read(); + assert_false(done, 'done should not be true'); + assert_array_equals(value, [1, 2, 3], 'value should be 3 bytes'); +}, 'byobRequest.respond() with cached byobRequest after enqueue() should not crash'); + +promise_test(async () => { + const rs = new ReadableStream({ + type: 'bytes', + autoAllocateChunkSize: 10, + pull(controller) { + controller.enqueue(new Uint8Array([1, 2, 3])); + controller.byobRequest.respond(2); + } + }); + + const reader = rs.getReader(); + const [read1, read2] = await Promise.all([reader.read(), reader.read()]); + assert_false(read1.done, 'read1.done should not be true'); + assert_array_equals(read1.value, [1, 2, 3], 'read1.value should be 3 bytes'); + assert_false(read2.done, 'read2.done should not be true'); + assert_array_equals(read2.value, [0, 0], 'read2.value should be 2 bytes'); +}, 'byobRequest.respond() after enqueue() with double read should not crash'); diff --git a/test/fixtures/wpt/streams/readable-byte-streams/tee.any.js b/test/fixtures/wpt/streams/readable-byte-streams/tee.any.js new file mode 100644 index 00000000000000..85844669cd9088 --- /dev/null +++ b/test/fixtures/wpt/streams/readable-byte-streams/tee.any.js @@ -0,0 +1,936 @@ +// META: global=window,worker +// META: script=../resources/rs-utils.js +// META: script=../resources/test-utils.js +// META: script=../resources/recording-streams.js +// META: script=../resources/rs-test-templates.js +'use strict'; + +test(() => { + + const rs = new ReadableStream({ type: 'bytes' }); + const result = rs.tee(); + + assert_true(Array.isArray(result), 'return value should be an array'); + assert_equals(result.length, 2, 'array should have length 2'); + assert_equals(result[0].constructor, ReadableStream, '0th element should be a ReadableStream'); + assert_equals(result[1].constructor, ReadableStream, '1st element should be a ReadableStream'); + +}, 'ReadableStream teeing with byte source: rs.tee() returns an array of two ReadableStreams'); + +promise_test(async t => { + + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + c.enqueue(new Uint8Array([0x01])); + c.enqueue(new Uint8Array([0x02])); + c.close(); + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader({ mode: 'byob' }); + + reader2.closed.then(t.unreached_func('branch2 should not be closed')); + + { + const result = await reader1.read(new Uint8Array(1)); + assert_equals(result.done, false, 'done'); + assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'value'); + } + + { + const result = await reader1.read(new Uint8Array(1)); + assert_equals(result.done, false, 'done'); + assert_typed_array_equals(result.value, new 
Uint8Array([0x02]), 'value'); + } + + { + const result = await reader1.read(new Uint8Array(1)); + assert_equals(result.done, true, 'done'); + assert_typed_array_equals(result.value, new Uint8Array([0]).subarray(0, 0), 'value'); + } + + { + const result = await reader2.read(new Uint8Array(1)); + assert_equals(result.done, false, 'done'); + assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'value'); + } + + await reader1.closed; + +}, 'ReadableStream teeing with byte source: should be able to read one branch to the end without affecting the other'); + +promise_test(async () => { + + let pullCount = 0; + const enqueuedChunk = new Uint8Array([0x01]); + const rs = new ReadableStream({ + type: 'bytes', + pull(c) { + ++pullCount; + if (pullCount === 1) { + c.enqueue(enqueuedChunk); + } + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader(); + const reader2 = branch2.getReader(); + + const [result1, result2] = await Promise.all([reader1.read(), reader2.read()]); + assert_equals(result1.done, false, 'reader1 done'); + assert_equals(result2.done, false, 'reader2 done'); + + const view1 = result1.value; + const view2 = result2.value; + assert_typed_array_equals(view1, new Uint8Array([0x01]), 'reader1 value'); + assert_typed_array_equals(view2, new Uint8Array([0x01]), 'reader2 value'); + + assert_not_equals(view1.buffer, view2.buffer, 'chunks should have different buffers'); + assert_not_equals(enqueuedChunk.buffer, view1.buffer, 'enqueued chunk and branch1\'s chunk should have different buffers'); + assert_not_equals(enqueuedChunk.buffer, view2.buffer, 'enqueued chunk and branch2\'s chunk should have different buffers'); + +}, 'ReadableStream teeing with byte source: chunks should be cloned for each branch'); + +promise_test(async () => { + + let pullCount = 0; + const rs = new ReadableStream({ + type: 'bytes', + pull(c) { + ++pullCount; + if (pullCount === 1) { + c.byobRequest.view[0] = 0x01; + c.byobRequest.respond(1); + } + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader(); + const buffer = new Uint8Array([42, 42, 42]).buffer; + + { + const result = await reader1.read(new Uint8Array(buffer, 0, 1)); + assert_equals(result.done, false, 'done'); + assert_typed_array_equals(result.value, new Uint8Array([0x01, 42, 42]).subarray(0, 1), 'value'); + } + + { + const result = await reader2.read(); + assert_equals(result.done, false, 'done'); + assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'value'); + } + +}, 'ReadableStream teeing with byte source: chunks for BYOB requests from branch 1 should be cloned to branch 2'); + +promise_test(async t => { + + const theError = { name: 'boo!' 
}; + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + c.enqueue(new Uint8Array([0x01])); + c.enqueue(new Uint8Array([0x02])); + }, + pull() { + throw theError; + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader({ mode: 'byob' }); + + { + const result = await reader1.read(new Uint8Array(1)); + assert_equals(result.done, false, 'first read from branch1 should not be done'); + assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'first read from branch1'); + } + + { + const result = await reader1.read(new Uint8Array(1)); + assert_equals(result.done, false, 'second read from branch1 should not be done'); + assert_typed_array_equals(result.value, new Uint8Array([0x02]), 'second read from branch1'); + } + + await promise_rejects_exactly(t, theError, reader1.read(new Uint8Array(1))); + await promise_rejects_exactly(t, theError, reader2.read(new Uint8Array(1))); + + await Promise.all([ + promise_rejects_exactly(t, theError, reader1.closed), + promise_rejects_exactly(t, theError, reader2.closed) + ]); + +}, 'ReadableStream teeing with byte source: errors in the source should propagate to both branches'); + +promise_test(async () => { + + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + c.enqueue(new Uint8Array([0x01])); + c.enqueue(new Uint8Array([0x02])); + c.close(); + } + }); + + const [branch1, branch2] = rs.tee(); + branch1.cancel(); + + const [chunks1, chunks2] = await Promise.all([readableStreamToArray(branch1), readableStreamToArray(branch2)]); + assert_array_equals(chunks1, [], 'branch1 should have no chunks'); + assert_equals(chunks2.length, 2, 'branch2 should have two chunks'); + assert_typed_array_equals(chunks2[0], new Uint8Array([0x01]), 'first chunk from branch2'); + assert_typed_array_equals(chunks2[1], new Uint8Array([0x02]), 'second chunk from branch2'); + +}, 'ReadableStream teeing with byte source: canceling branch1 should not impact branch2'); + +promise_test(async () => { + + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + c.enqueue(new Uint8Array([0x01])); + c.enqueue(new Uint8Array([0x02])); + c.close(); + } + }); + + const [branch1, branch2] = rs.tee(); + branch2.cancel(); + + const [chunks1, chunks2] = await Promise.all([readableStreamToArray(branch1), readableStreamToArray(branch2)]); + assert_equals(chunks1.length, 2, 'branch1 should have two chunks'); + assert_typed_array_equals(chunks1[0], new Uint8Array([0x01]), 'first chunk from branch1'); + assert_typed_array_equals(chunks1[1], new Uint8Array([0x02]), 'second chunk from branch1'); + assert_array_equals(chunks2, [], 'branch2 should have no chunks'); + +}, 'ReadableStream teeing with byte source: canceling branch2 should not impact branch1'); + +templatedRSTeeCancel('ReadableStream teeing with byte source', (extras) => { + return new ReadableStream({ type: 'bytes', ...extras }); +}); + +promise_test(async () => { + + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + controller = c; + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader({ mode: 'byob' }); + + const promise = Promise.all([reader1.closed, reader2.closed]); + + controller.close(); + + // The branches are created with HWM 0, so we need to read from at least one of them + // to observe the stream becoming closed. 
+ const read1 = await reader1.read(new Uint8Array(1)); + assert_equals(read1.done, true, 'first read from branch1 should be done'); + + await promise; + +}, 'ReadableStream teeing with byte source: closing the original should close the branches'); + +promise_test(async t => { + + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + controller = c; + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader({ mode: 'byob' }); + + const theError = { name: 'boo!' }; + const promise = Promise.all([ + promise_rejects_exactly(t, theError, reader1.closed), + promise_rejects_exactly(t, theError, reader2.closed) + ]); + + controller.error(theError); + await promise; + +}, 'ReadableStream teeing with byte source: erroring the original should immediately error the branches'); + +promise_test(async t => { + + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + controller = c; + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader(); + const reader2 = branch2.getReader(); + + const theError = { name: 'boo!' }; + const promise = Promise.all([ + promise_rejects_exactly(t, theError, reader1.read()), + promise_rejects_exactly(t, theError, reader2.read()) + ]); + + controller.error(theError); + await promise; + +}, 'ReadableStream teeing with byte source: erroring the original should error pending reads from default reader'); + +promise_test(async t => { + + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + controller = c; + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader({ mode: 'byob' }); + + const theError = { name: 'boo!' }; + const promise = Promise.all([ + promise_rejects_exactly(t, theError, reader1.read(new Uint8Array(1))), + promise_rejects_exactly(t, theError, reader2.read(new Uint8Array(1))) + ]); + + controller.error(theError); + await promise; + +}, 'ReadableStream teeing with byte source: erroring the original should error pending reads from BYOB reader'); + +promise_test(async () => { + + let controller; + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + controller = c; + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader({ mode: 'byob' }); + const cancelPromise = reader2.cancel(); + + controller.enqueue(new Uint8Array([0x01])); + + const read1 = await reader1.read(new Uint8Array(1)); + assert_equals(read1.done, false, 'first read() from branch1 should not be done'); + assert_typed_array_equals(read1.value, new Uint8Array([0x01]), 'first read() from branch1'); + + controller.close(); + + const read2 = await reader1.read(new Uint8Array(1)); + assert_equals(read2.done, true, 'second read() from branch1 should be done'); + + await Promise.all([ + reader1.closed, + cancelPromise + ]); + +}, 'ReadableStream teeing with byte source: canceling branch2 should finish when branch1 reads until end of stream'); + +promise_test(async t => { + + let controller; + const theError = { name: 'boo!'
}; + const rs = new ReadableStream({ + type: 'bytes', + start(c) { + controller = c; + } + }); + + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader({ mode: 'byob' }); + const cancelPromise = reader2.cancel(); + + controller.error(theError); + + await Promise.all([ + promise_rejects_exactly(t, theError, reader1.read(new Uint8Array(1))), + cancelPromise + ]); + +}, 'ReadableStream teeing with byte source: canceling branch2 should finish when original stream errors'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + + // Create two branches, each with a HWM of 0. This should result in no chunks being pulled. + rs.tee(); + + await flushAsyncEvents(); + assert_array_equals(rs.events, [], 'pull should not be called'); + +}, 'ReadableStream teeing with byte source: should not pull any chunks if no branches are reading'); + +promise_test(async () => { + + const rs = recordingReadableStream({ + type: 'bytes', + pull(controller) { + controller.enqueue(new Uint8Array([0x01])); + } + }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + await Promise.all([ + reader1.read(new Uint8Array(1)), + reader2.read(new Uint8Array(1)) + ]); + assert_array_equals(rs.events, ['pull'], 'pull should be called once'); + +}, 'ReadableStream teeing with byte source: should only pull enough to fill the emptiest queue'); + +promise_test(async t => { + + const rs = recordingReadableStream({ type: 'bytes' }); + const theError = { name: 'boo!' }; + + rs.controller.error(theError); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + await flushAsyncEvents(); + assert_array_equals(rs.events, [], 'pull should not be called'); + + await Promise.all([ + promise_rejects_exactly(t, theError, reader1.closed), + promise_rejects_exactly(t, theError, reader2.closed) + ]); + +}, 'ReadableStream teeing with byte source: should not pull when original is already errored'); + +for (const branch of [1, 2]) { + promise_test(async t => { + + const rs = recordingReadableStream({ type: 'bytes' }); + const theError = { name: 'boo!' }; + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + await flushAsyncEvents(); + assert_array_equals(rs.events, [], 'pull should not be called'); + + const reader = (branch === 1) ? reader1 : reader2; + const read1 = reader.read(new Uint8Array(1)); + + await flushAsyncEvents(); + assert_array_equals(rs.events, ['pull'], 'pull should be called once'); + + rs.controller.error(theError); + + await Promise.all([ + promise_rejects_exactly(t, theError, read1), + promise_rejects_exactly(t, theError, reader1.closed), + promise_rejects_exactly(t, theError, reader2.closed) + ]); + + await flushAsyncEvents(); + assert_array_equals(rs.events, ['pull'], 'pull should be called once'); + + }, `ReadableStream teeing with byte source: stops pulling when original stream errors while branch ${branch} is reading`); +} + +promise_test(async t => { + + const rs = recordingReadableStream({ type: 'bytes' }); + const theError = { name: 'boo!'
}; + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + await flushAsyncEvents(); + assert_array_equals(rs.events, [], 'pull should not be called'); + + const read1 = reader1.read(new Uint8Array(1)); + const read2 = reader2.read(new Uint8Array(1)); + + await flushAsyncEvents(); + assert_array_equals(rs.events, ['pull'], 'pull should be called once'); + + rs.controller.error(theError); + + await Promise.all([ + promise_rejects_exactly(t, theError, read1), + promise_rejects_exactly(t, theError, read2), + promise_rejects_exactly(t, theError, reader1.closed), + promise_rejects_exactly(t, theError, reader2.closed) + ]); + + await flushAsyncEvents(); + assert_array_equals(rs.events, ['pull'], 'pull should be called once'); + +}, 'ReadableStream teeing with byte source: stops pulling when original stream errors while both branches are reading'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + const read1 = reader1.read(new Uint8Array([0x11])); + const read2 = reader2.read(new Uint8Array([0x22])); + + const cancel1 = reader1.cancel(); + await flushAsyncEvents(); + const cancel2 = reader2.cancel(); + + const result1 = await read1; + assert_object_equals(result1, { value: undefined, done: true }); + const result2 = await read2; + assert_object_equals(result2, { value: undefined, done: true }); + + await Promise.all([cancel1, cancel2]); + +}, 'ReadableStream teeing with byte source: canceling both branches in sequence with delay'); + +promise_test(async t => { + + const theError = { name: 'boo!' }; + const rs = new ReadableStream({ + type: 'bytes', + cancel() { + throw theError; + } + }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + const read1 = reader1.read(new Uint8Array([0x11])); + const read2 = reader2.read(new Uint8Array([0x22])); + + const cancel1 = reader1.cancel(); + await flushAsyncEvents(); + const cancel2 = reader2.cancel(); + + const result1 = await read1; + assert_object_equals(result1, { value: undefined, done: true }); + const result2 = await read2; + assert_object_equals(result2, { value: undefined, done: true }); + + await Promise.all([ + promise_rejects_exactly(t, theError, cancel1), + promise_rejects_exactly(t, theError, cancel2) + ]); + +}, 'ReadableStream teeing with byte source: failing to cancel when canceling both branches in sequence with delay'); + +promise_test(async () => { + + let cancelResolve; + const cancelCalled = new Promise((resolve) => { + cancelResolve = resolve; + }); + const rs = recordingReadableStream({ + type: 'bytes', + cancel() { + cancelResolve(); + } + }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + const read1 = reader1.read(new Uint8Array([0x11])); + await flushAsyncEvents(); + const read2 = reader2.read(new Uint8Array([0x22])); + await flushAsyncEvents(); + + // We are reading into branch1's buffer. + const byobRequest1 = rs.controller.byobRequest; + assert_not_equals(byobRequest1, null); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([0x11]), 'byobRequest1.view'); + + // Cancelling branch1 should not affect the BYOB request. 
+ const cancel1 = reader1.cancel(); + const result1 = await read1; + assert_equals(result1.done, true); + assert_equals(result1.value, undefined); + await flushAsyncEvents(); + const byobRequest2 = rs.controller.byobRequest; + assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11]), 'byobRequest2.view'); + + // Cancelling branch2 should invalidate the BYOB request. + const cancel2 = reader2.cancel(); + await cancelCalled; + const byobRequest3 = rs.controller.byobRequest; + assert_equals(byobRequest3, null); + const result2 = await read2; + assert_equals(result2.done, true); + assert_equals(result2.value, undefined); + + await Promise.all([cancel1, cancel2]); + +}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, cancel branch2'); + +promise_test(async () => { + + let cancelResolve; + const cancelCalled = new Promise((resolve) => { + cancelResolve = resolve; + }); + const rs = recordingReadableStream({ + type: 'bytes', + cancel() { + cancelResolve(); + } + }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + const read1 = reader1.read(new Uint8Array([0x11])); + await flushAsyncEvents(); + const read2 = reader2.read(new Uint8Array([0x22])); + await flushAsyncEvents(); + + // We are reading into branch1's buffer. + const byobRequest1 = rs.controller.byobRequest; + assert_not_equals(byobRequest1, null); + assert_typed_array_equals(byobRequest1.view, new Uint8Array([0x11]), 'byobRequest1.view'); + + // Cancelling branch2 should not affect the BYOB request. + const cancel2 = reader2.cancel(); + const result2 = await read2; + assert_equals(result2.done, true); + assert_equals(result2.value, undefined); + await flushAsyncEvents(); + const byobRequest2 = rs.controller.byobRequest; + assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11]), 'byobRequest2.view'); + + // Cancelling branch1 should invalidate the BYOB request. + const cancel1 = reader1.cancel(); + await cancelCalled; + const byobRequest3 = rs.controller.byobRequest; + assert_equals(byobRequest3, null); + const result1 = await read1; + assert_equals(result1.done, true); + assert_equals(result1.value, undefined); + + await Promise.all([cancel1, cancel2]); + +}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, cancel branch1'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + const read1 = reader1.read(new Uint8Array([0x11])); + await flushAsyncEvents(); + const read2 = reader2.read(new Uint8Array([0x22])); + await flushAsyncEvents(); + + // We are reading into branch1's buffer. + assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'first byobRequest.view'); + + // Cancelling branch2 should not affect the BYOB request. + reader2.cancel(); + const result2 = await read2; + assert_equals(result2.done, true); + assert_equals(result2.value, undefined); + await flushAsyncEvents(); + assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'second byobRequest.view'); + + // Respond to the BYOB request. + rs.controller.byobRequest.view[0] = 0x33; + rs.controller.byobRequest.respond(1); + + // branch1 should receive the read chunk.
+ const result1 = await read1; + assert_equals(result1.done, false); + assert_typed_array_equals(result1.value, new Uint8Array([0x33]), 'first read() from branch1'); + +}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, enqueue to branch1'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + const read1 = reader1.read(new Uint8Array([0x11])); + await flushAsyncEvents(); + const read2 = reader2.read(new Uint8Array([0x22])); + await flushAsyncEvents(); + + // We are reading into branch1's buffer. + assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'first byobRequest.view'); + + // Cancelling branch1 should not affect the BYOB request. + reader1.cancel(); + const result1 = await read1; + assert_equals(result1.done, true); + assert_equals(result1.value, undefined); + await flushAsyncEvents(); + assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'second byobRequest.view'); + + // Respond to the BYOB request. + rs.controller.byobRequest.view[0] = 0x33; + rs.controller.byobRequest.respond(1); + + // branch2 should receive the read chunk. + const result2 = await read2; + assert_equals(result2.done, false); + assert_typed_array_equals(result2.value, new Uint8Array([0x33]), 'first read() from branch2'); + +}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, respond to branch2'); + +promise_test(async () => { + + let pullCount = 0; + const byobRequestDefined = []; + const rs = new ReadableStream({ + type: 'bytes', + pull(c) { + ++pullCount; + byobRequestDefined.push(c.byobRequest !== null); + c.enqueue(new Uint8Array([pullCount])); + } + }); + + const [branch1, _] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + + const result1 = await reader1.read(new Uint8Array([0x11])); + assert_equals(result1.done, false, 'first read should not be done'); + assert_typed_array_equals(result1.value, new Uint8Array([0x1]), 'first read'); + assert_equals(pullCount, 1, 'pull() should be called once'); + assert_equals(byobRequestDefined[0], true, 'should have created a BYOB request for first read'); + + reader1.releaseLock(); + const reader2 = branch1.getReader(); + + const result2 = await reader2.read(); + assert_equals(result2.done, false, 'second read should not be done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x2]), 'second read'); + assert_equals(pullCount, 2, 'pull() should be called twice'); + assert_equals(byobRequestDefined[1], false, 'should not have created a BYOB request for second read'); + +}, 'ReadableStream teeing with byte source: pull with BYOB reader, then pull with default reader'); + +promise_test(async () => { + + let pullCount = 0; + const byobRequestDefined = []; + const rs = new ReadableStream({ + type: 'bytes', + pull(c) { + ++pullCount; + byobRequestDefined.push(c.byobRequest !== null); + c.enqueue(new Uint8Array([pullCount])); + } + }); + + const [branch1, _] = rs.tee(); + const reader1 = branch1.getReader(); + + const result1 = await reader1.read(); + assert_equals(result1.done, false, 'first read should not be done'); + assert_typed_array_equals(result1.value, new Uint8Array([0x1]), 'first read'); + assert_equals(pullCount, 1, 'pull() should be called once'); + assert_equals(byobRequestDefined[0], false, 'should not have created a BYOB request for first read'); + + 
reader1.releaseLock(); + const reader2 = branch1.getReader({ mode: 'byob' }); + + const result2 = await reader2.read(new Uint8Array([0x22])); + assert_equals(result2.done, false, 'second read should not be done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x2]), 'second read'); + assert_equals(pullCount, 2, 'pull() should be called twice'); + assert_equals(byobRequestDefined[1], true, 'should have created a BYOB request for second read'); + +}, 'ReadableStream teeing with byte source: pull with default reader, then pull with BYOB reader'); + +promise_test(async () => { + + const rs = recordingReadableStream({ + type: 'bytes' + }); + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + + // Wait for each branch's start() promise to resolve. + await flushAsyncEvents(); + + const read2 = reader2.read(new Uint8Array([0x22])); + const read1 = reader1.read(new Uint8Array([0x11])); + await flushAsyncEvents(); + + // branch2 should provide the BYOB request. + const byobRequest = rs.controller.byobRequest; + assert_typed_array_equals(byobRequest.view, new Uint8Array([0x22]), 'first BYOB request'); + byobRequest.view[0] = 0x01; + byobRequest.respond(1); + + const result1 = await read1; + assert_equals(result1.done, false, 'first read should not be done'); + assert_typed_array_equals(result1.value, new Uint8Array([0x1]), 'first read'); + + const result2 = await read2; + assert_equals(result2.done, false, 'second read should not be done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x1]), 'second read'); + +}, 'ReadableStream teeing with byte source: read from branch2, then read from branch1'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader(); + const reader2 = branch2.getReader({ mode: 'byob' }); + await flushAsyncEvents(); + + const read1 = reader1.read(); + const read2 = reader2.read(new Uint8Array([0x22])); + await flushAsyncEvents(); + + // There should be no BYOB request. + assert_equals(rs.controller.byobRequest, null, 'first BYOB request'); + + // Close the stream. + rs.controller.close(); + + const result1 = await read1; + assert_equals(result1.done, true, 'read from branch1 should be done'); + assert_equals(result1.value, undefined, 'read from branch1'); + + // branch2 should get its buffer back. + const result2 = await read2; + assert_equals(result2.done, true, 'read from branch2 should be done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x22]).subarray(0, 0), 'read from branch2'); + +}, 'ReadableStream teeing with byte source: read from branch1 with default reader, then close while branch2 has pending BYOB read'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + const [branch1, branch2] = rs.tee(); + const reader1 = branch1.getReader({ mode: 'byob' }); + const reader2 = branch2.getReader(); + await flushAsyncEvents(); + + const read2 = reader2.read(); + const read1 = reader1.read(new Uint8Array([0x11])); + await flushAsyncEvents(); + + // There should be no BYOB request. + assert_equals(rs.controller.byobRequest, null, 'first BYOB request'); + + // Close the stream. + rs.controller.close(); + + const result2 = await read2; + assert_equals(result2.done, true, 'read from branch2 should be done'); + assert_equals(result2.value, undefined, 'read from branch2'); + + // branch1 should get its buffer back. 
+ const result1 = await read1; + assert_equals(result1.done, true, 'read from branch1 should be done'); + assert_typed_array_equals(result1.value, new Uint8Array([0x11]).subarray(0, 0), 'read from branch1'); + +}, 'ReadableStream teeing with byte source: read from branch2 with default reader, then close while branch1 has pending BYOB read'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + await flushAsyncEvents(); + + const read1 = reader1.read(new Uint8Array([0x11])); + const read2 = reader2.read(new Uint8Array([0x22])); + await flushAsyncEvents(); + + // branch1 should provide the BYOB request. + const byobRequest = rs.controller.byobRequest; + assert_typed_array_equals(byobRequest.view, new Uint8Array([0x11]), 'first BYOB request'); + + // Close the stream. + rs.controller.close(); + byobRequest.respond(0); + + // Both branches should get their buffers back. + const result1 = await read1; + assert_equals(result1.done, true, 'first read should be done'); + assert_typed_array_equals(result1.value, new Uint8Array([0x11]).subarray(0, 0), 'first read'); + + const result2 = await read2; + assert_equals(result2.done, true, 'second read should be done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x22]).subarray(0, 0), 'second read'); + +}, 'ReadableStream teeing with byte source: close when both branches have pending BYOB reads'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader()); + const branch1Reads = [reader1.read(), reader1.read()]; + const branch2Reads = [reader2.read(), reader2.read()]; + + await flushAsyncEvents(); + rs.controller.enqueue(new Uint8Array([0x11])); + rs.controller.close(); + + const result1 = await branch1Reads[0]; + assert_equals(result1.done, false, 'first read() from branch1 should be not done'); + assert_typed_array_equals(result1.value, new Uint8Array([0x11]), 'first chunk from branch1 should be correct'); + const result2 = await branch2Reads[0]; + assert_equals(result2.done, false, 'first read() from branch2 should be not done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x11]), 'first chunk from branch2 should be correct'); + + assert_object_equals(await branch1Reads[1], { value: undefined, done: true }, 'second read() from branch1 should be done'); + assert_object_equals(await branch2Reads[1], { value: undefined, done: true }, 'second read() from branch2 should be done'); + +}, 'ReadableStream teeing with byte source: enqueue() and close() while both branches are pulling'); + +promise_test(async () => { + + const rs = recordingReadableStream({ type: 'bytes' }); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' })); + const branch1Reads = [reader1.read(new Uint8Array(1)), reader1.read(new Uint8Array(1))]; + const branch2Reads = [reader2.read(new Uint8Array(1)), reader2.read(new Uint8Array(1))]; + + await flushAsyncEvents(); + rs.controller.byobRequest.view[0] = 0x11; + rs.controller.byobRequest.respond(1); + rs.controller.close(); + + const result1 = await branch1Reads[0]; + assert_equals(result1.done, false, 'first read() from branch1 should be not done'); + assert_typed_array_equals(result1.value, new Uint8Array([0x11]), 'first chunk from branch1 should be correct'); + const result2 = await branch2Reads[0]; + assert_equals(result2.done, false, 'first 
read() from branch2 should be not done'); + assert_typed_array_equals(result2.value, new Uint8Array([0x11]), 'first chunk from branch2 should be correct'); + + const result3 = await branch1Reads[1]; + assert_equals(result3.done, true, 'second read() from branch1 should be done'); + assert_typed_array_equals(result3.value, new Uint8Array([0]).subarray(0, 0), 'second chunk from branch1 should be correct'); + const result4 = await branch2Reads[1]; + assert_equals(result4.done, true, 'second read() from branch2 should be done'); + assert_typed_array_equals(result4.value, new Uint8Array([0]).subarray(0, 0), 'second chunk from branch2 should be correct'); + +}, 'ReadableStream teeing with byte source: respond() and close() while both branches are pulling'); diff --git a/test/fixtures/wpt/streams/readable-streams/async-iterator.any.js b/test/fixtures/wpt/streams/readable-streams/async-iterator.any.js index 7669a35d9a7a0c..3ccaca17bc1ac7 100644 --- a/test/fixtures/wpt/streams/readable-streams/async-iterator.any.js +++ b/test/fixtures/wpt/streams/readable-streams/async-iterator.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/rs-utils.js // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js diff --git a/test/fixtures/wpt/streams/readable-streams/bad-strategies.any.js b/test/fixtures/wpt/streams/readable-streams/bad-strategies.any.js index b795360fb7c7bb..521fbffe3ab479 100644 --- a/test/fixtures/wpt/streams/readable-streams/bad-strategies.any.js +++ b/test/fixtures/wpt/streams/readable-streams/bad-strategies.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; test(() => { diff --git a/test/fixtures/wpt/streams/readable-streams/bad-underlying-sources.any.js b/test/fixtures/wpt/streams/readable-streams/bad-underlying-sources.any.js index 6f59197a49b18b..e9cf4c924930b2 100644 --- a/test/fixtures/wpt/streams/readable-streams/bad-underlying-sources.any.js +++ b/test/fixtures/wpt/streams/readable-streams/bad-underlying-sources.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; diff --git a/test/fixtures/wpt/streams/readable-streams/cancel.any.js b/test/fixtures/wpt/streams/readable-streams/cancel.any.js index c3723a465c9988..800bd994417241 100644 --- a/test/fixtures/wpt/streams/readable-streams/cancel.any.js +++ b/test/fixtures/wpt/streams/readable-streams/cancel.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/rs-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/readable-streams/constructor.any.js b/test/fixtures/wpt/streams/readable-streams/constructor.any.js index dcfd9e9c33861f..608dc48cfa39d7 100644 --- a/test/fixtures/wpt/streams/readable-streams/constructor.any.js +++ b/test/fixtures/wpt/streams/readable-streams/constructor.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; const error1 = new Error('error1'); diff --git a/test/fixtures/wpt/streams/readable-streams/count-queuing-strategy-integration.any.js b/test/fixtures/wpt/streams/readable-streams/count-queuing-strategy-integration.any.js index 78a25318b2dd5a..02ac5bae5c2f8a 100644 --- a/test/fixtures/wpt/streams/readable-streams/count-queuing-strategy-integration.any.js +++ 
b/test/fixtures/wpt/streams/readable-streams/count-queuing-strategy-integration.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; test(() => { diff --git a/test/fixtures/wpt/streams/readable-streams/crashtests/empty.js b/test/fixtures/wpt/streams/readable-streams/crashtests/empty.js new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/test/fixtures/wpt/streams/readable-streams/crashtests/strategy-worker-terminate.html b/test/fixtures/wpt/streams/readable-streams/crashtests/strategy-worker-terminate.html new file mode 100644 index 00000000000000..a75c3c66b688df --- /dev/null +++ b/test/fixtures/wpt/streams/readable-streams/crashtests/strategy-worker-terminate.html @@ -0,0 +1,10 @@ + + + + diff --git a/test/fixtures/wpt/streams/readable-streams/crashtests/strategy-worker.js b/test/fixtures/wpt/streams/readable-streams/crashtests/strategy-worker.js new file mode 100644 index 00000000000000..dd0ab03b55f5f9 --- /dev/null +++ b/test/fixtures/wpt/streams/readable-streams/crashtests/strategy-worker.js @@ -0,0 +1,4 @@ +var b = new CountQueuingStrategy({ highWaterMark: 3 }); + +importScripts("empty.js"); +postMessage("done"); diff --git a/test/fixtures/wpt/streams/readable-streams/cross-realm-crash.window.js b/test/fixtures/wpt/streams/readable-streams/cross-realm-crash.window.js new file mode 100644 index 00000000000000..5fc7ce37a5f6d2 --- /dev/null +++ b/test/fixtures/wpt/streams/readable-streams/cross-realm-crash.window.js @@ -0,0 +1,13 @@ +// This is a repro for a crash bug that existed in Blink. See +// https://crbug.com/1290014. If there's no crash then the test passed. + +test(t => { + const iframeTag = document.createElement('iframe'); + document.body.appendChild(iframeTag); + + const readableStream = new ReadableStream(); + const reader = new iframeTag.contentWindow.ReadableStreamDefaultReader(readableStream); + iframeTag.remove(); + reader.cancel(); + reader.read(); +}, 'should not crash on reading from stream cancelled in destroyed realm'); diff --git a/test/fixtures/wpt/streams/readable-streams/default-reader.any.js b/test/fixtures/wpt/streams/readable-streams/default-reader.any.js index 60c740a8288631..59d7ab2f74db63 100644 --- a/test/fixtures/wpt/streams/readable-streams/default-reader.any.js +++ b/test/fixtures/wpt/streams/readable-streams/default-reader.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/rs-utils.js 'use strict'; @@ -512,3 +512,28 @@ promise_test(() => { reader.releaseLock(); }); }, 'controller.close() should clear the list of pending read requests'); + +promise_test(t => { + + let controller; + const rs = new ReadableStream({ + start(c) { + controller = c; + } + }); + + const reader1 = rs.getReader(); + const promise1 = promise_rejects_js(t, TypeError, reader1.read(), 'read() from reader1 should reject when reader1 is released'); + reader1.releaseLock(); + + controller.enqueue('a'); + + const reader2 = rs.getReader(); + const promise2 = reader2.read().then(r => { + assert_object_equals(r, { value: 'a', done: false }, 'read() from reader2 should resolve with enqueued chunk'); + }) + reader2.releaseLock(); + + return Promise.all([promise1, promise2]); + +}, 'Second reader can read chunks after first reader was released with pending read requests'); diff --git a/test/fixtures/wpt/streams/readable-streams/floating-point-total-queue-size.any.js b/test/fixtures/wpt/streams/readable-streams/floating-point-total-queue-size.any.js 
index 400482a450cad2..50cca3d951a942 100644 --- a/test/fixtures/wpt/streams/readable-streams/floating-point-total-queue-size.any.js +++ b/test/fixtures/wpt/streams/readable-streams/floating-point-total-queue-size.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; // Due to the limitations of floating-point precision, the calculation of desiredSize sometimes gives different answers diff --git a/test/fixtures/wpt/streams/readable-streams/garbage-collection.any.js b/test/fixtures/wpt/streams/readable-streams/garbage-collection.any.js index dad0ad1535a5bf..f7e2d06ae5cdf3 100644 --- a/test/fixtures/wpt/streams/readable-streams/garbage-collection.any.js +++ b/test/fixtures/wpt/streams/readable-streams/garbage-collection.any.js @@ -1,8 +1,8 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js 'use strict'; -promise_test(() => { +promise_test(async () => { let controller; new ReadableStream({ @@ -11,7 +11,7 @@ promise_test(() => { } }); - garbageCollect(); + await garbageCollect(); return delay(50).then(() => { controller.close(); @@ -22,7 +22,7 @@ promise_test(() => { }, 'ReadableStreamController methods should continue working properly when scripts lose their reference to the ' + 'readable stream'); -promise_test(() => { +promise_test(async () => { let controller; @@ -32,13 +32,13 @@ promise_test(() => { } }).getReader().closed; - garbageCollect(); + await garbageCollect(); return delay(50).then(() => controller.close()).then(() => closedPromise); }, 'ReadableStream closed promise should fulfill even if the stream and reader JS references are lost'); -promise_test(t => { +promise_test(async t => { const theError = new Error('boo'); let controller; @@ -49,20 +49,20 @@ promise_test(t => { } }).getReader().closed; - garbageCollect(); + await garbageCollect(); return delay(50).then(() => controller.error(theError)) .then(() => promise_rejects_exactly(t, theError, closedPromise)); }, 'ReadableStream closed promise should reject even if stream and reader JS references are lost'); -promise_test(() => { +promise_test(async () => { const rs = new ReadableStream({}); rs.getReader(); - garbageCollect(); + await garbageCollect(); return delay(50).then(() => assert_throws_js(TypeError, () => rs.getReader(), 'old reader should still be locking the stream even after garbage collection')); diff --git a/test/fixtures/wpt/streams/readable-streams/general.any.js b/test/fixtures/wpt/streams/readable-streams/general.any.js index efe7da74ad6c08..2a32b27943c82f 100644 --- a/test/fixtures/wpt/streams/readable-streams/general.any.js +++ b/test/fixtures/wpt/streams/readable-streams/general.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/rs-utils.js 'use strict'; @@ -630,6 +630,7 @@ promise_test(() => { let pullCalled = 0; let cancelCalled = 0; + /* eslint-disable no-use-before-define */ class Source { start(c) { startCalled++; diff --git a/test/fixtures/wpt/streams/readable-streams/global.html b/test/fixtures/wpt/streams/readable-streams/global.html new file mode 100644 index 00000000000000..08665d318eac88 --- /dev/null +++ b/test/fixtures/wpt/streams/readable-streams/global.html @@ -0,0 +1,162 @@ + + +Ensure Stream objects are created in expected globals. 
+ + + + + + diff --git a/test/fixtures/wpt/streams/readable-streams/patched-global.any.js b/test/fixtures/wpt/streams/readable-streams/patched-global.any.js index d26dc56cfe6953..a64a054a97f1f5 100644 --- a/test/fixtures/wpt/streams/readable-streams/patched-global.any.js +++ b/test/fixtures/wpt/streams/readable-streams/patched-global.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; // Tests which patch the global environment are kept separate to avoid @@ -22,6 +22,7 @@ test(t => { const trappedProperties = ['highWaterMark', 'size', 'start', 'type', 'mode']; for (const property of trappedProperties) { + // eslint-disable-next-line no-extend-native, accessor-pairs Object.defineProperty(Object.prototype, property, { get() { throw new Error(`${property} getter called`); }, configurable: true diff --git a/test/fixtures/wpt/streams/readable-streams/reentrant-strategies.any.js b/test/fixtures/wpt/streams/readable-streams/reentrant-strategies.any.js index a02d08b0acc50a..b4988bc2433fd5 100644 --- a/test/fixtures/wpt/streams/readable-streams/reentrant-strategies.any.js +++ b/test/fixtures/wpt/streams/readable-streams/reentrant-strategies.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/recording-streams.js // META: script=../resources/rs-utils.js // META: script=../resources/test-utils.js diff --git a/test/fixtures/wpt/streams/readable-streams/tee.any.js b/test/fixtures/wpt/streams/readable-streams/tee.any.js index 761f6e9c3599c1..00397932f4b6e3 100644 --- a/test/fixtures/wpt/streams/readable-streams/tee.any.js +++ b/test/fixtures/wpt/streams/readable-streams/tee.any.js @@ -1,7 +1,8 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/rs-utils.js // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js +// META: script=../resources/rs-test-templates.js 'use strict'; test(() => { @@ -161,92 +162,9 @@ promise_test(() => { }, 'ReadableStream teeing: canceling branch2 should not impact branch1'); -promise_test(() => { - - const reason1 = new Error('We\'re wanted men.'); - const reason2 = new Error('I have the death sentence on twelve systems.'); - - let resolve; - const promise = new Promise(r => resolve = r); - const rs = new ReadableStream({ - cancel(reason) { - assert_array_equals(reason, [reason1, reason2], - 'the cancel reason should be an array containing those from the branches'); - resolve(); - } - }); - - const branch = rs.tee(); - const branch1 = branch[0]; - const branch2 = branch[1]; - branch1.cancel(reason1); - branch2.cancel(reason2); - - return promise; - -}, 'ReadableStream teeing: canceling both branches should aggregate the cancel reasons into an array'); - -promise_test(() => { - - const reason1 = new Error('This little one\'s not worth the effort.'); - const reason2 = new Error('Come, let me get you something.'); - - let resolve; - const promise = new Promise(r => resolve = r); - const rs = new ReadableStream({ - cancel(reason) { - assert_array_equals(reason, [reason1, reason2], - 'the cancel reason should be an array containing those from the branches'); - resolve(); - } - }); - - const branch = rs.tee(); - const branch1 = branch[0]; - const branch2 = branch[1]; - return Promise.all([ - branch2.cancel(reason2), - branch1.cancel(reason1), - promise - ]); - -}, 'ReadableStream teeing: canceling both branches in reverse order should aggregate the cancel reasons into an 
array'); - -promise_test(t => { - - const theError = { name: 'I\'ll be careful.' }; - const rs = new ReadableStream({ - cancel() { - throw theError; - } - }); - - const branch = rs.tee(); - const branch1 = branch[0]; - const branch2 = branch[1]; - - return Promise.all([ - promise_rejects_exactly(t, theError, branch1.cancel()), - promise_rejects_exactly(t, theError, branch2.cancel()) - ]); - -}, 'ReadableStream teeing: failing to cancel the original stream should cause cancel() to reject on branches'); - -promise_test(t => { - - const theError = { name: 'You just watch yourself!' }; - let controller; - const stream = new ReadableStream({ start(c) { controller = c; } }); - const [branch1, branch2] = stream.tee(); - - controller.error(theError); - - return Promise.all([ - promise_rejects_exactly(t, theError, branch1.cancel()), - promise_rejects_exactly(t, theError, branch2.cancel()) - ]); - -}, 'ReadableStream teeing: erroring a teed stream should properly handle canceled branches'); +templatedRSTeeCancel('ReadableStream teeing', (extras) => { + return new ReadableStream({ ...extras }); +}); promise_test(t => { @@ -539,3 +457,23 @@ promise_test(t => { }); }, 'ReadableStreamTee stops pulling when original stream errors while both branches are reading'); + +promise_test(async () => { + + const rs = recordingReadableStream(); + + const [reader1, reader2] = rs.tee().map(branch => branch.getReader()); + const branch1Reads = [reader1.read(), reader1.read()]; + const branch2Reads = [reader2.read(), reader2.read()]; + + await flushAsyncEvents(); + rs.controller.enqueue('a'); + rs.controller.close(); + + assert_object_equals(await branch1Reads[0], { value: 'a', done: false }, 'first chunk from branch1 should be correct'); + assert_object_equals(await branch2Reads[0], { value: 'a', done: false }, 'first chunk from branch2 should be correct'); + + assert_object_equals(await branch1Reads[1], { value: undefined, done: true }, 'second read() from branch1 should be done'); + assert_object_equals(await branch2Reads[1], { value: undefined, done: true }, 'second read() from branch2 should be done'); + +}, 'ReadableStream teeing: enqueue() and close() while both branches are pulling'); diff --git a/test/fixtures/wpt/streams/readable-streams/templated.any.js b/test/fixtures/wpt/streams/readable-streams/templated.any.js index 4d524e69fee19e..ecae3f4d8b129f 100644 --- a/test/fixtures/wpt/streams/readable-streams/templated.any.js +++ b/test/fixtures/wpt/streams/readable-streams/templated.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/rs-test-templates.js 'use strict'; diff --git a/test/fixtures/wpt/streams/resources/recording-streams.js b/test/fixtures/wpt/streams/resources/recording-streams.js index 34d02a143dccdb..661fe512f516c6 100644 --- a/test/fixtures/wpt/streams/resources/recording-streams.js +++ b/test/fixtures/wpt/streams/resources/recording-streams.js @@ -3,6 +3,7 @@ self.recordingReadableStream = (extras = {}, strategy) => { let controllerToCopyOver; const stream = new ReadableStream({ + type: extras.type, start(controller) { controllerToCopyOver = controller; diff --git a/test/fixtures/wpt/streams/resources/rs-test-templates.js b/test/fixtures/wpt/streams/resources/rs-test-templates.js index 700bd9c3ca9f9e..25751c477f5dc8 100644 --- a/test/fixtures/wpt/streams/resources/rs-test-templates.js +++ b/test/fixtures/wpt/streams/resources/rs-test-templates.js @@ -251,34 +251,27 @@ 
self.templatedRSEmptyReader = (label, factory) => { }, label + ': getReader() again on the stream should fail'); - promise_test(t => { + promise_test(async t => { const streamAndReader = factory(); const stream = streamAndReader.stream; const reader = streamAndReader.reader; - reader.read().then( - t.unreached_func('first read() should not fulfill'), - t.unreached_func('first read() should not reject') - ); - - reader.read().then( - t.unreached_func('second read() should not fulfill'), - t.unreached_func('second read() should not reject') - ); + const read1 = reader.read(); + const read2 = reader.read(); + const closed = reader.closed; - reader.closed.then( - t.unreached_func('closed should not fulfill'), - t.unreached_func('closed should not reject') - ); - - assert_throws_js(TypeError, () => reader.releaseLock(), 'releaseLock should throw a TypeError'); + reader.releaseLock(); - assert_true(stream.locked, 'the stream should still be locked'); + assert_false(stream.locked, 'the stream should be unlocked'); - return delay(500); + await Promise.all([ + promise_rejects_js(t, TypeError, read1, 'first read should reject'), + promise_rejects_js(t, TypeError, read2, 'second read should reject'), + promise_rejects_js(t, TypeError, closed, 'closed should reject') + ]); - }, label + ': releasing the lock with pending read requests should throw but the read requests should stay pending'); + }, label + ': releasing the lock should reject all pending read requests'); promise_test(t => { @@ -636,3 +629,93 @@ self.templatedRSTwoChunksClosedReader = function (label, factory, chunks) { }, label + ': reader\'s closed property always returns the same promise'); }; + +self.templatedRSTeeCancel = (label, factory) => { + test(() => {}, `Running templatedRSTeeCancel with ${label}`); + + promise_test(async () => { + + const reason1 = new Error('We\'re wanted men.'); + const reason2 = new Error('I have the death sentence on twelve systems.'); + + let resolve; + const promise = new Promise(r => resolve = r); + const rs = factory({ + cancel(reason) { + assert_array_equals(reason, [reason1, reason2], + 'the cancel reason should be an array containing those from the branches'); + resolve(); + } + }); + + const [branch1, branch2] = rs.tee(); + await Promise.all([ + branch1.cancel(reason1), + branch2.cancel(reason2), + promise + ]); + + }, `${label}: canceling both branches should aggregate the cancel reasons into an array`); + + promise_test(async () => { + + const reason1 = new Error('This little one\'s not worth the effort.'); + const reason2 = new Error('Come, let me get you something.'); + + let resolve; + const promise = new Promise(r => resolve = r); + const rs = factory({ + cancel(reason) { + assert_array_equals(reason, [reason1, reason2], + 'the cancel reason should be an array containing those from the branches'); + resolve(); + } + }); + + const [branch1, branch2] = rs.tee(); + await Promise.all([ + branch2.cancel(reason2), + branch1.cancel(reason1), + promise + ]); + + }, `${label}: canceling both branches in reverse order should aggregate the cancel reasons into an array`); + + promise_test(async t => { + + const theError = { name: 'I\'ll be careful.' 
}; + const rs = factory({ + cancel() { + throw theError; + } + }); + + const [branch1, branch2] = rs.tee(); + await Promise.all([ + promise_rejects_exactly(t, theError, branch1.cancel()), + promise_rejects_exactly(t, theError, branch2.cancel()) + ]); + + }, `${label}: failing to cancel the original stream should cause cancel() to reject on branches`); + + promise_test(async t => { + + const theError = { name: 'You just watch yourself!' }; + let controller; + const stream = factory({ + start(c) { + controller = c; + } + }); + + const [branch1, branch2] = stream.tee(); + controller.error(theError); + + await Promise.all([ + promise_rejects_exactly(t, theError, branch1.cancel()), + promise_rejects_exactly(t, theError, branch2.cancel()) + ]); + + }, `${label}: erroring a teed stream should properly handle canceled branches`); + +}; diff --git a/test/fixtures/wpt/streams/resources/test-utils.js b/test/fixtures/wpt/streams/resources/test-utils.js index 97d4261aa0507a..fb34e270ff3718 100644 --- a/test/fixtures/wpt/streams/resources/test-utils.js +++ b/test/fixtures/wpt/streams/resources/test-utils.js @@ -47,8 +47,11 @@ self.constructorThrowsForAll = (constructor, firstArgs) => { 'constructor should throw a TypeError')); }; -self.garbageCollect = () => { - if (self.gc) { +self.garbageCollect = async () => { + if (self.TestUtils?.gc) { + // https://testutils.spec.whatwg.org/#the-testutils-namespace + await TestUtils.gc(); + } else if (self.gc) { // Use --expose_gc for V8 (and Node.js) // to pass this flag at chrome launch use: --js-flags="--expose-gc" // Exposed in SpiderMonkey shell as well @@ -57,6 +60,7 @@ self.garbageCollect = () => { // Present in some WebKit development environments GCController.collect(); } else { + /* eslint-disable no-console */ console.warn('Tests are running without the ability to do manual garbage collection. They will still work, but ' + 'coverage will be suboptimal.'); /* eslint-enable no-console */ @@ -71,3 +75,20 @@ self.delay = ms => new Promise(resolve => step_timeout(resolve, ms)); // Some tests include promise resolutions which may mean the test code takes a couple of event loop visits itself. So go // around an extra 2 times to avoid complicating those tests. self.flushAsyncEvents = () => delay(0).then(() => delay(0)).then(() => delay(0)).then(() => delay(0)); + +self.assert_typed_array_equals = (actual, expected, message) => { + const prefix = message === undefined ? 
'' : `${message} `; + assert_equals(typeof actual, 'object', `${prefix}type is object`); + assert_equals(actual.constructor, expected.constructor, `${prefix}constructor`); + assert_equals(actual.byteOffset, expected.byteOffset, `${prefix}byteOffset`); + assert_equals(actual.byteLength, expected.byteLength, `${prefix}byteLength`); + assert_equals(actual.buffer.byteLength, expected.buffer.byteLength, `${prefix}buffer.byteLength`); + assert_array_equals([...actual], [...expected], `${prefix}contents`); + assert_array_equals([...new Uint8Array(actual.buffer)], [...new Uint8Array(expected.buffer)], `${prefix}buffer contents`); +}; + +self.makePromiseAndResolveFunc = () => { + let resolve; + const promise = new Promise(r => { resolve = r; }); + return [promise, resolve]; +}; diff --git a/test/fixtures/wpt/streams/transferable/readable-stream.html b/test/fixtures/wpt/streams/transferable/readable-stream.html index 59b57ce6723c10..b1ede4695bf4cd 100644 --- a/test/fixtures/wpt/streams/transferable/readable-stream.html +++ b/test/fixtures/wpt/streams/transferable/readable-stream.html @@ -135,10 +135,23 @@ assert_array_equals(rs.events, ['pull'], 'pull() should have been called'); }, 'the extra queue from transferring is counted in chunks'); +async function transferredReadableStreamWithCancelPromise() { + let resolveCancelCalled; + const cancelCalled = new Promise(resolve => { + resolveCancelCalled = resolve; + }); + const rs = await recordingTransferredReadableStream({ + cancel() { + resolveCancelCalled(); + } + }); + return { rs, cancelCalled }; +} + promise_test(async () => { - const rs = await recordingTransferredReadableStream(); + const { rs, cancelCalled } = await transferredReadableStreamWithCancelPromise(); rs.cancel('message'); - await delay(0); + await cancelCalled; assert_array_equals(rs.events, ['pull', 'cancel', 'message'], 'cancel() should have been called'); const reader = rs.getReader(); @@ -147,15 +160,7 @@ }, 'cancel should be propagated to the original'); promise_test(async () => { - let resolveCancelCalled; - const cancelCalled = new Promise(resolve => { - resolveCancelCalled = resolve; - }); - const rs = await recordingTransferredReadableStream({ - cancel() { - resolveCancelCalled(); - } - }); + const { rs, cancelCalled } = await transferredReadableStreamWithCancelPromise(); const reader = rs.getReader(); const readPromise = reader.read(); reader.cancel('done'); diff --git a/test/fixtures/wpt/streams/transferable/transfer-with-messageport.window.js b/test/fixtures/wpt/streams/transferable/transfer-with-messageport.window.js new file mode 100644 index 00000000000000..37f8c9df169607 --- /dev/null +++ b/test/fixtures/wpt/streams/transferable/transfer-with-messageport.window.js @@ -0,0 +1,219 @@ +"use strict"; + +function receiveEventOnce(target, name) { + return new Promise(resolve => { + target.addEventListener( + name, + ev => { + resolve(ev); + }, + { once: true } + ); + }); +} + +async function postAndTestMessageEvent(data, transfer, title) { + postMessage(data, "*", transfer); + const messagePortCount = transfer.filter(i => i instanceof MessagePort) + .length; + const ev = await receiveEventOnce(window, "message"); + assert_equals( + ev.ports.length, + messagePortCount, + `Correct number of ports ${title}` + ); + for (const [i, port] of ev.ports.entries()) { + assert_true( + port instanceof MessagePort, + `ports[${i}] include MessagePort ${title}` + ); + } + for (const [key, value] of Object.entries(data)) { + assert_true( + ev.data[key] instanceof value.constructor, + 
`data.${key} has correct interface ${value.constructor.name} ${title}` + ); + } +} + +async function transferMessagePortWithOrder1(stream) { + const channel = new MessageChannel(); + await postAndTestMessageEvent( + { stream, port2: channel.port2 }, + [stream, channel.port2], + `when transferring [${stream.constructor.name}, MessagePort]` + ); +} + +async function transferMessagePortWithOrder2(stream) { + const channel = new MessageChannel(); + await postAndTestMessageEvent( + { stream, port2: channel.port2 }, + [channel.port2, stream], + `when transferring [MessagePort, ${stream.constructor.name}]` + ); +} + +async function transferMessagePortWithOrder3(stream) { + const channel = new MessageChannel(); + await postAndTestMessageEvent( + { port1: channel.port1, stream, port2: channel.port2 }, + [channel.port1, stream, channel.port2], + `when transferring [MessagePort, ${stream.constructor.name}, MessagePort]` + ); +} + +async function transferMessagePortWithOrder4(stream) { + const channel = new MessageChannel(); + await postAndTestMessageEvent( + {}, + [channel.port1, stream, channel.port2], + `when transferring [MessagePort, ${stream.constructor.name}, MessagePort] but with empty data` + ); +} + +async function transferMessagePortWithOrder5(stream) { + const channel = new MessageChannel(); + await postAndTestMessageEvent( + { port2: channel.port2, port1: channel.port1, stream }, + [channel.port1, stream, channel.port2], + `when transferring [MessagePort, ${stream.constructor.name}, MessagePort] but with data having different order` + ); +} + +async function transferMessagePortWithOrder6(stream) { + const channel = new MessageChannel(); + await postAndTestMessageEvent( + { port2: channel.port2, port1: channel.port1 }, + [channel.port1, stream, channel.port2], + `when transferring [MessagePort, ${stream.constructor.name}, MessagePort] but with stream not being in the data` + ); +} + +async function transferMessagePortWithOrder7(stream) { + const channel = new MessageChannel(); + await postAndTestMessageEvent( + { stream }, + [channel.port1, stream, channel.port2], + `when transferring [MessagePort, ${stream.constructor.name}, MessagePort] but with ports not being in the data` + ); +} + +async function transferMessagePortWith(constructor) { + await transferMessagePortWithOrder1(new constructor()); + await transferMessagePortWithOrder2(new constructor()); + await transferMessagePortWithOrder3(new constructor()); +} + +async function advancedTransferMesagePortWith(constructor) { + await transferMessagePortWithOrder4(new constructor()); + await transferMessagePortWithOrder5(new constructor()); + await transferMessagePortWithOrder6(new constructor()); + await transferMessagePortWithOrder7(new constructor()); +} + +async function mixedTransferMessagePortWithOrder1() { + const channel = new MessageChannel(); + const readable = new ReadableStream(); + const writable = new WritableStream(); + const transform = new TransformStream(); + await postAndTestMessageEvent( + { + readable, + writable, + transform, + port1: channel.port1, + port2: channel.port2, + }, + [readable, writable, transform, channel.port1, channel.port2], + `when transferring [ReadableStream, WritableStream, TransformStream, MessagePort, MessagePort]` + ); +} + +async function mixedTransferMessagePortWithOrder2() { + const channel = new MessageChannel(); + const readable = new ReadableStream(); + const writable = new WritableStream(); + const transform = new TransformStream(); + await postAndTestMessageEvent( + { readable, writable, 
transform }, + [transform, channel.port1, readable, channel.port2, writable], + `when transferring [TransformStream, MessagePort, ReadableStream, MessagePort, WritableStream]` + ); +} + +async function mixedTransferMessagePortWithOrder3() { + const channel = new MessageChannel(); + const readable1 = new ReadableStream(); + const readable2 = new ReadableStream(); + const writable1 = new WritableStream(); + const writable2 = new WritableStream(); + const transform1 = new TransformStream(); + const transform2 = new TransformStream(); + await postAndTestMessageEvent( + { readable1, writable1, transform1, readable2, writable2, transform2 }, + [ + transform2, + channel.port1, + readable1, + channel.port2, + writable2, + readable2, + writable1, + transform1, + ], + `when transferring [TransformStream, MessagePort, ReadableStream, MessagePort, WritableStream, ReadableStream, WritableStream, TransformStream] but with the data having different order` + ); +} + +async function mixedTransferMesagePortWith() { + await mixedTransferMessagePortWithOrder1(); + await mixedTransferMessagePortWithOrder2(); + await mixedTransferMessagePortWithOrder3(); +} + +promise_test(async t => { + await transferMessagePortWith(ReadableStream); +}, "Transferring a MessagePort with a ReadableStream should set `.ports`"); + +promise_test(async t => { + await transferMessagePortWith(WritableStream); +}, "Transferring a MessagePort with a WritableStream should set `.ports`"); + +promise_test(async t => { + await transferMessagePortWith(TransformStream); +}, "Transferring a MessagePort with a TransformStream should set `.ports`"); + +promise_test(async t => { + await advancedTransferMesagePortWith(ReadableStream); +}, "Transferring a MessagePort with a ReadableStream should set `.ports`, advanced"); + +promise_test(async t => { + await advancedTransferMesagePortWith(WritableStream); +}, "Transferring a MessagePort with a WritableStream should set `.ports`, advanced"); + +promise_test(async t => { + await advancedTransferMesagePortWith(TransformStream); +}, "Transferring a MessagePort with a TransformStream should set `.ports`, advanced"); + +promise_test(async t => { + await mixedTransferMesagePortWith(); +}, "Transferring a MessagePort with multiple streams should set `.ports`"); + +test(() => { + assert_throws_dom("DataCloneError", () => + postMessage({ stream: new ReadableStream() }, "*") + ); +}, "ReadableStream must not be serializable"); + +test(() => { + assert_throws_dom("DataCloneError", () => + postMessage({ stream: new WritableStream() }, "*") + ); +}, "WritableStream must not be serializable"); + +test(() => { + assert_throws_dom("DataCloneError", () => + postMessage({ stream: new TransformStream() }, "*") + ); +}, "TransformStream must not be serializable"); diff --git a/test/fixtures/wpt/streams/transferable/transform-stream.html b/test/fixtures/wpt/streams/transferable/transform-stream.html index fbfbfe8fc1347a..355d5d807433d7 100644 --- a/test/fixtures/wpt/streams/transferable/transform-stream.html +++ b/test/fixtures/wpt/streams/transferable/transform-stream.html @@ -66,9 +66,14 @@ controller.close(); } }); + let resolve; + const ready = new Promise(r => resolve = r); let result = ''; const sink = new WritableStream({ write(chunk) { + if (result) { + resolve(); + } result += chunk; } }); @@ -93,8 +98,7 @@ }); postMessage({source, sink, transform1, transform2}, '*', [source, transform1, sink, transform2]); - return promise - .then(() => delay(0)) + return ready .then(() => { assert_equals(result, 'HELLO HELLO THERE THERE ',
'transforms should have been applied'); diff --git a/test/fixtures/wpt/streams/transferable/window.html b/test/fixtures/wpt/streams/transferable/window.html index beaf548fe641c5..11c868356b69a1 100644 --- a/test/fixtures/wpt/streams/transferable/window.html +++ b/test/fixtures/wpt/streams/transferable/window.html @@ -16,15 +16,10 @@ promise_test(t => { const orig = createOriginalReadableStream(); - const promise = new Promise(resolve => { - window.addEventListener('message', msg => { - const port = msg.data; - resolve(testMessageEvent(port)); - port.start(); - }, {once: true}); - }); const mc = new MessageChannel(); - postMessage(mc.port1, '*', [mc.port1]); + const promise = testMessageEvent(mc.port1); + mc.port1.start(); + mc.port2.postMessage(orig, [orig]); mc.port2.close(); assert_true(orig.locked, 'the original stream should be locked'); diff --git a/test/fixtures/wpt/streams/transferable/writable-stream.html b/test/fixtures/wpt/streams/transferable/writable-stream.html index adc6f457c27e87..7e25dad94d4cfd 100644 --- a/test/fixtures/wpt/streams/transferable/writable-stream.html +++ b/test/fixtures/wpt/streams/transferable/writable-stream.html @@ -87,9 +87,11 @@ }, 'effective queue size of a transferred writable should be 2'); promise_test(async () => { + const [writeCalled, resolveWriteCalled] = makePromiseAndResolveFunc(); let resolveWrite; const orig = new WritableStream({ write() { + resolveWriteCalled(); return new Promise(resolve => { resolveWrite = resolve; }); @@ -99,35 +101,43 @@ const writer = transferred.getWriter(); await writer.write('a'); let writeDone = false; - writer.write('b').then(() => { + const writePromise = writer.write('b').then(() => { writeDone = true; }); - await flushAsyncEvents(); + await writeCalled; assert_false(writeDone, 'second write should not have resolved yet'); resolveWrite(); - await delay(0); - assert_true(writeDone, 'second write should have resolved'); + await writePromise; // (makes sure this resolves) }, 'second write should wait for first underlying write to complete'); -promise_test(async t => { - const orig = recordingWritableStream(); +async function transferredWritableStreamWithAbortPromise() { + const [abortCalled, resolveAbortCalled] = makePromiseAndResolveFunc(); + const orig = recordingWritableStream({ + abort() { + resolveAbortCalled(); + } + }); const transferred = await transfer(orig); + return { orig, transferred, abortCalled }; +} + +promise_test(async t => { + const { orig, transferred, abortCalled } = await transferredWritableStreamWithAbortPromise(); transferred.abort('p'); - await delay(0); + await abortCalled; assert_array_equals(orig.events, ['abort', 'p'], 'abort() should have been called'); }, 'abort() should work'); promise_test(async t => { - const orig = recordingWritableStream(); - const transferred = await transfer(orig); + const { orig, transferred, abortCalled } = await transferredWritableStreamWithAbortPromise(); const writer = transferred.getWriter(); // A WritableStream object cannot be cloned. 
await promise_rejects_dom(t, 'DataCloneError', writer.write(new WritableStream()), 'the write should reject'); await promise_rejects_dom(t, 'DataCloneError', writer.closed, 'the stream should be errored'); - await delay(0); + await abortCalled; assert_equals(orig.events.length, 2, 'abort should have been called'); assert_equals(orig.events[0], 'abort', 'first event should be abort'); assert_equals(orig.events[1].name, 'DataCloneError', diff --git a/test/fixtures/wpt/streams/transform-streams/backpressure.any.js b/test/fixtures/wpt/streams/transform-streams/backpressure.any.js index 64c9d0930ed2f2..6befba41b79542 100644 --- a/test/fixtures/wpt/streams/transform-streams/backpressure.any.js +++ b/test/fixtures/wpt/streams/transform-streams/backpressure.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/recording-streams.js // META: script=../resources/test-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/transform-streams/errors.any.js b/test/fixtures/wpt/streams/transform-streams/errors.any.js index ba26b32b75a6a4..0cca4c75479d6d 100644 --- a/test/fixtures/wpt/streams/transform-streams/errors.any.js +++ b/test/fixtures/wpt/streams/transform-streams/errors.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/transform-streams/flush.any.js b/test/fixtures/wpt/streams/transform-streams/flush.any.js index dc40532957b14b..9287f6f5eb78eb 100644 --- a/test/fixtures/wpt/streams/transform-streams/flush.any.js +++ b/test/fixtures/wpt/streams/transform-streams/flush.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/transform-streams/general.any.js b/test/fixtures/wpt/streams/transform-streams/general.any.js index d4f2a1d5a29cf6..c95691f7bf49df 100644 --- a/test/fixtures/wpt/streams/transform-streams/general.any.js +++ b/test/fixtures/wpt/streams/transform-streams/general.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/rs-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/transform-streams/lipfuzz.any.js b/test/fixtures/wpt/streams/transform-streams/lipfuzz.any.js index c8c3803c6dfb4b..f9f148aaf1c6a4 100644 --- a/test/fixtures/wpt/streams/transform-streams/lipfuzz.any.js +++ b/test/fixtures/wpt/streams/transform-streams/lipfuzz.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; class LipFuzzTransformer { diff --git a/test/fixtures/wpt/streams/transform-streams/patched-global.any.js b/test/fixtures/wpt/streams/transform-streams/patched-global.any.js index 5bce0cb41ce437..2d04e3b948b324 100644 --- a/test/fixtures/wpt/streams/transform-streams/patched-global.any.js +++ b/test/fixtures/wpt/streams/transform-streams/patched-global.any.js @@ -1,15 +1,17 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; // Tests which patch the global environment are kept separate to avoid // interfering with other tests. 
test(t => { + // eslint-disable-next-line no-extend-native, accessor-pairs Object.defineProperty(Object.prototype, 'highWaterMark', { set() { throw new Error('highWaterMark setter called'); }, configurable: true }); + // eslint-disable-next-line no-extend-native, accessor-pairs Object.defineProperty(Object.prototype, 'size', { set() { throw new Error('size setter called'); }, configurable: true diff --git a/test/fixtures/wpt/streams/transform-streams/properties.any.js b/test/fixtures/wpt/streams/transform-streams/properties.any.js index f2ac482e0de223..02981b8bc76a5f 100644 --- a/test/fixtures/wpt/streams/transform-streams/properties.any.js +++ b/test/fixtures/wpt/streams/transform-streams/properties.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; const transformerMethods = { diff --git a/test/fixtures/wpt/streams/transform-streams/reentrant-strategies.any.js b/test/fixtures/wpt/streams/transform-streams/reentrant-strategies.any.js index 31e53949f3c26e..fc2f91886659f6 100644 --- a/test/fixtures/wpt/streams/transform-streams/reentrant-strategies.any.js +++ b/test/fixtures/wpt/streams/transform-streams/reentrant-strategies.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/recording-streams.js // META: script=../resources/rs-utils.js // META: script=../resources/test-utils.js diff --git a/test/fixtures/wpt/streams/transform-streams/strategies.any.js b/test/fixtures/wpt/streams/transform-streams/strategies.any.js index d465d31ab09736..94055ad99dc94b 100644 --- a/test/fixtures/wpt/streams/transform-streams/strategies.any.js +++ b/test/fixtures/wpt/streams/transform-streams/strategies.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/recording-streams.js // META: script=../resources/test-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/transform-streams/terminate.any.js b/test/fixtures/wpt/streams/transform-streams/terminate.any.js index 8cb10679348b50..670006366db2af 100644 --- a/test/fixtures/wpt/streams/transform-streams/terminate.any.js +++ b/test/fixtures/wpt/streams/transform-streams/terminate.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/recording-streams.js // META: script=../resources/test-utils.js 'use strict'; diff --git a/test/fixtures/wpt/streams/writable-streams/aborting.any.js b/test/fixtures/wpt/streams/writable-streams/aborting.any.js index ab154a705ed0e9..e016cd191b876f 100644 --- a/test/fixtures/wpt/streams/writable-streams/aborting.any.js +++ b/test/fixtures/wpt/streams/writable-streams/aborting.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; @@ -1384,10 +1384,10 @@ test(t => { assert_true(ctrl.signal instanceof AbortSignal); assert_false(ctrl.signal.aborted); - assert_equals(ctrl.abortReason, undefined); + assert_equals(ctrl.signal.reason, undefined, 'signal.reason before abort'); ws.abort(e); assert_true(ctrl.signal.aborted); - assert_equals(ctrl.abortReason, e); + assert_equals(ctrl.signal.reason, e); }, 'WritableStreamDefaultController.signal'); promise_test(async t => { @@ -1405,10 +1405,11 @@ promise_test(async t => { await called; assert_false(ctrl.signal.aborted); - assert_equals(ctrl.abortReason, undefined); + 
assert_equals(ctrl.signal.reason, undefined, 'signal.reason before abort'); writer.abort(); assert_true(ctrl.signal.aborted); - assert_equals(ctrl.abortReason, undefined); + assert_true(ctrl.signal.reason instanceof DOMException, 'signal.reason is a DOMException'); + assert_equals(ctrl.signal.reason.name, 'AbortError', 'signal.reason is an AbortError'); }, 'the abort signal is signalled synchronously - write'); promise_test(async t => { diff --git a/test/fixtures/wpt/streams/writable-streams/bad-strategies.any.js b/test/fixtures/wpt/streams/writable-streams/bad-strategies.any.js index b180bae57c0585..63fa443065ee41 100644 --- a/test/fixtures/wpt/streams/writable-streams/bad-strategies.any.js +++ b/test/fixtures/wpt/streams/writable-streams/bad-strategies.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; const error1 = new Error('a unique string'); diff --git a/test/fixtures/wpt/streams/writable-streams/bad-underlying-sinks.any.js b/test/fixtures/wpt/streams/writable-streams/bad-underlying-sinks.any.js index 0bfc036246a870..d0b3467978ea05 100644 --- a/test/fixtures/wpt/streams/writable-streams/bad-underlying-sinks.any.js +++ b/test/fixtures/wpt/streams/writable-streams/bad-underlying-sinks.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/writable-streams/byte-length-queuing-strategy.any.js b/test/fixtures/wpt/streams/writable-streams/byte-length-queuing-strategy.any.js index 9a61dd7cc69787..ce1962e8917f32 100644 --- a/test/fixtures/wpt/streams/writable-streams/byte-length-queuing-strategy.any.js +++ b/test/fixtures/wpt/streams/writable-streams/byte-length-queuing-strategy.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; promise_test(t => { diff --git a/test/fixtures/wpt/streams/writable-streams/close.any.js b/test/fixtures/wpt/streams/writable-streams/close.any.js index cf997ed84cdcac..88855a92efd550 100644 --- a/test/fixtures/wpt/streams/writable-streams/close.any.js +++ b/test/fixtures/wpt/streams/writable-streams/close.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/writable-streams/constructor.any.js b/test/fixtures/wpt/streams/writable-streams/constructor.any.js index 75eed2a993fe5e..eaac90e48b8f86 100644 --- a/test/fixtures/wpt/streams/writable-streams/constructor.any.js +++ b/test/fixtures/wpt/streams/writable-streams/constructor.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; const error1 = new Error('error1'); diff --git a/test/fixtures/wpt/streams/writable-streams/count-queuing-strategy.any.js b/test/fixtures/wpt/streams/writable-streams/count-queuing-strategy.any.js index 30edb3eb315c62..064e16e81506f1 100644 --- a/test/fixtures/wpt/streams/writable-streams/count-queuing-strategy.any.js +++ b/test/fixtures/wpt/streams/writable-streams/count-queuing-strategy.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; test(() => { diff --git a/test/fixtures/wpt/streams/writable-streams/error.any.js b/test/fixtures/wpt/streams/writable-streams/error.any.js index be986fccc6eac6..faf3fdd9521430 
100644 --- a/test/fixtures/wpt/streams/writable-streams/error.any.js +++ b/test/fixtures/wpt/streams/writable-streams/error.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; const error1 = new Error('error1'); diff --git a/test/fixtures/wpt/streams/writable-streams/floating-point-total-queue-size.any.js b/test/fixtures/wpt/streams/writable-streams/floating-point-total-queue-size.any.js index 8e77ba0bb31185..bd34cc53a69579 100644 --- a/test/fixtures/wpt/streams/writable-streams/floating-point-total-queue-size.any.js +++ b/test/fixtures/wpt/streams/writable-streams/floating-point-total-queue-size.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; // Due to the limitations of floating-point precision, the calculation of desiredSize sometimes gives different answers diff --git a/test/fixtures/wpt/streams/writable-streams/general.any.js b/test/fixtures/wpt/streams/writable-streams/general.any.js index fdd10b29aa0ebc..cede7fd0845b74 100644 --- a/test/fixtures/wpt/streams/writable-streams/general.any.js +++ b/test/fixtures/wpt/streams/writable-streams/general.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; test(() => { diff --git a/test/fixtures/wpt/streams/writable-streams/properties.any.js b/test/fixtures/wpt/streams/writable-streams/properties.any.js index 0f7f876d8b6fc4..c95bd7d0c080ba 100644 --- a/test/fixtures/wpt/streams/writable-streams/properties.any.js +++ b/test/fixtures/wpt/streams/writable-streams/properties.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker 'use strict'; const sinkMethods = { diff --git a/test/fixtures/wpt/streams/writable-streams/reentrant-strategy.any.js b/test/fixtures/wpt/streams/writable-streams/reentrant-strategy.any.js index afde413b4252d1..eb05cc068043ea 100644 --- a/test/fixtures/wpt/streams/writable-streams/reentrant-strategy.any.js +++ b/test/fixtures/wpt/streams/writable-streams/reentrant-strategy.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/writable-streams/start.any.js b/test/fixtures/wpt/streams/writable-streams/start.any.js index 02b5f2a387625a..82d869430dd700 100644 --- a/test/fixtures/wpt/streams/writable-streams/start.any.js +++ b/test/fixtures/wpt/streams/writable-streams/start.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/streams/writable-streams/write.any.js b/test/fixtures/wpt/streams/writable-streams/write.any.js index e3defa834820d4..f0246f6cad39fd 100644 --- a/test/fixtures/wpt/streams/writable-streams/write.any.js +++ b/test/fixtures/wpt/streams/writable-streams/write.any.js @@ -1,4 +1,4 @@ -// META: global=window,worker,jsshell +// META: global=window,worker // META: script=../resources/test-utils.js // META: script=../resources/recording-streams.js 'use strict'; diff --git a/test/fixtures/wpt/versions.json b/test/fixtures/wpt/versions.json index ca900307b7a597..8c82af0b88c42c 100644 --- a/test/fixtures/wpt/versions.json +++ b/test/fixtures/wpt/versions.json @@ -56,7 +56,7 @@ "path": "resources" }, "streams": { - "commit": 
"8f60d9443949c323522a2009518d54d5d6ab5541", + "commit": "9e5ef42bd34b5b19b76d0d4cb19012e52c222664", "path": "streams" }, "url": { diff --git a/test/wpt/status/streams.json b/test/wpt/status/streams.json index e6fd391a620885..bbda0bacc1a94a 100644 --- a/test/wpt/status/streams.json +++ b/test/wpt/status/streams.json @@ -1,9 +1,32 @@ { + "piping/abort.any.js": { + "fail": { + "expected": [ + "(reason: 'null') all the error objects should be the same object", + "(reason: 'undefined') all the error objects should be the same object", + "(reason: 'error1: error1') all the error objects should be the same object", + "(reason: 'null') abort should prevent further reads", + "(reason: 'undefined') abort should prevent further reads", + "(reason: 'error1: error1') abort should prevent further reads", + "(reason: 'null') all pending writes should complete on abort", + "(reason: 'undefined') all pending writes should complete on abort", + "(reason: 'error1: error1') all pending writes should complete on abort", + "pipeTo on a teed readable byte stream should only be aborted when both branches are aborted" + ] + } + }, "queuing-strategies-size-function-per-global.window.js": { "skip": "Browser-specific test" }, - "transferable/deserialize-error.window.js": { - "skip": "Browser-specific test" + "queuing-strategies.any.js": { + "fail": { + "expected": [ + "CountQueuingStrategy: size should not have a prototype property", + "ByteLengthQueuingStrategy: size should not have a prototype property", + "CountQueuingStrategy: size should not be a constructor", + "ByteLengthQueuingStrategy: size should not be a constructor" + ] + } }, "readable-byte-streams/bad-buffers-and-views.any.js": { "fail": { @@ -12,5 +35,89 @@ "ReadableStream with byte source: respondWithNewView() throws if the supplied view's buffer is zero-length (in the readable state)" ] } + }, + "readable-byte-streams/general.any.js": { + "fail": { + "expected": [ + "ReadableStream with byte source: releaseLock() on ReadableStreamDefaultReader must reject pending read()", + "ReadableStream with byte source: releaseLock() on ReadableStreamBYOBReader must reject pending read()", + "pull() resolving should not resolve read()", + "ReadableStream with byte source: enqueue() discards auto-allocated BYOB request", + "ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, respond()", + "ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader with 1 element Uint16Array, respond(1)", + "ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader with 2 element Uint8Array, respond(3)", + "ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, respondWithNewView()", + "ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, enqueue()", + "ReadableStream with byte source: releaseLock() with pending read(view), read(view) on second reader, close(), respond(0)", + "ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), read() on second reader, respond()", + "ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), read() on second reader, enqueue()", + "ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), read(view) on second reader, respond()", + "ReadableStream with byte source: autoAllocateChunkSize, releaseLock() with pending read(), 
read(view) on second reader, enqueue()", + "ReadableStream with byte source: read(view) with 1 element Uint16Array, respond(1), releaseLock(), read(view) on second reader with 1 element Uint16Array, respond(1)", + "ReadableStream with byte source: read(view) with 1 element Uint16Array, respond(1), releaseLock(), read() on second reader, enqueue()" + ] + } + }, + "readable-byte-streams/tee.any.js": { + "fail": { + "expected": [ + "ReadableStream teeing with byte source: should be able to read one branch to the end without affecting the other", + "ReadableStream teeing with byte source: chunks should be cloned for each branch", + "ReadableStream teeing with byte source: chunks for BYOB requests from branch 1 should be cloned to branch 2", + "ReadableStream teeing with byte source: errors in the source should propagate to both branches", + "ReadableStream teeing with byte source: closing the original should close the branches", + "ReadableStream teeing with byte source: erroring the original should immediately error the branches", + "ReadableStream teeing with byte source: erroring the original should error pending reads from BYOB reader", + "ReadableStream teeing with byte source: canceling branch1 should finish when branch2 reads until end of stream", + "ReadableStream teeing with byte source: canceling branch1 should finish when original stream errors", + "ReadableStream teeing with byte source: should not pull any chunks if no branches are reading", + "ReadableStream teeing with byte source: should only pull enough to fill the emptiest queue", + "ReadableStream teeing with byte source: should not pull when original is already errored", + "ReadableStream teeing with byte source: stops pulling when original stream errors while branch 1 is reading", + "ReadableStream teeing with byte source: stops pulling when original stream errors while branch 2 is reading", + "ReadableStream teeing with byte source: stops pulling when original stream errors while both branches are reading", + "ReadableStream teeing with byte source: canceling both branches in sequence with delay", + "ReadableStream teeing with byte source: failing to cancel when canceling both branches in sequence with delay", + "ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, cancel branch2", + "ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, cancel branch1", + "ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, enqueue to branch1", + "ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, respond to branch2", + "ReadableStream teeing with byte source: pull with BYOB reader, then pull with default reader", + "ReadableStream teeing with byte source: pull with default reader, then pull with BYOB reader", + "ReadableStream teeing with byte source: read from branch2, then read from branch1", + "ReadableStream teeing with byte source: read from branch1 with default reader, then close while branch2 has pending BYOB read", + "ReadableStream teeing with byte source: read from branch2 with default reader, then close while branch1 has pending BYOB read", + "ReadableStream teeing with byte source: close when both branches have pending BYOB reads", + "ReadableStream teeing with byte source: respond() and close() while both branches are pulling" + ] + } + }, + "readable-streams/cross-realm-crash.window.js": { + "skip": "Browser-specific test" + }, + "readable-streams/default-reader.any.js": { + "fail": { + 
"expected": [ + "Second reader can read chunks after first reader was released with pending read requests" + ] + } + }, + "readable-streams/templated.any.js": { + "fail": { + "expected": [ + "ReadableStream (empty) reader: releasing the lock should reject all pending read requests" + ] + } + }, + "transferable/deserialize-error.window.js": { + "skip": "Browser-specific test" + }, + "transferable/transfer-with-messageport.window.js": { + "skip": "Browser-specific test" + }, + "writable-streams/aborting.any.js": { + "fail": { + "expected": ["WritableStreamDefaultController.signal"] + } } }