diff --git a/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js b/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js
index 8b9db25f4a800..b310271291e2e 100644
--- a/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js
+++ b/packages/react-dom/src/server/ReactDOMFizzServerBrowser.js
@@ -18,6 +18,7 @@ import {
 
 type Options = {
   signal?: AbortSignal,
+  progressiveChunkSize?: number,
 };
 
 function renderToReadableStream(
@@ -35,7 +36,11 @@
   }
   return new ReadableStream({
     start(controller) {
-      request = createRequest(children, controller);
+      request = createRequest(
+        children,
+        controller,
+        options ? options.progressiveChunkSize : undefined,
+      );
       startWork(request);
     },
     pull(controller) {
diff --git a/packages/react-dom/src/server/ReactDOMFizzServerNode.js b/packages/react-dom/src/server/ReactDOMFizzServerNode.js
index e79cbf0986912..1bc2506927dbb 100644
--- a/packages/react-dom/src/server/ReactDOMFizzServerNode.js
+++ b/packages/react-dom/src/server/ReactDOMFizzServerNode.js
@@ -21,6 +21,10 @@ function createDrainHandler(destination, request) {
   return () => startFlowing(request);
 }
 
+type Options = {
+  progressiveChunkSize?: number,
+};
+
 type Controls = {
   // Cancel any pending I/O and put anything remaining into
   // client rendered mode.
@@ -30,8 +34,13 @@
 function pipeToNodeWritable(
   children: ReactNodeList,
   destination: Writable,
+  options?: Options,
 ): Controls {
-  const request = createRequest(children, destination);
+  const request = createRequest(
+    children,
+    destination,
+    options ? options.progressiveChunkSize : undefined,
+  );
   let hasStartedFlowing = false;
   startWork(request);
   return {
diff --git a/packages/react-noop-renderer/src/ReactNoopServer.js b/packages/react-noop-renderer/src/ReactNoopServer.js
index 9e0c712661146..99392ebed6db5 100644
--- a/packages/react-noop-renderer/src/ReactNoopServer.js
+++ b/packages/react-noop-renderer/src/ReactNoopServer.js
@@ -210,7 +210,11 @@
   },
 });
 
-function render(children: React$Element<any>): Destination {
+type Options = {
+  progressiveChunkSize?: number,
+};
+
+function render(children: React$Element<any>, options?: Options): Destination {
   const destination: Destination = {
     root: null,
     placeholders: new Map(),
@@ -220,7 +224,11 @@
       ReactNoopServer.abort(request);
     },
   };
-  const request = ReactNoopServer.createRequest(children, destination);
+  const request = ReactNoopServer.createRequest(
+    children,
+    destination,
+    options ? options.progressiveChunkSize : undefined,
+  );
   ReactNoopServer.startWork(request);
   ReactNoopServer.startFlowing(request);
   return destination;
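The three entry points above only thread the new option through to createRequest; behavior is unchanged when the option is omitted. A minimal usage sketch of the Node API follows (not part of the diff; the import path, App component, res writable, and timeout are illustrative assumptions):

// Sketch only: the entry point path is an assumption for this era of the
// unstable Fizz API; the diff itself only defines pipeToNodeWritable.
import * as React from 'react';
import {pipeToNodeWritable} from 'react-dom/unstable-fizz';

function App() {
  return React.createElement('div', null, 'hello');
}

function handleRequest(res) {
  // res is a Node Writable, e.g. an http.ServerResponse. Passing 1024 here
  // restores the old hardcoded maxBoundarySize behavior; omitting the option
  // uses the new DEFAULT_PROGRESSIVE_CHUNK_SIZE of 12800 bytes.
  const controls = pipeToNodeWritable(React.createElement(App), res, {
    progressiveChunkSize: 1024,
  });
  // abort() cancels pending I/O and puts anything remaining into client
  // rendered mode, per the Controls type above.
  setTimeout(() => controls.abort(), 10000);
}
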
diff --git a/packages/react-server/src/ReactFizzServer.js b/packages/react-server/src/ReactFizzServer.js
index f03da08ab3e30..54049da40b39f 100644
--- a/packages/react-server/src/ReactFizzServer.js
+++ b/packages/react-server/src/ReactFizzServer.js
@@ -99,7 +99,7 @@ const CLOSED = 2;
 type Request = {
   +destination: Destination,
   +responseState: ResponseState,
-  +maxBoundarySize: number,
+  +progressiveChunkSize: number,
   status: 0 | 1 | 2,
   nextSegmentId: number,
   allPendingWork: number, // when it reaches zero, we can close the connection.
@@ -113,16 +113,34 @@ type Request = {
   partialBoundaries: Array<SuspenseBoundary>, // Partially completed boundaries that can flush its segments early.
 };
 
+// This is a default heuristic for how to split up the HTML content into progressive
+// loading. Our goal is to be able to display additional new content about every 500ms.
+// Faster than that is unnecessary and should be throttled on the client. It also
+// adds unnecessary overhead to do more splits. We don't know if it's a higher or lower
+// end device, but higher end devices suffer less from the overhead than lower end
+// devices do from not getting small enough pieces. We err on the side of the low end.
+// We base this on low end 3G speeds, which are about 500kbits per second. We assume
+// that there can be a reasonable drop off from max bandwidth which leaves you with
+// as little as 80%. We can receive half of that each 500ms - at best. In practice,
+// a little bandwidth is lost to processing and contention - e.g. CSS and images that
+// are downloaded along with the main content. So we estimate about half of that to be
+// the lower end throughput. In other words, we expect that you can at least show
+// about 12.5kb of content per 500ms, not counting starting latency for the first
+// paint.
+// 500 * 1024 / 8 * .8 * 0.5 / 2
+const DEFAULT_PROGRESSIVE_CHUNK_SIZE = 12800;
+
 export function createRequest(
   children: ReactNodeList,
   destination: Destination,
+  progressiveChunkSize: number = DEFAULT_PROGRESSIVE_CHUNK_SIZE,
 ): Request {
   const pingedWork = [];
   const abortSet: Set<SuspendedWork> = new Set();
   const request = {
     destination,
     responseState: createResponseState(),
-    maxBoundarySize: 1024,
+    progressiveChunkSize,
     status: BUFFERING,
     nextSegmentId: 0,
     allPendingWork: 0,
@@ -642,7 +660,7 @@ function flushSegment(
     flushSubtree(request, destination, segment);
 
     return writeEndSuspenseBoundary(destination);
-  } else if (boundary.byteSize > request.maxBoundarySize) {
+  } else if (boundary.byteSize > request.progressiveChunkSize) {
     // This boundary is large and will be emitted separately so that we can progressively show
     // other content. We add it to the queue during the flush because we have to ensure that
     // the parent flushes first so that there's something to inject it into.
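
For reference, the default spelled out step by step, as a plain JavaScript sketch mirroring the one-line formula in the comment above (the variable names are illustrative, not from the source):

// Each step mirrors one factor in `500 * 1024 / 8 * .8 * 0.5 / 2`.
const lowEnd3gBitsPerSecond = 500 * 1024;          // 512000 bits/s
const bytesPerSecond = lowEnd3gBitsPerSecond / 8;  // 64000 bytes/s
const afterDropOff = bytesPerSecond * 0.8;         // 51200 bytes/s at ~80% of max bandwidth
const perHalfSecond = afterDropOff * 0.5;          // 25600 bytes per 500ms window, at best
const afterContention = perHalfSecond / 2;         // 12800 bytes once CSS/images share the pipe
console.log(afterContention);                      // 12800 === DEFAULT_PROGRESSIVE_CHUNK_SIZE

Any Suspense boundary whose rendered HTML exceeds this many bytes is outlined by flushSegment and emitted separately, so smaller content ahead of it can paint within the 500ms budget.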