Skip to content

Commit

Permalink
Retain in-progress caches on the root, per lane
Browse files Browse the repository at this point in the history
All the data that loaded as a result of a single transition/update
should share the same cache. This includes nested content that gets
progressively "filled in" after the initial shell is displayed.

If the shell itself were wrapped in a Cache boundary, such that the
cache can commit without suspending, then this is easy: once the boundary
mounts, the cache is attached to the React tree.

The tricky part is when the shell does not include a cache boundary. In
the naive approach, since the cache is not part of the initial tree, it
does not get retained; during the retry, a fresh cache is created,
leading to duplicate requests and possibly an infinite loop as requests
are endlessly created then discarded.

This is the essential problem we faced several years ago when building
Simple Cache Provider (later the react-cache package).

Our solution is to retain in-flight caches on the root, associated by
lane. The cache is cleared from the root once all of the lanes that depend
on it finish rendering.

Because progressively rendering nested boundaries ("retry" updates) uses
a different lane from the update that spawned it, we must take extra
care to transfer the cache to the new lane when scheduling the retry.
  • Loading branch information
acdlite committed Dec 14, 2020
1 parent 299405f commit f2e24bd
Show file tree
Hide file tree
Showing 10 changed files with 560 additions and 61 deletions.
30 changes: 23 additions & 7 deletions packages/react-reconciler/src/ReactFiberCommitWork.new.js
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import type {FunctionComponentUpdateQueue} from './ReactFiberHooks.new';
import type {Wakeable} from 'shared/ReactTypes';
import type {ReactPriorityLevel} from './ReactInternalTypes';
import type {OffscreenState} from './ReactFiberOffscreenComponent';
import type {Cache} from './ReactFiberCacheComponent';

import {unstable_wrap as Schedule_tracing_wrap} from 'scheduler/tracing';
import {
Expand Down Expand Up @@ -55,6 +56,7 @@ import {
ScopeComponent,
OffscreenComponent,
LegacyHiddenComponent,
CacheComponent,
} from './ReactWorkTags';
import {
invokeGuardedCallback,
Expand Down Expand Up @@ -1488,7 +1490,11 @@ function commitDeletion(
}
}

function commitWork(current: Fiber | null, finishedWork: Fiber): void {
function commitWork(
current: Fiber | null,
finishedWork: Fiber,
cache: Cache | null,
): void {
if (!supportsMutation) {
switch (finishedWork.tag) {
case FunctionComponent:
Expand Down Expand Up @@ -1524,11 +1530,11 @@ function commitWork(current: Fiber | null, finishedWork: Fiber): void {
}
case SuspenseComponent: {
commitSuspenseComponent(finishedWork);
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case SuspenseListComponent: {
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case HostRoot: {
Expand Down Expand Up @@ -1639,11 +1645,11 @@ function commitWork(current: Fiber | null, finishedWork: Fiber): void {
}
case SuspenseComponent: {
commitSuspenseComponent(finishedWork);
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case SuspenseListComponent: {
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case IncompleteClassComponent: {
Expand Down Expand Up @@ -1672,6 +1678,8 @@ function commitWork(current: Fiber | null, finishedWork: Fiber): void {
hideOrUnhideAllChildren(finishedWork, isHidden);
return;
}
case CacheComponent:
return;
}
invariant(
false,
Expand Down Expand Up @@ -1747,7 +1755,10 @@ function commitSuspenseHydrationCallbacks(
}
}

function attachSuspenseRetryListeners(finishedWork: Fiber) {
function attachSuspenseRetryListeners(
finishedWork: Fiber,
cache: Cache | null,
) {
// If this boundary just timed out, then it will have a set of wakeables.
// For each wakeable, attach a listener so that when it resolves, React
// attempts to re-render the boundary in the primary (pre-timeout) state.
Expand All @@ -1760,7 +1771,12 @@ function attachSuspenseRetryListeners(finishedWork: Fiber) {
}
wakeables.forEach(wakeable => {
// Memoize using the boundary fiber to prevent redundant listeners.
let retry = resolveRetryWakeable.bind(null, finishedWork, wakeable);
let retry = resolveRetryWakeable.bind(
null,
finishedWork,
wakeable,
cache,
);
if (!retryCache.has(wakeable)) {
if (enableSchedulerTracing) {
if (wakeable.__reactDoNotTraceInteractions !== true) {
Expand Down
30 changes: 23 additions & 7 deletions packages/react-reconciler/src/ReactFiberCommitWork.old.js
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import type {FunctionComponentUpdateQueue} from './ReactFiberHooks.old';
import type {Wakeable} from 'shared/ReactTypes';
import type {ReactPriorityLevel} from './ReactInternalTypes';
import type {OffscreenState} from './ReactFiberOffscreenComponent';
import type {Cache} from './ReactFiberCacheComponent';

import {unstable_wrap as Schedule_tracing_wrap} from 'scheduler/tracing';
import {
Expand Down Expand Up @@ -56,6 +57,7 @@ import {
ScopeComponent,
OffscreenComponent,
LegacyHiddenComponent,
CacheComponent,
} from './ReactWorkTags';
import {
invokeGuardedCallback,
Expand Down Expand Up @@ -1489,7 +1491,11 @@ function commitDeletion(
}
}

function commitWork(current: Fiber | null, finishedWork: Fiber): void {
function commitWork(
current: Fiber | null,
finishedWork: Fiber,
cache: Cache | null,
): void {
if (!supportsMutation) {
switch (finishedWork.tag) {
case FunctionComponent:
Expand Down Expand Up @@ -1525,11 +1531,11 @@ function commitWork(current: Fiber | null, finishedWork: Fiber): void {
}
case SuspenseComponent: {
commitSuspenseComponent(finishedWork);
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case SuspenseListComponent: {
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case HostRoot: {
Expand Down Expand Up @@ -1640,11 +1646,11 @@ function commitWork(current: Fiber | null, finishedWork: Fiber): void {
}
case SuspenseComponent: {
commitSuspenseComponent(finishedWork);
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case SuspenseListComponent: {
attachSuspenseRetryListeners(finishedWork);
attachSuspenseRetryListeners(finishedWork, cache);
return;
}
case IncompleteClassComponent: {
Expand Down Expand Up @@ -1673,6 +1679,8 @@ function commitWork(current: Fiber | null, finishedWork: Fiber): void {
hideOrUnhideAllChildren(finishedWork, isHidden);
return;
}
case CacheComponent:
return;
}
invariant(
false,
Expand Down Expand Up @@ -1748,7 +1756,10 @@ function commitSuspenseHydrationCallbacks(
}
}

function attachSuspenseRetryListeners(finishedWork: Fiber) {
function attachSuspenseRetryListeners(
finishedWork: Fiber,
cache: Cache | null,
) {
// If this boundary just timed out, then it will have a set of wakeables.
// For each wakeable, attach a listener so that when it resolves, React
// attempts to re-render the boundary in the primary (pre-timeout) state.
Expand All @@ -1761,7 +1772,12 @@ function attachSuspenseRetryListeners(finishedWork: Fiber) {
}
wakeables.forEach(wakeable => {
// Memoize using the boundary fiber to prevent redundant listeners.
let retry = resolveRetryWakeable.bind(null, finishedWork, wakeable);
let retry = resolveRetryWakeable.bind(
null,
finishedWork,
wakeable,
cache,
);
if (!retryCache.has(wakeable)) {
if (enableSchedulerTracing) {
if (wakeable.__reactDoNotTraceInteractions !== true) {
Expand Down
146 changes: 132 additions & 14 deletions packages/react-reconciler/src/ReactFiberLane.new.js
Original file line number Diff line number Diff line change
Expand Up @@ -742,6 +742,7 @@ export function markRootFinished(root: FiberRoot, remainingLanes: Lanes) {
const entanglements = root.entanglements;
const eventTimes = root.eventTimes;
const expirationTimes = root.expirationTimes;
const pooledCache = root.pooledCache;

// Clear the lanes that no longer have pending work
let lanes = noLongerPendingLanes;
Expand All @@ -753,15 +754,31 @@ export function markRootFinished(root: FiberRoot, remainingLanes: Lanes) {
eventTimes[index] = NoTimestamp;
expirationTimes[index] = NoTimestamp;

lanes &= ~lane;
}
if (enableCache) {
// Subsequent loads in this lane should use a fresh cache.
// TODO: If a cache is no longer associated with any lane, we should issue
// an abort signal.
const caches = root.caches;
if (caches !== null) {
if (remainingLanes === 0) {
// Fast path. Clear all caches at once.
root.caches = createLaneMap(null);
root.pooledCache = null;
} else {
const cache = caches[index];
if (cache !== null) {
caches[index] = null;
if (cache === pooledCache) {
// The pooled cache is now part of the committed tree. We'll now
// clear it so that the next transition gets a fresh cache.
root.pooledCache = null;
}
}
}
}
}

if (enableCache) {
// Clear the pooled cache so subsequent updates get fresh data.
// TODO: This is very naive and only works if the shell of a cache boundary
// doesn't suspend. The next, key feature is to preserve caches across
// multiple attempts (suspend -> ping) to render a new tree.
root.pooledCache = null;
lanes &= ~lane;
}
}

Expand All @@ -785,12 +802,62 @@ export function requestFreshCache(root: FiberRoot, renderLanes: Lanes): Cache {
return (null: any);
}

// Check if there's a pooled cache. This is really just a batching heuristic
// so that two transitions that happen in a similar timeframe can share the
// same cache.
const pooledCache = root.pooledCache;
if (pooledCache !== null) {
return pooledCache;
// 1. Check if the currently rendering lanes already have a pending cache
// associated with them. If so, use this cache. If for some reason two or
// more lanes have different caches, pick the highest priority one.
// 2. Otherwise, check the root's `pooledCache`. This the oldest cache
// that has not yet been committed. This is really just a batching
// heuristic so that two transitions that happen in a similar timeframe can
// share the same cache. If it exists, use this cache.
// 3. If there's no pooled cache, create a fresh cache. This is now the
// pooled cache.

let caches = root.caches;

// TODO: There should be a primary render lane, and we should use whatever
// cache is associated with that one.
if (caches === null) {
caches = root.caches = createLaneMap(null);
} else {
let lanes = renderLanes;
while (lanes > 0) {
const lane = getHighestPriorityLanes(lanes);
const index = laneToIndex(lane);
const inProgressCache: Cache | null = caches[index];
if (inProgressCache !== null) {
// This render lane already has a cache associated with it. Reuse it.

// If the other render lanes are not already associated with a cache,
// associate them with this one.
let otherRenderLanes = renderLanes & ~lane;
while (otherRenderLanes > 0) {
const otherIndex = pickArbitraryLaneIndex(otherRenderLanes);
const otherLane = 1 << otherIndex;
// We shouldn't overwrite a cache that already exists, since that could
// lead to dropped requests or data, i.e. if the current render suspends.
if (caches[otherIndex] === null) {
caches[otherIndex] = inProgressCache;
}
otherRenderLanes &= ~otherLane;
}
return inProgressCache;
}
lanes &= ~lane;
}
// There are no in-progress caches associated with the current render. Check
// if there's a pooled cache.
const pooledCache = root.pooledCache;
if (pooledCache !== null) {
// Associate the pooled cache with each of the render lanes.
lanes = renderLanes;
while (lanes > 0) {
const index = pickArbitraryLaneIndex(lanes);
const lane = 1 << index;
caches[index] = pooledCache;
lanes &= ~lane;
}
return pooledCache;
}
}

// Create a fresh cache.
Expand All @@ -801,8 +868,59 @@ export function requestFreshCache(root: FiberRoot, renderLanes: Lanes): Cache {

// This is now the pooled cache.
root.pooledCache = freshCache;

// Associate the new cache with each of the render lanes.
let lanes = renderLanes;
while (lanes > 0) {
const index = pickArbitraryLaneIndex(lanes);
const lane = 1 << index;
caches[index] = freshCache;
lanes &= ~lane;
}

return freshCache;
}

export function getWorkInProgressCache(
root: FiberRoot,
renderLanes: Lanes,
): Cache | null {
// TODO: There should be a primary render lane, and we should use whatever
// cache is associated with that one.
const caches = root.caches;
if (caches !== null) {
let lanes = renderLanes;
while (lanes > 0) {
const lane = getHighestPriorityLanes(lanes);
const index = laneToIndex(lane);
const inProgressCache: Cache | null = caches[index];
if (inProgressCache !== null) {
return inProgressCache;
}
lanes &= ~lane;
}
}
return null;
}

export function transferCacheToSpawnedLane(
root: FiberRoot,
cache: Cache,
lane: Lane,
) {
const index = laneToIndex(lane);
let caches = root.caches;
if (caches !== null) {
const existingCache: Cache | null = caches[index];
if (existingCache === null) {
caches[index] = cache;
}
} else {
caches = root.caches = createLaneMap(null);
caches[index] = cache;
}
}

export function getBumpedLaneForHydration(
root: FiberRoot,
renderLanes: Lanes,
Expand Down
Loading

0 comments on commit f2e24bd

Please sign in to comment.