From fab1ce58f372fc185a18fb2399948ffd3ba19ba3 Mon Sep 17 00:00:00 2001 From: Patrick Hulce Date: Wed, 6 Sep 2017 14:31:41 -0700 Subject: [PATCH] feat(predictive-perf): add network estimation (#3187) * feat(predictive-perf): add network estimation * feedback * address feedback * brendan feedback * remove unknown type --- lighthouse-core/audits/predictive-perf.js | 75 ++++- .../dependency-graph/estimator/estimator.js | 306 ++++++++++++++++++ .../estimator/tcp-connection.js | 147 +++++++++ .../computed/dependency-graph/network-node.js | 55 ++++ .../gather/computed/dependency-graph/node.js | 39 ++- .../gather/computed/page-dependency-graph.js | 50 +-- .../test/audits/predictive-perf-test.js | 12 +- .../estimator/estimator-test.js | 104 ++++++ .../estimator/tcp-connection-test.js | 214 ++++++++++++ .../computed/dependency-graph/node-test.js | 26 ++ .../computed/page-dependency-graph-test.js | 31 -- 11 files changed, 997 insertions(+), 62 deletions(-) create mode 100644 lighthouse-core/gather/computed/dependency-graph/estimator/estimator.js create mode 100644 lighthouse-core/gather/computed/dependency-graph/estimator/tcp-connection.js create mode 100644 lighthouse-core/gather/computed/dependency-graph/network-node.js create mode 100644 lighthouse-core/test/gather/computed/dependency-graph/estimator/estimator-test.js create mode 100644 lighthouse-core/test/gather/computed/dependency-graph/estimator/tcp-connection-test.js diff --git a/lighthouse-core/audits/predictive-perf.js b/lighthouse-core/audits/predictive-perf.js index 8068af3ea7b4..38917b9390d5 100644 --- a/lighthouse-core/audits/predictive-perf.js +++ b/lighthouse-core/audits/predictive-perf.js @@ -8,6 +8,7 @@ const Audit = require('./audit'); const Util = require('../report/v2/renderer/util.js'); const PageDependencyGraph = require('../gather/computed/page-dependency-graph.js'); +const Node = require('../gather/computed/dependency-graph/node.js'); // Parameters (in ms) for log-normal CDF scoring.
To see the curve: // https://www.desmos.com/calculator/rjp0lbit8y @@ -30,6 +31,52 @@ class PredictivePerf extends Audit { }; } + /** + * @param {!Node} dependencyGraph + * @param {!TraceOfTabArtifact} traceOfTab + * @return {!Node} + */ + static getOptimisticFMPGraph(dependencyGraph, traceOfTab) { + const fmp = traceOfTab.timestamps.firstMeaningfulPaint; + return dependencyGraph.cloneWithRelationships(node => { + if (node.endTime > fmp) return false; + if (node.type !== Node.TYPES.NETWORK) return true; + return node.record.priority() === 'VeryHigh'; // proxy for render-blocking + }); + } + + /** + * @param {!Node} dependencyGraph + * @param {!TraceOfTabArtifact} traceOfTab + * @return {!Node} + */ + static getPessimisticFMPGraph(dependencyGraph, traceOfTab) { + const fmp = traceOfTab.timestamps.firstMeaningfulPaint; + return dependencyGraph.cloneWithRelationships(node => { + return node.endTime <= fmp; + }); + } + + /** + * @param {!Node} dependencyGraph + * @return {!Node} + */ + static getOptimisticTTCIGraph(dependencyGraph) { + return dependencyGraph.cloneWithRelationships(node => { + return node.record._resourceType && node.record._resourceType._name === 'script' || + node.record.priority() === 'High' || + node.record.priority() === 'VeryHigh'; + }); + } + + /** + * @param {!Node} dependencyGraph + * @return {!Node} + */ + static getPessimisticTTCIGraph(dependencyGraph) { + return dependencyGraph; + } + /** * @param {!Artifacts} artifacts * @return {!AuditResult} @@ -37,18 +84,36 @@ class PredictivePerf extends Audit { static audit(artifacts) { const trace = artifacts.traces[Audit.DEFAULT_PASS]; const devtoolsLogs = artifacts.devtoolsLogs[Audit.DEFAULT_PASS]; - return artifacts.requestPageDependencyGraph(trace, devtoolsLogs).then(graph => { - const rawValue = PageDependencyGraph.computeGraphDuration(graph); + return Promise.all([ + artifacts.requestPageDependencyGraph(trace, devtoolsLogs), + artifacts.requestTraceOfTab(trace), + ]).then(([graph, traceOfTab]) => { + const graphs = { + optimisticFMP: PredictivePerf.getOptimisticFMPGraph(graph, traceOfTab), + pessimisticFMP: PredictivePerf.getPessimisticFMPGraph(graph, traceOfTab), + optimisticTTCI: PredictivePerf.getOptimisticTTCIGraph(graph, traceOfTab), + pessimisticTTCI: PredictivePerf.getPessimisticTTCIGraph(graph, traceOfTab), + }; + + let sum = 0; + const values = {}; + Object.keys(graphs).forEach(key => { + values[key] = PageDependencyGraph.computeGraphDuration(graphs[key]); + sum += values[key]; + }); + + const meanDuration = sum / Object.keys(values).length; const score = Audit.computeLogNormalScore( - rawValue, + meanDuration, SCORING_POINT_OF_DIMINISHING_RETURNS, SCORING_MEDIAN ); return { score, - rawValue, - displayValue: Util.formatMilliseconds(rawValue), + rawValue: meanDuration, + displayValue: Util.formatMilliseconds(meanDuration), + extendedInfo: {value: values}, }; }); } diff --git a/lighthouse-core/gather/computed/dependency-graph/estimator/estimator.js b/lighthouse-core/gather/computed/dependency-graph/estimator/estimator.js new file mode 100644 index 000000000000..c647a5456f7a --- /dev/null +++ b/lighthouse-core/gather/computed/dependency-graph/estimator/estimator.js @@ -0,0 +1,306 @@ +/** + * @license Copyright 2017 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + */ +'use strict'; + +const Node = require('../node'); +const TcpConnection = require('./tcp-connection'); +const emulation = require('../../../../lib/emulation').settings; + +// see https://cs.chromium.org/search/?q=kDefaultMaxNumDelayableRequestsPerClient&sq=package:chromium&type=cs +const DEFAULT_MAXIMUM_CONCURRENT_REQUESTS = 10; +const DEFAULT_FALLBACK_TTFB = 30; +const DEFAULT_RTT = emulation.TYPICAL_MOBILE_THROTTLING_METRICS.targetLatency; +const DEFAULT_THROUGHPUT = emulation.TYPICAL_MOBILE_THROTTLING_METRICS.targetDownloadThroughput * 8; + +const TLS_SCHEMES = ['https', 'wss']; + +function groupBy(items, keyFunc) { + const grouped = new Map(); + items.forEach(item => { + const key = keyFunc(item); + const group = grouped.get(key) || []; + group.push(item); + grouped.set(key, group); + }); + + return grouped; +} + +class Estimator { + /** + * @param {!Node} graph + * @param {{rtt: number, throughput: number, fallbackTTFB: number, + * maximumConcurrentRequests: number}=} options + */ + constructor(graph, options) { + this._graph = graph; + this._options = Object.assign( + { + rtt: DEFAULT_RTT, + throughput: DEFAULT_THROUGHPUT, + fallbackTTFB: DEFAULT_FALLBACK_TTFB, + maximumConcurrentRequests: DEFAULT_MAXIMUM_CONCURRENT_REQUESTS, + }, + options + ); + + this._rtt = this._options.rtt; + this._throughput = this._options.throughput; + this._fallbackTTFB = this._options.fallbackTTFB; + this._maximumConcurrentRequests = Math.min( + TcpConnection.maximumSaturatedConnections(this._rtt, this._throughput), + this._options.maximumConcurrentRequests + ); + } + + /** + * Computes the time to first byte of a network record. Returns Infinity if not available. + * @param {!WebInspector.NetworkRequest} record + * @return {number} + */ + static getTTFB(record) { + const timing = record._timing; + return (timing && timing.receiveHeadersEnd - timing.sendEnd) || Infinity; + } + + /** + * Initializes this._networkRecords with the array of network records from the graph. + */ + _initializeNetworkRecords() { + this._networkRecords = []; + + this._graph.getRootNode().traverse(node => { + if (node.type === Node.TYPES.NETWORK) { + this._networkRecords.push(node.record); + } + }); + } + + /** + * Initializes this._connections with the map of available TcpConnections by connectionId. + */ + _initializeNetworkConnections() { + const connections = new Map(); + const recordsByConnection = groupBy( + this._networkRecords, + record => record.connectionId + ); + + for (const [connectionId, records] of recordsByConnection.entries()) { + const isTLS = TLS_SCHEMES.includes(records[0].parsedURL.scheme); + + // We'll approximate how much time the server for a connection took to respond after receiving + // the request by computing the minimum TTFB time for requests on that connection. + // TTFB = one way latency + server response time + one way latency + // Even though TTFB is greater than server response time, the RTT is underaccounted for by + // not varying per-server and so the difference roughly evens out. 
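+ // For example, if the requests on a connection observed TTFBs of 150ms, 90ms, and 120ms, the
+ // estimated server response time for that connection would be 90ms (the minimum observed);
+ // the fallback TTFB below is used only when no request on the connection has usable timing data.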
+ // TODO(patrickhulce): investigate a way to identify per-server RTT + let estimatedResponseTime = Math.min(...records.map(Estimator.getTTFB)); + + // If we couldn't find a TTFB for the requests, use the fallback TTFB instead. + if (!Number.isFinite(estimatedResponseTime)) { + estimatedResponseTime = this._fallbackTTFB; + } + + const connection = new TcpConnection( + this._rtt, + this._throughput, + estimatedResponseTime, + isTLS + ); + + connections.set(connectionId, connection); + } + + this._connections = connections; + return connections; + } + + /** + * Initializes the various state data structures such as _nodesInQueue and _nodesCompleted. + */ + _initializeAuxiliaryData() { + this._nodeTiming = new Map(); + this._nodesCompleted = new Set(); + this._nodesInProgress = new Set(); + this._nodesInQueue = new Set(); // TODO: replace this with priority queue + this._connectionsInUse = new Set(); + } + + /** + * @param {!Node} node + */ + _enqueueNodeIfPossible(node) { + const dependencies = node.getDependencies(); + if ( + !this._nodesCompleted.has(node) && + dependencies.every(dependency => this._nodesCompleted.has(dependency)) + ) { + this._nodesInQueue.add(node); + } + } + + /** + * @param {!Node} node + * @param {number} totalElapsedTime + */ + _startNodeIfPossible(node, totalElapsedTime) { + if (node.type !== Node.TYPES.NETWORK) return; + + const connection = this._connections.get(node.record.connectionId); + + if ( + this._nodesInProgress.size >= this._maximumConcurrentRequests || + this._connectionsInUse.has(connection) + ) { + return; + } + + this._nodesInQueue.delete(node); + this._nodesInProgress.add(node); + this._nodeTiming.set(node, { + startTime: totalElapsedTime, + timeElapsed: 0, + timeElapsedOvershoot: 0, + bytesDownloaded: 0, + }); + + this._connectionsInUse.add(connection); + } + + /** + * Updates each connection in use with the available throughput based on the number of network requests + * currently in flight. + */ + _updateNetworkCapacity() { + for (const connection of this._connectionsInUse) { + connection.setThroughput(this._throughput / this._nodesInProgress.size); + } + } + + /** + * Estimates the number of milliseconds remaining given current condidtions before the node is complete. + * @param {!Node} node + * @return {number} + */ + _estimateTimeRemaining(node) { + if (node.type !== Node.TYPES.NETWORK) throw new Error('Unsupported'); + + const timingData = this._nodeTiming.get(node); + const connection = this._connections.get(node.record.connectionId); + const calculation = connection.simulateDownloadUntil( + node.record.transferSize - timingData.bytesDownloaded, + timingData.timeElapsed + ); + + const estimate = calculation.timeElapsed + timingData.timeElapsedOvershoot; + timingData.estimatedTimeElapsed = estimate; + return estimate; + } + + /** + * Computes and returns the minimum estimated completion time of the nodes currently in progress. + * @return {number} + */ + _findNextNodeCompletionTime() { + let minimumTime = Infinity; + for (const node of this._nodesInProgress) { + minimumTime = Math.min(minimumTime, this._estimateTimeRemaining(node)); + } + + return minimumTime; + } + + /** + * Given a time period, computes the progress toward completion that the node made durin that time. 
+ * @param {!Node} node + * @param {number} timePeriodLength + * @param {number} totalElapsedTime + */ + _updateProgressMadeInTimePeriod(node, timePeriodLength, totalElapsedTime) { + if (node.type !== Node.TYPES.NETWORK) throw new Error('Unsupported'); + + const timingData = this._nodeTiming.get(node); + const connection = this._connections.get(node.record.connectionId); + const calculation = connection.simulateDownloadUntil( + node.record.transferSize - timingData.bytesDownloaded, + timingData.timeElapsed, + timePeriodLength - timingData.timeElapsedOvershoot + ); + + connection.setCongestionWindow(calculation.congestionWindow); + + if (timingData.estimatedTimeElapsed === timePeriodLength) { + timingData.endTime = totalElapsedTime; + + connection.setWarmed(true); + this._connectionsInUse.delete(connection); + + this._nodesCompleted.add(node); + this._nodesInProgress.delete(node); + + for (const dependent of node.getDependents()) { + this._enqueueNodeIfPossible(dependent); + } + } else { + timingData.timeElapsed += calculation.timeElapsed; + timingData.timeElapsedOvershoot += calculation.timeElapsed - timePeriodLength; + timingData.bytesDownloaded += calculation.bytesDownloaded; + } + } + + /** + * Estimates the time taken to process all of the graph's nodes. + * @return {number} + */ + estimate() { + // initialize all the necessary data containers + this._initializeNetworkRecords(); + this._initializeNetworkConnections(); + this._initializeAuxiliaryData(); + + const nodesInQueue = this._nodesInQueue; + const nodesInProgress = this._nodesInProgress; + + // add root node to queue + nodesInQueue.add(this._graph.getRootNode()); + + let depth = 0; + let totalElapsedTime = 0; + while (nodesInQueue.size || nodesInProgress.size) { + depth++; + + // move all possible queued nodes to in progress + for (const node of nodesInQueue) { + this._startNodeIfPossible(node, totalElapsedTime); + } + + // set the available throughput for all connections based on # inflight + this._updateNetworkCapacity(); + + // find the time that the next node will finish + const minimumTime = this._findNextNodeCompletionTime(); + totalElapsedTime += minimumTime; + + // update how far each node will progress until that point + for (const node of nodesInProgress) { + this._updateProgressMadeInTimePeriod( + node, + minimumTime, + totalElapsedTime + ); + } + + if (depth > 10000) { + throw new Error('Maximum depth exceeded: estimate'); + } + } + + return totalElapsedTime; + } +} + +module.exports = Estimator; diff --git a/lighthouse-core/gather/computed/dependency-graph/estimator/tcp-connection.js b/lighthouse-core/gather/computed/dependency-graph/estimator/tcp-connection.js new file mode 100644 index 000000000000..fe90433e797d --- /dev/null +++ b/lighthouse-core/gather/computed/dependency-graph/estimator/tcp-connection.js @@ -0,0 +1,147 @@ +/** + * @license Copyright 2017 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ */ +'use strict'; + +const INITIAL_CONGESTION_WINDOW = 10; +const TCP_SEGMENT_SIZE = 1460; + +class TcpConnection { + /** + * @param {number} rtt + * @param {number} throughput + * @param {number=} serverLatency + * @param {boolean=} ssl + */ + constructor(rtt, throughput, serverLatency = 0, ssl = true) { + this._warmed = false; + this._ssl = ssl; + this._rtt = rtt; + this._throughput = throughput; + this._serverLatency = serverLatency; + this._congestionWindow = INITIAL_CONGESTION_WINDOW; + } + + /** + * @param {number} rtt + * @param {number} availableThroughput + * @return {number} + */ + static maximumSaturatedConnections(rtt, availableThroughput) { + const roundTripsPerSecond = 1000 / rtt; + const bytesPerRoundTrip = TCP_SEGMENT_SIZE; + const bytesPerSecond = roundTripsPerSecond * bytesPerRoundTrip; + const minimumThroughputRequiredPerRequest = bytesPerSecond * 8; + return Math.floor(availableThroughput / minimumThroughputRequiredPerRequest); + } + + /** + * @return {number} + */ + _computeMaximumCongestionWindowInSegments() { + const bytesPerSecond = this._throughput / 8; + const secondsPerRoundTrip = this._rtt / 1000; + const bytesPerRoundTrip = bytesPerSecond * secondsPerRoundTrip; + return Math.floor(bytesPerRoundTrip / TCP_SEGMENT_SIZE); + } + + /** + * @param {number} throughput + */ + setThroughput(throughput) { + this._throughput = throughput; + } + + /** + * @param {number} congestion + */ + setCongestionWindow(congestion) { + this._congestionWindow = congestion; + } + + /** + * @param {boolean} warmed + */ + setWarmed(warmed) { + this._warmed = warmed; + } + + /** + * Simulates a network download of a particular number of bytes over an optional maximum amount of time + * and returns information about the ending state. + * + * See https://hpbn.co/building-blocks-of-tcp/#three-way-handshake and + * https://hpbn.co/transport-layer-security-tls/#tls-handshake for details. + * + * @param {number} bytesToDownload + * @param {number=} timeAlreadyElapsed + * @param {number=} maximumTimeToElapse + * @return {{timeElapsed: number, roundTrips: number, bytesDownloaded: number, congestionWindow: number}} + */ + simulateDownloadUntil(bytesToDownload, timeAlreadyElapsed = 0, maximumTimeToElapse = Infinity) { + const twoWayLatency = this._rtt; + const oneWayLatency = twoWayLatency / 2; + const maximumCongestionWindow = this._computeMaximumCongestionWindowInSegments(); + + let handshakeAndRequest = oneWayLatency; + if (!this._warmed) { + handshakeAndRequest = + // SYN + oneWayLatency + + // SYN ACK + oneWayLatency + + // ACK + initial request + oneWayLatency + + // ClientHello/ServerHello assuming TLS False Start is enabled (https://istlsfastyet.com/#server-performance). + (this._ssl ? 
twoWayLatency : 0); + } + + let roundTrips = Math.ceil(handshakeAndRequest / twoWayLatency); + const timeToFirstByte = handshakeAndRequest + this._serverLatency + oneWayLatency; + const timeElapsedForTTFB = Math.max(timeToFirstByte - timeAlreadyElapsed, 0); + const maximumDownloadTimeToElapse = maximumTimeToElapse - timeElapsedForTTFB; + + let congestionWindow = Math.min(this._congestionWindow, maximumCongestionWindow); + let bytesDownloaded = 0; + if (timeElapsedForTTFB > 0) { + bytesDownloaded = congestionWindow * TCP_SEGMENT_SIZE; + } else { + roundTrips = 0; + } + + let downloadTimeElapsed = 0; + let bytesRemaining = bytesToDownload - bytesDownloaded; + while (bytesRemaining > 0 && downloadTimeElapsed <= maximumDownloadTimeToElapse) { + roundTrips++; + downloadTimeElapsed += twoWayLatency; + congestionWindow = Math.max(Math.min(maximumCongestionWindow, congestionWindow * 2), 1); + + const bytesDownloadedInWindow = congestionWindow * TCP_SEGMENT_SIZE; + bytesDownloaded += bytesDownloadedInWindow; + bytesRemaining -= bytesDownloadedInWindow; + } + + const timeElapsed = timeElapsedForTTFB + downloadTimeElapsed; + bytesDownloaded = Math.min(bytesDownloaded, bytesToDownload); + + if (Number.isFinite(maximumTimeToElapse)) { + return { + roundTrips, + timeElapsed, + bytesDownloaded, + congestionWindow, + }; + } + + return { + roundTrips, + timeElapsed, + bytesDownloaded, + congestionWindow, + }; + } +} + +module.exports = TcpConnection; diff --git a/lighthouse-core/gather/computed/dependency-graph/network-node.js b/lighthouse-core/gather/computed/dependency-graph/network-node.js new file mode 100644 index 000000000000..d5fc2ccf67ae --- /dev/null +++ b/lighthouse-core/gather/computed/dependency-graph/network-node.js @@ -0,0 +1,55 @@ +/** + * @license Copyright 2017 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + */ +'use strict'; + +const Node = require('./node'); + +class NetworkNode extends Node { + /** + * @param {!WebInspector.NetworkRequest} networkRecord + */ + constructor(networkRecord) { + super(networkRecord.requestId); + this._record = networkRecord; + } + + /** + * @return {string} + */ + get type() { + return Node.TYPES.NETWORK; + } + + /** + * @return {number} + */ + get startTime() { + return this._record.startTime * 1000 * 1000; + } + + /** + * @return {number} + */ + get endTime() { + return this._record.endTime * 1000 * 1000; + } + + /** + * @return {!WebInspector.NetworkRequest} + */ + get record() { + return this._record; + } + + /** + * @return {!NetworkNode} + */ + cloneWithoutRelationships() { + return new NetworkNode(this._record); + } +} + +module.exports = NetworkNode; diff --git a/lighthouse-core/gather/computed/dependency-graph/node.js b/lighthouse-core/gather/computed/dependency-graph/node.js index 5d1539b3f093..2ead8f2b6999 100644 --- a/lighthouse-core/gather/computed/dependency-graph/node.js +++ b/lighthouse-core/gather/computed/dependency-graph/node.js @@ -18,7 +18,6 @@ * these methods are called and we can always start traversal at the root node. 
*/ class Node { - /** * @param {string|number} id */ @@ -35,6 +34,27 @@ class Node { return this._id; } + /** + * @return {string} + */ + get type() { + throw new Error('Unimplemented'); + } + + /** + * @return {number} + */ + get startTime() { + throw new Error('Unimplemented'); + } + + /** + * @return {number} + */ + get endTime() { + throw new Error('Unimplemented'); + } + /** * @return {!Array} */ @@ -56,8 +76,14 @@ class Node { */ getRootNode() { let rootNode = this; - while (rootNode._dependencies.length) { + let maxDepth = 1000; + while (rootNode._dependencies.length && maxDepth) { rootNode = rootNode._dependencies[0]; + maxDepth--; + } + + if (!maxDepth) { + throw new Error('Maximum depth exceeded: getRootNode'); } return rootNode; @@ -121,6 +147,11 @@ class Node { if (!shouldIncludeNode(originalNode)) return; const clonedNode = originalNode.cloneWithoutRelationships(); idToNodeMap.set(clonedNode.id, clonedNode); + }); + + rootNode.traverse(originalNode => { + if (!shouldIncludeNode(originalNode)) return; + const clonedNode = idToNodeMap.get(originalNode.id); for (const dependency of originalNode._dependencies) { const clonedDependency = idToNodeMap.get(dependency.id); @@ -177,4 +208,8 @@ class Node { } } +Node.TYPES = { + NETWORK: 'network', +}; + module.exports = Node; diff --git a/lighthouse-core/gather/computed/page-dependency-graph.js b/lighthouse-core/gather/computed/page-dependency-graph.js index 58bf76377f48..4c5a44d0ad23 100644 --- a/lighthouse-core/gather/computed/page-dependency-graph.js +++ b/lighthouse-core/gather/computed/page-dependency-graph.js @@ -6,8 +6,8 @@ 'use strict'; const ComputedArtifact = require('./computed-artifact'); -const Node = require('./dependency-graph/node'); -const Emulation = require('../../lib/emulation'); +const NetworkNode = require('./dependency-graph/network-node'); +const GraphEstimator = require('./dependency-graph/estimator/estimator'); class PageDependencyGraphArtifact extends ComputedArtifact { get name() { @@ -43,7 +43,7 @@ class PageDependencyGraphArtifact extends ComputedArtifact { const urlToNodeMap = new Map(); networkRecords.forEach(record => { - const node = new Node(record.requestId); + const node = new NetworkNode(record); idToNodeMap.set(record.requestId, node); if (urlToNodeMap.has(record.url)) { @@ -79,26 +79,34 @@ class PageDependencyGraphArtifact extends ComputedArtifact { * @return {number} */ static computeGraphDuration(rootNode) { - const depthByNodeId = new Map(); - const getMax = arr => Array.from(arr).reduce((max, next) => Math.max(max, next), 0); - - let startingMax = Infinity; - let endingMax = Infinity; - while (endingMax === Infinity || startingMax > endingMax) { - startingMax = endingMax; - endingMax = 0; - - rootNode.traverse(node => { - const dependencies = node.getDependencies(); - const dependencyDepths = dependencies.map(node => depthByNodeId.get(node.id) || Infinity); - const maxDepth = getMax(dependencyDepths); - endingMax = Math.max(endingMax, maxDepth); - depthByNodeId.set(node.id, maxDepth + 1); - }); + return new GraphEstimator(rootNode).estimate(); + } + + /** + * + * @param {!Node} rootNode + */ + static printGraph(rootNode, widthInCharacters = 100) { + function padRight(str, target, padChar = ' ') { + return str + padChar.repeat(Math.max(target - str.length, 0)); } - const maxDepth = getMax(depthByNodeId.values()); - return maxDepth * Emulation.settings.TYPICAL_MOBILE_THROTTLING_METRICS.latency; + const nodes = []; + rootNode.traverse(node => nodes.push(node)); + nodes.sort((a, b) => a.startTime 
- b.startTime); + + const min = nodes[0].startTime; + const max = nodes.reduce((max, node) => Math.max(max, node.endTime), 0); + + const totalTime = max - min; + const timePerCharacter = totalTime / widthInCharacters; + nodes.forEach(node => { + const offset = Math.round((node.startTime - min) / timePerCharacter); + const length = Math.ceil((node.endTime - node.startTime) / timePerCharacter); + const bar = padRight('', offset) + padRight('', length, '='); + // eslint-disable-next-line + console.log(padRight(bar, widthInCharacters), `| ${node.record._url.slice(0, 30)}`); + }); } /** diff --git a/lighthouse-core/test/audits/predictive-perf-test.js b/lighthouse-core/test/audits/predictive-perf-test.js index 14ba8634ffe3..2a4abac6ca0a 100644 --- a/lighthouse-core/test/audits/predictive-perf-test.js +++ b/lighthouse-core/test/audits/predictive-perf-test.js @@ -26,9 +26,15 @@ describe('Performance: predictive performance audit', () => { }, Runner.instantiateComputedArtifacts()); return PredictivePerf.audit(artifacts).then(output => { - assert.equal(output.score, 97); - assert.equal(Math.round(output.rawValue), 2250); - assert.equal(output.displayValue, '2,250\xa0ms'); + assert.equal(output.score, 66); + assert.equal(Math.round(output.rawValue), 7226); + assert.equal(output.displayValue, '7,230\xa0ms'); + + const valueOf = name => Math.round(output.extendedInfo.value[name]); + assert.equal(valueOf('optimisticFMP'), 1058); + assert.equal(valueOf('pessimisticFMP'), 4704); + assert.equal(valueOf('optimisticTTCI'), 4207); + assert.equal(valueOf('pessimisticTTCI'), 18935); }); }); }); diff --git a/lighthouse-core/test/gather/computed/dependency-graph/estimator/estimator-test.js b/lighthouse-core/test/gather/computed/dependency-graph/estimator/estimator-test.js new file mode 100644 index 000000000000..c6bb56ac3bf9 --- /dev/null +++ b/lighthouse-core/test/gather/computed/dependency-graph/estimator/estimator-test.js @@ -0,0 +1,104 @@ +/** + * @license Copyright 2017 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ */ +'use strict'; + +const Node = require('../../../../../gather/computed/dependency-graph/network-node'); +const Estimator = require('../../../../../gather/computed/dependency-graph/estimator/estimator'); + +const assert = require('assert'); +let nextRequestId = 1; + +function request({requestId, connectionId, transferSize, scheme, timing}) { + requestId = requestId || nextRequestId++; + connectionId = connectionId || 1; + transferSize = transferSize || 1000; + scheme = scheme || 'http'; + + return { + requestId, + connectionId, + transferSize, + parsedURL: {scheme}, + _timing: timing, + }; +} + +/* eslint-env mocha */ +describe('DependencyGraph/Estimator', () => { + describe('.estimate', () => { + it('should estimate basic graphs', () => { + const rootNode = new Node(request({})); + const estimator = new Estimator(rootNode, {fallbackTTFB: 500}); + const result = estimator.estimate(); + // should be 2 RTTs and 500ms for the server response time + assert.equal(result, 300 + 500); + }); + + it('should estimate basic waterfall graphs', () => { + const nodeA = new Node(request({connectionId: 1})); + const nodeB = new Node(request({connectionId: 2})); + const nodeC = new Node(request({connectionId: 3})); + const nodeD = new Node(request({connectionId: 4})); + + nodeA.addDependent(nodeB); + nodeB.addDependent(nodeC); + nodeC.addDependent(nodeD); + + const estimator = new Estimator(nodeA, {fallbackTTFB: 500}); + const result = estimator.estimate(); + // should be 800ms each for A, B, C, D + assert.equal(result, 3200); + }); + + it('should estimate basic parallel requests', () => { + const nodeA = new Node(request({connectionId: 1})); + const nodeB = new Node(request({connectionId: 2})); + const nodeC = new Node(request({connectionId: 3, transferSize: 15000})); + const nodeD = new Node(request({connectionId: 4})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + const estimator = new Estimator(nodeA, {fallbackTTFB: 500}); + const result = estimator.estimate(); + // should be 800ms for A and 950ms for C (2 round trips of downloading) + assert.equal(result, 800 + 950); + }); + + it('should not reuse connections', () => { + const nodeA = new Node(request({connectionId: 1})); + const nodeB = new Node(request({connectionId: 1})); + const nodeC = new Node(request({connectionId: 1})); + const nodeD = new Node(request({connectionId: 1})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + const estimator = new Estimator(nodeA, {fallbackTTFB: 500}); + const result = estimator.estimate(); + // should be 800ms for A and 650ms for the next 3 + assert.equal(result, 800 + 650 * 3); + }); + + it('should adjust throughput based on number of requests', () => { + const nodeA = new Node(request({connectionId: 1})); + const nodeB = new Node(request({connectionId: 2})); + const nodeC = new Node(request({connectionId: 3, transferSize: 15000})); + const nodeD = new Node(request({connectionId: 4})); + + nodeA.addDependent(nodeB); + nodeA.addDependent(nodeC); + nodeA.addDependent(nodeD); + + const estimator = new Estimator(nodeA, {fallbackTTFB: 500}); + const result = estimator.estimate(); + // should be 800ms for A and 950ms for C (2 round trips of downloading) + assert.equal(result, 800 + 950); + }); + }); +}); diff --git a/lighthouse-core/test/gather/computed/dependency-graph/estimator/tcp-connection-test.js b/lighthouse-core/test/gather/computed/dependency-graph/estimator/tcp-connection-test.js new file mode 100644 index 
000000000000..de46d1060f98 --- /dev/null +++ b/lighthouse-core/test/gather/computed/dependency-graph/estimator/tcp-connection-test.js @@ -0,0 +1,214 @@ +/** + * @license Copyright 2017 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + */ +'use strict'; + +// eslint-disable-next-line +const TcpConnection = require('../../../../../gather/computed/dependency-graph/estimator/tcp-connection'); + +const assert = require('assert'); + +/* eslint-env mocha */ +describe('DependencyGraph/Estimator/TcpConnection', () => { + describe('#constructor', () => { + it('should create the connection', () => { + const rtt = 150; + const throughput = 1600 * 1024; + const connection = new TcpConnection(rtt, throughput); + assert.ok(connection); + assert.equal(connection._rtt, rtt); + }); + }); + + describe('#maximumSaturatedConnections', () => { + it('should compute number of supported simulated requests', () => { + const availableThroughput = 1460 * 8 * 10; // 10 TCP segments/second + assert.equal(TcpConnection.maximumSaturatedConnections(100, availableThroughput), 1); + assert.equal(TcpConnection.maximumSaturatedConnections(300, availableThroughput), 3); + assert.equal(TcpConnection.maximumSaturatedConnections(1000, availableThroughput), 10); + }); + }); + + describe('.setWarmed', () => { + it('adjusts the time to download appropriately', () => { + const connection = new TcpConnection(100, Infinity); + assert.equal(connection.simulateDownloadUntil(0).timeElapsed, 300); + connection.setWarmed(true); + assert.equal(connection.simulateDownloadUntil(0).timeElapsed, 100); + }); + }); + + describe('.setCongestionWindow', () => { + it('adjusts the time to download appropriately', () => { + const connection = new TcpConnection(100, Infinity); + assert.deepEqual(connection.simulateDownloadUntil(50000), { + bytesDownloaded: 50000, + congestionWindow: 40, + roundTrips: 5, + timeElapsed: 500, + }); + connection.setCongestionWindow(80); // will download all in one round trip + assert.deepEqual(connection.simulateDownloadUntil(50000), { + bytesDownloaded: 50000, + congestionWindow: 80, + roundTrips: 3, + timeElapsed: 300, + }); + }); + }); + + describe('.simulateDownloadUntil', () => { + context('when maximumTime is not set', () => { + it('should provide the correct values small payload non-SSL', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 200, + }); + }); + + it('should provide the correct values small payload SSL', () => { + const connection = new TcpConnection(100, Infinity, 0, true); + assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 3, + timeElapsed: 300, + }); + }); + + it('should provide the correct values response time', () => { + const responseTime = 78; + const connection = new TcpConnection(100, Infinity, responseTime, true); + 
assert.deepEqual(connection.simulateDownloadUntil(7300), { + bytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 3, + timeElapsed: 300 + responseTime, + }); + }); + + it('should provide the correct values large payload', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 10 * 1000 * 1000; // 10 mb + assert.deepEqual(connection.simulateDownloadUntil(bytesToDownload), { + bytesDownloaded: bytesToDownload, + congestionWindow: 68, + roundTrips: 105, + timeElapsed: 10500, + }); + }); + + it('should provide the correct values resumed small payload', () => { + const connection = new TcpConnection(100, Infinity, 0, true); + assert.deepEqual(connection.simulateDownloadUntil(7300, 250), { + bytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 3, + timeElapsed: 50, + }); + }); + + it('should provide the correct values resumed large payload', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 5 * 1000 * 1000; // 5 mb + connection.setCongestionWindow(68); + assert.deepEqual(connection.simulateDownloadUntil(bytesToDownload, 5234), { + bytesDownloaded: bytesToDownload, + congestionWindow: 68, + roundTrips: 51, // 5 mb / (1460 * 68) + timeElapsed: 5100, + }); + }); + }); + + context('when maximumTime is set', () => { + it('should provide the correct values less than TTFB', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual(connection.simulateDownloadUntil(7300, 0, 68), { + bytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 200, + }); + }); + + it('should provide the correct values just over TTFB', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual(connection.simulateDownloadUntil(7300, 0, 250), { + bytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 200, + }); + }); + + it('should provide the correct values with already elapsed', () => { + const connection = new TcpConnection(100, Infinity, 0, false); + assert.deepEqual(connection.simulateDownloadUntil(7300, 75, 250), { + bytesDownloaded: 7300, + congestionWindow: 10, + roundTrips: 2, + timeElapsed: 125, + }); + }); + + it('should provide the correct values large payloads', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 10 * 1000 * 1000; // 10 mb + assert.deepEqual(connection.simulateDownloadUntil(bytesToDownload, 500, 740), { + bytesDownloaded: 683280, // should be less than 68 * 1460 * 8 + congestionWindow: 68, + roundTrips: 8, + timeElapsed: 800, // skips the handshake because time already elapsed + }); + }); + + it('should all add up', () => { + const connection = new TcpConnection(100, 8 * 1000 * 1000); + const bytesToDownload = 10 * 1000 * 1000; // 10 mb + const firstStoppingPoint = 5234; + const secondStoppingPoint = 315; + const thirdStoppingPoint = 10500 - firstStoppingPoint - secondStoppingPoint; + + const firstSegment = connection.simulateDownloadUntil( + bytesToDownload, + 0, + firstStoppingPoint + ); + const firstOvershoot = firstSegment.timeElapsed - firstStoppingPoint; + + connection.setCongestionWindow(firstSegment.congestionWindow); + const secondSegment = connection.simulateDownloadUntil( + bytesToDownload - firstSegment.bytesDownloaded, + firstSegment.timeElapsed, + secondStoppingPoint - firstOvershoot + ); + const secondOvershoot = firstOvershoot + secondSegment.timeElapsed - secondStoppingPoint; + + 
connection.setCongestionWindow(secondSegment.congestionWindow); + const thirdSegment = connection.simulateDownloadUntil( + bytesToDownload - firstSegment.bytesDownloaded - secondSegment.bytesDownloaded, + firstSegment.timeElapsed + secondSegment.timeElapsed + ); + const thirdOvershoot = secondOvershoot + thirdSegment.timeElapsed - thirdStoppingPoint; + + assert.equal(thirdOvershoot, 0); + assert.equal( + firstSegment.bytesDownloaded + + secondSegment.bytesDownloaded + + thirdSegment.bytesDownloaded, + bytesToDownload + ); + assert.equal( + firstSegment.timeElapsed + secondSegment.timeElapsed + thirdSegment.timeElapsed, + 10500 + ); + }); + }); + }); +}); diff --git a/lighthouse-core/test/gather/computed/dependency-graph/node-test.js b/lighthouse-core/test/gather/computed/dependency-graph/node-test.js index 0b5cb6a21a51..2fc53eafdc3d 100644 --- a/lighthouse-core/test/gather/computed/dependency-graph/node-test.js +++ b/lighthouse-core/test/gather/computed/dependency-graph/node-test.js @@ -147,6 +147,32 @@ describe('DependencyGraph/Node', () => { }); }); + it('should create a copy of a graph with long dependency chains', () => { + // C - D - E - F + // / \ + // A - - - - - - - B + const nodeA = new Node('A'); + const nodeB = new Node('B'); + const nodeC = new Node('C'); + const nodeD = new Node('D'); + const nodeE = new Node('E'); + const nodeF = new Node('F'); + + nodeA.addDependent(nodeB); + nodeF.addDependent(nodeB); + + nodeA.addDependent(nodeC); + nodeC.addDependent(nodeD); + nodeD.addDependent(nodeE); + nodeE.addDependent(nodeF); + + const clone = nodeA.cloneWithRelationships(); + + const clonedIdMap = new Map(); + clone.traverse(node => clonedIdMap.set(node.id, node)); + assert.equal(clonedIdMap.size, 6); + }); + it('should create a copy when not starting at root node', () => { const graph = createComplexGraph(); const cloneD = graph.nodeD.cloneWithRelationships(); diff --git a/lighthouse-core/test/gather/computed/page-dependency-graph-test.js b/lighthouse-core/test/gather/computed/page-dependency-graph-test.js index a909486856fa..eca31b54290b 100644 --- a/lighthouse-core/test/gather/computed/page-dependency-graph-test.js +++ b/lighthouse-core/test/gather/computed/page-dependency-graph-test.js @@ -80,35 +80,4 @@ describe('PageDependencyGraph computed artifact:', () => { assert.deepEqual(nodes[3].getDependencies(), [nodes[0]]); // should depend on rootNode instead }); }); - - describe('#computeGraphDuration', () => { - it('should compute graph duration', () => { - // B - C - D - E - F - // / / \ - // A - * - * - * - * G - H - - const nodeA = new Node('A'); - const nodeB = new Node('B'); - const nodeC = new Node('C'); - const nodeD = new Node('D'); - const nodeE = new Node('E'); - const nodeF = new Node('F'); - const nodeG = new Node('G'); - const nodeH = new Node('H'); - - nodeA.addDependent(nodeB); - nodeA.addDependent(nodeE); - - nodeB.addDependent(nodeC); - nodeC.addDependent(nodeD); - nodeD.addDependent(nodeE); - nodeE.addDependent(nodeF); - nodeF.addDependent(nodeG); - - nodeG.addDependent(nodeH); - - const result = PageDependencyGraph.computeGraphDuration(nodeA); - assert.equal(result, 4500); // 7 hops * ~560ms latency/hop - }); - }); });
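
Reviewer note: for anyone wanting to exercise the new estimator outside of the audit, here is a minimal sketch that mirrors the shapes used in estimator-test.js above. The record literal and option values are illustrative only (omitting options falls back to the typical mobile throttling RTT/throughput), and the require paths are shown relative to the repository root.

const NetworkNode = require('./lighthouse-core/gather/computed/dependency-graph/network-node');
const Estimator = require('./lighthouse-core/gather/computed/dependency-graph/estimator/estimator');

// Minimal stand-in for a WebInspector.NetworkRequest, carrying only the fields the estimator reads.
const makeRecord = (requestId, connectionId, transferSize) =>
  ({requestId, connectionId, transferSize, parsedURL: {scheme: 'http'}, _timing: null});

// A -> B waterfall on two separate (cold) connections.
const nodeA = new NetworkNode(makeRecord(1, 1, 1000));
const nodeB = new NetworkNode(makeRecord(2, 2, 1000));
nodeA.addDependent(nodeB);

// With no timing data on the records, each connection uses the fallback TTFB as its
// estimated server response time; estimate() returns the total estimated duration in ms.
const estimator = new Estimator(nodeA, {fallbackTTFB: 500});
console.log(estimator.estimate());

As in the waterfall test above, each request on its own cold connection should cost roughly 800ms under these settings (2 RTTs plus the 500ms fallback server response time), so this two-request chain estimates to about 1,600ms.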