core(optimized-images): cap execution to 5 seconds #7237

Merged · 4 commits · Mar 7, 2019

Changes from 3 commits
47 changes: 41 additions & 6 deletions lighthouse-core/audits/byte-efficiency/uses-optimized-images.js
@@ -35,7 +35,7 @@ class UsesOptimizedImages extends ByteEfficiencyAudit {
title: str_(UIStrings.title),
description: str_(UIStrings.description),
scoreDisplayMode: ByteEfficiencyAudit.SCORING_MODES.NUMERIC,
requiredArtifacts: ['OptimizedImages', 'devtoolsLogs', 'traces'],
requiredArtifacts: ['OptimizedImages', 'ImageElements', 'devtoolsLogs', 'traces'],
};
}

@@ -49,12 +49,31 @@ class UsesOptimizedImages extends ByteEfficiencyAudit {
return {bytes, percent};
}

/**
* @param {LH.Artifacts.ImageElement} imageElement
* @return {number}
*/
static estimateJPEGSizeFromDimensions(imageElement) {
const totalPixels = imageElement.naturalWidth * imageElement.naturalHeight;
// Even JPEGs with lots of detail can usually be compressed down to <1 byte per pixel
// Using 4:2:2 subsampling already gets an uncompressed bitmap to 2 bytes per pixel.
// The compression ratio for JPEG is usually somewhere around 10:1 depending on content, so
// 8:1 is a reasonable expectation for web content which is 1.5MB for a 6MP image.
const expectedBytesPerPixel = 2 * 1 / 8;
return Math.round(totalPixels * expectedBytesPerPixel);
}

/**
* @param {LH.Artifacts} artifacts
* @return {ByteEfficiencyAudit.ByteEfficiencyProduct}
*/
static audit_(artifacts) {
const pageURL = artifacts.URL.finalUrl;
const images = artifacts.OptimizedImages;
const imageElements = artifacts.ImageElements;
/** @type {Map<string, LH.Artifacts.ImageElement>} */
const imageElementsByURL = new Map();
imageElements.forEach(img => imageElementsByURL.set(img.src, img));

/** @type {Array<{url: string, fromProtocol: boolean, isCrossOrigin: boolean, totalBytes: number, wastedBytes: number}>} */
const items = [];
@@ -63,18 +82,34 @@ class UsesOptimizedImages extends ByteEfficiencyAudit {
if (image.failed) {
warnings.push(`Unable to decode ${URL.getURLDisplayName(image.url)}`);
continue;
} else if (/(jpeg|bmp)/.test(image.mimeType) === false ||
image.originalSize < image.jpegSize + IGNORE_THRESHOLD_IN_BYTES) {
} else if (/(jpeg|bmp)/.test(image.mimeType) === false) {
continue;
}

let jpegSize = image.jpegSize;
let fromProtocol = true;

if (typeof jpegSize === 'undefined') {
const imageElement = imageElementsByURL.get(image.url);
if (!imageElement) {
warnings.push(`Unable to locate resource ${URL.getURLDisplayName(image.url)}`);
continue;
}

jpegSize = UsesOptimizedImages.estimateJPEGSizeFromDimensions(imageElement);
fromProtocol = false;
}

if (image.originalSize < jpegSize + IGNORE_THRESHOLD_IN_BYTES) continue;

const url = URL.elideDataURI(image.url);
const jpegSavings = UsesOptimizedImages.computeSavings(image);
const isCrossOrigin = !URL.originsMatch(pageURL, image.url);
const jpegSavings = UsesOptimizedImages.computeSavings({...image, jpegSize});

items.push({
url,
fromProtocol: image.fromProtocol,
isCrossOrigin: !image.isSameOrigin,
fromProtocol,
isCrossOrigin,
totalBytes: image.originalSize,
wastedBytes: jpegSavings.bytes,
});
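To sanity-check the rationale in `estimateJPEGSizeFromDimensions` above, here is the arithmetic as a standalone sketch (the 3000×2000 dimensions are hypothetical, not taken from the PR): a 2-bytes-per-pixel bitmap baseline at an assumed 8:1 compression ratio gives 0.25 bytes per pixel, which is the ~1.5MB-per-6MP figure the comment cites.

```js
// Standalone sketch of the dimension-based JPEG size estimate (hypothetical 6MP image).
const naturalWidth = 3000;
const naturalHeight = 2000;
const totalPixels = naturalWidth * naturalHeight;           // 6,000,000 pixels
const expectedBytesPerPixel = 2 * 1 / 8;                    // 2 B/px baseline, ~8:1 JPEG compression
const estimatedJpegBytes = Math.round(totalPixels * expectedBytesPerPixel);
console.log(estimatedJpegBytes);                            // 1500000 bytes, i.e. ~1.5MB
```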
45 changes: 40 additions & 5 deletions lighthouse-core/audits/byte-efficiency/uses-webp-images.js
@@ -49,12 +49,32 @@ class UsesWebPImages extends ByteEfficiencyAudit {
return {bytes, percent};
}

/**
* @param {LH.Artifacts.ImageElement} imageElement
* @return {number}
*/
static estimateWebPSizeFromDimensions(imageElement) {
const totalPixels = imageElement.naturalWidth * imageElement.naturalHeight;
// See uses-optimized-images for the rationale behind our 2 byte-per-pixel baseline and
// JPEG compression ratio of 8:1.
// WebP usually gives ~20% additional savings on top of that, so we will use 10:1.
// This is quite pessimistic as their study shows a photographic compression ratio of ~29:1.
// https://developers.google.com/speed/webp/docs/webp_lossless_alpha_study#results
const expectedBytesPerPixel = 2 * 1 / 10;
return Math.round(totalPixels * expectedBytesPerPixel);
}

/**
* @param {LH.Artifacts} artifacts
* @return {ByteEfficiencyAudit.ByteEfficiencyProduct}
*/
static audit_(artifacts) {
const pageURL = artifacts.URL.finalUrl;
const images = artifacts.OptimizedImages;
const imageElements = artifacts.ImageElements;
/** @type {Map<string, LH.Artifacts.ImageElement>} */
const imageElementsByURL = new Map();
imageElements.forEach(img => imageElementsByURL.set(img.src, img));

/** @type {Array<LH.Audit.ByteEfficiencyItem>} */
const items = [];
@@ -63,17 +83,32 @@ class UsesWebPImages extends ByteEfficiencyAudit {
if (image.failed) {
warnings.push(`Unable to decode ${URL.getURLDisplayName(image.url)}`);
continue;
} else if (image.originalSize < image.webpSize + IGNORE_THRESHOLD_IN_BYTES) {
continue;
}

let webpSize = image.webpSize;
let fromProtocol = true;

if (typeof webpSize === 'undefined') {
const imageElement = imageElementsByURL.get(image.url);
if (!imageElement) {
warnings.push(`Unable to locate resource ${URL.getURLDisplayName(image.url)}`);
continue;
}

webpSize = UsesWebPImages.estimateWebPSizeFromDimensions(imageElement);
fromProtocol = false;
}

if (image.originalSize < webpSize + IGNORE_THRESHOLD_IN_BYTES) continue;

const url = URL.elideDataURI(image.url);
const webpSavings = UsesWebPImages.computeSavings(image);
const isCrossOrigin = !URL.originsMatch(pageURL, image.url);
const webpSavings = UsesWebPImages.computeSavings({...image, webpSize: webpSize});

items.push({
url,
fromProtocol: image.fromProtocol,
isCrossOrigin: !image.isSameOrigin,
fromProtocol,
isCrossOrigin,
totalBytes: image.originalSize,
wastedBytes: webpSavings.bytes,
});
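For comparison, the same hypothetical 6MP image run through the WebP heuristic (again a sketch, not part of the diff): the assumed 10:1 ratio gives 0.2 bytes per pixel, roughly 20% below the JPEG estimate, and still far more conservative than the ~29:1 photographic ratio reported in the linked WebP study.

```js
// WebP estimate for the same hypothetical 6MP image, next to the study's ~29:1 ratio.
const totalPixels = 3000 * 2000;
const webpEstimate = Math.round(totalPixels * (2 * 1 / 10));  // 1200000 bytes (~1.2MB)
const studyEstimate = Math.round(totalPixels * (2 * 1 / 29)); // ~413793 bytes (~0.4MB)
console.log({webpEstimate, studyEstimate});
```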
136 changes: 35 additions & 101 deletions lighthouse-core/gather/gatherers/dobetterweb/optimized-images.js
@@ -16,70 +16,32 @@ const NetworkRequest = require('../../../lib/network-request');
const Sentry = require('../../../lib/sentry');
const Driver = require('../../driver.js'); // eslint-disable-line no-unused-vars

// Image encoding can be slow and we don't want to spend forever on it.
// Cap our encoding to 5 seconds, anything after that will be estimated.
const MAX_TIME_TO_SPEND_ENCODING = 5000;
// Cap our image file size at 2MB, anything bigger than that will be estimated.
const MAX_RESOURCE_SIZE_TO_ENCODE = 2000 * 1024;

const JPEG_QUALITY = 0.92;
const WEBP_QUALITY = 0.85;

const MINIMUM_IMAGE_SIZE = 4096; // savings of <4 KB will be ignored in the audit anyway

const IMAGE_REGEX = /^image\/((x|ms|x-ms)-)?(png|bmp|jpeg)$/;

/** @typedef {{isSameOrigin: boolean, isBase64DataUri: boolean, requestId: string, url: string, mimeType: string, resourceSize: number}} SimplifiedNetworkRecord */

/* global document, Image, atob */

/**
* Runs in the context of the browser
* @param {string} url
* @return {Promise<{jpeg: {base64: number, binary: number}, webp: {base64: number, binary: number}}>}
*/
/* istanbul ignore next */
function getOptimizedNumBytes(url) {
return new Promise(function(resolve, reject) {
const img = new Image();
const canvas = document.createElement('canvas');
const context = canvas.getContext('2d');
if (!context) {
return reject(new Error('unable to create canvas context'));
}

/**
* @param {'image/jpeg'|'image/webp'} type
* @param {number} quality
* @return {{base64: number, binary: number}}
*/
function getTypeStats(type, quality) {
const dataURI = canvas.toDataURL(type, quality);
const base64 = dataURI.slice(dataURI.indexOf(',') + 1);
return {base64: base64.length, binary: atob(base64).length};
}

img.addEventListener('error', reject);
img.addEventListener('load', () => {
try {
canvas.height = img.height;
canvas.width = img.width;
context.drawImage(img, 0, 0);

const jpeg = getTypeStats('image/jpeg', 0.92);
const webp = getTypeStats('image/webp', 0.85);

resolve({jpeg, webp});
} catch (err) {
reject(err);
}
}, false);

img.src = url;
});
}
/** @typedef {{requestId: string, url: string, mimeType: string, resourceSize: number}} SimplifiedNetworkRecord */

class OptimizedImages extends Gatherer {
constructor() {
super();
this._encodingStartAt = 0;
}

/**
* @param {string} pageUrl
* @param {Array<LH.Artifacts.NetworkRequest>} networkRecords
* @return {Array<SimplifiedNetworkRecord>}
*/
static filterImageRequests(pageUrl, networkRecords) {
static filterImageRequests(networkRecords) {
/** @type {Set<string>} */
const seenUrls = new Set();
return networkRecords.reduce((prev, record) => {
@@ -90,14 +52,10 @@ class OptimizedImages extends Gatherer {
seenUrls.add(record.url);
const isOptimizableImage = record.resourceType === NetworkRequest.TYPES.Image &&
IMAGE_REGEX.test(record.mimeType);
const isSameOrigin = URL.originsMatch(pageUrl, record.url);
const isBase64DataUri = /^data:.{2,40}base64\s*,/.test(record.url);

const actualResourceSize = Math.min(record.resourceSize || 0, record.transferSize || 0);
if (isOptimizableImage && actualResourceSize > MINIMUM_IMAGE_SIZE) {
prev.push({
isSameOrigin,
isBase64DataUri,
requestId: record.requestId,
url: record.url,
mimeType: record.mimeType,
@@ -126,48 +84,24 @@
/**
* @param {Driver} driver
* @param {SimplifiedNetworkRecord} networkRecord
* @return {Promise<?{fromProtocol: boolean, originalSize: number, jpegSize: number, webpSize: number}>}
* @return {Promise<{originalSize: number, jpegSize?: number, webpSize?: number}>}
*/
calculateImageStats(driver, networkRecord) {
return Promise.resolve(networkRecord.requestId).then(requestId => {
if (this._getEncodedResponseUnsupported) return;
return this._getEncodedResponse(driver, requestId, 'jpeg').then(jpegData => {
return this._getEncodedResponse(driver, requestId, 'webp').then(webpData => {
return {
fromProtocol: true,
originalSize: networkRecord.resourceSize,
jpegSize: jpegData.encodedSize,
webpSize: webpData.encodedSize,
};
});
}).catch(err => {
if (/wasn't found/.test(err.message)) {
// Mark non-support so we don't keep attempting the protocol method over and over
this._getEncodedResponseUnsupported = true;
} else {
throw err;
}
});
}).then(result => {
if (result) return result;

// Take the slower fallback path if getEncodedResponse didn't work
// CORS canvas tainting doesn't support cross-origin images, so skip them early
if (!networkRecord.isSameOrigin && !networkRecord.isBase64DataUri) return null;

const script = `(${getOptimizedNumBytes.toString()})(${JSON.stringify(networkRecord.url)})`;
return driver.evaluateAsync(script).then(stats => {
if (!stats) return null;
const isBase64DataUri = networkRecord.isBase64DataUri;
const base64Length = networkRecord.url.length - networkRecord.url.indexOf(',') - 1;
return {
fromProtocol: false,
originalSize: isBase64DataUri ? base64Length : networkRecord.resourceSize,
jpegSize: isBase64DataUri ? stats.jpeg.base64 : stats.jpeg.binary,
webpSize: isBase64DataUri ? stats.webp.base64 : stats.webp.binary,
};
});
});
async calculateImageStats(driver, networkRecord) {
const originalSize = networkRecord.resourceSize;
// We'll fall back to our audit-side heuristic for images that are too large or once we've hit our time cap.
if (Date.now() - this._encodingStartAt > MAX_TIME_TO_SPEND_ENCODING ||
originalSize > MAX_RESOURCE_SIZE_TO_ENCODE) {
return {originalSize, jpegSize: undefined, webpSize: undefined};
}

const jpegData = await this._getEncodedResponse(driver, networkRecord.requestId, 'jpeg');
const webpData = await this._getEncodedResponse(driver, networkRecord.requestId, 'webp');

return {
originalSize,
jpegSize: jpegData.encodedSize,
webpSize: webpData.encodedSize,
};
}

/**
@@ -176,16 +110,14 @@
* @return {Promise<LH.Artifacts['OptimizedImages']>}
*/
async computeOptimizedImages(driver, imageRecords) {
this._encodingStartAt = Date.now();

/** @type {LH.Artifacts['OptimizedImages']} */
const results = [];

for (const record of imageRecords) {
try {
const stats = await this.calculateImageStats(driver, record);
if (stats === null) {
continue;
}

/** @type {LH.Artifacts.OptimizedImage} */
const image = {failed: false, ...stats, ...record};
results.push(image);
@@ -214,7 +146,9 @@
*/
afterPass(passContext, loadData) {
const networkRecords = loadData.networkRecords;
const imageRecords = OptimizedImages.filterImageRequests(passContext.url, networkRecords);
const imageRecords = OptimizedImages
.filterImageRequests(networkRecords)
.sort((a, b) => b.resourceSize - a.resourceSize);

return Promise.resolve()
.then(_ => this.computeOptimizedImages(passContext.driver, imageRecords))
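To make the new gatherer contract concrete: once the 5-second encoding budget (`MAX_TIME_TO_SPEND_ENCODING`) or the 2MB size cap (`MAX_RESOURCE_SIZE_TO_ENCODE`) is hit, `jpegSize` and `webpSize` come back `undefined`, and the audits above estimate savings from `ImageElements` dimensions instead. Below is a sketch of two possible `OptimizedImages` artifact entries after this change; all values and URLs are invented for illustration.

```js
// Hypothetical OptimizedImages artifact entries after this change (sizes in bytes, values invented).
const encodedViaProtocol = {
  failed: false,
  requestId: '1000.1',
  url: 'https://example.com/hero.jpg',
  mimeType: 'image/jpeg',
  resourceSize: 800000,
  originalSize: 800000,
  jpegSize: 500000, // encoded via the protocol within the time/size budget
  webpSize: 450000,
};

const skippedByCap = {
  failed: false,
  requestId: '1000.2',
  url: 'https://example.com/huge.png',
  mimeType: 'image/png',
  resourceSize: 3500000, // larger than MAX_RESOURCE_SIZE_TO_ENCODE, so encoding was skipped
  originalSize: 3500000,
  jpegSize: undefined,   // audits fall back to estimateJPEGSizeFromDimensions / estimateWebPSizeFromDimensions
  webpSize: undefined,
};

console.log(encodedViaProtocol, skippedByCap);
```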