Refactor routing in App component to enhance navigation and improve error handling by integrating dynamic routes and updating the NotFound route.

This commit is contained in:
becarta
2025-05-23 12:43:00 +02:00
parent f40db0f5c9
commit a544759a3b
11127 changed files with 1647032 additions and 0 deletions

174
node_modules/sharp/lib/channel.js generated vendored Normal file

@@ -0,0 +1,174 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const is = require('./is');
/**
* Boolean operations for bandbool.
* @private
*/
const bool = {
and: 'and',
or: 'or',
eor: 'eor'
};
/**
* Remove alpha channel, if any. This is a no-op if the image does not have an alpha channel.
*
* See also {@link /api-operation#flatten|flatten}.
*
* @example
* sharp('rgba.png')
* .removeAlpha()
* .toFile('rgb.png', function(err, info) {
* // rgb.png is a 3 channel image without an alpha channel
* });
*
* @returns {Sharp}
*/
function removeAlpha () {
this.options.removeAlpha = true;
return this;
}
/**
* Ensure the output image has an alpha transparency channel.
* If missing, the added alpha channel will have the specified
* transparency level, defaulting to fully-opaque (1).
* This is a no-op if the image already has an alpha channel.
*
* @since 0.21.2
*
* @example
* // rgba.png will be a 4 channel image with a fully-opaque alpha channel
* await sharp('rgb.jpg')
* .ensureAlpha()
* .toFile('rgba.png')
*
* @example
* // rgba is a 4 channel image with a fully-transparent alpha channel
* const rgba = await sharp(rgb)
* .ensureAlpha(0)
* .toBuffer();
*
* @param {number} [alpha=1] - alpha transparency level (0=fully-transparent, 1=fully-opaque)
* @returns {Sharp}
* @throws {Error} Invalid alpha transparency level
*/
function ensureAlpha (alpha) {
if (is.defined(alpha)) {
if (is.number(alpha) && is.inRange(alpha, 0, 1)) {
this.options.ensureAlpha = alpha;
} else {
throw is.invalidParameterError('alpha', 'number between 0 and 1', alpha);
}
} else {
this.options.ensureAlpha = 1;
}
return this;
}
/**
* Extract a single channel from a multi-channel image.
*
* @example
* // green.jpg is a greyscale image containing the green channel of the input
* await sharp(input)
* .extractChannel('green')
* .toFile('green.jpg');
*
* @example
* // red1 is the red value of the first pixel, red2 the second pixel etc.
* const [red1, red2, ...] = await sharp(input)
* .extractChannel(0)
* .raw()
* .toBuffer();
*
* @param {number|string} channel - zero-indexed channel/band number to extract, or `red`, `green`, `blue` or `alpha`.
* @returns {Sharp}
* @throws {Error} Invalid channel
*/
function extractChannel (channel) {
const channelMap = { red: 0, green: 1, blue: 2, alpha: 3 };
if (Object.keys(channelMap).includes(channel)) {
channel = channelMap[channel];
}
if (is.integer(channel) && is.inRange(channel, 0, 4)) {
this.options.extractChannel = channel;
} else {
throw is.invalidParameterError('channel', 'integer or one of: red, green, blue, alpha', channel);
}
return this;
}
/**
* Join one or more channels to the image.
* The meaning of the added channels depends on the output colourspace, set with `toColourspace()`.
* By default the output image will be web-friendly sRGB, with additional channels interpreted as alpha channels.
* Channel ordering follows vips convention:
* - sRGB: 0: Red, 1: Green, 2: Blue, 3: Alpha.
* - CMYK: 0: Cyan, 1: Magenta, 2: Yellow, 3: Black, 4: Alpha.
*
* Buffers may be any of the image formats supported by sharp.
* For raw pixel input, the `options` object should contain a `raw` attribute, which follows the format of the attribute of the same name in the `sharp()` constructor.
*
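* @example
* // A hedged sketch, not from the upstream docs: join a single raw
* // one-channel buffer as an alpha channel. `rgbInput`, `alphaPixels`
* // and the dimensions are illustrative placeholders.
* await sharp(rgbInput)
* .joinChannel(alphaPixels, {
* raw: { width: 300, height: 200, channels: 1 }
* })
* .toFile('rgba.png');
*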
* @param {Array<string|Buffer>|string|Buffer} images - one or more images (file paths, Buffers).
* @param {Object} options - image options, see `sharp()` constructor.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function joinChannel (images, options) {
if (Array.isArray(images)) {
images.forEach(function (image) {
this.options.joinChannelIn.push(this._createInputDescriptor(image, options));
}, this);
} else {
this.options.joinChannelIn.push(this._createInputDescriptor(images, options));
}
return this;
}
/**
* Perform a bitwise boolean operation on all input image channels (bands) to produce a single channel output image.
*
* @example
* sharp('3-channel-rgb-input.png')
* .bandbool(sharp.bool.and)
* .toFile('1-channel-output.png', function (err, info) {
* // The output will be a single channel image where each pixel `P = R & G & B`.
* // If `I(1,1) = [247, 170, 14] = [0b11110111, 0b10101010, 0b00001111]`
* // then `O(1,1) = 0b11110111 & 0b10101010 & 0b00001111 = 0b00000010 = 2`.
* });
*
* @param {string} boolOp - one of `and`, `or` or `eor` to perform that bitwise operation, like the C logic operators `&`, `|` and `^` respectively.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function bandbool (boolOp) {
if (is.string(boolOp) && is.inArray(boolOp, ['and', 'or', 'eor'])) {
this.options.bandBoolOp = boolOp;
} else {
throw is.invalidParameterError('boolOp', 'one of: and, or, eor', boolOp);
}
return this;
}
/**
* Decorate the Sharp prototype with channel-related functions.
* @private
*/
module.exports = function (Sharp) {
Object.assign(Sharp.prototype, {
// Public instance functions
removeAlpha,
ensureAlpha,
extractChannel,
joinChannel,
bandbool
});
// Class attributes
Sharp.bool = bool;
};

180
node_modules/sharp/lib/colour.js generated vendored Normal file

@@ -0,0 +1,180 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const color = require('color');
const is = require('./is');
/**
* Colourspaces.
* @private
*/
const colourspace = {
multiband: 'multiband',
'b-w': 'b-w',
bw: 'b-w',
cmyk: 'cmyk',
srgb: 'srgb'
};
/**
* Tint the image using the provided colour.
* An alpha channel may be present and will be unchanged by the operation.
*
* @example
* const output = await sharp(input)
* .tint({ r: 255, g: 240, b: 16 })
* .toBuffer();
*
* @param {string|Object} tint - Parsed by the [color](https://www.npmjs.org/package/color) module.
* @returns {Sharp}
* @throws {Error} Invalid parameter
*/
function tint (tint) {
this._setBackgroundColourOption('tint', tint);
return this;
}
/**
* Convert to 8-bit greyscale; 256 shades of grey.
* This is a linear operation. If the input image is in a non-linear colour space such as sRGB, use `gamma()` with `greyscale()` for the best results.
* By default the output image will be web-friendly sRGB and contain three (identical) color channels.
* This may be overridden by other sharp operations such as `toColourspace('b-w')`,
* which will produce an output image containing one color channel.
* An alpha channel may be present, and will be unchanged by the operation.
*
* @example
* const output = await sharp(input).greyscale().toBuffer();
*
* @param {Boolean} [greyscale=true]
* @returns {Sharp}
*/
function greyscale (greyscale) {
this.options.greyscale = is.bool(greyscale) ? greyscale : true;
return this;
}
/**
* Alternative spelling of `greyscale`.
* @param {Boolean} [grayscale=true]
* @returns {Sharp}
*/
function grayscale (grayscale) {
return this.greyscale(grayscale);
}
/**
* Set the pipeline colourspace.
*
* The input image will be converted to the provided colourspace at the start of the pipeline.
* All operations will use this colourspace before converting to the output colourspace,
* as defined by {@link #tocolourspace|toColourspace}.
*
* @since 0.29.0
*
* @example
* // Run pipeline in 16 bits per channel RGB while converting final result to 8 bits per channel sRGB.
* await sharp(input)
* .pipelineColourspace('rgb16')
* .toColourspace('srgb')
* .toFile('16bpc-pipeline-to-8bpc-output.png')
*
* @param {string} [colourspace] - pipeline colourspace e.g. `rgb16`, `scrgb`, `lab`, `grey16` [...](https://github.com/libvips/libvips/blob/41cff4e9d0838498487a00623462204eb10ee5b8/libvips/iofuncs/enumtypes.c#L774)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function pipelineColourspace (colourspace) {
if (!is.string(colourspace)) {
throw is.invalidParameterError('colourspace', 'string', colourspace);
}
this.options.colourspacePipeline = colourspace;
return this;
}
/**
* Alternative spelling of `pipelineColourspace`.
* @param {string} [colorspace] - pipeline colorspace.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function pipelineColorspace (colorspace) {
return this.pipelineColourspace(colorspace);
}
/**
* Set the output colourspace.
* By default output image will be web-friendly sRGB, with additional channels interpreted as alpha channels.
*
* @example
* // Output 16 bits per pixel RGB
* await sharp(input)
* .toColourspace('rgb16')
* .toFile('16-bpp.png')
*
* @param {string} [colourspace] - output colourspace e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://github.com/libvips/libvips/blob/3c0bfdf74ce1dc37a6429bed47fa76f16e2cd70a/libvips/iofuncs/enumtypes.c#L777-L794)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function toColourspace (colourspace) {
if (!is.string(colourspace)) {
throw is.invalidParameterError('colourspace', 'string', colourspace);
}
this.options.colourspace = colourspace;
return this;
}
/**
* Alternative spelling of `toColourspace`.
* @param {string} [colorspace] - output colorspace.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function toColorspace (colorspace) {
return this.toColourspace(colorspace);
}
/**
* Update a colour attribute of the this.options Object.
* @private
* @param {string} key
* @param {string|Object} value
* @throws {Error} Invalid value
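* @example
* // Illustrative sketch (not in the upstream docs), assuming the `color`
* // module parses CSS hex strings:
* // this._setBackgroundColourOption('tint', '#ff6600');
* // => this.options.tint === [255, 102, 0, 255]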
*/
function _setBackgroundColourOption (key, value) {
if (is.defined(value)) {
if (is.object(value) || is.string(value)) {
const colour = color(value);
this.options[key] = [
colour.red(),
colour.green(),
colour.blue(),
Math.round(colour.alpha() * 255)
];
} else {
throw is.invalidParameterError('background', 'object or string', value);
}
}
}
/**
* Decorate the Sharp prototype with colour-related functions.
* @private
*/
module.exports = function (Sharp) {
Object.assign(Sharp.prototype, {
// Public
tint,
greyscale,
grayscale,
pipelineColourspace,
pipelineColorspace,
toColourspace,
toColorspace,
// Private
_setBackgroundColourOption
});
// Class attributes
Sharp.colourspace = colourspace;
Sharp.colorspace = colourspace;
};

210
node_modules/sharp/lib/composite.js generated vendored Normal file

@@ -0,0 +1,210 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const is = require('./is');
/**
* Blend modes.
* @member
* @private
*/
const blend = {
clear: 'clear',
source: 'source',
over: 'over',
in: 'in',
out: 'out',
atop: 'atop',
dest: 'dest',
'dest-over': 'dest-over',
'dest-in': 'dest-in',
'dest-out': 'dest-out',
'dest-atop': 'dest-atop',
xor: 'xor',
add: 'add',
saturate: 'saturate',
multiply: 'multiply',
screen: 'screen',
overlay: 'overlay',
darken: 'darken',
lighten: 'lighten',
'colour-dodge': 'colour-dodge',
'color-dodge': 'colour-dodge',
'colour-burn': 'colour-burn',
'color-burn': 'colour-burn',
'hard-light': 'hard-light',
'soft-light': 'soft-light',
difference: 'difference',
exclusion: 'exclusion'
};
/**
* Composite image(s) over the processed (resized, extracted etc.) image.
*
* The images to composite must be the same size or smaller than the processed image.
* If both `top` and `left` options are provided, they take precedence over `gravity`.
*
* Any resize, rotate or extract operations in the same processing pipeline
* will always be applied to the input image before composition.
*
* The `blend` option can be one of `clear`, `source`, `over`, `in`, `out`, `atop`,
* `dest`, `dest-over`, `dest-in`, `dest-out`, `dest-atop`,
* `xor`, `add`, `saturate`, `multiply`, `screen`, `overlay`, `darken`, `lighten`,
* `colour-dodge`, `color-dodge`, `colour-burn`, `color-burn`,
* `hard-light`, `soft-light`, `difference`, `exclusion`.
*
* More information about blend modes can be found at
* https://www.libvips.org/API/current/libvips-conversion.html#VipsBlendMode
* and https://www.cairographics.org/operators/
*
* @since 0.22.0
*
* @example
* await sharp(background)
* .composite([
* { input: layer1, gravity: 'northwest' },
* { input: layer2, gravity: 'southeast' },
* ])
* .toFile('combined.png');
*
* @example
* const output = await sharp('input.gif', { animated: true })
* .composite([
* { input: 'overlay.png', tile: true, blend: 'saturate' }
* ])
* .toBuffer();
*
* @example
* sharp('input.png')
* .rotate(180)
* .resize(300)
* .flatten( { background: '#ff6600' } )
* .composite([{ input: 'overlay.png', gravity: 'southeast' }])
* .sharpen()
* .withMetadata()
* .webp( { quality: 90 } )
* .toBuffer()
* .then(function(outputBuffer) {
* // outputBuffer contains upside down, 300px wide, alpha channel flattened
* // onto orange background, composited with overlay.png with SE gravity,
* // sharpened, with metadata, 90% quality WebP image data. Phew!
* });
*
* @param {Object[]} images - Ordered list of images to composite
* @param {Buffer|String} [images[].input] - Buffer containing image data, String containing the path to an image file, or Create object (see below)
* @param {Object} [images[].input.create] - describes a blank overlay to be created.
* @param {Number} [images[].input.create.width]
* @param {Number} [images[].input.create.height]
* @param {Number} [images[].input.create.channels] - 3-4
* @param {String|Object} [images[].input.create.background] - parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
* @param {Object} [images[].input.text] - describes a new text image to be created.
* @param {string} [images[].input.text.text] - text to render as a UTF-8 string. It can contain Pango markup, for example `<i>Le</i>Monde`.
* @param {string} [images[].input.text.font] - font name to render with.
* @param {string} [images[].input.text.fontfile] - absolute filesystem path to a font file that can be used by `font`.
* @param {number} [images[].input.text.width=0] - integral number of pixels to word-wrap at. Lines of text wider than this will be broken at word boundaries.
* @param {number} [images[].input.text.height=0] - integral number of pixels high. When defined, `dpi` will be ignored and the text will automatically fit the pixel resolution defined by `width` and `height`. Will be ignored if `width` is not specified or set to 0.
* @param {string} [images[].input.text.align='left'] - text alignment (`'left'`, `'centre'`, `'center'`, `'right'`).
* @param {boolean} [images[].input.text.justify=false] - set this to true to apply justification to the text.
* @param {number} [images[].input.text.dpi=72] - the resolution (size) at which to render the text. Does not take effect if `height` is specified.
* @param {boolean} [images[].input.text.rgba=false] - set this to true to enable RGBA output. This is useful for colour emoji rendering, or support for Pango markup features like `<span foreground="red">Red!</span>`.
* @param {number} [images[].input.text.spacing=0] - text line height in points. Will use the font line height if none is specified.
* @param {String} [images[].blend='over'] - how to blend this image with the image below.
* @param {String} [images[].gravity='centre'] - gravity at which to place the overlay.
* @param {Number} [images[].top] - the pixel offset from the top edge.
* @param {Number} [images[].left] - the pixel offset from the left edge.
* @param {Boolean} [images[].tile=false] - set to true to repeat the overlay image across the entire image with the given `gravity`.
* @param {Boolean} [images[].premultiplied=false] - set to true to avoid premultiplying the image below. Equivalent to the `--premultiplied` vips option.
* @param {Number} [images[].density=72] - number representing the DPI for vector overlay image.
* @param {Object} [images[].raw] - describes overlay when using raw pixel data.
* @param {Number} [images[].raw.width]
* @param {Number} [images[].raw.height]
* @param {Number} [images[].raw.channels]
* @param {boolean} [images[].animated=false] - Set to `true` to read all frames/pages of an animated image.
* @param {string} [images[].failOn='warning'] - @see {@link /api-constructor#parameters|constructor parameters}
* @param {number|boolean} [images[].limitInputPixels=268402689] - @see {@link /api-constructor#parameters|constructor parameters}
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function composite (images) {
if (!Array.isArray(images)) {
throw is.invalidParameterError('images to composite', 'array', images);
}
this.options.composite = images.map(image => {
if (!is.object(image)) {
throw is.invalidParameterError('image to composite', 'object', image);
}
const inputOptions = this._inputOptionsFromObject(image);
const composite = {
input: this._createInputDescriptor(image.input, inputOptions, { allowStream: false }),
blend: 'over',
tile: false,
left: 0,
top: 0,
hasOffset: false,
gravity: 0,
premultiplied: false
};
if (is.defined(image.blend)) {
if (is.string(blend[image.blend])) {
composite.blend = blend[image.blend];
} else {
throw is.invalidParameterError('blend', 'valid blend name', image.blend);
}
}
if (is.defined(image.tile)) {
if (is.bool(image.tile)) {
composite.tile = image.tile;
} else {
throw is.invalidParameterError('tile', 'boolean', image.tile);
}
}
if (is.defined(image.left)) {
if (is.integer(image.left)) {
composite.left = image.left;
} else {
throw is.invalidParameterError('left', 'integer', image.left);
}
}
if (is.defined(image.top)) {
if (is.integer(image.top)) {
composite.top = image.top;
} else {
throw is.invalidParameterError('top', 'integer', image.top);
}
}
if (is.defined(image.top) !== is.defined(image.left)) {
throw new Error('Expected both left and top to be set');
} else {
composite.hasOffset = is.integer(image.top) && is.integer(image.left);
}
if (is.defined(image.gravity)) {
if (is.integer(image.gravity) && is.inRange(image.gravity, 0, 8)) {
composite.gravity = image.gravity;
} else if (is.string(image.gravity) && is.integer(this.constructor.gravity[image.gravity])) {
composite.gravity = this.constructor.gravity[image.gravity];
} else {
throw is.invalidParameterError('gravity', 'valid gravity', image.gravity);
}
}
if (is.defined(image.premultiplied)) {
if (is.bool(image.premultiplied)) {
composite.premultiplied = image.premultiplied;
} else {
throw is.invalidParameterError('premultiplied', 'boolean', image.premultiplied);
}
}
return composite;
});
return this;
}
/**
* Decorate the Sharp prototype with composite-related functions.
* @private
*/
module.exports = function (Sharp) {
Sharp.prototype.composite = composite;
Sharp.blend = blend;
};

452
node_modules/sharp/lib/constructor.js generated vendored Normal file

@@ -0,0 +1,452 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const util = require('node:util');
const stream = require('node:stream');
const is = require('./is');
require('./sharp');
// Use NODE_DEBUG=sharp to enable libvips warnings
const debuglog = util.debuglog('sharp');
/**
* Constructor factory to create an instance of `sharp`, to which further methods are chained.
*
* JPEG, PNG, WebP, GIF, AVIF or TIFF format image data can be streamed out from this object.
* When using Stream based output, derived attributes are available from the `info` event.
*
* Non-critical problems encountered during processing are emitted as `warning` events.
*
* Implements the [stream.Duplex](http://nodejs.org/api/stream.html#stream_class_stream_duplex) class.
*
* When loading more than one page/frame of an animated image,
* these are combined as a vertically-stacked "toilet roll" image
* where the overall height is the `pageHeight` multiplied by the number of `pages`.
*
* @constructs Sharp
*
* @emits Sharp#info
* @emits Sharp#warning
*
* @example
* sharp('input.jpg')
* .resize(300, 200)
* .toFile('output.jpg', function(err) {
* // output.jpg is a 300 pixels wide and 200 pixels high image
* // containing a scaled and cropped version of input.jpg
* });
*
* @example
* // Read image data from remote URL,
* // resize to 300 pixels wide,
* // emit an 'info' event with calculated dimensions
* // and finally write image data to writableStream
* const { body } = await fetch('https://...');
* const readableStream = Readable.fromWeb(body);
* const transformer = sharp()
* .resize(300)
* .on('info', ({ height }) => {
* console.log(`Image height is ${height}`);
* });
* readableStream.pipe(transformer).pipe(writableStream);
*
* @example
* // Create a blank 300x200 PNG image of semi-translucent red pixels
* sharp({
* create: {
* width: 300,
* height: 200,
* channels: 4,
* background: { r: 255, g: 0, b: 0, alpha: 0.5 }
* }
* })
* .png()
* .toBuffer()
* .then( ... );
*
* @example
* // Convert an animated GIF to an animated WebP
* await sharp('in.gif', { animated: true }).toFile('out.webp');
*
* @example
* // Read a raw array of pixels and save it to a png
* const input = Uint8Array.from([255, 255, 255, 0, 0, 0]); // or Uint8ClampedArray
* const image = sharp(input, {
* // because the input does not contain its dimensions or how many channels it has
* // we need to specify it in the constructor options
* raw: {
* width: 2,
* height: 1,
* channels: 3
* }
* });
* await image.toFile('my-two-pixels.png');
*
* @example
* // Generate RGB Gaussian noise
* await sharp({
* create: {
* width: 300,
* height: 200,
* channels: 3,
* noise: {
* type: 'gaussian',
* mean: 128,
* sigma: 30
* }
* }
* }).toFile('noise.png');
*
* @example
* // Generate an image from text
* await sharp({
* text: {
* text: 'Hello, world!',
* width: 400, // max width
* height: 300 // max height
* }
* }).toFile('text_bw.png');
*
* @example
* // Generate an rgba image from text using pango markup and font
* await sharp({
* text: {
* text: '<span foreground="red">Red!</span><span background="cyan">blue</span>',
* font: 'sans',
* rgba: true,
* dpi: 300
* }
* }).toFile('text_rgba.png');
*
* @param {(Buffer|ArrayBuffer|Uint8Array|Uint8ClampedArray|Int8Array|Uint16Array|Int16Array|Uint32Array|Int32Array|Float32Array|Float64Array|string)} [input] - if present, can be
* a Buffer / ArrayBuffer / Uint8Array / Uint8ClampedArray containing JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image data, or
* a TypedArray containing raw pixel image data, or
* a String containing the filesystem path to an JPEG, PNG, WebP, AVIF, GIF, SVG or TIFF image file.
* JPEG, PNG, WebP, AVIF, GIF, SVG, TIFF or raw pixel image data can be streamed into the object when not present.
* @param {Object} [options] - if present, is an Object with optional attributes.
* @param {string} [options.failOn='warning'] - When to abort processing of invalid pixel data, one of (in order of sensitivity, least to most): 'none', 'truncated', 'error', 'warning'. Higher levels imply lower levels. Invalid metadata will always abort.
* @param {number|boolean} [options.limitInputPixels=268402689] - Do not process input images where the number of pixels
* (width x height) exceeds this limit. Assumes image dimensions contained in the input metadata can be trusted.
* An integral Number of pixels, zero or false to remove limit, true to use default limit of 268402689 (0x3FFF x 0x3FFF).
* @param {boolean} [options.unlimited=false] - Set this to `true` to remove safety features that help prevent memory exhaustion (JPEG, PNG, SVG, HEIF).
* @param {boolean} [options.sequentialRead=true] - Set this to `false` to use random access rather than sequential read. Some operations will do this automatically.
* @param {number} [options.density=72] - number representing the DPI for vector images in the range 1 to 100000.
* @param {boolean} [options.ignoreIcc=false] - should the embedded ICC profile, if any, be ignored.
* @param {number} [options.pages=1] - Number of pages to extract for multi-page input (GIF, WebP, TIFF), use -1 for all pages.
* @param {number} [options.page=0] - Page number to start extracting from for multi-page input (GIF, WebP, TIFF), zero based.
* @param {number} [options.subifd=-1] - subIFD (Sub Image File Directory) to extract for OME-TIFF, defaults to main image.
* @param {number} [options.level=0] - level to extract from a multi-level input (OpenSlide), zero based.
* @param {boolean} [options.animated=false] - Set to `true` to read all frames/pages of an animated image (GIF, WebP, TIFF), equivalent of setting `pages` to `-1`.
* @param {Object} [options.raw] - describes raw pixel input image data. See `raw()` for pixel ordering.
* @param {number} [options.raw.width] - integral number of pixels wide.
* @param {number} [options.raw.height] - integral number of pixels high.
* @param {number} [options.raw.channels] - integral number of channels, between 1 and 4.
* @param {boolean} [options.raw.premultiplied] - specifies that the raw input has already been premultiplied, set to `true`
* to avoid sharp premultiplying the image. (optional, default `false`)
* @param {Object} [options.create] - describes a new image to be created.
* @param {number} [options.create.width] - integral number of pixels wide.
* @param {number} [options.create.height] - integral number of pixels high.
* @param {number} [options.create.channels] - integral number of channels, either 3 (RGB) or 4 (RGBA).
* @param {string|Object} [options.create.background] - parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
* @param {Object} [options.create.noise] - describes a noise to be created.
* @param {string} [options.create.noise.type] - type of generated noise, currently only `gaussian` is supported.
* @param {number} [options.create.noise.mean] - mean of pixels in generated noise.
* @param {number} [options.create.noise.sigma] - standard deviation of pixels in generated noise.
* @param {Object} [options.text] - describes a new text image to be created.
* @param {string} [options.text.text] - text to render as a UTF-8 string. It can contain Pango markup, for example `<i>Le</i>Monde`.
* @param {string} [options.text.font] - font name to render with.
* @param {string} [options.text.fontfile] - absolute filesystem path to a font file that can be used by `font`.
* @param {number} [options.text.width=0] - Integral number of pixels to word-wrap at. Lines of text wider than this will be broken at word boundaries.
* @param {number} [options.text.height=0] - Maximum integral number of pixels high. When defined, `dpi` will be ignored and the text will automatically fit the pixel resolution defined by `width` and `height`. Will be ignored if `width` is not specified or set to 0.
* @param {string} [options.text.align='left'] - Alignment style for multi-line text (`'left'`, `'centre'`, `'center'`, `'right'`).
* @param {boolean} [options.text.justify=false] - set this to true to apply justification to the text.
* @param {number} [options.text.dpi=72] - the resolution (size) at which to render the text. Does not take effect if `height` is specified.
* @param {boolean} [options.text.rgba=false] - set this to true to enable RGBA output. This is useful for colour emoji rendering, or support for pango markup features like `<span foreground="red">Red!</span>`.
* @param {number} [options.text.spacing=0] - text line height in points. Will use the font line height if none is specified.
* @param {string} [options.text.wrap='word'] - word wrapping style when width is provided, one of: 'word', 'char', 'word-char' (prefer word, fallback to char) or 'none'.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
const Sharp = function (input, options) {
if (arguments.length === 1 && !is.defined(input)) {
throw new Error('Invalid input');
}
if (!(this instanceof Sharp)) {
return new Sharp(input, options);
}
stream.Duplex.call(this);
this.options = {
// resize options
topOffsetPre: -1,
leftOffsetPre: -1,
widthPre: -1,
heightPre: -1,
topOffsetPost: -1,
leftOffsetPost: -1,
widthPost: -1,
heightPost: -1,
width: -1,
height: -1,
canvas: 'crop',
position: 0,
resizeBackground: [0, 0, 0, 255],
useExifOrientation: false,
angle: 0,
rotationAngle: 0,
rotationBackground: [0, 0, 0, 255],
rotateBeforePreExtract: false,
flip: false,
flop: false,
extendTop: 0,
extendBottom: 0,
extendLeft: 0,
extendRight: 0,
extendBackground: [0, 0, 0, 255],
extendWith: 'background',
withoutEnlargement: false,
withoutReduction: false,
affineMatrix: [],
affineBackground: [0, 0, 0, 255],
affineIdx: 0,
affineIdy: 0,
affineOdx: 0,
affineOdy: 0,
affineInterpolator: this.constructor.interpolators.bilinear,
kernel: 'lanczos3',
fastShrinkOnLoad: true,
// operations
tint: [-1, 0, 0, 0],
flatten: false,
flattenBackground: [0, 0, 0],
unflatten: false,
negate: false,
negateAlpha: true,
medianSize: 0,
blurSigma: 0,
precision: 'integer',
minAmpl: 0.2,
sharpenSigma: 0,
sharpenM1: 1,
sharpenM2: 2,
sharpenX1: 2,
sharpenY2: 10,
sharpenY3: 20,
threshold: 0,
thresholdGrayscale: true,
trimBackground: [],
trimThreshold: -1,
trimLineArt: false,
gamma: 0,
gammaOut: 0,
greyscale: false,
normalise: false,
normaliseLower: 1,
normaliseUpper: 99,
claheWidth: 0,
claheHeight: 0,
claheMaxSlope: 3,
brightness: 1,
saturation: 1,
hue: 0,
lightness: 0,
booleanBufferIn: null,
booleanFileIn: '',
joinChannelIn: [],
extractChannel: -1,
removeAlpha: false,
ensureAlpha: -1,
colourspace: 'srgb',
colourspacePipeline: 'last',
composite: [],
// output
fileOut: '',
formatOut: 'input',
streamOut: false,
keepMetadata: 0,
withMetadataOrientation: -1,
withMetadataDensity: 0,
withIccProfile: '',
withExif: {},
withExifMerge: true,
resolveWithObject: false,
// output format
jpegQuality: 80,
jpegProgressive: false,
jpegChromaSubsampling: '4:2:0',
jpegTrellisQuantisation: false,
jpegOvershootDeringing: false,
jpegOptimiseScans: false,
jpegOptimiseCoding: true,
jpegQuantisationTable: 0,
pngProgressive: false,
pngCompressionLevel: 6,
pngAdaptiveFiltering: false,
pngPalette: false,
pngQuality: 100,
pngEffort: 7,
pngBitdepth: 8,
pngDither: 1,
jp2Quality: 80,
jp2TileHeight: 512,
jp2TileWidth: 512,
jp2Lossless: false,
jp2ChromaSubsampling: '4:4:4',
webpQuality: 80,
webpAlphaQuality: 100,
webpLossless: false,
webpNearLossless: false,
webpSmartSubsample: false,
webpPreset: 'default',
webpEffort: 4,
webpMinSize: false,
webpMixed: false,
gifBitdepth: 8,
gifEffort: 7,
gifDither: 1,
gifInterFrameMaxError: 0,
gifInterPaletteMaxError: 3,
gifReuse: true,
gifProgressive: false,
tiffQuality: 80,
tiffCompression: 'jpeg',
tiffPredictor: 'horizontal',
tiffPyramid: false,
tiffMiniswhite: false,
tiffBitdepth: 8,
tiffTile: false,
tiffTileHeight: 256,
tiffTileWidth: 256,
tiffXres: 1.0,
tiffYres: 1.0,
tiffResolutionUnit: 'inch',
heifQuality: 50,
heifLossless: false,
heifCompression: 'av1',
heifEffort: 4,
heifChromaSubsampling: '4:4:4',
heifBitdepth: 8,
jxlDistance: 1,
jxlDecodingTier: 0,
jxlEffort: 7,
jxlLossless: false,
rawDepth: 'uchar',
tileSize: 256,
tileOverlap: 0,
tileContainer: 'fs',
tileLayout: 'dz',
tileFormat: 'last',
tileDepth: 'last',
tileAngle: 0,
tileSkipBlanks: -1,
tileBackground: [255, 255, 255, 255],
tileCentre: false,
tileId: 'https://example.com/iiif',
tileBasename: '',
timeoutSeconds: 0,
linearA: [],
linearB: [],
// Function to notify of libvips warnings
debuglog: warning => {
this.emit('warning', warning);
debuglog(warning);
},
// Function to notify of queue length changes
queueListener: function (queueLength) {
Sharp.queue.emit('change', queueLength);
}
};
this.options.input = this._createInputDescriptor(input, options, { allowStream: true });
return this;
};
Object.setPrototypeOf(Sharp.prototype, stream.Duplex.prototype);
Object.setPrototypeOf(Sharp, stream.Duplex);
/**
* Take a "snapshot" of the Sharp instance, returning a new instance.
* Cloned instances inherit the input of their parent instance.
* This allows multiple output Streams and therefore multiple processing pipelines to share a single input Stream.
*
* @example
* const pipeline = sharp().rotate();
* pipeline.clone().resize(800, 600).pipe(firstWritableStream);
* pipeline.clone().extract({ left: 20, top: 20, width: 100, height: 100 }).pipe(secondWritableStream);
* readableStream.pipe(pipeline);
* // firstWritableStream receives auto-rotated, resized readableStream
* // secondWritableStream receives auto-rotated, extracted region of readableStream
*
* @example
* // Create a pipeline that will download an image, resize it and format it to different files
* // Using Promises to know when the pipeline is complete
* const fs = require("fs");
* const got = require("got");
* const sharpStream = sharp({ failOn: 'none' });
*
* const promises = [];
*
* promises.push(
* sharpStream
* .clone()
* .jpeg({ quality: 100 })
* .toFile("originalFile.jpg")
* );
*
* promises.push(
* sharpStream
* .clone()
* .resize({ width: 500 })
* .jpeg({ quality: 80 })
* .toFile("optimized-500.jpg")
* );
*
* promises.push(
* sharpStream
* .clone()
* .resize({ width: 500 })
* .webp({ quality: 80 })
* .toFile("optimized-500.webp")
* );
*
* // https://github.com/sindresorhus/got/blob/main/documentation/3-streams.md
* got.stream("https://www.example.com/some-file.jpg").pipe(sharpStream);
*
* Promise.all(promises)
* .then(res => { console.log("Done!", res); })
* .catch(err => {
* console.error("Error processing files, let's clean it up", err);
* try {
* fs.unlinkSync("originalFile.jpg");
* fs.unlinkSync("optimized-500.jpg");
* fs.unlinkSync("optimized-500.webp");
* } catch (e) {}
* });
*
* @returns {Sharp}
*/
function clone () {
// Clone existing options
const clone = this.constructor.call();
const { debuglog, queueListener, ...options } = this.options;
clone.options = structuredClone(options);
clone.options.debuglog = debuglog;
clone.options.queueListener = queueListener;
// Pass 'finish' event to clone for Stream-based input
if (this._isStreamInput()) {
this.on('finish', () => {
// Clone inherits input data
this._flattenBufferIn();
clone.options.input.buffer = this.options.input.buffer;
clone.emit('finish');
});
}
return clone;
}
Object.assign(Sharp.prototype, { clone });
/**
* Export constructor.
* @private
*/
module.exports = Sharp;

1754
node_modules/sharp/lib/index.d.ts generated vendored Normal file

File diff suppressed because it is too large

16
node_modules/sharp/lib/index.js generated vendored Normal file

@@ -0,0 +1,16 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const Sharp = require('./constructor');
require('./input')(Sharp);
require('./resize')(Sharp);
require('./composite')(Sharp);
require('./operation')(Sharp);
require('./colour')(Sharp);
require('./channel')(Sharp);
require('./output')(Sharp);
require('./utility')(Sharp);
module.exports = Sharp;

658
node_modules/sharp/lib/input.js generated vendored Normal file

@@ -0,0 +1,658 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const color = require('color');
const is = require('./is');
const sharp = require('./sharp');
/**
* Justification alignment
* @member
* @private
*/
const align = {
left: 'low',
center: 'centre',
centre: 'centre',
right: 'high'
};
/**
* Extract input options, if any, from an object.
* @private
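* @example
* // Sketch: yields an options object only when at least one known key is set.
* // _inputOptionsFromObject({ density: 300 }) => { density: 300, ... }
* // _inputOptionsFromObject({ anythingElse: 1 }) => undefined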
*/
function _inputOptionsFromObject (obj) {
const { raw, density, limitInputPixels, ignoreIcc, unlimited, sequentialRead, failOn, failOnError, animated, page, pages, subifd } = obj;
return [raw, density, limitInputPixels, ignoreIcc, unlimited, sequentialRead, failOn, failOnError, animated, page, pages, subifd].some(is.defined)
? { raw, density, limitInputPixels, ignoreIcc, unlimited, sequentialRead, failOn, failOnError, animated, page, pages, subifd }
: undefined;
}
/**
* Create Object containing input and input-related options.
* @private
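* @example
* // Sketch of the descriptor for filesystem input, with values taken from
* // the defaults assigned below:
* // this._createInputDescriptor('in.png')
* // => { failOn: 'warning', limitInputPixels: 268402689, ignoreIcc: false,
* //      unlimited: false, sequentialRead: true, file: 'in.png' }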
*/
function _createInputDescriptor (input, inputOptions, containerOptions) {
const inputDescriptor = {
failOn: 'warning',
limitInputPixels: Math.pow(0x3FFF, 2),
ignoreIcc: false,
unlimited: false,
sequentialRead: true
};
if (is.string(input)) {
// filesystem
inputDescriptor.file = input;
} else if (is.buffer(input)) {
// Buffer
if (input.length === 0) {
throw Error('Input Buffer is empty');
}
inputDescriptor.buffer = input;
} else if (is.arrayBuffer(input)) {
if (input.byteLength === 0) {
throw Error('Input bit Array is empty');
}
inputDescriptor.buffer = Buffer.from(input, 0, input.byteLength);
} else if (is.typedArray(input)) {
if (input.length === 0) {
throw Error('Input Bit Array is empty');
}
inputDescriptor.buffer = Buffer.from(input.buffer, input.byteOffset, input.byteLength);
} else if (is.plainObject(input) && !is.defined(inputOptions)) {
// Plain Object descriptor, e.g. create
inputOptions = input;
if (_inputOptionsFromObject(inputOptions)) {
// Stream with options
inputDescriptor.buffer = [];
}
} else if (!is.defined(input) && !is.defined(inputOptions) && is.object(containerOptions) && containerOptions.allowStream) {
// Stream without options
inputDescriptor.buffer = [];
} else {
throw new Error(`Unsupported input '${input}' of type ${typeof input}${
is.defined(inputOptions) ? ` when also providing options of type ${typeof inputOptions}` : ''
}`);
}
if (is.object(inputOptions)) {
// Deprecated: failOnError
if (is.defined(inputOptions.failOnError)) {
if (is.bool(inputOptions.failOnError)) {
inputDescriptor.failOn = inputOptions.failOnError ? 'warning' : 'none';
} else {
throw is.invalidParameterError('failOnError', 'boolean', inputOptions.failOnError);
}
}
// failOn
if (is.defined(inputOptions.failOn)) {
if (is.string(inputOptions.failOn) && is.inArray(inputOptions.failOn, ['none', 'truncated', 'error', 'warning'])) {
inputDescriptor.failOn = inputOptions.failOn;
} else {
throw is.invalidParameterError('failOn', 'one of: none, truncated, error, warning', inputOptions.failOn);
}
}
// Density
if (is.defined(inputOptions.density)) {
if (is.inRange(inputOptions.density, 1, 100000)) {
inputDescriptor.density = inputOptions.density;
} else {
throw is.invalidParameterError('density', 'number between 1 and 100000', inputOptions.density);
}
}
// Ignore embedded ICC profile
if (is.defined(inputOptions.ignoreIcc)) {
if (is.bool(inputOptions.ignoreIcc)) {
inputDescriptor.ignoreIcc = inputOptions.ignoreIcc;
} else {
throw is.invalidParameterError('ignoreIcc', 'boolean', inputOptions.ignoreIcc);
}
}
// limitInputPixels
if (is.defined(inputOptions.limitInputPixels)) {
if (is.bool(inputOptions.limitInputPixels)) {
inputDescriptor.limitInputPixels = inputOptions.limitInputPixels
? Math.pow(0x3FFF, 2)
: 0;
} else if (is.integer(inputOptions.limitInputPixels) && is.inRange(inputOptions.limitInputPixels, 0, Number.MAX_SAFE_INTEGER)) {
inputDescriptor.limitInputPixels = inputOptions.limitInputPixels;
} else {
throw is.invalidParameterError('limitInputPixels', 'positive integer', inputOptions.limitInputPixels);
}
}
// unlimited
if (is.defined(inputOptions.unlimited)) {
if (is.bool(inputOptions.unlimited)) {
inputDescriptor.unlimited = inputOptions.unlimited;
} else {
throw is.invalidParameterError('unlimited', 'boolean', inputOptions.unlimited);
}
}
// sequentialRead
if (is.defined(inputOptions.sequentialRead)) {
if (is.bool(inputOptions.sequentialRead)) {
inputDescriptor.sequentialRead = inputOptions.sequentialRead;
} else {
throw is.invalidParameterError('sequentialRead', 'boolean', inputOptions.sequentialRead);
}
}
// Raw pixel input
if (is.defined(inputOptions.raw)) {
if (
is.object(inputOptions.raw) &&
is.integer(inputOptions.raw.width) && inputOptions.raw.width > 0 &&
is.integer(inputOptions.raw.height) && inputOptions.raw.height > 0 &&
is.integer(inputOptions.raw.channels) && is.inRange(inputOptions.raw.channels, 1, 4)
) {
inputDescriptor.rawWidth = inputOptions.raw.width;
inputDescriptor.rawHeight = inputOptions.raw.height;
inputDescriptor.rawChannels = inputOptions.raw.channels;
inputDescriptor.rawPremultiplied = !!inputOptions.raw.premultiplied;
switch (input.constructor) {
case Uint8Array:
case Uint8ClampedArray:
inputDescriptor.rawDepth = 'uchar';
break;
case Int8Array:
inputDescriptor.rawDepth = 'char';
break;
case Uint16Array:
inputDescriptor.rawDepth = 'ushort';
break;
case Int16Array:
inputDescriptor.rawDepth = 'short';
break;
case Uint32Array:
inputDescriptor.rawDepth = 'uint';
break;
case Int32Array:
inputDescriptor.rawDepth = 'int';
break;
case Float32Array:
inputDescriptor.rawDepth = 'float';
break;
case Float64Array:
inputDescriptor.rawDepth = 'double';
break;
default:
inputDescriptor.rawDepth = 'uchar';
break;
}
} else {
throw new Error('Expected width, height and channels for raw pixel input');
}
}
// Multi-page input (GIF, TIFF, PDF)
if (is.defined(inputOptions.animated)) {
if (is.bool(inputOptions.animated)) {
inputDescriptor.pages = inputOptions.animated ? -1 : 1;
} else {
throw is.invalidParameterError('animated', 'boolean', inputOptions.animated);
}
}
if (is.defined(inputOptions.pages)) {
if (is.integer(inputOptions.pages) && is.inRange(inputOptions.pages, -1, 100000)) {
inputDescriptor.pages = inputOptions.pages;
} else {
throw is.invalidParameterError('pages', 'integer between -1 and 100000', inputOptions.pages);
}
}
if (is.defined(inputOptions.page)) {
if (is.integer(inputOptions.page) && is.inRange(inputOptions.page, 0, 100000)) {
inputDescriptor.page = inputOptions.page;
} else {
throw is.invalidParameterError('page', 'integer between 0 and 100000', inputOptions.page);
}
}
// Multi-level input (OpenSlide)
if (is.defined(inputOptions.level)) {
if (is.integer(inputOptions.level) && is.inRange(inputOptions.level, 0, 256)) {
inputDescriptor.level = inputOptions.level;
} else {
throw is.invalidParameterError('level', 'integer between 0 and 256', inputOptions.level);
}
}
// Sub Image File Directory (TIFF)
if (is.defined(inputOptions.subifd)) {
if (is.integer(inputOptions.subifd) && is.inRange(inputOptions.subifd, -1, 100000)) {
inputDescriptor.subifd = inputOptions.subifd;
} else {
throw is.invalidParameterError('subifd', 'integer between -1 and 100000', inputOptions.subifd);
}
}
// Create new image
if (is.defined(inputOptions.create)) {
if (
is.object(inputOptions.create) &&
is.integer(inputOptions.create.width) && inputOptions.create.width > 0 &&
is.integer(inputOptions.create.height) && inputOptions.create.height > 0 &&
is.integer(inputOptions.create.channels)
) {
inputDescriptor.createWidth = inputOptions.create.width;
inputDescriptor.createHeight = inputOptions.create.height;
inputDescriptor.createChannels = inputOptions.create.channels;
// Noise
if (is.defined(inputOptions.create.noise)) {
if (!is.object(inputOptions.create.noise)) {
throw new Error('Expected noise to be an object');
}
if (!is.inArray(inputOptions.create.noise.type, ['gaussian'])) {
throw new Error('Only gaussian noise is supported at the moment');
}
if (!is.inRange(inputOptions.create.channels, 1, 4)) {
throw is.invalidParameterError('create.channels', 'number between 1 and 4', inputOptions.create.channels);
}
inputDescriptor.createNoiseType = inputOptions.create.noise.type;
if (is.number(inputOptions.create.noise.mean) && is.inRange(inputOptions.create.noise.mean, 0, 10000)) {
inputDescriptor.createNoiseMean = inputOptions.create.noise.mean;
} else {
throw is.invalidParameterError('create.noise.mean', 'number between 0 and 10000', inputOptions.create.noise.mean);
}
if (is.number(inputOptions.create.noise.sigma) && is.inRange(inputOptions.create.noise.sigma, 0, 10000)) {
inputDescriptor.createNoiseSigma = inputOptions.create.noise.sigma;
} else {
throw is.invalidParameterError('create.noise.sigma', 'number between 0 and 10000', inputOptions.create.noise.sigma);
}
} else if (is.defined(inputOptions.create.background)) {
if (!is.inRange(inputOptions.create.channels, 3, 4)) {
throw is.invalidParameterError('create.channels', 'number between 3 and 4', inputOptions.create.channels);
}
const background = color(inputOptions.create.background);
inputDescriptor.createBackground = [
background.red(),
background.green(),
background.blue(),
Math.round(background.alpha() * 255)
];
} else {
throw new Error('Expected valid noise or background to create a new input image');
}
delete inputDescriptor.buffer;
} else {
throw new Error('Expected valid width, height and channels to create a new input image');
}
}
// Create a new image with text
if (is.defined(inputOptions.text)) {
if (is.object(inputOptions.text) && is.string(inputOptions.text.text)) {
inputDescriptor.textValue = inputOptions.text.text;
if (is.defined(inputOptions.text.height) && is.defined(inputOptions.text.dpi)) {
throw new Error('Expected only one of dpi or height');
}
if (is.defined(inputOptions.text.font)) {
if (is.string(inputOptions.text.font)) {
inputDescriptor.textFont = inputOptions.text.font;
} else {
throw is.invalidParameterError('text.font', 'string', inputOptions.text.font);
}
}
if (is.defined(inputOptions.text.fontfile)) {
if (is.string(inputOptions.text.fontfile)) {
inputDescriptor.textFontfile = inputOptions.text.fontfile;
} else {
throw is.invalidParameterError('text.fontfile', 'string', inputOptions.text.fontfile);
}
}
if (is.defined(inputOptions.text.width)) {
if (is.integer(inputOptions.text.width) && inputOptions.text.width > 0) {
inputDescriptor.textWidth = inputOptions.text.width;
} else {
throw is.invalidParameterError('text.width', 'positive integer', inputOptions.text.width);
}
}
if (is.defined(inputOptions.text.height)) {
if (is.integer(inputOptions.text.height) && inputOptions.text.height > 0) {
inputDescriptor.textHeight = inputOptions.text.height;
} else {
throw is.invalidParameterError('text.height', 'positive integer', inputOptions.text.height);
}
}
if (is.defined(inputOptions.text.align)) {
if (is.string(inputOptions.text.align) && is.string(this.constructor.align[inputOptions.text.align])) {
inputDescriptor.textAlign = this.constructor.align[inputOptions.text.align];
} else {
throw is.invalidParameterError('text.align', 'valid alignment', inputOptions.text.align);
}
}
if (is.defined(inputOptions.text.justify)) {
if (is.bool(inputOptions.text.justify)) {
inputDescriptor.textJustify = inputOptions.text.justify;
} else {
throw is.invalidParameterError('text.justify', 'boolean', inputOptions.text.justify);
}
}
if (is.defined(inputOptions.text.dpi)) {
if (is.integer(inputOptions.text.dpi) && is.inRange(inputOptions.text.dpi, 1, 1000000)) {
inputDescriptor.textDpi = inputOptions.text.dpi;
} else {
throw is.invalidParameterError('text.dpi', 'integer between 1 and 1000000', inputOptions.text.dpi);
}
}
if (is.defined(inputOptions.text.rgba)) {
if (is.bool(inputOptions.text.rgba)) {
inputDescriptor.textRgba = inputOptions.text.rgba;
} else {
throw is.invalidParameterError('text.rgba', 'bool', inputOptions.text.rgba);
}
}
if (is.defined(inputOptions.text.spacing)) {
if (is.integer(inputOptions.text.spacing) && is.inRange(inputOptions.text.spacing, -1000000, 1000000)) {
inputDescriptor.textSpacing = inputOptions.text.spacing;
} else {
throw is.invalidParameterError('text.spacing', 'integer between -1000000 and 1000000', inputOptions.text.spacing);
}
}
if (is.defined(inputOptions.text.wrap)) {
if (is.string(inputOptions.text.wrap) && is.inArray(inputOptions.text.wrap, ['word', 'char', 'word-char', 'none'])) {
inputDescriptor.textWrap = inputOptions.text.wrap;
} else {
throw is.invalidParameterError('text.wrap', 'one of: word, char, word-char, none', inputOptions.text.wrap);
}
}
delete inputDescriptor.buffer;
} else {
throw new Error('Expected a valid string to create an image with text.');
}
}
} else if (is.defined(inputOptions)) {
throw new Error('Invalid input options ' + inputOptions);
}
return inputDescriptor;
}
/**
* Handle incoming Buffer chunk on Writable Stream.
* @private
* @param {Buffer} chunk
* @param {string} encoding - unused
* @param {Function} callback
*/
function _write (chunk, encoding, callback) {
/* istanbul ignore else */
if (Array.isArray(this.options.input.buffer)) {
/* istanbul ignore else */
if (is.buffer(chunk)) {
if (this.options.input.buffer.length === 0) {
this.on('finish', () => {
this.streamInFinished = true;
});
}
this.options.input.buffer.push(chunk);
callback();
} else {
callback(new Error('Non-Buffer data on Writable Stream'));
}
} else {
callback(new Error('Unexpected data on Writable Stream'));
}
}
/**
* Flattens the array of chunks accumulated in input.buffer.
* @private
*/
function _flattenBufferIn () {
if (this._isStreamInput()) {
this.options.input.buffer = Buffer.concat(this.options.input.buffer);
}
}
/**
* Are we expecting Stream-based input?
* @private
* @returns {boolean}
*/
function _isStreamInput () {
return Array.isArray(this.options.input.buffer);
}
/**
* Fast access to (uncached) image metadata without decoding any compressed pixel data.
*
* This is read from the header of the input image.
* It does not take into consideration any operations to be applied to the output image,
* such as resize or rotate.
*
* Dimensions in the response will respect the `page` and `pages` properties of the
* {@link /api-constructor#parameters|constructor parameters}.
*
* A `Promise` is returned when `callback` is not provided.
*
* - `format`: Name of decoder used to decompress image data e.g. `jpeg`, `png`, `webp`, `gif`, `svg`
* - `size`: Total size of image in bytes, for Stream and Buffer input only
* - `width`: Number of pixels wide (EXIF orientation is not taken into consideration, see example below)
* - `height`: Number of pixels high (EXIF orientation is not taken into consideration, see example below)
* - `space`: Name of colour space interpretation e.g. `srgb`, `rgb`, `cmyk`, `lab`, `b-w` [...](https://www.libvips.org/API/current/VipsImage.html#VipsInterpretation)
* - `channels`: Number of bands e.g. `3` for sRGB, `4` for CMYK
* - `depth`: Name of pixel depth format e.g. `uchar`, `char`, `ushort`, `float` [...](https://www.libvips.org/API/current/VipsImage.html#VipsBandFormat)
* - `density`: Number of pixels per inch (DPI), if present
* - `chromaSubsampling`: String containing JPEG chroma subsampling, `4:2:0` or `4:4:4` for RGB, `4:2:0:4` or `4:4:4:4` for CMYK
* - `isProgressive`: Boolean indicating whether the image is interlaced using a progressive scan
* - `pages`: Number of pages/frames contained within the image, with support for TIFF, HEIF, PDF, animated GIF and animated WebP
* - `pageHeight`: Number of pixels high each page in a multi-page image will be.
* - `paletteBitDepth`: Bit depth of palette-based image (GIF, PNG).
* - `loop`: Number of times to loop an animated image, zero refers to a continuous loop.
* - `delay`: Delay in ms between each page in an animated image, provided as an array of integers.
* - `pagePrimary`: Number of the primary page in a HEIF image
* - `levels`: Details of each level in a multi-level image provided as an array of objects, requires libvips compiled with support for OpenSlide
* - `subifds`: Number of Sub Image File Directories in an OME-TIFF image
* - `background`: Default background colour, if present, for PNG (bKGD) and GIF images, either an RGB Object or a single greyscale value
* - `compression`: The encoder used to compress an HEIF file, `av1` (AVIF) or `hevc` (HEIC)
* - `resolutionUnit`: The unit of resolution (density), either `inch` or `cm`, if present
* - `hasProfile`: Boolean indicating the presence of an embedded ICC profile
* - `hasAlpha`: Boolean indicating the presence of an alpha transparency channel
* - `orientation`: Number value of the EXIF Orientation header, if present
* - `exif`: Buffer containing raw EXIF data, if present
* - `icc`: Buffer containing raw [ICC](https://www.npmjs.com/package/icc) profile data, if present
* - `iptc`: Buffer containing raw IPTC data, if present
* - `xmp`: Buffer containing raw XMP data, if present
* - `tifftagPhotoshop`: Buffer containing raw TIFFTAG_PHOTOSHOP data, if present
* - `formatMagick`: String containing format for images loaded via *magick
* - `comments`: Array of keyword/text pairs representing PNG text blocks, if present.
*
* @example
* const metadata = await sharp(input).metadata();
*
* @example
* const image = sharp(inputJpg);
* image
* .metadata()
* .then(function(metadata) {
* return image
* .resize(Math.round(metadata.width / 2))
* .webp()
* .toBuffer();
* })
* .then(function(data) {
* // data contains a WebP image half the width and height of the original JPEG
* });
*
* @example
* // Based on EXIF rotation metadata, get the right-side-up width and height:
*
* const size = getNormalSize(await sharp(input).metadata());
*
* function getNormalSize({ width, height, orientation }) {
* return (orientation || 0) >= 5
* ? { width: height, height: width }
* : { width, height };
* }
*
* @param {Function} [callback] - called with the arguments `(err, metadata)`
* @returns {Promise<Object>|Sharp}
*/
function metadata (callback) {
const stack = Error();
if (is.fn(callback)) {
if (this._isStreamInput()) {
this.on('finish', () => {
this._flattenBufferIn();
sharp.metadata(this.options, (err, metadata) => {
if (err) {
callback(is.nativeError(err, stack));
} else {
callback(null, metadata);
}
});
});
} else {
sharp.metadata(this.options, (err, metadata) => {
if (err) {
callback(is.nativeError(err, stack));
} else {
callback(null, metadata);
}
});
}
return this;
} else {
if (this._isStreamInput()) {
return new Promise((resolve, reject) => {
const finished = () => {
this._flattenBufferIn();
sharp.metadata(this.options, (err, metadata) => {
if (err) {
reject(is.nativeError(err, stack));
} else {
resolve(metadata);
}
});
};
if (this.writableFinished) {
finished();
} else {
this.once('finish', finished);
}
});
} else {
return new Promise((resolve, reject) => {
sharp.metadata(this.options, (err, metadata) => {
if (err) {
reject(is.nativeError(err, stack));
} else {
resolve(metadata);
}
});
});
}
}
}
/**
* Access to pixel-derived image statistics for every channel in the image.
* A `Promise` is returned when `callback` is not provided.
*
* - `channels`: Array of channel statistics for each channel in the image. Each channel statistic contains
* - `min` (minimum value in the channel)
* - `max` (maximum value in the channel)
* - `sum` (sum of all values in a channel)
* - `squaresSum` (sum of squared values in a channel)
* - `mean` (mean of the values in a channel)
* - `stdev` (standard deviation for the values in a channel)
* - `minX` (x-coordinate of one of the pixels where the minimum lies)
* - `minY` (y-coordinate of one of the pixels where the minimum lies)
* - `maxX` (x-coordinate of one of the pixels where the maximum lies)
* - `maxY` (y-coordinate of one of the pixels where the maximum lies)
* - `isOpaque`: Is the image fully opaque? Will be `true` if the image has no alpha channel or if every pixel is fully opaque.
* - `entropy`: Histogram-based estimation of greyscale entropy, discarding alpha channel if any.
* - `sharpness`: Estimation of greyscale sharpness based on the standard deviation of a Laplacian convolution, discarding alpha channel if any.
* - `dominant`: Object containing most dominant sRGB colour based on a 4096-bin 3D histogram.
*
* **Note**: Statistics are derived from the original input image. Any operations performed on the image must first be
* written to a buffer in order to run `stats` on the result (see third example).
*
* @example
* const image = sharp(inputJpg);
* image
* .stats()
* .then(function(stats) {
* // stats contains the channel-wise statistics array and the isOpaque value
* });
*
* @example
* const { entropy, sharpness, dominant } = await sharp(input).stats();
* const { r, g, b } = dominant;
*
* @example
* const image = sharp(input);
* // store intermediate result
* const part = await image.extract(region).toBuffer();
* // create new instance to obtain statistics of extracted region
* const stats = await sharp(part).stats();
*
* @param {Function} [callback] - called with the arguments `(err, stats)`
* @returns {Promise<Object>}
*/
function stats (callback) {
const stack = Error();
if (is.fn(callback)) {
if (this._isStreamInput()) {
this.on('finish', () => {
this._flattenBufferIn();
sharp.stats(this.options, (err, stats) => {
if (err) {
callback(is.nativeError(err, stack));
} else {
callback(null, stats);
}
});
});
} else {
sharp.stats(this.options, (err, stats) => {
if (err) {
callback(is.nativeError(err, stack));
} else {
callback(null, stats);
}
});
}
return this;
} else {
if (this._isStreamInput()) {
return new Promise((resolve, reject) => {
this.on('finish', function () {
this._flattenBufferIn();
sharp.stats(this.options, (err, stats) => {
if (err) {
reject(is.nativeError(err, stack));
} else {
resolve(stats);
}
});
});
});
} else {
return new Promise((resolve, reject) => {
sharp.stats(this.options, (err, stats) => {
if (err) {
reject(is.nativeError(err, stack));
} else {
resolve(stats);
}
});
});
}
}
}
/**
* Decorate the Sharp prototype with input-related functions.
* @private
*/
module.exports = function (Sharp) {
Object.assign(Sharp.prototype, {
// Private
_inputOptionsFromObject,
_createInputDescriptor,
_write,
_flattenBufferIn,
_isStreamInput,
// Public
metadata,
stats
});
// Class attributes
Sharp.align = align;
};

169
node_modules/sharp/lib/is.js generated vendored Normal file

@@ -0,0 +1,169 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
/**
* Is this value defined and not null?
* @private
*/
const defined = function (val) {
return typeof val !== 'undefined' && val !== null;
};
/**
* Is this value an object?
* @private
*/
const object = function (val) {
return typeof val === 'object';
};
/**
* Is this value a plain object?
* @private
*/
const plainObject = function (val) {
return Object.prototype.toString.call(val) === '[object Object]';
};
/**
* Is this value a function?
* @private
*/
const fn = function (val) {
return typeof val === 'function';
};
/**
* Is this value a boolean?
* @private
*/
const bool = function (val) {
return typeof val === 'boolean';
};
/**
* Is this value a Buffer object?
* @private
*/
const buffer = function (val) {
return val instanceof Buffer;
};
/**
* Is this value a typed array object, e.g. Uint8Array or Uint8ClampedArray?
* @private
*/
const typedArray = function (val) {
if (defined(val)) {
switch (val.constructor) {
case Uint8Array:
case Uint8ClampedArray:
case Int8Array:
case Uint16Array:
case Int16Array:
case Uint32Array:
case Int32Array:
case Float32Array:
case Float64Array:
return true;
}
}
return false;
};
/**
* Is this value an ArrayBuffer object?
* @private
*/
const arrayBuffer = function (val) {
return val instanceof ArrayBuffer;
};
/**
* Is this value a non-empty string?
* @private
*/
const string = function (val) {
return typeof val === 'string' && val.length > 0;
};
/**
* Is this value a real number?
* @private
*/
const number = function (val) {
return typeof val === 'number' && !Number.isNaN(val);
};
/**
* Is this value an integer?
* @private
*/
const integer = function (val) {
return Number.isInteger(val);
};
/**
* Is this value within an inclusive given range?
* @private
*/
const inRange = function (val, min, max) {
return val >= min && val <= max;
};
/**
* Is this value within the elements of an array?
* @private
*/
const inArray = function (val, list) {
return list.includes(val);
};
/**
* Create an Error with a message relating to an invalid parameter.
*
* @param {string} name - parameter name.
* @param {string} expected - description of the type/value/range expected.
* @param {*} actual - the value received.
* @returns {Error} Containing the formatted message.
* @private
*/
const invalidParameterError = function (name, expected, actual) {
return new Error(
`Expected ${expected} for ${name} but received ${actual} of type ${typeof actual}`
);
};
/**
* Ensures an Error from C++ contains a JS stack.
*
* @param {Error} native - Error with message from C++.
* @param {Error} context - Error with stack from JS.
* @returns {Error} Error with message and stack.
* @private
*/
const nativeError = function (native, context) {
context.message = native.message;
return context;
};
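// Illustrative usage only (mirrors the pattern in input.js, not an exported
// API): capture a JS stack before crossing into C++, then merge in the
// native message on failure so the error points at the caller.
//   const stack = Error();
//   sharp.stats(options, (err) => {
//     if (err) throw nativeError(err, stack);
//   });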
module.exports = {
defined,
object,
plainObject,
fn,
bool,
buffer,
typedArray,
arrayBuffer,
string,
number,
integer,
inRange,
inArray,
invalidParameterError,
nativeError
};

203
node_modules/sharp/lib/libvips.js generated vendored Normal file

@@ -0,0 +1,203 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const { spawnSync } = require('node:child_process');
const { createHash } = require('node:crypto');
const semverCoerce = require('semver/functions/coerce');
const semverGreaterThanOrEqualTo = require('semver/functions/gte');
const semverSatisfies = require('semver/functions/satisfies');
const detectLibc = require('detect-libc');
const { config, engines, optionalDependencies } = require('../package.json');
const minimumLibvipsVersionLabelled = process.env.npm_package_config_libvips || /* istanbul ignore next */
config.libvips;
const minimumLibvipsVersion = semverCoerce(minimumLibvipsVersionLabelled).version;
const prebuiltPlatforms = [
'darwin-arm64', 'darwin-x64',
'linux-arm', 'linux-arm64', 'linux-s390x', 'linux-x64',
'linuxmusl-arm64', 'linuxmusl-x64',
'win32-ia32', 'win32-x64'
];
const spawnSyncOptions = {
encoding: 'utf8',
shell: true
};
const log = (item) => {
if (item instanceof Error) {
console.error(`sharp: Installation error: ${item.message}`);
} else {
console.log(`sharp: ${item}`);
}
};
/* istanbul ignore next */
const runtimeLibc = () => detectLibc.isNonGlibcLinuxSync() ? detectLibc.familySync() : '';
const runtimePlatformArch = () => `${process.platform}${runtimeLibc()}-${process.arch}`;
/* istanbul ignore next */
const buildPlatformArch = () => {
if (isEmscripten()) {
return 'wasm32';
}
/* eslint camelcase: ["error", { allow: ["^npm_config_"] }] */
const { npm_config_arch, npm_config_platform, npm_config_libc } = process.env;
const libc = typeof npm_config_libc === 'string' ? npm_config_libc : runtimeLibc();
return `${npm_config_platform || process.platform}${libc}-${npm_config_arch || process.arch}`;
};
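// Example (assumed values, for illustration): cross-installing with
// npm_config_platform=linux and npm_config_arch=arm64 on a glibc host
// yields 'linux-arm64'; setting npm_config_libc=musl yields 'linuxmusl-arm64'.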
const buildSharpLibvipsIncludeDir = () => {
try {
return require(`@img/sharp-libvips-dev-${buildPlatformArch()}/include`);
} catch {
try {
return require('@img/sharp-libvips-dev/include');
} catch {}
}
/* istanbul ignore next */
return '';
};
const buildSharpLibvipsCPlusPlusDir = () => {
try {
return require('@img/sharp-libvips-dev/cplusplus');
} catch {}
/* istanbul ignore next */
return '';
};
const buildSharpLibvipsLibDir = () => {
try {
return require(`@img/sharp-libvips-dev-${buildPlatformArch()}/lib`);
} catch {
try {
return require(`@img/sharp-libvips-${buildPlatformArch()}/lib`);
} catch {}
}
/* istanbul ignore next */
return '';
};
const isUnsupportedNodeRuntime = () => {
/* istanbul ignore next */
if (process.release?.name === 'node' && process.versions) {
if (!semverSatisfies(process.versions.node, engines.node)) {
return { found: process.versions.node, expected: engines.node };
}
}
};
/* istanbul ignore next */
const isEmscripten = () => {
const { CC } = process.env;
return Boolean(CC && CC.endsWith('/emcc'));
};
const isRosetta = () => {
/* istanbul ignore next */
if (process.platform === 'darwin' && process.arch === 'x64') {
const translated = spawnSync('sysctl sysctl.proc_translated', spawnSyncOptions).stdout;
return (translated || '').trim() === 'sysctl.proc_translated: 1';
}
return false;
};
const sha512 = (s) => createHash('sha512').update(s).digest('hex');
const yarnLocator = () => {
try {
const identHash = sha512(`imgsharp-libvips-${buildPlatformArch()}`);
const npmVersion = semverCoerce(optionalDependencies[`@img/sharp-libvips-${buildPlatformArch()}`]).version;
return sha512(`${identHash}npm:${npmVersion}`).slice(0, 10);
} catch {}
return '';
};
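// Note (assumption): this mirrors Yarn Berry's locator hashing so install
// scripts can locate the prebuilt libvips package inside Yarn's cache.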
/* istanbul ignore next */
const spawnRebuild = () =>
spawnSync(`node-gyp rebuild --directory=src ${isEmscripten() ? '--nodedir=emscripten' : ''}`, {
...spawnSyncOptions,
stdio: 'inherit'
}).status;
const globalLibvipsVersion = () => {
if (process.platform !== 'win32') {
const globalLibvipsVersion = spawnSync('pkg-config --modversion vips-cpp', {
...spawnSyncOptions,
env: {
...process.env,
PKG_CONFIG_PATH: pkgConfigPath()
}
}).stdout;
/* istanbul ignore next */
return (globalLibvipsVersion || '').trim();
} else {
return '';
}
};
/* istanbul ignore next */
const pkgConfigPath = () => {
if (process.platform !== 'win32') {
const brewPkgConfigPath = spawnSync(
'which brew >/dev/null 2>&1 && brew environment --plain | grep PKG_CONFIG_LIBDIR | cut -d" " -f2',
spawnSyncOptions
).stdout || '';
return [
brewPkgConfigPath.trim(),
process.env.PKG_CONFIG_PATH,
'/usr/local/lib/pkgconfig',
'/usr/lib/pkgconfig',
'/usr/local/libdata/pkgconfig',
'/usr/libdata/pkgconfig'
].filter(Boolean).join(':');
} else {
return '';
}
};
const skipSearch = (status, reason, logger) => {
if (logger) {
logger(`Detected ${reason}, skipping search for globally-installed libvips`);
}
return status;
};
const useGlobalLibvips = (logger) => {
if (Boolean(process.env.SHARP_IGNORE_GLOBAL_LIBVIPS) === true) {
return skipSearch(false, 'SHARP_IGNORE_GLOBAL_LIBVIPS', logger);
}
if (Boolean(process.env.SHARP_FORCE_GLOBAL_LIBVIPS) === true) {
return skipSearch(true, 'SHARP_FORCE_GLOBAL_LIBVIPS', logger);
}
/* istanbul ignore next */
if (isRosetta()) {
return skipSearch(false, 'Rosetta', logger);
}
const globalVipsVersion = globalLibvipsVersion();
return !!globalVipsVersion && /* istanbul ignore next */
semverGreaterThanOrEqualTo(globalVipsVersion, minimumLibvipsVersion);
};
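// Illustrative decision flow (assuming this is called from an install script):
//   SHARP_IGNORE_GLOBAL_LIBVIPS set -> never link against system libvips
//   SHARP_FORCE_GLOBAL_LIBVIPS set  -> always link against system libvips
//   otherwise -> use system libvips only if pkg-config reports >= the minimum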
module.exports = {
minimumLibvipsVersion,
prebuiltPlatforms,
buildPlatformArch,
buildSharpLibvipsIncludeDir,
buildSharpLibvipsCPlusPlusDir,
buildSharpLibvipsLibDir,
isUnsupportedNodeRuntime,
runtimePlatformArch,
log,
yarnLocator,
spawnRebuild,
globalLibvipsVersion,
pkgConfigPath,
useGlobalLibvips
};

958
node_modules/sharp/lib/operation.js generated vendored Normal file

@@ -0,0 +1,958 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const color = require('color');
const is = require('./is');
/**
* How accurate an operation should be.
* @member
* @private
*/
const vipsPrecision = {
integer: 'integer',
float: 'float',
approximate: 'approximate'
};
/**
* Rotate the output image by either an explicit angle
* or auto-orient based on the EXIF `Orientation` tag.
*
* If an angle is provided, it is converted to a valid positive degree rotation.
* For example, `-450` will produce a 270 degree rotation.
*
* When rotating by an angle other than a multiple of 90,
* the background colour can be provided with the `background` option.
*
* If no angle is provided, it is determined from the EXIF data.
* Mirroring is supported and may infer the use of a flip operation.
*
* The use of `rotate` without an angle will remove the EXIF `Orientation` tag, if any.
*
* Only one rotation can occur per pipeline.
* Previous calls to `rotate` in the same pipeline will be ignored.
*
* Multi-page images can only be rotated by 180 degrees.
*
* Method order is important when rotating, resizing and/or extracting regions,
* for example `.rotate(x).extract(y)` will produce a different result to `.extract(y).rotate(x)`.
*
* @example
* const pipeline = sharp()
* .rotate()
* .resize(null, 200)
* .toBuffer(function (err, outputBuffer, info) {
* // outputBuffer contains 200px high JPEG image data,
* // auto-rotated using EXIF Orientation tag
* // info.width and info.height contain the dimensions of the resized image
* });
* readableStream.pipe(pipeline);
*
* @example
* const rotateThenResize = await sharp(input)
* .rotate(90)
* .resize({ width: 16, height: 8, fit: 'fill' })
* .toBuffer();
* const resizeThenRotate = await sharp(input)
* .resize({ width: 16, height: 8, fit: 'fill' })
* .rotate(90)
* .toBuffer();
*
* @param {number} [angle=auto] angle of rotation.
* @param {Object} [options] - if present, is an Object with optional attributes.
* @param {string|Object} [options.background="#000000"] parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function rotate (angle, options) {
if (this.options.useExifOrientation || this.options.angle || this.options.rotationAngle) {
this.options.debuglog('ignoring previous rotate options');
}
if (!is.defined(angle)) {
this.options.useExifOrientation = true;
} else if (is.integer(angle) && !(angle % 90)) {
this.options.angle = angle;
} else if (is.number(angle)) {
this.options.rotationAngle = angle;
if (is.object(options) && options.background) {
const backgroundColour = color(options.background);
this.options.rotationBackground = [
backgroundColour.red(),
backgroundColour.green(),
backgroundColour.blue(),
Math.round(backgroundColour.alpha() * 255)
];
}
} else {
throw is.invalidParameterError('angle', 'numeric', angle);
}
return this;
}
/**
* Mirror the image vertically (up-down) about the x-axis.
* This always occurs before rotation, if any.
*
* This operation does not work correctly with multi-page images.
*
* @example
* const output = await sharp(input).flip().toBuffer();
*
* @param {Boolean} [flip=true]
* @returns {Sharp}
*/
function flip (flip) {
this.options.flip = is.bool(flip) ? flip : true;
return this;
}
/**
* Mirror the image horizontally (left-right) about the y-axis.
* This always occurs before rotation, if any.
*
* @example
* const output = await sharp(input).flop().toBuffer();
*
* @param {Boolean} [flop=true]
* @returns {Sharp}
*/
function flop (flop) {
this.options.flop = is.bool(flop) ? flop : true;
return this;
}
/**
* Perform an affine transform on an image. This operation will always occur after resizing, extraction and rotation, if any.
*
* You must provide an array of length 4 or a 2x2 affine transformation matrix.
* By default, new pixels are filled with a black background. You can provide a background color with the `background` option.
* A particular interpolator may also be specified. Set the `interpolator` option to an attribute of the `sharp.interpolators` Object e.g. `sharp.interpolators.nohalo`.
*
* In the case of a 2x2 matrix, the transform is:
* - X = `matrix[0, 0]` \* (x + `idx`) + `matrix[0, 1]` \* (y + `idy`) + `odx`
* - Y = `matrix[1, 0]` \* (x + `idx`) + `matrix[1, 1]` \* (y + `idy`) + `ody`
*
* where:
* - x and y are the coordinates in input image.
* - X and Y are the coordinates in output image.
* - (0,0) is the upper left corner.
*
* @since 0.27.0
*
* @example
* const pipeline = sharp()
* .affine([[1, 0.3], [0.1, 0.7]], {
* background: 'white',
* interpolator: sharp.interpolators.nohalo
* })
* .toBuffer((err, outputBuffer, info) => {
* // outputBuffer contains the transformed image
* // info.width and info.height contain the new dimensions
* });
*
* inputStream
* .pipe(pipeline);
*
* @param {Array<Array<number>>|Array<number>} matrix - affine transformation matrix
* @param {Object} [options] - if present, is an Object with optional attributes.
* @param {String|Object} [options.background="#000000"] - parsed by the [color](https://www.npmjs.org/package/color) module to extract values for red, green, blue and alpha.
* @param {Number} [options.idx=0] - input horizontal offset
* @param {Number} [options.idy=0] - input vertical offset
* @param {Number} [options.odx=0] - output horizontal offset
* @param {Number} [options.ody=0] - output vertical offset
* @param {String} [options.interpolator=sharp.interpolators.bicubic] - interpolator
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function affine (matrix, options) {
const flatMatrix = [].concat(...matrix);
if (flatMatrix.length === 4 && flatMatrix.every(is.number)) {
this.options.affineMatrix = flatMatrix;
} else {
throw is.invalidParameterError('matrix', '1x4 or 2x2 array', matrix);
}
if (is.defined(options)) {
if (is.object(options)) {
this._setBackgroundColourOption('affineBackground', options.background);
if (is.defined(options.idx)) {
if (is.number(options.idx)) {
this.options.affineIdx = options.idx;
} else {
throw is.invalidParameterError('options.idx', 'number', options.idx);
}
}
if (is.defined(options.idy)) {
if (is.number(options.idy)) {
this.options.affineIdy = options.idy;
} else {
throw is.invalidParameterError('options.idy', 'number', options.idy);
}
}
if (is.defined(options.odx)) {
if (is.number(options.odx)) {
this.options.affineOdx = options.odx;
} else {
throw is.invalidParameterError('options.odx', 'number', options.odx);
}
}
if (is.defined(options.ody)) {
if (is.number(options.ody)) {
this.options.affineOdy = options.ody;
} else {
throw is.invalidParameterError('options.ody', 'number', options.ody);
}
}
if (is.defined(options.interpolator)) {
if (is.inArray(options.interpolator, Object.values(this.constructor.interpolators))) {
this.options.affineInterpolator = options.interpolator;
} else {
throw is.invalidParameterError('options.interpolator', 'valid interpolator name', options.interpolator);
}
}
} else {
throw is.invalidParameterError('options', 'object', options);
}
}
return this;
}
/**
* Sharpen the image.
*
* When used without parameters, performs a fast, mild sharpen of the output image.
*
* When a `sigma` is provided, performs a slower, more accurate sharpen of the L channel in the LAB colour space.
* Fine-grained control over the level of sharpening in "flat" (m1) and "jagged" (m2) areas is available.
*
* See {@link https://www.libvips.org/API/current/libvips-convolution.html#vips-sharpen|libvips sharpen} operation.
*
* @example
* const data = await sharp(input).sharpen().toBuffer();
*
* @example
* const data = await sharp(input).sharpen({ sigma: 2 }).toBuffer();
*
* @example
* const data = await sharp(input)
* .sharpen({
* sigma: 2,
* m1: 0,
* m2: 3,
* x1: 3,
* y2: 15,
* y3: 15,
* })
* .toBuffer();
*
* @param {Object|number} [options] - if present, is an Object with attributes
* @param {number} [options.sigma] - the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`, between 0.000001 and 10
* @param {number} [options.m1=1.0] - the level of sharpening to apply to "flat" areas, between 0 and 1000000
* @param {number} [options.m2=2.0] - the level of sharpening to apply to "jagged" areas, between 0 and 1000000
* @param {number} [options.x1=2.0] - threshold between "flat" and "jagged", between 0 and 1000000
* @param {number} [options.y2=10.0] - maximum amount of brightening, between 0 and 1000000
* @param {number} [options.y3=20.0] - maximum amount of darkening, between 0 and 1000000
* @param {number} [flat] - (deprecated) see `options.m1`.
* @param {number} [jagged] - (deprecated) see `options.m2`.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function sharpen (options, flat, jagged) {
if (!is.defined(options)) {
// No arguments: default to mild sharpen
this.options.sharpenSigma = -1;
} else if (is.bool(options)) {
// Deprecated boolean argument: apply mild sharpen?
this.options.sharpenSigma = options ? -1 : 0;
} else if (is.number(options) && is.inRange(options, 0.01, 10000)) {
// Deprecated numeric argument: specific sigma
this.options.sharpenSigma = options;
// Deprecated control over flat areas
if (is.defined(flat)) {
if (is.number(flat) && is.inRange(flat, 0, 10000)) {
this.options.sharpenM1 = flat;
} else {
throw is.invalidParameterError('flat', 'number between 0 and 10000', flat);
}
}
// Deprecated control over jagged areas
if (is.defined(jagged)) {
if (is.number(jagged) && is.inRange(jagged, 0, 10000)) {
this.options.sharpenM2 = jagged;
} else {
throw is.invalidParameterError('jagged', 'number between 0 and 10000', jagged);
}
}
} else if (is.plainObject(options)) {
if (is.number(options.sigma) && is.inRange(options.sigma, 0.000001, 10)) {
this.options.sharpenSigma = options.sigma;
} else {
throw is.invalidParameterError('options.sigma', 'number between 0.000001 and 10', options.sigma);
}
if (is.defined(options.m1)) {
if (is.number(options.m1) && is.inRange(options.m1, 0, 1000000)) {
this.options.sharpenM1 = options.m1;
} else {
throw is.invalidParameterError('options.m1', 'number between 0 and 1000000', options.m1);
}
}
if (is.defined(options.m2)) {
if (is.number(options.m2) && is.inRange(options.m2, 0, 1000000)) {
this.options.sharpenM2 = options.m2;
} else {
throw is.invalidParameterError('options.m2', 'number between 0 and 1000000', options.m2);
}
}
if (is.defined(options.x1)) {
if (is.number(options.x1) && is.inRange(options.x1, 0, 1000000)) {
this.options.sharpenX1 = options.x1;
} else {
throw is.invalidParameterError('options.x1', 'number between 0 and 1000000', options.x1);
}
}
if (is.defined(options.y2)) {
if (is.number(options.y2) && is.inRange(options.y2, 0, 1000000)) {
this.options.sharpenY2 = options.y2;
} else {
throw is.invalidParameterError('options.y2', 'number between 0 and 1000000', options.y2);
}
}
if (is.defined(options.y3)) {
if (is.number(options.y3) && is.inRange(options.y3, 0, 1000000)) {
this.options.sharpenY3 = options.y3;
} else {
throw is.invalidParameterError('options.y3', 'number between 0 and 1000000', options.y3);
}
}
} else {
throw is.invalidParameterError('sigma', 'number between 0.01 and 10000', options);
}
return this;
}
/**
* Apply median filter.
* When used without parameters the default window is 3x3.
*
* @example
* const output = await sharp(input).median().toBuffer();
*
* @example
* const output = await sharp(input).median(5).toBuffer();
*
* @param {number} [size=3] square mask size: size x size
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function median (size) {
if (!is.defined(size)) {
// No arguments: default to 3x3
this.options.medianSize = 3;
} else if (is.integer(size) && is.inRange(size, 1, 1000)) {
// Numeric argument: specific size
this.options.medianSize = size;
} else {
throw is.invalidParameterError('size', 'integer between 1 and 1000', size);
}
return this;
}
/**
* Blur the image.
*
* When used without parameters, performs a fast 3x3 box blur (equivalent to a box linear filter).
*
* When a `sigma` is provided, performs a slower, more accurate Gaussian blur.
*
* @example
* const boxBlurred = await sharp(input)
* .blur()
* .toBuffer();
*
* @example
* const gaussianBlurred = await sharp(input)
* .blur(5)
* .toBuffer();
*
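* @example
* // Illustrative sketch of the options-object form (values chosen for
* // illustration, not from upstream docs): a sigma-5 Gaussian blur using
* // the faster approximate precision.
* const blurred = await sharp(input)
*   .blur({ sigma: 5, precision: 'approximate' })
*   .toBuffer();
*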
* @param {Object|number|Boolean} [options]
* @param {number} [options.sigma] a value between 0.3 and 1000 representing the sigma of the Gaussian mask, where `sigma = 1 + radius / 2`.
* @param {string} [options.precision='integer'] How accurate the operation should be, one of: integer, float, approximate.
* @param {number} [options.minAmplitude=0.2] A value between 0.001 and 1. A smaller value will generate a larger, more accurate mask.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function blur (options) {
let sigma;
if (is.number(options)) {
sigma = options;
} else if (is.plainObject(options)) {
if (!is.number(options.sigma)) {
throw is.invalidParameterError('options.sigma', 'number between 0.3 and 1000', options.sigma);
}
sigma = options.sigma;
if ('precision' in options) {
if (is.string(vipsPrecision[options.precision])) {
this.options.precision = vipsPrecision[options.precision];
} else {
throw is.invalidParameterError('precision', 'one of: integer, float, approximate', options.precision);
}
}
if ('minAmplitude' in options) {
if (is.number(options.minAmplitude) && is.inRange(options.minAmplitude, 0.001, 1)) {
this.options.minAmpl = options.minAmplitude;
} else {
throw is.invalidParameterError('minAmplitude', 'number between 0.001 and 1', options.minAmplitude);
}
}
}
if (!is.defined(options)) {
// No arguments: default to mild blur
this.options.blurSigma = -1;
} else if (is.bool(options)) {
// Boolean argument: apply mild blur?
this.options.blurSigma = options ? -1 : 0;
} else if (is.number(sigma) && is.inRange(sigma, 0.3, 1000)) {
// Numeric argument: specific sigma
this.options.blurSigma = sigma;
} else {
throw is.invalidParameterError('sigma', 'number between 0.3 and 1000', sigma);
}
return this;
}
/**
* Merge alpha transparency channel, if any, with a background, then remove the alpha channel.
*
* See also {@link /api-channel#removealpha|removeAlpha}.
*
* @example
* await sharp(rgbaInput)
* .flatten({ background: '#F0A703' })
* .toBuffer();
*
* @param {Object} [options]
* @param {string|Object} [options.background={r: 0, g: 0, b: 0}] - background colour, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to black.
* @returns {Sharp}
*/
function flatten (options) {
this.options.flatten = is.bool(options) ? options : true;
if (is.object(options)) {
this._setBackgroundColourOption('flattenBackground', options.background);
}
return this;
}
/**
* Ensure the image has an alpha channel
* with all white pixel values made fully transparent.
*
* Existing alpha channel values for non-white pixels remain unchanged.
*
* This feature is experimental and the API may change.
*
* @since 0.32.1
*
* @example
* await sharp(rgbInput)
* .unflatten()
* .toBuffer();
*
* @example
* await sharp(rgbInput)
* .threshold(128, { grayscale: false }) // convert bright pixels to white
* .unflatten()
* .toBuffer();
*
* @returns {Sharp}
*/
function unflatten () {
this.options.unflatten = true;
return this;
}
/**
* Apply a gamma correction by reducing the encoding (darken) pre-resize at a factor of `1/gamma`
* then increasing the encoding (brighten) post-resize at a factor of `gamma`.
* This can improve the perceived brightness of a resized image in non-linear colour spaces.
* JPEG and WebP input images will not take advantage of the shrink-on-load performance optimisation
* when applying a gamma correction.
*
* Supply a second argument to use a different output gamma value, otherwise the first value is used in both cases.
*
* @param {number} [gamma=2.2] value between 1.0 and 3.0.
* @param {number} [gammaOut] value between 1.0 and 3.0. (optional, defaults to same as `gamma`)
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function gamma (gamma, gammaOut) {
if (!is.defined(gamma)) {
// Default gamma correction of 2.2 (sRGB)
this.options.gamma = 2.2;
} else if (is.number(gamma) && is.inRange(gamma, 1, 3)) {
this.options.gamma = gamma;
} else {
throw is.invalidParameterError('gamma', 'number between 1.0 and 3.0', gamma);
}
if (!is.defined(gammaOut)) {
// Default gamma correction for output is same as input
this.options.gammaOut = this.options.gamma;
} else if (is.number(gammaOut) && is.inRange(gammaOut, 1, 3)) {
this.options.gammaOut = gammaOut;
} else {
throw is.invalidParameterError('gammaOut', 'number between 1.0 and 3.0', gammaOut);
}
return this;
}
/**
* Produce the "negative" of the image.
*
* @example
* const output = await sharp(input)
* .negate()
* .toBuffer();
*
* @example
* const output = await sharp(input)
* .negate({ alpha: false })
* .toBuffer();
*
* @param {Object} [options]
* @param {Boolean} [options.alpha=true] Whether or not to negate any alpha channel
* @returns {Sharp}
*/
function negate (options) {
this.options.negate = is.bool(options) ? options : true;
if (is.plainObject(options) && 'alpha' in options) {
if (!is.bool(options.alpha)) {
throw is.invalidParameterError('alpha', 'boolean', options.alpha);
} else {
this.options.negateAlpha = options.alpha;
}
}
return this;
}
/**
* Enhance output image contrast by stretching its luminance to cover a full dynamic range.
*
* Uses a histogram-based approach, taking a default range of 1% to 99% to reduce sensitivity to noise at the extremes.
*
* Luminance values below the `lower` percentile will be underexposed by clipping to zero.
* Luminance values above the `upper` percentile will be overexposed by clipping to the max pixel value.
*
* @example
* const output = await sharp(input)
* .normalise()
* .toBuffer();
*
* @example
* const output = await sharp(input)
* .normalise({ lower: 0, upper: 100 })
* .toBuffer();
*
* @param {Object} [options]
* @param {number} [options.lower=1] - Percentile below which luminance values will be underexposed.
* @param {number} [options.upper=99] - Percentile above which luminance values will be overexposed.
* @returns {Sharp}
*/
function normalise (options) {
if (is.plainObject(options)) {
if (is.defined(options.lower)) {
if (is.number(options.lower) && is.inRange(options.lower, 0, 99)) {
this.options.normaliseLower = options.lower;
} else {
throw is.invalidParameterError('lower', 'number between 0 and 99', options.lower);
}
}
if (is.defined(options.upper)) {
if (is.number(options.upper) && is.inRange(options.upper, 1, 100)) {
this.options.normaliseUpper = options.upper;
} else {
throw is.invalidParameterError('upper', 'number between 1 and 100', options.upper);
}
}
}
if (this.options.normaliseLower >= this.options.normaliseUpper) {
throw is.invalidParameterError('range', 'lower to be less than upper',
`${this.options.normaliseLower} >= ${this.options.normaliseUpper}`);
}
this.options.normalise = true;
return this;
}
/**
* Alternative spelling of normalise.
*
* @example
* const output = await sharp(input)
* .normalize()
* .toBuffer();
*
* @param {Object} [options]
* @param {number} [options.lower=1] - Percentile below which luminance values will be underexposed.
* @param {number} [options.upper=99] - Percentile above which luminance values will be overexposed.
* @returns {Sharp}
*/
function normalize (options) {
return this.normalise(options);
}
/**
* Perform contrast limiting adaptive histogram equalization
* {@link https://en.wikipedia.org/wiki/Adaptive_histogram_equalization#Contrast_Limited_AHE|CLAHE}.
*
* This will, in general, enhance the clarity of the image by bringing out darker details.
*
* @since 0.28.3
*
* @example
* const output = await sharp(input)
* .clahe({
* width: 3,
* height: 3,
* })
* .toBuffer();
*
* @param {Object} options
* @param {number} options.width - Integral width of the search window, in pixels.
* @param {number} options.height - Integral height of the search window, in pixels.
* @param {number} [options.maxSlope=3] - Integral level of brightening, between 0 and 100, where 0 disables contrast limiting.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function clahe (options) {
if (is.plainObject(options)) {
if (is.integer(options.width) && options.width > 0) {
this.options.claheWidth = options.width;
} else {
throw is.invalidParameterError('width', 'integer greater than zero', options.width);
}
if (is.integer(options.height) && options.height > 0) {
this.options.claheHeight = options.height;
} else {
throw is.invalidParameterError('height', 'integer greater than zero', options.height);
}
if (is.defined(options.maxSlope)) {
if (is.integer(options.maxSlope) && is.inRange(options.maxSlope, 0, 100)) {
this.options.claheMaxSlope = options.maxSlope;
} else {
throw is.invalidParameterError('maxSlope', 'integer between 0 and 100', options.maxSlope);
}
}
} else {
throw is.invalidParameterError('options', 'plain object', options);
}
return this;
}
/**
* Convolve the image with the specified kernel.
*
* @example
* sharp(input)
* .convolve({
* width: 3,
* height: 3,
* kernel: [-1, 0, 1, -2, 0, 2, -1, 0, 1]
* })
* .raw()
* .toBuffer(function(err, data, info) {
* // data contains the raw pixel data representing the convolution
* // of the input image with the horizontal Sobel operator
* });
*
* @param {Object} kernel
* @param {number} kernel.width - width of the kernel in pixels.
* @param {number} kernel.height - height of the kernel in pixels.
* @param {Array<number>} kernel.kernel - Array of length `width*height` containing the kernel values.
* @param {number} [kernel.scale=sum] - the scale of the kernel; the convolution result is divided by this value, defaulting to the sum of the kernel values.
* @param {number} [kernel.offset=0] - the offset of the kernel, added to the scaled convolution result.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function convolve (kernel) {
if (!is.object(kernel) || !Array.isArray(kernel.kernel) ||
!is.integer(kernel.width) || !is.integer(kernel.height) ||
!is.inRange(kernel.width, 3, 1001) || !is.inRange(kernel.height, 3, 1001) ||
kernel.height * kernel.width !== kernel.kernel.length
) {
// must pass in a kernel
throw new Error('Invalid convolution kernel');
}
// Default scale is sum of kernel values
if (!is.integer(kernel.scale)) {
kernel.scale = kernel.kernel.reduce(function (a, b) {
return a + b;
}, 0);
}
// Clip scale to a minimum value of 1
if (kernel.scale < 1) {
kernel.scale = 1;
}
if (!is.integer(kernel.offset)) {
kernel.offset = 0;
}
this.options.convKernel = kernel;
return this;
}
/**
* Any pixel value greater than or equal to the threshold value will be set to 255, otherwise it will be set to 0.
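*
* @example
* // Illustrative sketch (not from upstream docs): set pixels >= 180 to 255
* // and the rest to 0, producing a single-channel greyscale image by
* // default; `input` is assumed to be an existing image source.
* const output = await sharp(input)
*   .threshold(180)
*   .toBuffer();
*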
* @param {number} [threshold=128] - a value in the range 0-255 representing the level at which the threshold will be applied.
* @param {Object} [options]
* @param {Boolean} [options.greyscale=true] - convert to single channel greyscale.
* @param {Boolean} [options.grayscale=true] - alternative spelling for greyscale.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function threshold (threshold, options) {
if (!is.defined(threshold)) {
this.options.threshold = 128;
} else if (is.bool(threshold)) {
this.options.threshold = threshold ? 128 : 0;
} else if (is.integer(threshold) && is.inRange(threshold, 0, 255)) {
this.options.threshold = threshold;
} else {
throw is.invalidParameterError('threshold', 'integer between 0 and 255', threshold);
}
if (!is.object(options) || options.greyscale === true || options.grayscale === true) {
this.options.thresholdGrayscale = true;
} else {
this.options.thresholdGrayscale = false;
}
return this;
}
/**
* Perform a bitwise boolean operation with operand image.
*
* This operation creates an output image where each pixel is the result of
* the selected bitwise boolean `operation` between the corresponding pixels of the input images.
*
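* @example
* // Illustrative sketch (not from upstream docs): bitwise AND with an
* // operand image of identical dimensions; `operandInput` is a
* // hypothetical Buffer or file path.
* const output = await sharp(input)
*   .boolean(operandInput, 'and')
*   .toBuffer();
*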
* @param {Buffer|string} operand - Buffer containing image data or string containing the path to an image file.
* @param {string} operator - one of `and`, `or` or `eor` to perform that bitwise operation, like the C bitwise operators `&`, `|` and `^` respectively.
* @param {Object} [options]
* @param {Object} [options.raw] - describes operand when using raw pixel data.
* @param {number} [options.raw.width]
* @param {number} [options.raw.height]
* @param {number} [options.raw.channels]
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function boolean (operand, operator, options) {
this.options.boolean = this._createInputDescriptor(operand, options);
if (is.string(operator) && is.inArray(operator, ['and', 'or', 'eor'])) {
this.options.booleanOp = operator;
} else {
throw is.invalidParameterError('operator', 'one of: and, or, eor', operator);
}
return this;
}
/**
* Apply the linear formula `a` * input + `b` to the image to adjust image levels.
*
* When a single number is provided, it will be used for all image channels.
* When an array of numbers is provided, the array length must match the number of channels.
*
* @example
* await sharp(input)
* .linear(0.5, 2)
* .toBuffer();
*
* @example
* await sharp(rgbInput)
* .linear(
* [0.25, 0.5, 0.75],
* [150, 100, 50]
* )
* .toBuffer();
*
* @param {(number|number[])} [a=[]] multiplier
* @param {(number|number[])} [b=[]] offset
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function linear (a, b) {
if (!is.defined(a) && is.number(b)) {
a = 1.0;
} else if (is.number(a) && !is.defined(b)) {
b = 0.0;
}
if (!is.defined(a)) {
this.options.linearA = [];
} else if (is.number(a)) {
this.options.linearA = [a];
} else if (Array.isArray(a) && a.length && a.every(is.number)) {
this.options.linearA = a;
} else {
throw is.invalidParameterError('a', 'number or array of numbers', a);
}
if (!is.defined(b)) {
this.options.linearB = [];
} else if (is.number(b)) {
this.options.linearB = [b];
} else if (Array.isArray(b) && b.length && b.every(is.number)) {
this.options.linearB = b;
} else {
throw is.invalidParameterError('b', 'number or array of numbers', b);
}
if (this.options.linearA.length !== this.options.linearB.length) {
throw new Error('Expected a and b to be arrays of the same length');
}
return this;
}
/**
* Recombine the image with the specified matrix.
*
* @since 0.21.1
*
* @example
* sharp(input)
* .recomb([
* [0.3588, 0.7044, 0.1368],
* [0.2990, 0.5870, 0.1140],
* [0.2392, 0.4696, 0.0912],
* ])
* .raw()
* .toBuffer(function(err, data, info) {
* // data contains the raw pixel data after applying the matrix
* // With this example input, a sepia filter has been applied
* });
*
* @param {Array<Array<number>>} inputMatrix - 3x3 or 4x4 Recombination matrix
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function recomb (inputMatrix) {
if (!Array.isArray(inputMatrix)) {
throw is.invalidParameterError('inputMatrix', 'array', inputMatrix);
}
if (inputMatrix.length !== 3 && inputMatrix.length !== 4) {
throw is.invalidParameterError('inputMatrix', '3x3 or 4x4 array', inputMatrix.length);
}
const recombMatrix = inputMatrix.flat().map(Number);
if (recombMatrix.length !== 9 && recombMatrix.length !== 16) {
throw is.invalidParameterError('inputMatrix', 'cardinality of 9 or 16', recombMatrix.length);
}
this.options.recombMatrix = recombMatrix;
return this;
}
/**
* Transforms the image using brightness, saturation, hue rotation, and lightness.
* Brightness and lightness both operate on luminance, with the difference being that
* brightness is multiplicative whereas lightness is additive.
*
* @since 0.22.1
*
* @example
* // increase brightness by a factor of 2
* const output = await sharp(input)
* .modulate({
* brightness: 2
* })
* .toBuffer();
*
* @example
* // hue-rotate by 180 degrees
* const output = await sharp(input)
* .modulate({
* hue: 180
* })
* .toBuffer();
*
* @example
* // increase lightness by +50
* const output = await sharp(input)
* .modulate({
* lightness: 50
* })
* .toBuffer();
*
* @example
* // decrease brightness and saturation while also hue-rotating by 90 degrees
* const output = await sharp(input)
* .modulate({
* brightness: 0.5,
* saturation: 0.5,
* hue: 90,
* })
* .toBuffer();
*
* @param {Object} [options]
* @param {number} [options.brightness] Brightness multiplier
* @param {number} [options.saturation] Saturation multiplier
* @param {number} [options.hue] Degrees for hue rotation
* @param {number} [options.lightness] Lightness addend
* @returns {Sharp}
*/
function modulate (options) {
if (!is.plainObject(options)) {
throw is.invalidParameterError('options', 'plain object', options);
}
if ('brightness' in options) {
if (is.number(options.brightness) && options.brightness >= 0) {
this.options.brightness = options.brightness;
} else {
throw is.invalidParameterError('brightness', 'number above zero', options.brightness);
}
}
if ('saturation' in options) {
if (is.number(options.saturation) && options.saturation >= 0) {
this.options.saturation = options.saturation;
} else {
throw is.invalidParameterError('saturation', 'number above zero', options.saturation);
}
}
if ('hue' in options) {
if (is.integer(options.hue)) {
this.options.hue = options.hue % 360;
} else {
throw is.invalidParameterError('hue', 'number', options.hue);
}
}
if ('lightness' in options) {
if (is.number(options.lightness)) {
this.options.lightness = options.lightness;
} else {
throw is.invalidParameterError('lightness', 'number', options.lightness);
}
}
return this;
}
/**
* Decorate the Sharp prototype with operation-related functions.
* @private
*/
module.exports = function (Sharp) {
Object.assign(Sharp.prototype, {
rotate,
flip,
flop,
affine,
sharpen,
median,
blur,
flatten,
unflatten,
gamma,
negate,
normalise,
normalize,
clahe,
convolve,
threshold,
boolean,
linear,
recomb,
modulate
});
};

1587
node_modules/sharp/lib/output.js generated vendored Normal file

File diff suppressed because it is too large

587
node_modules/sharp/lib/resize.js generated vendored Normal file

@@ -0,0 +1,587 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const is = require('./is');
/**
* Weighting to apply when using contain/cover fit.
* @member
* @private
*/
const gravity = {
center: 0,
centre: 0,
north: 1,
east: 2,
south: 3,
west: 4,
northeast: 5,
southeast: 6,
southwest: 7,
northwest: 8
};
/**
* Position to apply when using contain/cover fit.
* @member
* @private
*/
const position = {
top: 1,
right: 2,
bottom: 3,
left: 4,
'right top': 5,
'right bottom': 6,
'left bottom': 7,
'left top': 8
};
/**
* How to extend the image.
* @member
* @private
*/
const extendWith = {
background: 'background',
copy: 'copy',
repeat: 'repeat',
mirror: 'mirror'
};
/**
* Strategies for automagic cover behaviour.
* @member
* @private
*/
const strategy = {
entropy: 16,
attention: 17
};
/**
* Reduction kernels.
* @member
* @private
*/
const kernel = {
nearest: 'nearest',
linear: 'linear',
cubic: 'cubic',
mitchell: 'mitchell',
lanczos2: 'lanczos2',
lanczos3: 'lanczos3'
};
/**
* Methods by which an image can be resized to fit the provided dimensions.
* @member
* @private
*/
const fit = {
contain: 'contain',
cover: 'cover',
fill: 'fill',
inside: 'inside',
outside: 'outside'
};
/**
* Map external fit property to internal canvas property.
* @member
* @private
*/
const mapFitToCanvas = {
contain: 'embed',
cover: 'crop',
fill: 'ignore_aspect',
inside: 'max',
outside: 'min'
};
/**
* @private
*/
function isRotationExpected (options) {
return (options.angle % 360) !== 0 || options.useExifOrientation === true || options.rotationAngle !== 0;
}
/**
* @private
*/
function isResizeExpected (options) {
return options.width !== -1 || options.height !== -1;
}
/**
* Resize image to `width`, `height` or `width x height`.
*
* When both a `width` and `height` are provided, the possible methods by which the image should **fit** these are:
* - `cover`: (default) Preserving aspect ratio, attempt to ensure the image covers both provided dimensions by cropping/clipping to fit.
* - `contain`: Preserving aspect ratio, contain within both provided dimensions using "letterboxing" where necessary.
* - `fill`: Ignore the aspect ratio of the input and stretch to both provided dimensions.
* - `inside`: Preserving aspect ratio, resize the image to be as large as possible while ensuring its dimensions are less than or equal to both those specified.
* - `outside`: Preserving aspect ratio, resize the image to be as small as possible while ensuring its dimensions are greater than or equal to both those specified.
*
* Some of these values are based on the [object-fit](https://developer.mozilla.org/en-US/docs/Web/CSS/object-fit) CSS property.
*
* <img alt="Examples of various values for the fit property when resizing" width="100%" style="aspect-ratio: 998/243" src="https://cdn.jsdelivr.net/gh/lovell/sharp@main/docs/image/api-resize-fit.svg">
*
* When using a **fit** of `cover` or `contain`, the default **position** is `centre`. Other options are:
* - `sharp.position`: `top`, `right top`, `right`, `right bottom`, `bottom`, `left bottom`, `left`, `left top`.
* - `sharp.gravity`: `north`, `northeast`, `east`, `southeast`, `south`, `southwest`, `west`, `northwest`, `center` or `centre`.
* - `sharp.strategy`: `cover` only, dynamically crop using either the `entropy` or `attention` strategy.
*
* Some of these values are based on the [object-position](https://developer.mozilla.org/en-US/docs/Web/CSS/object-position) CSS property.
*
* The strategy-based approach initially resizes so one dimension is at its target length
* then repeatedly ranks edge regions, discarding the edge with the lowest score based on the selected strategy.
* - `entropy`: focus on the region with the highest [Shannon entropy](https://en.wikipedia.org/wiki/Entropy_%28information_theory%29).
* - `attention`: focus on the region with the highest luminance frequency, colour saturation and presence of skin tones.
*
* Possible downsizing kernels are:
* - `nearest`: Use [nearest neighbour interpolation](http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation).
* - `linear`: Use a [triangle filter](https://en.wikipedia.org/wiki/Triangular_function).
* - `cubic`: Use a [Catmull-Rom spline](https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline).
* - `mitchell`: Use a [Mitchell-Netravali spline](https://www.cs.utexas.edu/~fussell/courses/cs384g-fall2013/lectures/mitchell/Mitchell.pdf).
* - `lanczos2`: Use a [Lanczos kernel](https://en.wikipedia.org/wiki/Lanczos_resampling#Lanczos_kernel) with `a=2`.
* - `lanczos3`: Use a Lanczos kernel with `a=3` (the default).
*
* When upsampling, these kernels map to `nearest`, `linear` and `cubic` interpolators.
* Downsampling kernels without a matching upsampling interpolator map to `cubic`.
*
* Only one resize can occur per pipeline.
* Previous calls to `resize` in the same pipeline will be ignored.
*
* @example
* sharp(input)
* .resize({ width: 100 })
* .toBuffer()
* .then(data => {
* // 100 pixels wide, auto-scaled height
* });
*
* @example
* sharp(input)
* .resize({ height: 100 })
* .toBuffer()
* .then(data => {
* // 100 pixels high, auto-scaled width
* });
*
* @example
* sharp(input)
* .resize(200, 300, {
* kernel: sharp.kernel.nearest,
* fit: 'contain',
* position: 'right top',
* background: { r: 255, g: 255, b: 255, alpha: 0.5 }
* })
* .toFile('output.png')
* .then(() => {
* // output.png is a 200 pixels wide and 300 pixels high image
* // containing a nearest-neighbour scaled version
* // contained within the north-east corner of a semi-transparent white canvas
* });
*
* @example
* const transformer = sharp()
* .resize({
* width: 200,
* height: 200,
* fit: sharp.fit.cover,
* position: sharp.strategy.entropy
* });
* // Read image data from readableStream
* // Write 200px square auto-cropped image data to writableStream
* readableStream
* .pipe(transformer)
* .pipe(writableStream);
*
* @example
* sharp(input)
* .resize(200, 200, {
* fit: sharp.fit.inside,
* withoutEnlargement: true
* })
* .toFormat('jpeg')
* .toBuffer()
* .then(function(outputBuffer) {
* // outputBuffer contains JPEG image data
* // no wider and no higher than 200 pixels
* // and no larger than the input image
* });
*
* @example
* sharp(input)
* .resize(200, 200, {
* fit: sharp.fit.outside,
* withoutReduction: true
* })
* .toFormat('jpeg')
* .toBuffer()
* .then(function(outputBuffer) {
* // outputBuffer contains JPEG image data
* // of at least 200 pixels wide and 200 pixels high while maintaining aspect ratio
* // and no smaller than the input image
* });
*
* @example
* const scaleByHalf = await sharp(input)
* .metadata()
* .then(({ width }) => sharp(input)
* .resize(Math.round(width * 0.5))
* .toBuffer()
* );
*
* @param {number} [width] - How many pixels wide the resultant image should be. Use `null` or `undefined` to auto-scale the width to match the height.
* @param {number} [height] - How many pixels high the resultant image should be. Use `null` or `undefined` to auto-scale the height to match the width.
* @param {Object} [options]
* @param {number} [options.width] - An alternative means of specifying `width`. If both are present this takes priority.
* @param {number} [options.height] - An alternative means of specifying `height`. If both are present this takes priority.
* @param {String} [options.fit='cover'] - How the image should be resized/cropped to fit the target dimension(s), one of `cover`, `contain`, `fill`, `inside` or `outside`.
* @param {String} [options.position='centre'] - A position, gravity or strategy to use when `fit` is `cover` or `contain`.
* @param {String|Object} [options.background={r: 0, g: 0, b: 0, alpha: 1}] - background colour when `fit` is `contain`, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to black without transparency.
* @param {String} [options.kernel='lanczos3'] - The kernel to use for image reduction and the inferred interpolator to use for upsampling. Use the `fastShrinkOnLoad` option to control kernel vs shrink-on-load.
* @param {Boolean} [options.withoutEnlargement=false] - Do not scale up if the width *or* height are already less than the target dimensions, equivalent to GraphicsMagick's `>` geometry option. This may result in output dimensions smaller than the target dimensions.
* @param {Boolean} [options.withoutReduction=false] - Do not scale down if the width *or* height are already greater than the target dimensions, equivalent to GraphicsMagick's `<` geometry option. This may still result in a crop to reach the target dimensions.
* @param {Boolean} [options.fastShrinkOnLoad=true] - Take greater advantage of the JPEG and WebP shrink-on-load feature, which can lead to a slight moiré pattern or round-down of an auto-scaled dimension.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function resize (widthOrOptions, height, options) {
if (isResizeExpected(this.options)) {
this.options.debuglog('ignoring previous resize options');
}
if (this.options.widthPost !== -1) {
this.options.debuglog('operation order will be: extract, resize, extract');
}
if (is.defined(widthOrOptions)) {
if (is.object(widthOrOptions) && !is.defined(options)) {
options = widthOrOptions;
} else if (is.integer(widthOrOptions) && widthOrOptions > 0) {
this.options.width = widthOrOptions;
} else {
throw is.invalidParameterError('width', 'positive integer', widthOrOptions);
}
} else {
this.options.width = -1;
}
if (is.defined(height)) {
if (is.integer(height) && height > 0) {
this.options.height = height;
} else {
throw is.invalidParameterError('height', 'positive integer', height);
}
} else {
this.options.height = -1;
}
if (is.object(options)) {
// Width
if (is.defined(options.width)) {
if (is.integer(options.width) && options.width > 0) {
this.options.width = options.width;
} else {
throw is.invalidParameterError('width', 'positive integer', options.width);
}
}
// Height
if (is.defined(options.height)) {
if (is.integer(options.height) && options.height > 0) {
this.options.height = options.height;
} else {
throw is.invalidParameterError('height', 'positive integer', options.height);
}
}
// Fit
if (is.defined(options.fit)) {
const canvas = mapFitToCanvas[options.fit];
if (is.string(canvas)) {
this.options.canvas = canvas;
} else {
throw is.invalidParameterError('fit', 'valid fit', options.fit);
}
}
// Position
if (is.defined(options.position)) {
const pos = is.integer(options.position)
? options.position
: strategy[options.position] || position[options.position] || gravity[options.position];
if (is.integer(pos) && (is.inRange(pos, 0, 8) || is.inRange(pos, 16, 17))) {
this.options.position = pos;
} else {
throw is.invalidParameterError('position', 'valid position/gravity/strategy', options.position);
}
}
// Background
this._setBackgroundColourOption('resizeBackground', options.background);
// Kernel
if (is.defined(options.kernel)) {
if (is.string(kernel[options.kernel])) {
this.options.kernel = kernel[options.kernel];
} else {
throw is.invalidParameterError('kernel', 'valid kernel name', options.kernel);
}
}
// Without enlargement
if (is.defined(options.withoutEnlargement)) {
this._setBooleanOption('withoutEnlargement', options.withoutEnlargement);
}
// Without reduction
if (is.defined(options.withoutReduction)) {
this._setBooleanOption('withoutReduction', options.withoutReduction);
}
// Shrink on load
if (is.defined(options.fastShrinkOnLoad)) {
this._setBooleanOption('fastShrinkOnLoad', options.fastShrinkOnLoad);
}
}
if (isRotationExpected(this.options) && isResizeExpected(this.options)) {
this.options.rotateBeforePreExtract = true;
}
return this;
}
/**
* Extend / pad / extrude one or more edges of the image with either
* the provided background colour or pixels derived from the image.
* This operation will always occur after resizing and extraction, if any.
*
* @example
* // Resize to 140 pixels wide, then add 10 transparent pixels
* // to the top, left and right edges and 20 to the bottom edge
* sharp(input)
* .resize(140)
* .extend({
* top: 10,
* bottom: 20,
* left: 10,
* right: 10,
* background: { r: 0, g: 0, b: 0, alpha: 0 }
* })
* ...
*
* @example
* // Add a row of 10 red pixels to the bottom
* sharp(input)
* .extend({
* bottom: 10,
* background: 'red'
* })
* ...
*
* @example
* // Extrude image by 8 pixels to the right, mirroring existing right hand edge
* sharp(input)
* .extend({
* right: 8,
* background: 'mirror'
* })
* ...
*
* @param {(number|Object)} extend - single pixel count to add to all edges or an Object with per-edge counts
* @param {number} [extend.top=0]
* @param {number} [extend.left=0]
* @param {number} [extend.bottom=0]
* @param {number} [extend.right=0]
* @param {String} [extend.extendWith='background'] - populate new pixels using this method, one of: background, copy, repeat, mirror.
* @param {String|Object} [extend.background={r: 0, g: 0, b: 0, alpha: 1}] - background colour, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to black without transparency.
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function extend (extend) {
if (is.integer(extend) && extend > 0) {
this.options.extendTop = extend;
this.options.extendBottom = extend;
this.options.extendLeft = extend;
this.options.extendRight = extend;
} else if (is.object(extend)) {
if (is.defined(extend.top)) {
if (is.integer(extend.top) && extend.top >= 0) {
this.options.extendTop = extend.top;
} else {
throw is.invalidParameterError('top', 'positive integer', extend.top);
}
}
if (is.defined(extend.bottom)) {
if (is.integer(extend.bottom) && extend.bottom >= 0) {
this.options.extendBottom = extend.bottom;
} else {
throw is.invalidParameterError('bottom', 'positive integer', extend.bottom);
}
}
if (is.defined(extend.left)) {
if (is.integer(extend.left) && extend.left >= 0) {
this.options.extendLeft = extend.left;
} else {
throw is.invalidParameterError('left', 'positive integer', extend.left);
}
}
if (is.defined(extend.right)) {
if (is.integer(extend.right) && extend.right >= 0) {
this.options.extendRight = extend.right;
} else {
throw is.invalidParameterError('right', 'positive integer', extend.right);
}
}
this._setBackgroundColourOption('extendBackground', extend.background);
if (is.defined(extend.extendWith)) {
if (is.string(extendWith[extend.extendWith])) {
this.options.extendWith = extendWith[extend.extendWith];
} else {
throw is.invalidParameterError('extendWith', 'one of: background, copy, repeat, mirror', extend.extendWith);
}
}
} else {
throw is.invalidParameterError('extend', 'integer or object', extend);
}
return this;
}
/**
* Extract/crop a region of the image.
*
* - Use `extract` before `resize` for pre-resize extraction.
* - Use `extract` after `resize` for post-resize extraction.
* - Use `extract` twice and `resize` once for extract-then-resize-then-extract in a fixed operation order.
*
* @example
* sharp(input)
* .extract({ left: left, top: top, width: width, height: height })
* .toFile(output, function(err) {
* // Extract a region of the input image, saving in the same format.
* });
* @example
* sharp(input)
* .extract({ left: leftOffsetPre, top: topOffsetPre, width: widthPre, height: heightPre })
* .resize(width, height)
* .extract({ left: leftOffsetPost, top: topOffsetPost, width: widthPost, height: heightPost })
* .toFile(output, function(err) {
* // Extract a region, resize, then extract from the resized image
* });
*
* @param {Object} options - describes the region to extract using integral pixel values
* @param {number} options.left - zero-indexed offset from left edge
* @param {number} options.top - zero-indexed offset from top edge
* @param {number} options.width - width of region to extract
* @param {number} options.height - height of region to extract
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function extract (options) {
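// An extract before any resize fills the 'Pre' slot; once a resize or an
// earlier pre-extract exists, subsequent extract calls fill the 'Post' slot.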
const suffix = isResizeExpected(this.options) || this.options.widthPre !== -1 ? 'Post' : 'Pre';
if (this.options[`width${suffix}`] !== -1) {
this.options.debuglog('ignoring previous extract options');
}
['left', 'top', 'width', 'height'].forEach(function (name) {
const value = options[name];
if (is.integer(value) && value >= 0) {
this.options[name + (name === 'left' || name === 'top' ? 'Offset' : '') + suffix] = value;
} else {
throw is.invalidParameterError(name, 'integer', value);
}
}, this);
// Ensure existing rotation occurs before pre-resize extraction
if (isRotationExpected(this.options) && !isResizeExpected(this.options)) {
if (this.options.widthPre === -1 || this.options.widthPost === -1) {
this.options.rotateBeforePreExtract = true;
}
}
return this;
}
/**
* Trim pixels from all edges that contain values similar to the given background colour, which defaults to that of the top-left pixel.
*
* Images with an alpha channel will use the combined bounding box of alpha and non-alpha channels.
*
* If the result of this operation would trim an image to nothing then no change is made.
*
* The `info` response Object will contain `trimOffsetLeft` and `trimOffsetTop` properties.
*
* @example
* // Trim pixels with a colour similar to that of the top-left pixel.
* await sharp(input)
* .trim()
* .toFile(output);
*
* @example
* // Trim pixels with the exact same colour as that of the top-left pixel.
* await sharp(input)
* .trim({
* threshold: 0
* })
* .toFile(output);
*
* @example
* // Assume input is line art and trim only pixels with a similar colour to red.
* const output = await sharp(input)
* .trim({
* background: "#FF0000",
* lineArt: true
* })
* .toBuffer();
*
* @example
* // Trim all "yellow-ish" pixels, being more lenient with the higher threshold.
* const output = await sharp(input)
* .trim({
* background: "yellow",
* threshold: 42,
* })
* .toBuffer();
*
* @param {Object} [options]
* @param {string|Object} [options.background='top-left pixel'] - Background colour, parsed by the [color](https://www.npmjs.org/package/color) module, defaults to that of the top-left pixel.
* @param {number} [options.threshold=10] - Allowed difference from the above colour, a positive number.
* @param {boolean} [options.lineArt=false] - Does the input more closely resemble line art (e.g. vector) rather than being photographic?
* @returns {Sharp}
* @throws {Error} Invalid parameters
*/
function trim (options) {
this.options.trimThreshold = 10;
if (is.defined(options)) {
if (is.object(options)) {
if (is.defined(options.background)) {
this._setBackgroundColourOption('trimBackground', options.background);
}
if (is.defined(options.threshold)) {
if (is.number(options.threshold) && options.threshold >= 0) {
this.options.trimThreshold = options.threshold;
} else {
throw is.invalidParameterError('threshold', 'positive number', options.threshold);
}
}
if (is.defined(options.lineArt)) {
this._setBooleanOption('trimLineArt', options.lineArt);
}
} else {
throw is.invalidParameterError('trim', 'object', options);
}
}
if (isRotationExpected(this.options)) {
this.options.rotateBeforePreExtract = true;
}
return this;
}
/**
* Decorate the Sharp prototype with resize-related functions.
* @private
*/
module.exports = function (Sharp) {
Object.assign(Sharp.prototype, {
resize,
extend,
extract,
trim
});
// Class attributes
Sharp.gravity = gravity;
Sharp.strategy = strategy;
Sharp.kernel = kernel;
Sharp.fit = fit;
Sharp.position = position;
};

114
node_modules/sharp/lib/sharp.js generated vendored Normal file

@@ -0,0 +1,114 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
// Inspects the runtime environment and exports the relevant sharp.node binary
const { familySync, versionSync } = require('detect-libc');
const { runtimePlatformArch, isUnsupportedNodeRuntime, prebuiltPlatforms, minimumLibvipsVersion } = require('./libvips');
const runtimePlatform = runtimePlatformArch();
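// Candidate binaries, tried in order: local native build, local WebAssembly
// build, prebuilt platform-specific package, prebuilt WebAssembly package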
const paths = [
`../src/build/Release/sharp-${runtimePlatform}.node`,
'../src/build/Release/sharp-wasm32.node',
`@img/sharp-${runtimePlatform}/sharp.node`,
'@img/sharp-wasm32/sharp.node'
];
let sharp;
const errors = [];
for (const path of paths) {
try {
sharp = require(path);
break;
} catch (err) {
/* istanbul ignore next */
errors.push(err);
}
}
/* istanbul ignore next */
if (sharp) {
module.exports = sharp;
} else {
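  // Derive coarse OS flags from the runtime platform string, e.g. 'linux-x64'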
const [isLinux, isMacOs, isWindows] = ['linux', 'darwin', 'win32'].map(os => runtimePlatform.startsWith(os));
const help = [`Could not load the "sharp" module using the ${runtimePlatform} runtime`];
errors.forEach(err => {
if (err.code !== 'MODULE_NOT_FOUND') {
help.push(`${err.code}: ${err.message}`);
}
});
const messages = errors.map(err => err.message).join(' ');
help.push('Possible solutions:');
  // Suggest fixes for the most common failure modes
if (isUnsupportedNodeRuntime()) {
const { found, expected } = isUnsupportedNodeRuntime();
help.push(
'- Please upgrade Node.js:',
` Found ${found}`,
` Requires ${expected}`
);
} else if (prebuiltPlatforms.includes(runtimePlatform)) {
const [os, cpu] = runtimePlatform.split('-');
const libc = os.endsWith('musl') ? ' --libc=musl' : '';
help.push(
'- Ensure optional dependencies can be installed:',
' npm install --include=optional sharp',
'- Ensure your package manager supports multi-platform installation:',
' See https://sharp.pixelplumbing.com/install#cross-platform',
'- Add platform-specific dependencies:',
` npm install --os=${os.replace('musl', '')}${libc} --cpu=${cpu} sharp`
);
} else {
help.push(
`- Manually install libvips >= ${minimumLibvipsVersion}`,
'- Add experimental WebAssembly-based dependencies:',
' npm install --cpu=wasm32 sharp',
' npm install @img/sharp-wasm32'
);
}
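  // A "symbol not found" or CXXABI error usually means the host's libc/libstdc++
  // is older than the one the prebuilt binaries were compiled against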
if (isLinux && /(symbol not found|CXXABI_)/i.test(messages)) {
try {
const { config } = require(`@img/sharp-libvips-${runtimePlatform}/package`);
const libcFound = `${familySync()} ${versionSync()}`;
const libcRequires = `${config.musl ? 'musl' : 'glibc'} ${config.musl || config.glibc}`;
help.push(
'- Update your OS:',
` Found ${libcFound}`,
` Requires ${libcRequires}`
);
} catch (errEngines) {}
}
if (isLinux && /\/snap\/core[0-9]{2}/.test(messages)) {
help.push(
'- Remove the Node.js Snap, which does not support native modules',
' snap remove node'
);
}
if (isMacOs && /Incompatible library version/.test(messages)) {
help.push(
'- Update Homebrew:',
' brew update && brew upgrade vips'
);
}
if (errors.some(err => err.code === 'ERR_DLOPEN_DISABLED')) {
help.push('- Run Node.js without using the --no-addons flag');
}
  // Windows-specific guidance for missing-procedure errors
if (isWindows && /The specified procedure could not be found/.test(messages)) {
help.push(
'- Using the canvas package on Windows?',
' See https://sharp.pixelplumbing.com/install#canvas-and-windows',
'- Check for outdated versions of sharp in the dependency tree:',
' npm ls sharp'
);
}
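  // Always finish with a link to the installation documentation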
help.push(
'- Consult the installation documentation:',
' See https://sharp.pixelplumbing.com/install'
);
throw new Error(help.join('\n'));
}

296
node_modules/sharp/lib/utility.js generated vendored Normal file

@@ -0,0 +1,296 @@
// Copyright 2013 Lovell Fuller and others.
// SPDX-License-Identifier: Apache-2.0
'use strict';
const events = require('node:events');
const detectLibc = require('detect-libc');
const is = require('./is');
const { runtimePlatformArch } = require('./libvips');
const sharp = require('./sharp');
const runtimePlatform = runtimePlatformArch();
const libvipsVersion = sharp.libvipsVersion();
/**
* An Object containing nested boolean values representing the available input and output formats/methods.
* @member
* @example
* console.log(sharp.format);
* @returns {Object}
*/
const format = sharp.format();
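// Register alternative file extensions as aliases of their canonical output format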
format.heif.output.alias = ['avif', 'heic'];
format.jpeg.output.alias = ['jpe', 'jpg'];
format.tiff.output.alias = ['tif'];
format.jp2k.output.alias = ['j2c', 'j2k', 'jp2', 'jpx'];
/**
* An Object containing the available interpolators and their proper values
* @readonly
* @enum {string}
*/
const interpolators = {
/** [Nearest neighbour interpolation](http://en.wikipedia.org/wiki/Nearest-neighbor_interpolation). Suitable for image enlargement only. */
nearest: 'nearest',
/** [Bilinear interpolation](http://en.wikipedia.org/wiki/Bilinear_interpolation). Faster than bicubic but with less smooth results. */
bilinear: 'bilinear',
/** [Bicubic interpolation](http://en.wikipedia.org/wiki/Bicubic_interpolation) (the default). */
bicubic: 'bicubic',
/** [LBB interpolation](https://github.com/libvips/libvips/blob/master/libvips/resample/lbb.cpp#L100). Prevents some "[acutance](http://en.wikipedia.org/wiki/Acutance)" but typically reduces performance by a factor of 2. */
locallyBoundedBicubic: 'lbb',
/** [Nohalo interpolation](http://eprints.soton.ac.uk/268086/). Prevents acutance but typically reduces performance by a factor of 3. */
nohalo: 'nohalo',
/** [VSQBS interpolation](https://github.com/libvips/libvips/blob/master/libvips/resample/vsqbs.cpp#L48). Prevents "staircasing" when enlarging. */
vertexSplitQuadraticBasisSpline: 'vsqbs'
};
/**
* An Object containing the version numbers of sharp, libvips
* and (when using prebuilt binaries) its dependencies.
*
* @member
* @example
* console.log(sharp.versions);
*/
let versions = {
vips: libvipsVersion.semver
};
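// When using prebuilt native or WASM binaries, prefer the full dependency
// version metadata shipped alongside them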
/* istanbul ignore next */
if (!libvipsVersion.isGlobal) {
if (!libvipsVersion.isWasm) {
try {
versions = require(`@img/sharp-${runtimePlatform}/versions`);
} catch (_) {
try {
versions = require(`@img/sharp-libvips-${runtimePlatform}/versions`);
} catch (_) {}
}
} else {
try {
versions = require('@img/sharp-wasm32/versions');
} catch (_) {}
}
}
versions.sharp = require('../package.json').version;
/* istanbul ignore next */
if (versions.heif && format.heif) {
// Prebuilt binaries provide AV1
format.heif.input.fileSuffix = ['.avif'];
format.heif.output.alias = ['avif'];
}
/**
* Gets or, when options are provided, sets the limits of _libvips'_ operation cache.
* Existing entries in the cache will be trimmed after any change in limits.
* This method always returns cache statistics,
* useful for determining how much working memory is required for a particular task.
*
* @example
* const stats = sharp.cache();
* @example
* sharp.cache( { items: 200 } );
* sharp.cache( { files: 0 } );
* sharp.cache(false);
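 *
 * @example
 * // Approximate shape of the returned statistics (a sketch; values will vary):
 * // { memory: { current: 0, high: 0, max: 50 },
 * //   files: { current: 0, max: 20 },
 * //   items: { current: 0, max: 100 } }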
*
* @param {Object|boolean} [options=true] - Object with the following attributes, or boolean where true uses default cache settings and false removes all caching
* @param {number} [options.memory=50] - is the maximum memory in MB to use for this cache
* @param {number} [options.files=20] - is the maximum number of files to hold open
* @param {number} [options.items=100] - is the maximum number of operations to cache
* @returns {Object}
*/
function cache (options) {
if (is.bool(options)) {
if (options) {
// Default cache settings of 50MB, 20 files, 100 items
return sharp.cache(50, 20, 100);
} else {
return sharp.cache(0, 0, 0);
}
} else if (is.object(options)) {
return sharp.cache(options.memory, options.files, options.items);
} else {
return sharp.cache();
}
}
cache(true);
/**
* Gets or, when a concurrency is provided, sets
* the maximum number of threads _libvips_ should use to process _each image_.
* These are from a thread pool managed by glib,
* which helps avoid the overhead of creating new threads.
*
* This method always returns the current concurrency.
*
* The default value is the number of CPU cores,
* except when using glibc-based Linux without jemalloc,
* where the default is `1` to help reduce memory fragmentation.
*
* A value of `0` will reset this to the number of CPU cores.
*
* Some image format libraries spawn additional threads,
* e.g. libaom manages its own 4 threads when encoding AVIF images,
* and these are independent of the value set here.
*
* The maximum number of images that sharp can process in parallel
* is controlled by libuv's `UV_THREADPOOL_SIZE` environment variable,
* which defaults to 4.
*
* https://nodejs.org/api/cli.html#uv_threadpool_sizesize
*
* For example, by default, a machine with 8 CPU cores will process
* 4 images in parallel and use up to 8 threads per image,
* so there will be up to 32 concurrent threads.
*
* @example
* const threads = sharp.concurrency(); // 4
* sharp.concurrency(2); // 2
* sharp.concurrency(0); // 4
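 *
 * @example
 * // A sketch: UV_THREADPOOL_SIZE must be set before libuv's thread pool is
 * // first used, e.g. at the very top of the entry script.
 * process.env.UV_THREADPOOL_SIZE = '8';
 * const sharp = require('sharp');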
*
* @param {number} [concurrency]
* @returns {number} concurrency
*/
function concurrency (concurrency) {
return sharp.concurrency(is.integer(concurrency) ? concurrency : null);
}
/* istanbul ignore next */
if (detectLibc.familySync() === detectLibc.GLIBC && !sharp._isUsingJemalloc()) {
// Reduce default concurrency to 1 when using glibc memory allocator
sharp.concurrency(1);
} else if (detectLibc.familySync() === detectLibc.MUSL && sharp.concurrency() === 1024) {
// Reduce default concurrency when musl thread over-subscription detected
sharp.concurrency(require('node:os').availableParallelism());
}
/**
* An EventEmitter that emits a `change` event when a task is either:
* - queued, waiting for _libuv_ to provide a worker thread
* - complete
* @member
* @example
* sharp.queue.on('change', function(queueLength) {
* console.log('Queue contains ' + queueLength + ' task(s)');
* });
*/
const queue = new events.EventEmitter();
/**
* Provides access to internal task counters.
* - queue is the number of tasks this module has queued waiting for _libuv_ to provide a worker thread from its pool.
* - process is the number of resize tasks currently being processed.
*
* @example
* const counters = sharp.counters(); // { queue: 2, process: 4 }
*
* @returns {Object}
*/
function counters () {
return sharp.counters();
}
/**
* Get and set use of SIMD vector unit instructions.
* Requires libvips to have been compiled with highway support.
*
* Improves the performance of `resize`, `blur` and `sharpen` operations
* by taking advantage of the SIMD vector unit of the CPU, e.g. Intel SSE and ARM NEON.
*
* @example
* const simd = sharp.simd();
* // simd is `true` if the runtime use of highway is currently enabled
* @example
* const simd = sharp.simd(false);
* // prevent libvips from using highway at runtime
*
* @param {boolean} [simd=true]
* @returns {boolean}
*/
function simd (simd) {
return sharp.simd(is.bool(simd) ? simd : null);
}
/**
* Block libvips operations at runtime.
*
* This is in addition to the `VIPS_BLOCK_UNTRUSTED` environment variable,
* which when set will block all "untrusted" operations.
*
* @since 0.32.4
*
* @example <caption>Block all TIFF input.</caption>
* sharp.block({
* operation: ['VipsForeignLoadTiff']
* });
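 *
 * @example <caption>A sketch: once blocked, attempts to load TIFF input will reject.</caption>
 * await sharp('input.tiff')
 *   .toBuffer()
 *   .catch(err => console.error(err.message));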
*
* @param {Object} options
* @param {Array<string>} options.operation - List of libvips low-level operation names to block.
*/
function block (options) {
if (is.object(options)) {
if (Array.isArray(options.operation) && options.operation.every(is.string)) {
sharp.block(options.operation, true);
} else {
throw is.invalidParameterError('operation', 'Array<string>', options.operation);
}
} else {
throw is.invalidParameterError('options', 'object', options);
}
}
/**
* Unblock libvips operations at runtime.
*
* This is useful for defining a list of allowed operations.
*
* @since 0.32.4
*
* @example <caption>Block all input except WebP from the filesystem.</caption>
* sharp.block({
* operation: ['VipsForeignLoad']
* });
* sharp.unblock({
* operation: ['VipsForeignLoadWebpFile']
* });
*
* @example <caption>Block all input except JPEG and PNG from a Buffer or Stream.</caption>
* sharp.block({
* operation: ['VipsForeignLoad']
* });
* sharp.unblock({
* operation: ['VipsForeignLoadJpegBuffer', 'VipsForeignLoadPngBuffer']
* });
*
* @param {Object} options
* @param {Array<string>} options.operation - List of libvips low-level operation names to unblock.
*/
function unblock (options) {
if (is.object(options)) {
if (Array.isArray(options.operation) && options.operation.every(is.string)) {
sharp.block(options.operation, false);
} else {
throw is.invalidParameterError('operation', 'Array<string>', options.operation);
}
} else {
throw is.invalidParameterError('options', 'object', options);
}
}
/**
* Decorate the Sharp class with utility-related functions.
* @private
*/
module.exports = function (Sharp) {
Sharp.cache = cache;
Sharp.concurrency = concurrency;
Sharp.counters = counters;
Sharp.simd = simd;
Sharp.format = format;
Sharp.interpolators = interpolators;
Sharp.versions = versions;
Sharp.queue = queue;
Sharp.block = block;
Sharp.unblock = unblock;
};