Refactor routing in App component to enhance navigation and improve error handling by integrating dynamic routes and updating the NotFound route.

becarta
2025-05-23 12:43:00 +02:00
parent f40db0f5c9
commit a544759a3b
11127 changed files with 1647032 additions and 0 deletions

node_modules/micromark/dev/index.d.ts generated vendored Normal file

@@ -0,0 +1,82 @@
/**
* Compile markdown to HTML.
*
* > Note: which encodings are supported depends on the engine.
* > For info on Node.js, see:
* > <https://nodejs.org/api/util.html#whatwg-supported-encodings>.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | null | undefined} encoding
* Character encoding to understand `value` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | Options | null | undefined} [encoding]
* Character encoding to understand `value` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*/
export function micromark(value: Value, encoding: Encoding | null | undefined, options?: Options | null | undefined): string;
/**
* Compile markdown to HTML.
*
* > Note: which encodings are supported depends on the engine.
* > For info on Node.js, see:
* > <https://nodejs.org/api/util.html#whatwg-supported-encodings>.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | null | undefined} encoding
* Character encoding to understand `value` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | Options | null | undefined} [encoding]
* Character encoding to understand `value` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*/
export function micromark(value: Value, options?: Options | null | undefined): string;
export { compile } from "./lib/compile.js";
export { parse } from "./lib/parse.js";
export { postprocess } from "./lib/postprocess.js";
export { preprocess } from "./lib/preprocess.js";
export type Options = import("micromark-util-types").Options;
import type { Value } from 'micromark-util-types';
import type { Encoding } from 'micromark-util-types';
//# sourceMappingURL=index.d.ts.map
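
A minimal usage sketch of the two overloads documented above (outputs follow micromark's documented CommonMark defaults):

import {micromark} from 'micromark'

// String input, options only (second overload):
console.log(micromark('## Hello, *world*!'))
// => '<h2>Hello, <em>world</em>!</h2>'

// Typed-array input with an explicit encoding (first overload):
const bytes = new TextEncoder().encode('*hi*')
console.log(micromark(bytes, 'utf8'))
// => '<p><em>hi</em></p>'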

node_modules/micromark/dev/index.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["index.js"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAyBG,iCACQ,KAAK,YAEL,QAAQ,GAAG,IAAI,GAAG,SAAS,YAG3B,OAAO,GAAG,IAAI,GAAG,SAAS,GAExB,MAAM,CAGhB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,iCACQ,KAAK,YAEL,OAAO,GAAG,IAAI,GAAG,SAAS,GAExB,MAAM,CAGhB;;;;;sBAvCU,OAAO,sBAAsB,EAAE,OAAO;2BAJjB,sBAAsB;8BAAtB,sBAAsB"}

node_modules/micromark/dev/index.js generated vendored Normal file

@@ -0,0 +1,68 @@
/**
* @import {Encoding, Value} from 'micromark-util-types'
*/
/**
* @typedef {import('micromark-util-types').Options} Options
*/
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
export {compile} from './lib/compile.js'
export {parse} from './lib/parse.js'
export {postprocess} from './lib/postprocess.js'
export {preprocess} from './lib/preprocess.js'
/**
* Compile markdown to HTML.
*
* > Note: which encodings are supported depends on the engine.
* > For info on Node.js, see:
* > <https://nodejs.org/api/util.html#whatwg-supported-encodings>.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | null | undefined} encoding
* Character encoding to understand `value` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @overload
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*
* @param {Value} value
* Markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | Options | null | undefined} [encoding]
* Character encoding to understand `value` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Options | null | undefined} [options]
* Configuration.
* @returns {string}
* Compiled HTML.
*/
export function micromark(value, encoding, options) {
if (typeof encoding !== 'string') {
options = encoding
encoding = undefined
}
return compile(options)(
postprocess(
parse(options)
.document()
.write(preprocess()(value, encoding, true))
)
)
}
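
The function body above is the entire pipeline; a sketch of the same flow using the re-exported internals, equivalent to `micromark(value)` with no extensions:

import {compile, parse, postprocess, preprocess} from 'micromark'

const value = '*hi*'
// preprocess → parse (document tokenizer) → postprocess → compile:
const events = postprocess(
  parse().document().write(preprocess()(value, undefined, true))
)
console.log(compile()(events)) // => '<p><em>hi</em></p>'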

node_modules/micromark/dev/lib/compile.d.ts generated vendored Normal file

@@ -0,0 +1,16 @@
/**
* @param {CompileOptions | null | undefined} [options]
* @returns {Compile}
*/
export function compile(options?: CompileOptions | null | undefined): Compile;
export type Media = {
image?: boolean | undefined;
labelId?: string | undefined;
label?: string | undefined;
referenceId?: string | undefined;
destination?: string | undefined;
title?: string | undefined;
};
import type { CompileOptions } from 'micromark-util-types';
import type { Compile } from 'micromark-util-types';
//# sourceMappingURL=compile.d.ts.map

node_modules/micromark/dev/lib/compile.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"compile.d.ts","sourceRoot":"","sources":["compile.js"],"names":[],"mappings":"AA6DA;;;GAGG;AACH,kCAHW,cAAc,GAAG,IAAI,GAAG,SAAS,GAC/B,OAAO,CAgkCnB;;YA/lCa,OAAO,GAAG,SAAS;cACnB,MAAM,GAAG,SAAS;YAClB,MAAM,GAAG,SAAS;kBAClB,MAAM,GAAG,SAAS;kBAClB,MAAM,GAAG,SAAS;YAClB,MAAM,GAAG,SAAS;;oCAVtB,sBAAsB;6BAAtB,sBAAsB"}

node_modules/micromark/dev/lib/compile.js generated vendored Normal file

File diff suppressed because it is too large.

node_modules/micromark/dev/lib/constructs.d.ts generated vendored Normal file

@@ -0,0 +1,73 @@
/** @satisfies {Extension['document']} */
export const document: {
42: import("micromark-util-types").Construct;
43: import("micromark-util-types").Construct;
45: import("micromark-util-types").Construct;
48: import("micromark-util-types").Construct;
49: import("micromark-util-types").Construct;
50: import("micromark-util-types").Construct;
51: import("micromark-util-types").Construct;
52: import("micromark-util-types").Construct;
53: import("micromark-util-types").Construct;
54: import("micromark-util-types").Construct;
55: import("micromark-util-types").Construct;
56: import("micromark-util-types").Construct;
57: import("micromark-util-types").Construct;
62: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['contentInitial']} */
export const contentInitial: {
91: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['flowInitial']} */
export const flowInitial: {
[-2]: import("micromark-util-types").Construct;
[-1]: import("micromark-util-types").Construct;
32: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['flow']} */
export const flow: {
35: import("micromark-util-types").Construct;
42: import("micromark-util-types").Construct;
45: import("micromark-util-types").Construct[];
60: import("micromark-util-types").Construct;
61: import("micromark-util-types").Construct;
95: import("micromark-util-types").Construct;
96: import("micromark-util-types").Construct;
126: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['string']} */
export const string: {
38: import("micromark-util-types").Construct;
92: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['text']} */
export const text: {
[-5]: import("micromark-util-types").Construct;
[-4]: import("micromark-util-types").Construct;
[-3]: import("micromark-util-types").Construct;
33: import("micromark-util-types").Construct;
38: import("micromark-util-types").Construct;
42: import("micromark-util-types").Construct;
60: import("micromark-util-types").Construct[];
91: import("micromark-util-types").Construct;
92: import("micromark-util-types").Construct[];
93: import("micromark-util-types").Construct;
95: import("micromark-util-types").Construct;
96: import("micromark-util-types").Construct;
};
export namespace insideSpan {
let _null: (import("micromark-util-types").Construct | {
resolveAll: import("micromark-util-types").Resolver;
})[];
export { _null as null };
}
export namespace attentionMarkers {
let _null_1: (42 | 95)[];
export { _null_1 as null };
}
export namespace disable {
let _null_2: never[];
export { _null_2 as null };
}
//# sourceMappingURL=constructs.d.ts.map

node_modules/micromark/dev/lib/constructs.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"constructs.d.ts","sourceRoot":"","sources":["constructs.js"],"names":[],"mappings":"AA6BA,yCAAyC;AACzC;;;;;;;;;;;;;;;EAeC;AAED,+CAA+C;AAC/C;;EAEC;AAED,4CAA4C;AAC5C;;;;EAIC;AAED,qCAAqC;AACrC;;;;;;;;;EASC;AAED,uCAAuC;AACvC;;;EAGC;AAED,qCAAqC;AACrC;;;;;;;;;;;;;EAaC"}

node_modules/micromark/dev/lib/constructs.js generated vendored Normal file

@@ -0,0 +1,101 @@
/**
* @import {Extension} from 'micromark-util-types'
*/
import {
attention,
autolink,
blockQuote,
characterEscape,
characterReference,
codeFenced,
codeIndented,
codeText,
definition,
hardBreakEscape,
headingAtx,
htmlFlow,
htmlText,
labelEnd,
labelStartImage,
labelStartLink,
lineEnding,
list,
setextUnderline,
thematicBreak
} from 'micromark-core-commonmark'
import {codes} from 'micromark-util-symbol'
import {resolver as resolveText} from './initialize/text.js'
/** @satisfies {Extension['document']} */
export const document = {
[codes.asterisk]: list,
[codes.plusSign]: list,
[codes.dash]: list,
[codes.digit0]: list,
[codes.digit1]: list,
[codes.digit2]: list,
[codes.digit3]: list,
[codes.digit4]: list,
[codes.digit5]: list,
[codes.digit6]: list,
[codes.digit7]: list,
[codes.digit8]: list,
[codes.digit9]: list,
[codes.greaterThan]: blockQuote
}
/** @satisfies {Extension['contentInitial']} */
export const contentInitial = {
[codes.leftSquareBracket]: definition
}
/** @satisfies {Extension['flowInitial']} */
export const flowInitial = {
[codes.horizontalTab]: codeIndented,
[codes.virtualSpace]: codeIndented,
[codes.space]: codeIndented
}
/** @satisfies {Extension['flow']} */
export const flow = {
[codes.numberSign]: headingAtx,
[codes.asterisk]: thematicBreak,
[codes.dash]: [setextUnderline, thematicBreak],
[codes.lessThan]: htmlFlow,
[codes.equalsTo]: setextUnderline,
[codes.underscore]: thematicBreak,
[codes.graveAccent]: codeFenced,
[codes.tilde]: codeFenced
}
/** @satisfies {Extension['string']} */
export const string = {
[codes.ampersand]: characterReference,
[codes.backslash]: characterEscape
}
/** @satisfies {Extension['text']} */
export const text = {
[codes.carriageReturn]: lineEnding,
[codes.lineFeed]: lineEnding,
[codes.carriageReturnLineFeed]: lineEnding,
[codes.exclamationMark]: labelStartImage,
[codes.ampersand]: characterReference,
[codes.asterisk]: attention,
[codes.lessThan]: [autolink, htmlText],
[codes.leftSquareBracket]: labelStartLink,
[codes.backslash]: [hardBreakEscape, characterEscape],
[codes.rightSquareBracket]: labelEnd,
[codes.underscore]: attention,
[codes.graveAccent]: codeText
}
/** @satisfies {Extension['insideSpan']} */
export const insideSpan = {null: [attention, resolveText]}
/** @satisfies {Extension['attentionMarkers']} */
export const attentionMarkers = {null: [codes.asterisk, codes.underscore]}
/** @satisfies {Extension['disable']} */
export const disable = {null: []}
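
The numeric keys in these maps are character codes, so each table reads as "when this character may start something, try that construct":

// Keys are character codes, e.g.:
console.log('>'.charCodeAt(0)) // => 62: blockQuote in `document`
console.log('#'.charCodeAt(0)) // => 35: headingAtx in `flow`
console.log('`'.charCodeAt(0)) // => 96: codeFenced in `flow`, codeText in `text`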

node_modules/micromark/dev/lib/create-tokenizer.d.ts generated vendored Normal file

@@ -0,0 +1,46 @@
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* Parser.
* @param {InitialConstruct} initialize
* Construct.
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* Point (optional).
* @returns {TokenizeContext}
* Context.
*/
export function createTokenizer(parser: ParseContext, initialize: InitialConstruct, from?: Omit<Point, "_bufferIndex" | "_index"> | undefined): TokenizeContext;
/**
* Restore the state.
*/
export type Restore = () => undefined;
/**
* Info.
*/
export type Info = {
/**
* Restore.
*/
restore: Restore;
/**
* From.
*/
from: number;
};
/**
* Handle a successful run.
*/
export type ReturnHandle = (construct: Construct, info: Info) => undefined;
import type { ParseContext } from 'micromark-util-types';
import type { InitialConstruct } from 'micromark-util-types';
import type { Point } from 'micromark-util-types';
import type { TokenizeContext } from 'micromark-util-types';
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=create-tokenizer.d.ts.map
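
A sketch of driving a tokenizer built this way (here via `parse`, which calls `createTokenizer` per content type); note that `write` only returns events once the EOF chunk arrives:

import {parse, preprocess} from 'micromark'

const tokenizer = parse().document() // a `TokenizeContext`
const prep = preprocess()
console.log(tokenizer.write(prep('# h')).length) // => 0: not done yet
const events = tokenizer.write(prep('i', undefined, true)) // EOF ends it
console.log(events.length > 0, tokenizer.now()) // events plus the final point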

node_modules/micromark/dev/lib/create-tokenizer.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"create-tokenizer.d.ts","sourceRoot":"","sources":["create-tokenizer.js"],"names":[],"mappings":"AAgDA;;;;;;;;;;;;;;;;;GAiBG;AACH,wCATW,YAAY,cAEZ,gBAAgB,SAEhB,IAAI,CAAC,KAAK,EAAE,cAAc,GAAG,QAAQ,CAAC,GAAG,SAAS,GAEhD,eAAe,CAwhB3B;;;;4BApkBY,SAAS;;;;;;;;aAKR,OAAO;;;;UAEP,MAAM;;;;;uCAKT,SAAS,QAET,IAAI,KAEF,SAAS;kCAtBZ,sBAAsB;sCAAtB,sBAAsB;2BAAtB,sBAAsB;qCAAtB,sBAAsB;+BAAtB,sBAAsB"}

node_modules/micromark/dev/lib/create-tokenizer.js generated vendored Normal file

@@ -0,0 +1,717 @@
/**
* @import {
* Chunk,
* Code,
* ConstructRecord,
* Construct,
* Effects,
* InitialConstruct,
* ParseContext,
* Point,
* State,
* TokenizeContext,
* Token
* } from 'micromark-util-types'
*/
/**
* @callback Restore
* Restore the state.
* @returns {undefined}
* Nothing.
*
* @typedef Info
* Info.
* @property {Restore} restore
* Restore.
* @property {number} from
* From.
*
* @callback ReturnHandle
* Handle a successful run.
* @param {Construct} construct
* Construct.
* @param {Info} info
* Info.
* @returns {undefined}
* Nothing.
*/
import createDebug from 'debug'
import {ok as assert} from 'devlop'
import {markdownLineEnding} from 'micromark-util-character'
import {push, splice} from 'micromark-util-chunked'
import {resolveAll} from 'micromark-util-resolve-all'
import {codes, values} from 'micromark-util-symbol'
const debug = createDebug('micromark')
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* Parser.
* @param {InitialConstruct} initialize
* Construct.
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* Point (optional).
* @returns {TokenizeContext}
* Context.
*/
export function createTokenizer(parser, initialize, from) {
/** @type {Point} */
let point = {
_bufferIndex: -1,
_index: 0,
line: (from && from.line) || 1,
column: (from && from.column) || 1,
offset: (from && from.offset) || 0
}
/** @type {Record<string, number>} */
const columnStart = {}
/** @type {Array<Construct>} */
const resolveAllConstructs = []
/** @type {Array<Chunk>} */
let chunks = []
/** @type {Array<Token>} */
let stack = []
/** @type {boolean | undefined} */
let consumed = true
/**
* Tools used for tokenizing.
*
* @type {Effects}
*/
const effects = {
attempt: constructFactory(onsuccessfulconstruct),
check: constructFactory(onsuccessfulcheck),
consume,
enter,
exit,
interrupt: constructFactory(onsuccessfulcheck, {interrupt: true})
}
/**
* State and tools for resolving and serializing.
*
* @type {TokenizeContext}
*/
const context = {
code: codes.eof,
containerState: {},
defineSkip,
events: [],
now,
parser,
previous: codes.eof,
sliceSerialize,
sliceStream,
write
}
/**
* The state function.
*
* @type {State | undefined}
*/
let state = initialize.tokenize.call(context, effects)
/**
* Track which character we expect to be consumed, to catch bugs.
*
* @type {Code}
*/
let expectedCode
if (initialize.resolveAll) {
resolveAllConstructs.push(initialize)
}
return context
/** @type {TokenizeContext['write']} */
function write(slice) {
chunks = push(chunks, slice)
main()
// Exit if we're not done, resolve might change stuff.
if (chunks[chunks.length - 1] !== codes.eof) {
return []
}
addResult(initialize, 0)
// Otherwise, resolve, and exit.
context.events = resolveAll(resolveAllConstructs, context.events, context)
return context.events
}
//
// Tools.
//
/** @type {TokenizeContext['sliceSerialize']} */
function sliceSerialize(token, expandTabs) {
return serializeChunks(sliceStream(token), expandTabs)
}
/** @type {TokenizeContext['sliceStream']} */
function sliceStream(token) {
return sliceChunks(chunks, token)
}
/** @type {TokenizeContext['now']} */
function now() {
// This is a hot path, so we clone manually instead of `Object.assign({}, point)`
const {_bufferIndex, _index, line, column, offset} = point
return {_bufferIndex, _index, line, column, offset}
}
/** @type {TokenizeContext['defineSkip']} */
function defineSkip(value) {
columnStart[value.line] = value.column
accountForPotentialSkip()
debug('position: define skip: `%j`', point)
}
//
// State management.
//
/**
* Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
* `consume`).
* Here is where we walk through the chunks, which either include strings of
* several characters, or numerical character codes.
* The reason to do this in a loop instead of a call is so the stack can
* drain.
*
* @returns {undefined}
* Nothing.
*/
function main() {
/** @type {number} */
let chunkIndex
while (point._index < chunks.length) {
const chunk = chunks[point._index]
// If we're in a buffer chunk, loop through it.
if (typeof chunk === 'string') {
chunkIndex = point._index
if (point._bufferIndex < 0) {
point._bufferIndex = 0
}
while (
point._index === chunkIndex &&
point._bufferIndex < chunk.length
) {
go(chunk.charCodeAt(point._bufferIndex))
}
} else {
go(chunk)
}
}
}
/**
* Deal with one code.
*
* @param {Code} code
* Code.
* @returns {undefined}
* Nothing.
*/
function go(code) {
assert(consumed === true, 'expected character to be consumed')
consumed = undefined
debug('main: passing `%s` to %s', code, state && state.name)
expectedCode = code
assert(typeof state === 'function', 'expected state')
state = state(code)
}
/** @type {Effects['consume']} */
function consume(code) {
assert(code === expectedCode, 'expected given code to equal expected code')
debug('consume: `%s`', code)
assert(
consumed === undefined,
'expected code to not have been consumed: this might be because `return x(code)` instead of `return x` was used'
)
assert(
code === null
? context.events.length === 0 ||
context.events[context.events.length - 1][0] === 'exit'
: context.events[context.events.length - 1][0] === 'enter',
'expected last token to be open'
)
if (markdownLineEnding(code)) {
point.line++
point.column = 1
point.offset += code === codes.carriageReturnLineFeed ? 2 : 1
accountForPotentialSkip()
debug('position: after eol: `%j`', point)
} else if (code !== codes.virtualSpace) {
point.column++
point.offset++
}
// Not in a string chunk.
if (point._bufferIndex < 0) {
point._index++
} else {
point._bufferIndex++
// At end of string chunk.
if (
point._bufferIndex ===
// Points w/ non-negative `_bufferIndex` reference
// strings.
/** @type {string} */ (chunks[point._index]).length
) {
point._bufferIndex = -1
point._index++
}
}
// Expose the previous character.
context.previous = code
// Mark as consumed.
consumed = true
}
/** @type {Effects['enter']} */
function enter(type, fields) {
/** @type {Token} */
// @ts-expect-error Patch instead of assign required fields to help GC.
const token = fields || {}
token.type = type
token.start = now()
assert(typeof type === 'string', 'expected string type')
assert(type.length > 0, 'expected non-empty string')
debug('enter: `%s`', type)
context.events.push(['enter', token, context])
stack.push(token)
return token
}
/** @type {Effects['exit']} */
function exit(type) {
assert(typeof type === 'string', 'expected string type')
assert(type.length > 0, 'expected non-empty string')
const token = stack.pop()
assert(token, 'cannot close w/o open tokens')
token.end = now()
assert(type === token.type, 'expected exit token to match current token')
assert(
!(
token.start._index === token.end._index &&
token.start._bufferIndex === token.end._bufferIndex
),
'expected non-empty token (`' + type + '`)'
)
debug('exit: `%s`', token.type)
context.events.push(['exit', token, context])
return token
}
/**
* Use results.
*
* @type {ReturnHandle}
*/
function onsuccessfulconstruct(construct, info) {
addResult(construct, info.from)
}
/**
* Discard results.
*
* @type {ReturnHandle}
*/
function onsuccessfulcheck(_, info) {
info.restore()
}
/**
* Factory to attempt/check/interrupt.
*
* @param {ReturnHandle} onreturn
* Callback.
* @param {{interrupt?: boolean | undefined} | undefined} [fields]
* Fields.
*/
function constructFactory(onreturn, fields) {
return hook
/**
* Handle either an object mapping codes to constructs, a list of
* constructs, or a single construct.
*
* @param {Array<Construct> | ConstructRecord | Construct} constructs
* Constructs.
* @param {State} returnState
* State.
* @param {State | undefined} [bogusState]
* State.
* @returns {State}
* State.
*/
function hook(constructs, returnState, bogusState) {
/** @type {ReadonlyArray<Construct>} */
let listOfConstructs
/** @type {number} */
let constructIndex
/** @type {Construct} */
let currentConstruct
/** @type {Info} */
let info
return Array.isArray(constructs)
? /* c8 ignore next 1 */
handleListOfConstructs(constructs)
: 'tokenize' in constructs
? // Looks like a construct.
handleListOfConstructs([/** @type {Construct} */ (constructs)])
: handleMapOfConstructs(constructs)
/**
* Handle a list of construct.
*
* @param {ConstructRecord} map
* Constructs.
* @returns {State}
* State.
*/
function handleMapOfConstructs(map) {
return start
/** @type {State} */
function start(code) {
const left = code !== null && map[code]
const all = code !== null && map.null
const list = [
// To do: add more extension tests.
/* c8 ignore next 2 */
...(Array.isArray(left) ? left : left ? [left] : []),
...(Array.isArray(all) ? all : all ? [all] : [])
]
return handleListOfConstructs(list)(code)
}
}
/**
* Handle a list of construct.
*
* @param {ReadonlyArray<Construct>} list
* Constructs.
* @returns {State}
* State.
*/
function handleListOfConstructs(list) {
listOfConstructs = list
constructIndex = 0
if (list.length === 0) {
assert(bogusState, 'expected `bogusState` to be given')
return bogusState
}
return handleConstruct(list[constructIndex])
}
/**
* Handle a single construct.
*
* @param {Construct} construct
* Construct.
* @returns {State}
* State.
*/
function handleConstruct(construct) {
return start
/** @type {State} */
function start(code) {
// To do: not needed to store if there is no bogus state, probably?
// Currently doesn't work because `inspect` in document does a check
// w/o a bogus, which doesn't make sense. But it does seem to help perf
// by not storing.
info = store()
currentConstruct = construct
if (!construct.partial) {
context.currentConstruct = construct
}
// Always populated by defaults.
assert(
context.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
if (
construct.name &&
context.parser.constructs.disable.null.includes(construct.name)
) {
return nok(code)
}
return construct.tokenize.call(
// If we do have fields, create an object w/ `context` as its
// prototype.
// This allows a “live binding”, which is needed for `interrupt`.
fields ? Object.assign(Object.create(context), fields) : context,
effects,
ok,
nok
)(code)
}
}
/** @type {State} */
function ok(code) {
assert(code === expectedCode, 'expected code')
consumed = true
onreturn(currentConstruct, info)
return returnState
}
/** @type {State} */
function nok(code) {
assert(code === expectedCode, 'expected code')
consumed = true
info.restore()
if (++constructIndex < listOfConstructs.length) {
return handleConstruct(listOfConstructs[constructIndex])
}
return bogusState
}
}
}
/**
* @param {Construct} construct
* Construct.
* @param {number} from
* From.
* @returns {undefined}
* Nothing.
*/
function addResult(construct, from) {
if (construct.resolveAll && !resolveAllConstructs.includes(construct)) {
resolveAllConstructs.push(construct)
}
if (construct.resolve) {
splice(
context.events,
from,
context.events.length - from,
construct.resolve(context.events.slice(from), context)
)
}
if (construct.resolveTo) {
context.events = construct.resolveTo(context.events, context)
}
assert(
construct.partial ||
context.events.length === 0 ||
context.events[context.events.length - 1][0] === 'exit',
'expected last token to end'
)
}
/**
* Store state.
*
* @returns {Info}
* Info.
*/
function store() {
const startPoint = now()
const startPrevious = context.previous
const startCurrentConstruct = context.currentConstruct
const startEventsIndex = context.events.length
const startStack = Array.from(stack)
return {from: startEventsIndex, restore}
/**
* Restore state.
*
* @returns {undefined}
* Nothing.
*/
function restore() {
point = startPoint
context.previous = startPrevious
context.currentConstruct = startCurrentConstruct
context.events.length = startEventsIndex
stack = startStack
accountForPotentialSkip()
debug('position: restore: `%j`', point)
}
}
/**
* Move the current point a bit forward in the line when it's on a column
* skip.
*
* @returns {undefined}
* Nothing.
*/
function accountForPotentialSkip() {
if (point.line in columnStart && point.column < 2) {
point.column = columnStart[point.line]
point.offset += columnStart[point.line] - 1
}
}
}
/**
* Get the chunks from a slice of chunks in the range of a token.
*
* @param {ReadonlyArray<Chunk>} chunks
* Chunks.
* @param {Pick<Token, 'end' | 'start'>} token
* Token.
* @returns {Array<Chunk>}
* Chunks.
*/
function sliceChunks(chunks, token) {
const startIndex = token.start._index
const startBufferIndex = token.start._bufferIndex
const endIndex = token.end._index
const endBufferIndex = token.end._bufferIndex
/** @type {Array<Chunk>} */
let view
if (startIndex === endIndex) {
assert(endBufferIndex > -1, 'expected non-negative end buffer index')
assert(startBufferIndex > -1, 'expected non-negative start buffer index')
// @ts-expect-error `_bufferIndex` is used on string chunks.
view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
} else {
view = chunks.slice(startIndex, endIndex)
if (startBufferIndex > -1) {
const head = view[0]
if (typeof head === 'string') {
view[0] = head.slice(startBufferIndex)
/* c8 ignore next 4 -- used to be used, no longer */
} else {
assert(startBufferIndex === 0, 'expected `startBufferIndex` to be `0`')
view.shift()
}
}
if (endBufferIndex > 0) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view.push(chunks[endIndex].slice(0, endBufferIndex))
}
}
return view
}
/**
* Get the string value of a slice of chunks.
*
* @param {ReadonlyArray<Chunk>} chunks
* Chunks.
* @param {boolean | undefined} [expandTabs=false]
* Whether to expand tabs (default: `false`).
* @returns {string}
* Result.
*/
function serializeChunks(chunks, expandTabs) {
let index = -1
/** @type {Array<string>} */
const result = []
/** @type {boolean | undefined} */
let atTab
while (++index < chunks.length) {
const chunk = chunks[index]
/** @type {string} */
let value
if (typeof chunk === 'string') {
value = chunk
} else
switch (chunk) {
case codes.carriageReturn: {
value = values.cr
break
}
case codes.lineFeed: {
value = values.lf
break
}
case codes.carriageReturnLineFeed: {
value = values.cr + values.lf
break
}
case codes.horizontalTab: {
value = expandTabs ? values.space : values.ht
break
}
case codes.virtualSpace: {
if (!expandTabs && atTab) continue
value = values.space
break
}
default: {
assert(typeof chunk === 'number', 'expected number')
// Currently only replacement character.
value = String.fromCharCode(chunk)
}
}
atTab = chunk === codes.horizontalTab
result.push(value)
}
return result.join('')
}
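
A sketch of what the returned context exposes per event; `sliceSerialize` recovers the source text of any token (for this input, the first document-level event is a `chunkFlow` enter):

import {parse, preprocess} from 'micromark'

const tokenizer = parse().document()
const events = tokenizer.write(preprocess()('*hi*', undefined, true))
const [kind, token, context] = events[0]
console.log(kind, token.type) // => 'enter' 'chunkFlow'
console.log(context.sliceSerialize(token)) // => '*hi*'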

node_modules/micromark/dev/lib/initialize/content.d.ts generated vendored Normal file

@@ -0,0 +1,4 @@
/** @type {InitialConstruct} */
export const content: InitialConstruct;
import type { InitialConstruct } from 'micromark-util-types';
//# sourceMappingURL=content.d.ts.map

node_modules/micromark/dev/lib/initialize/content.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"content.d.ts","sourceRoot":"","sources":["content.js"],"names":[],"mappings":"AAeA,+BAA+B;AAC/B,sBADW,gBAAgB,CACyB;sCAT1C,sBAAsB"}

node_modules/micromark/dev/lib/initialize/content.js generated vendored Normal file

@@ -0,0 +1,99 @@
/**
* @import {
* InitialConstruct,
* Initializer,
* State,
* TokenizeContext,
* Token
* } from 'micromark-util-types'
*/
import {ok as assert} from 'devlop'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {codes, constants, types} from 'micromark-util-symbol'
/** @type {InitialConstruct} */
export const content = {tokenize: initializeContent}
/**
* @this {TokenizeContext}
* Context.
* @type {Initializer}
* Content.
*/
function initializeContent(effects) {
const contentStart = effects.attempt(
this.parser.constructs.contentInitial,
afterContentStartConstruct,
paragraphInitial
)
/** @type {Token} */
let previous
return contentStart
/** @type {State} */
function afterContentStartConstruct(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return factorySpace(effects, contentStart, types.linePrefix)
}
/** @type {State} */
function paragraphInitial(code) {
assert(
code !== codes.eof && !markdownLineEnding(code),
'expected anything other than a line ending or EOF'
)
effects.enter(types.paragraph)
return lineStart(code)
}
/** @type {State} */
function lineStart(code) {
const token = effects.enter(types.chunkText, {
contentType: constants.contentTypeText,
previous
})
if (previous) {
previous.next = token
}
previous = token
return data(code)
}
/** @type {State} */
function data(code) {
if (code === codes.eof) {
effects.exit(types.chunkText)
effects.exit(types.paragraph)
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
effects.exit(types.chunkText)
return lineStart
}
// Data.
effects.consume(code)
return data
}
}

node_modules/micromark/dev/lib/initialize/document.d.ts generated vendored Normal file

@@ -0,0 +1,10 @@
/** @type {InitialConstruct} */
export const document: InitialConstruct;
/**
* Construct and its state.
*/
export type StackItem = [Construct, ContainerState];
import type { InitialConstruct } from 'micromark-util-types';
import type { Construct } from 'micromark-util-types';
import type { ContainerState } from 'micromark-util-types';
//# sourceMappingURL=document.d.ts.map

node_modules/micromark/dev/lib/initialize/document.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"document.d.ts","sourceRoot":"","sources":["document.js"],"names":[],"mappings":"AAyBA,+BAA+B;AAC/B,uBADW,gBAAgB,CAC2B;;;;wBAXzC,CAAC,SAAS,EAAE,cAAc,CAAC;sCAJ9B,sBAAsB;+BAAtB,sBAAsB;oCAAtB,sBAAsB"}

node_modules/micromark/dev/lib/initialize/document.js generated vendored Normal file

@@ -0,0 +1,445 @@
/**
* @import {
* Construct,
* ContainerState,
* InitialConstruct,
* Initializer,
* Point,
* State,
* TokenizeContext,
* Tokenizer,
* Token
* } from 'micromark-util-types'
*/
/**
* @typedef {[Construct, ContainerState]} StackItem
* Construct and its state.
*/
import {ok as assert} from 'devlop'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {splice} from 'micromark-util-chunked'
import {codes, constants, types} from 'micromark-util-symbol'
/** @type {InitialConstruct} */
export const document = {tokenize: initializeDocument}
/** @type {Construct} */
const containerConstruct = {tokenize: tokenizeContainer}
/**
* @this {TokenizeContext}
* Self.
* @type {Initializer}
* Initializer.
*/
function initializeDocument(effects) {
const self = this
/** @type {Array<StackItem>} */
const stack = []
let continued = 0
/** @type {TokenizeContext | undefined} */
let childFlow
/** @type {Token | undefined} */
let childToken
/** @type {number} */
let lineStartOffset
return start
/** @type {State} */
function start(code) {
// First we iterate through the open blocks, starting with the root
// document, and descending through last children down to the last open
// block.
// Each block imposes a condition that the line must satisfy if the block is
// to remain open.
// For example, a block quote requires a `>` character.
// A paragraph requires a non-blank line.
// In this phase we may match all or just some of the open blocks.
// But we cannot close unmatched blocks yet, because we may have a lazy
// continuation line.
if (continued < stack.length) {
const item = stack[continued]
self.containerState = item[1]
assert(
item[0].continuation,
'expected `continuation` to be defined on container construct'
)
return effects.attempt(
item[0].continuation,
documentContinue,
checkNewContainers
)(code)
}
// Done.
return checkNewContainers(code)
}
/** @type {State} */
function documentContinue(code) {
assert(
self.containerState,
'expected `containerState` to be defined after continuation'
)
continued++
// Note: this field is called `_closeFlow` but it also closes containers.
// Perhaps a good idea to rename it but it's already used in the wild by
// extensions.
if (self.containerState._closeFlow) {
self.containerState._closeFlow = undefined
if (childFlow) {
closeFlow()
}
// Note: this algorithm for moving events around is similar to the
// algorithm when dealing with lazy lines in `writeToChild`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {Point | undefined} */
let point
// Find the flow chunk.
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === types.chunkFlow
) {
point = self.events[indexBeforeFlow][1].end
break
}
}
assert(point, 'could not find previous flow chunk')
exitContainers(continued)
// Fix positions.
let index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = {...point}
index++
}
// Inject the exits earlier (they're still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
return checkNewContainers(code)
}
return start(code)
}
/** @type {State} */
function checkNewContainers(code) {
// Next, after consuming the continuation markers for existing blocks, we
// look for new block starts (e.g. `>` for a block quote).
// If we encounter a new block start, we close any blocks unmatched in
// step 1 before creating the new block as a child of the last matched
// block.
if (continued === stack.length) {
// No need to `check` whether there's a container, as `exitContainers`
// would be moot.
// We can instead immediately `attempt` to parse one.
if (!childFlow) {
return documentContinued(code)
}
// If we have concrete content, such as block HTML or fenced code,
// we can't have containers “pierce” into them, so we can immediately
// start.
if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
return flowStart(code)
}
// If we do have flow, it could still be a blank line,
// but we'd be interrupting it w/ a new container if there's a current
// construct.
// To do: next major: remove `_gfmTableDynamicInterruptHack` (no longer
// needed in micromark-extension-gfm-table@1.0.6).
self.interrupt = Boolean(
childFlow.currentConstruct && !childFlow._gfmTableDynamicInterruptHack
)
}
// Check if there is a new container.
self.containerState = {}
return effects.check(
containerConstruct,
thereIsANewContainer,
thereIsNoNewContainer
)(code)
}
/** @type {State} */
function thereIsANewContainer(code) {
if (childFlow) closeFlow()
exitContainers(continued)
return documentContinued(code)
}
/** @type {State} */
function thereIsNoNewContainer(code) {
self.parser.lazy[self.now().line] = continued !== stack.length
lineStartOffset = self.now().offset
return flowStart(code)
}
/** @type {State} */
function documentContinued(code) {
// Try new containers.
self.containerState = {}
return effects.attempt(
containerConstruct,
containerContinue,
flowStart
)(code)
}
/** @type {State} */
function containerContinue(code) {
assert(
self.currentConstruct,
'expected `currentConstruct` to be defined on tokenizer'
)
assert(
self.containerState,
'expected `containerState` to be defined on tokenizer'
)
continued++
stack.push([self.currentConstruct, self.containerState])
// Try another.
return documentContinued(code)
}
/** @type {State} */
function flowStart(code) {
if (code === codes.eof) {
if (childFlow) closeFlow()
exitContainers(0)
effects.consume(code)
return
}
childFlow = childFlow || self.parser.flow(self.now())
effects.enter(types.chunkFlow, {
_tokenizer: childFlow,
contentType: constants.contentTypeFlow,
previous: childToken
})
return flowContinue(code)
}
/** @type {State} */
function flowContinue(code) {
if (code === codes.eof) {
writeToChild(effects.exit(types.chunkFlow), true)
exitContainers(0)
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
writeToChild(effects.exit(types.chunkFlow))
// Get ready for the next line.
continued = 0
self.interrupt = undefined
return start
}
effects.consume(code)
return flowContinue
}
/**
* @param {Token} token
* Token.
* @param {boolean | undefined} [endOfFile]
* Whether the token is at the end of the file (default: `false`).
* @returns {undefined}
* Nothing.
*/
function writeToChild(token, endOfFile) {
assert(childFlow, 'expected `childFlow` to be defined when continuing')
const stream = self.sliceStream(token)
if (endOfFile) stream.push(null)
token.previous = childToken
if (childToken) childToken.next = token
childToken = token
childFlow.defineSkip(token.start)
childFlow.write(stream)
// Alright, so we just added a lazy line:
//
// ```markdown
// > a
// b.
//
// Or:
//
// > ~~~c
// d
//
// Or:
//
// > | e |
// f
// ```
//
// The construct in the second example (fenced code) does not accept lazy
// lines, so it marked itself as done at the end of its first line, and
// then the content construct parses `d`.
// Most constructs in markdown match on the first line: if the first line
// forms a construct, a non-lazy line can't “unmake” it.
//
// The construct in the third example is potentially a GFM table, and
// those are *weird*.
// It *could* be a table, from the first line, if the following line
// matches a condition.
// In this case, that second line is lazy, which “unmakes” the first line
// and turns the whole into one content block.
//
// We've now parsed the non-lazy and the lazy line, and can figure out
// whether the lazy line started a new flow block.
// If it did, we exit the current containers between the two flow blocks.
if (self.parser.lazy[token.start.line]) {
let index = childFlow.events.length
while (index--) {
if (
// The token starts before the line ending…
childFlow.events[index][1].start.offset < lineStartOffset &&
// …and either is not ended yet…
(!childFlow.events[index][1].end ||
// …or ends after it.
childFlow.events[index][1].end.offset > lineStartOffset)
) {
// Exit: there's still something open, which means it's a lazy line
// part of something.
return
}
}
// Note: this algorithm for moving events around is similar to the
// algorithm when closing flow in `documentContinue`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {boolean | undefined} */
let seen
/** @type {Point | undefined} */
let point
// Find the previous chunk (the one before the lazy line).
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === types.chunkFlow
) {
if (seen) {
point = self.events[indexBeforeFlow][1].end
break
}
seen = true
}
}
assert(point, 'could not find previous flow chunk')
exitContainers(continued)
// Fix positions.
index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = {...point}
index++
}
// Inject the exits earlier (they're still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
}
}
/**
* @param {number} size
* Size.
* @returns {undefined}
* Nothing.
*/
function exitContainers(size) {
let index = stack.length
// Exit open containers.
while (index-- > size) {
const entry = stack[index]
self.containerState = entry[1]
assert(
entry[0].exit,
'expected `exit` to be defined on container construct'
)
entry[0].exit.call(self, effects)
}
stack.length = size
}
function closeFlow() {
assert(
self.containerState,
'expected `containerState` to be defined when closing flow'
)
assert(childFlow, 'expected `childFlow` to be defined when closing it')
childFlow.write([codes.eof])
childToken = undefined
childFlow = undefined
self.containerState._closeFlow = undefined
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
* Tokenizer.
*/
function tokenizeContainer(effects, ok, nok) {
// Always populated by defaults.
assert(
this.parser.constructs.disable.null,
'expected `disable.null` to be populated'
)
return factorySpace(
effects,
effects.attempt(this.parser.constructs.document, ok, nok),
types.linePrefix,
this.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)
}
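
The lazy-line bookkeeping above is what decides, for example, whether a continuation line without a `>` marker still belongs to a block quote:

import {micromark} from 'micromark'

// Paragraphs accept lazy lines, so `b` stays inside the quote:
console.log(micromark('> a\nb'))
// => '<blockquote>\n<p>a\nb</p>\n</blockquote>'

// Fenced code is concrete and refuses lazy lines, so here the code block
// stays empty and `d` becomes a paragraph after the quote:
console.log(micromark('> ~~~c\nd'))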

node_modules/micromark/dev/lib/initialize/flow.d.ts generated vendored Normal file

@@ -0,0 +1,4 @@
/** @type {InitialConstruct} */
export const flow: InitialConstruct;
import type { InitialConstruct } from 'micromark-util-types';
//# sourceMappingURL=flow.d.ts.map

node_modules/micromark/dev/lib/initialize/flow.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"flow.d.ts","sourceRoot":"","sources":["flow.js"],"names":[],"mappings":"AAeA,+BAA+B;AAC/B,mBADW,gBAAgB,CACmB;sCAVpC,sBAAsB"}

node_modules/micromark/dev/lib/initialize/flow.js generated vendored Normal file

@@ -0,0 +1,86 @@
/**
* @import {
* InitialConstruct,
* Initializer,
* State,
* TokenizeContext
* } from 'micromark-util-types'
*/
import {ok as assert} from 'devlop'
import {blankLine, content} from 'micromark-core-commonmark'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {codes, types} from 'micromark-util-symbol'
/** @type {InitialConstruct} */
export const flow = {tokenize: initializeFlow}
/**
* @this {TokenizeContext}
* Self.
* @type {Initializer}
* Initializer.
*/
function initializeFlow(effects) {
const self = this
const initial = effects.attempt(
// Try to parse a blank line.
blankLine,
atBlankEnding,
// Try to parse initial flow (essentially, only code).
effects.attempt(
this.parser.constructs.flowInitial,
afterConstruct,
factorySpace(
effects,
effects.attempt(
this.parser.constructs.flow,
afterConstruct,
effects.attempt(content, afterConstruct)
),
types.linePrefix
)
)
)
return initial
/** @type {State} */
function atBlankEnding(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEndingBlank)
effects.consume(code)
effects.exit(types.lineEndingBlank)
self.currentConstruct = undefined
return initial
}
/** @type {State} */
function afterConstruct(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
self.currentConstruct = undefined
return initial
}
}
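
The attempt order above (blank line, then `flowInitial`, then `flow`, then `content`) is why four spaces of indentation win over any other flow construct:

import {micromark} from 'micromark'

console.log(micromark('    # not a heading'))
// => '<pre><code># not a heading\n</code></pre>'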

node_modules/micromark/dev/lib/initialize/text.d.ts generated vendored Normal file

@@ -0,0 +1,8 @@
export namespace resolver {
let resolveAll: Resolver;
}
export const string: InitialConstruct;
export const text: InitialConstruct;
import type { Resolver } from 'micromark-util-types';
import type { InitialConstruct } from 'micromark-util-types';
//# sourceMappingURL=text.d.ts.map

node_modules/micromark/dev/lib/initialize/text.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"text.d.ts","sourceRoot":"","sources":["text.js"],"names":[],"mappings":";;;AAeA,sCAAiD;AACjD,oCAA6C;8BARnC,sBAAsB;sCAAtB,sBAAsB"}

node_modules/micromark/dev/lib/initialize/text.js generated vendored Normal file

@@ -0,0 +1,244 @@
/**
* @import {
* Code,
* InitialConstruct,
* Initializer,
* Resolver,
* State,
* TokenizeContext
* } from 'micromark-util-types'
*/
import {ok as assert} from 'devlop'
import {codes, constants, types} from 'micromark-util-symbol'
export const resolver = {resolveAll: createResolver()}
export const string = initializeFactory('string')
export const text = initializeFactory('text')
/**
* @param {'string' | 'text'} field
* Field.
* @returns {InitialConstruct}
* Construct.
*/
function initializeFactory(field) {
return {
resolveAll: createResolver(
field === 'text' ? resolveAllLineSuffixes : undefined
),
tokenize: initializeText
}
/**
* @this {TokenizeContext}
* Context.
* @type {Initializer}
*/
function initializeText(effects) {
const self = this
const constructs = this.parser.constructs[field]
const text = effects.attempt(constructs, start, notText)
return start
/** @type {State} */
function start(code) {
return atBreak(code) ? text(code) : notText(code)
}
/** @type {State} */
function notText(code) {
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.data)
effects.consume(code)
return data
}
/** @type {State} */
function data(code) {
if (atBreak(code)) {
effects.exit(types.data)
return text(code)
}
// Data.
effects.consume(code)
return data
}
/**
* @param {Code} code
* Code.
* @returns {boolean}
* Whether the code is a break.
*/
function atBreak(code) {
if (code === codes.eof) {
return true
}
const list = constructs[code]
let index = -1
if (list) {
// Always populated by defaults.
assert(Array.isArray(list), 'expected `disable.null` to be populated')
while (++index < list.length) {
const item = list[index]
if (!item.previous || item.previous.call(self, self.previous)) {
return true
}
}
}
return false
}
}
}
/**
* @param {Resolver | undefined} [extraResolver]
* Resolver.
* @returns {Resolver}
* Resolver.
*/
function createResolver(extraResolver) {
return resolveAllText
/** @type {Resolver} */
function resolveAllText(events, context) {
let index = -1
/** @type {number | undefined} */
let enter
// A rather boring computation (to merge adjacent `data` events) which
// improves mm performance by 29%.
while (++index <= events.length) {
if (enter === undefined) {
if (events[index] && events[index][1].type === types.data) {
enter = index
index++
}
} else if (!events[index] || events[index][1].type !== types.data) {
// Don't do anything if there is one data token.
if (index !== enter + 2) {
events[enter][1].end = events[index - 1][1].end
events.splice(enter + 2, index - enter - 2)
index = enter + 2
}
enter = undefined
}
}
return extraResolver ? extraResolver(events, context) : events
}
}
/**
* A rather ugly set of instructions which again looks at chunks in the input
* stream.
* The reason to do this here is that it is *much* faster to parse in reverse.
* And that we can't hook into `null` to split the line suffix before an EOF.
* To do: figure out if we can make this into a clean utility, or even in core.
* As it will be useful for GFM's literal autolink extension (and maybe even
* tables?)
*
* @type {Resolver}
*/
function resolveAllLineSuffixes(events, context) {
let eventIndex = 0 // Skip first.
while (++eventIndex <= events.length) {
if (
(eventIndex === events.length ||
events[eventIndex][1].type === types.lineEnding) &&
events[eventIndex - 1][1].type === types.data
) {
const data = events[eventIndex - 1][1]
const chunks = context.sliceStream(data)
let index = chunks.length
let bufferIndex = -1
let size = 0
/** @type {boolean | undefined} */
let tabs
while (index--) {
const chunk = chunks[index]
if (typeof chunk === 'string') {
bufferIndex = chunk.length
while (chunk.charCodeAt(bufferIndex - 1) === codes.space) {
size++
bufferIndex--
}
if (bufferIndex) break
bufferIndex = -1
}
// Number
else if (chunk === codes.horizontalTab) {
tabs = true
size++
} else if (chunk === codes.virtualSpace) {
// Empty
} else {
// Replacement character, exit.
index++
break
}
}
// Allow final trailing whitespace.
if (context._contentTypeTextTrailing && eventIndex === events.length) {
size = 0
}
if (size) {
const token = {
type:
eventIndex === events.length ||
tabs ||
size < constants.hardBreakPrefixSizeMin
? types.lineSuffix
: types.hardBreakTrailing,
start: {
_bufferIndex: index
? bufferIndex
: data.start._bufferIndex + bufferIndex,
_index: data.start._index + index,
line: data.end.line,
column: data.end.column - size,
offset: data.end.offset - size
},
end: {...data.end}
}
data.end = {...token.start}
if (data.start.offset === data.end.offset) {
Object.assign(data, token)
} else {
events.splice(
eventIndex,
0,
['enter', token, context],
['exit', token, context]
)
eventIndex += 2
}
}
eventIndex++
}
}
return events
}
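
The `hardBreakTrailing` token this resolver creates is what turns two or more trailing spaces before a line ending into a hard break:

import {micromark} from 'micromark'

console.log(micromark('a  \nb')) // => '<p>a<br />\nb</p>' (two spaces: hard break)
console.log(micromark('a \nb'))  // => '<p>a\nb</p>' (one space: just a line suffix)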

node_modules/micromark/dev/lib/parse.d.ts generated vendored Normal file

@@ -0,0 +1,10 @@
/**
* @param {ParseOptions | null | undefined} [options]
* Configuration (optional).
* @returns {ParseContext}
* Parser.
*/
export function parse(options?: ParseOptions | null | undefined): ParseContext;
import type { ParseOptions } from 'micromark-util-types';
import type { ParseContext } from 'micromark-util-types';
//# sourceMappingURL=parse.d.ts.map

node_modules/micromark/dev/lib/parse.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"parse.d.ts","sourceRoot":"","sources":["parse.js"],"names":[],"mappings":"AAkBA;;;;;GAKG;AACH,gCALW,YAAY,GAAG,IAAI,GAAG,SAAS,GAE7B,YAAY,CAoCxB;kCAlDS,sBAAsB;kCAAtB,sBAAsB"}

node_modules/micromark/dev/lib/parse.js generated vendored Normal file

@@ -0,0 +1,58 @@
/**
* @import {
* Create,
* FullNormalizedExtension,
* InitialConstruct,
* ParseContext,
* ParseOptions
* } from 'micromark-util-types'
*/
import {combineExtensions} from 'micromark-util-combine-extensions'
import {content} from './initialize/content.js'
import {document} from './initialize/document.js'
import {flow} from './initialize/flow.js'
import {string, text} from './initialize/text.js'
import * as defaultConstructs from './constructs.js'
import {createTokenizer} from './create-tokenizer.js'
/**
* @param {ParseOptions | null | undefined} [options]
* Configuration (optional).
* @returns {ParseContext}
* Parser.
*/
export function parse(options) {
const settings = options || {}
const constructs = /** @type {FullNormalizedExtension} */ (
combineExtensions([defaultConstructs, ...(settings.extensions || [])])
)
/** @type {ParseContext} */
const parser = {
constructs,
content: create(content),
defined: [],
document: create(document),
flow: create(flow),
lazy: {},
string: create(string),
text: create(text)
}
return parser
/**
* @param {InitialConstruct} initial
* Construct to start with.
* @returns {Create}
* Create a tokenizer.
*/
function create(initial) {
return creator
/** @type {Create} */
function creator(from) {
return createTokenizer(parser, initial, from)
}
}
}

node_modules/micromark/dev/lib/postprocess.d.ts generated vendored Normal file

@@ -0,0 +1,9 @@
/**
* @param {Array<Event>} events
* Events.
* @returns {Array<Event>}
* Events.
*/
export function postprocess(events: Array<Event>): Array<Event>;
import type { Event } from 'micromark-util-types';
//# sourceMappingURL=postprocess.d.ts.map

node_modules/micromark/dev/lib/postprocess.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"postprocess.d.ts","sourceRoot":"","sources":["postprocess.js"],"names":[],"mappings":"AAMA;;;;;GAKG;AACH,oCALW,KAAK,CAAC,KAAK,CAAC,GAEV,KAAK,CAAC,KAAK,CAAC,CASxB;2BAjBuB,sBAAsB"}

node_modules/micromark/dev/lib/postprocess.js generated vendored Normal file

@@ -0,0 +1,19 @@
/**
* @import {Event} from 'micromark-util-types'
*/
import {subtokenize} from 'micromark-util-subtokenize'
/**
* @param {Array<Event>} events
* Events.
* @returns {Array<Event>}
* Events.
*/
export function postprocess(events) {
while (!subtokenize(events)) {
// Empty
}
return events
}

node_modules/micromark/dev/lib/preprocess.d.ts generated vendored Normal file

@@ -0,0 +1,13 @@
/**
* @returns {Preprocessor}
* Preprocess a value.
*/
export function preprocess(): Preprocessor;
/**
* Preprocess a value.
*/
export type Preprocessor = (value: Value, encoding?: Encoding | null | undefined, end?: boolean | null | undefined) => Array<Chunk>;
import type { Value } from 'micromark-util-types';
import type { Encoding } from 'micromark-util-types';
import type { Chunk } from 'micromark-util-types';
//# sourceMappingURL=preprocess.d.ts.map

node_modules/micromark/dev/lib/preprocess.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"preprocess.d.ts","sourceRoot":"","sources":["preprocess.js"],"names":[],"mappings":"AAqBA;;;GAGG;AACH,8BAHa,YAAY,CAsHxB;;;;mCArIU,KAAK,aAEL,QAAQ,GAAG,IAAI,GAAG,SAAS,QAE3B,OAAO,GAAG,IAAI,GAAG,SAAS,KAExB,KAAK,CAAC,KAAK,CAAC;2BAZsB,sBAAsB;8BAAtB,sBAAsB;2BAAtB,sBAAsB"}

node_modules/micromark/dev/lib/preprocess.js generated vendored Normal file

@@ -0,0 +1,141 @@
/**
* @import {Chunk, Code, Encoding, Value} from 'micromark-util-types'
*/
/**
* @callback Preprocessor
* Preprocess a value.
* @param {Value} value
* Value.
* @param {Encoding | null | undefined} [encoding]
* Encoding when `value` is a typed array (optional).
* @param {boolean | null | undefined} [end=false]
* Whether this is the last chunk (default: `false`).
* @returns {Array<Chunk>}
* Chunks.
*/
import {codes, constants} from 'micromark-util-symbol'
const search = /[\0\t\n\r]/g
/**
* @returns {Preprocessor}
* Preprocess a value.
*/
export function preprocess() {
let column = 1
let buffer = ''
/** @type {boolean | undefined} */
let start = true
/** @type {boolean | undefined} */
let atCarriageReturn
return preprocessor
/** @type {Preprocessor} */
// eslint-disable-next-line complexity
function preprocessor(value, encoding, end) {
/** @type {Array<Chunk>} */
const chunks = []
/** @type {RegExpMatchArray | null} */
let match
/** @type {number} */
let next
/** @type {number} */
let startPosition
/** @type {number} */
let endPosition
/** @type {Code} */
let code
value =
buffer +
(typeof value === 'string'
? value.toString()
: new TextDecoder(encoding || undefined).decode(value))
startPosition = 0
buffer = ''
if (start) {
// To do: `markdown-rs` actually parses BOMs (byte order mark).
if (value.charCodeAt(0) === codes.byteOrderMarker) {
startPosition++
}
start = undefined
}
while (startPosition < value.length) {
search.lastIndex = startPosition
match = search.exec(value)
endPosition =
match && match.index !== undefined ? match.index : value.length
code = value.charCodeAt(endPosition)
if (!match) {
buffer = value.slice(startPosition)
break
}
if (
code === codes.lf &&
startPosition === endPosition &&
atCarriageReturn
) {
chunks.push(codes.carriageReturnLineFeed)
atCarriageReturn = undefined
} else {
if (atCarriageReturn) {
chunks.push(codes.carriageReturn)
atCarriageReturn = undefined
}
if (startPosition < endPosition) {
chunks.push(value.slice(startPosition, endPosition))
column += endPosition - startPosition
}
switch (code) {
case codes.nul: {
chunks.push(codes.replacementCharacter)
column++
break
}
case codes.ht: {
next = Math.ceil(column / constants.tabSize) * constants.tabSize
chunks.push(codes.horizontalTab)
while (column++ < next) chunks.push(codes.virtualSpace)
break
}
case codes.lf: {
chunks.push(codes.lineFeed)
column = 1
break
}
default: {
atCarriageReturn = true
column = 1
}
}
}
startPosition = endPosition + 1
}
if (end) {
if (atCarriageReturn) chunks.push(codes.carriageReturn)
if (buffer) chunks.push(buffer)
chunks.push(codes.eof)
}
return chunks
}
}
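To make the tab logic above concrete, a sketch of one run (`constants.tabSize` is the CommonMark tab stop, 4, so a tab after one character pads with virtual spaces up to column 5):

import {preprocess} from 'micromark'
import {codes} from 'micromark-util-symbol'

const chunks = preprocess()('a\tb', undefined, true)
// => ['a', codes.horizontalTab, codes.virtualSpace, codes.virtualSpace, 'b', codes.eof]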

35
node_modules/micromark/dev/stream.d.ts generated vendored Normal file

@@ -0,0 +1,35 @@
/**
* Create a duplex (readable and writable) stream.
*
* Some of the work to parse markdown can be done streaming, but in the
* end buffering is required.
*
* Note that `micromark` does not handle errors for you, so you must handle
* errors on whatever streams you pipe into it.
* As markdown itself has no concept of errors, `micromark` does not emit
* errors either.
*
* @param {Options | null | undefined} [options]
* Configuration (optional).
* @returns {MinimalDuplex}
* Duplex stream.
*/
export function stream(options?: Options | null | undefined): MinimalDuplex;
export type Options = import("micromark-util-types").Options;
/**
* Function called when write was successful.
*/
export type Callback = () => undefined;
/**
* Configuration for piping.
*/
export type PipeOptions = {
/**
* Whether to end the destination stream when the source stream ends.
*/
end?: boolean | null | undefined;
};
/**
* Duplex stream.
*/
export type MinimalDuplex = Omit<NodeJS.ReadableStream & NodeJS.WritableStream, "isPaused" | "pause" | "read" | "resume" | "setEncoding" | "unpipe" | "unshift" | "wrap">;
//# sourceMappingURL=stream.d.ts.map
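The resulting duplex pipes like any Node stream; a sketch of typical use, reading markdown from stdin and writing HTML to stdout:

import {stream} from 'micromark/stream'

process.stdin.pipe(stream()).pipe(process.stdout)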

1
node_modules/micromark/dev/stream.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"stream.d.ts","sourceRoot":"","sources":["stream.js"],"names":[],"mappings":"AA6BA;;;;;;;;;;;;;;GAcG;AACH,iCALW,OAAO,GAAG,IAAI,GAAG,SAAS,GAExB,aAAa,CAoOzB;sBAxQY,OAAO,sBAAsB,EAAE,OAAO;;;;6BAMtC,SAAS;;;;;;;;UAKR,OAAO,GAAG,IAAI,GAAG,SAAS;;;;;4BAG3B,IAAI,CAAC,MAAM,CAAC,cAAc,GAAG,MAAM,CAAC,cAAc,EAAE,UAAU,GAAG,OAAO,GAAG,MAAM,GAAG,QAAQ,GAAG,aAAa,GAAG,QAAQ,GAAG,SAAS,GAAG,MAAM,CAAC"}

270
node_modules/micromark/dev/stream.js generated vendored Normal file

@@ -0,0 +1,270 @@
/**
* @import {Encoding, Value} from 'micromark-util-types'
*/
/**
* @typedef {import('micromark-util-types').Options} Options
*/
/**
* @callback Callback
* Function called when write was successful.
* @returns {undefined}
* Nothing.
*
* @typedef PipeOptions
* Configuration for piping.
* @property {boolean | null | undefined} [end]
* Whether to end the destination stream when the source stream ends.
*
* @typedef {Omit<NodeJS.ReadableStream & NodeJS.WritableStream, 'isPaused' | 'pause' | 'read' | 'resume' | 'setEncoding' | 'unpipe' | 'unshift' | 'wrap'>} MinimalDuplex
* Duplex stream.
*/
import {EventEmitter} from 'node:events'
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
/**
* Create a duplex (readable and writable) stream.
*
* Some of the work to parse markdown can be done streaming, but in the
* end buffering is required.
*
* Note that `micromark` does not handle errors for you, so you must handle
* errors on whatever streams you pipe into it.
* As markdown itself has no concept of errors, `micromark` does not emit
* errors either.
*
* @param {Options | null | undefined} [options]
* Configuration (optional).
* @returns {MinimalDuplex}
* Duplex stream.
*/
export function stream(options) {
const prep = preprocess()
const tokenize = parse(options).document().write
const comp = compile(options)
/** @type {boolean} */
let ended
const emitter = /** @type {MinimalDuplex} */ (new EventEmitter())
// @ts-expect-error: fine.
emitter.end = end
emitter.pipe = pipe
emitter.readable = true
emitter.writable = true
// @ts-expect-error: fine.
emitter.write = write
return emitter
/**
* Write a chunk into memory.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Uint8Array`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Uint8Array`).
* @param {Callback | Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*/
function write(chunk, encoding, callback) {
if (typeof encoding === 'function') {
callback = encoding
encoding = undefined
}
if (ended) {
throw new Error('Did not expect `write` after `end`')
}
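    // Preprocess the chunk into character codes and feed them straight to
    // the document tokenizer; compiling happens later, at `end`.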
tokenize(prep(chunk || '', encoding))
if (callback) {
callback()
}
// Signal successful write.
return true
}
/**
* End the writing.
*
* Passes all arguments as a final `write`.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Uint8Array`).
* @param {Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Uint8Array`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @overload
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*
* @param {Callback | Value | null | undefined} [chunk]
* Slice of markdown to parse (`string` or `Uint8Array`).
* @param {Callback | Encoding | null | undefined} [encoding]
* Character encoding to understand `chunk` as when it's a `Uint8Array`
* (`string`, default: `'utf8'`).
* @param {Callback | null | undefined} [callback]
* Function called when write was successful.
* @returns {boolean}
* Whether write was successful.
*/
function end(chunk, encoding, callback) {
if (typeof chunk === 'function') {
encoding = chunk
chunk = undefined
}
if (typeof encoding === 'function') {
callback = encoding
encoding = undefined
}
write(chunk, encoding, callback)
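    // Final flush: an empty chunk with `end: true` produces the EOF code;
    // only now are nested tokens expanded and the document compiled at once.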
emitter.emit('data', comp(postprocess(tokenize(prep('', encoding, true)))))
emitter.emit('end')
ended = true
return true
}
/**
* Pipe the processor into a writable stream.
*
* Basically `Stream#pipe`, but inlined and simplified to keep the bundled
* size down.
* See: <https://github.com/nodejs/node/blob/43a5170/lib/internal/streams/legacy.js#L13>.
*
* @template {NodeJS.WritableStream} Stream
* Writable stream.
* @param {Stream} destination
* Stream to pipe into.
* @param {PipeOptions | null | undefined} [options]
* Configuration.
* @returns {Stream}
* Destination stream.
*/
function pipe(destination, options) {
emitter.on('data', ondata)
emitter.on('error', onerror)
emitter.on('end', cleanup)
emitter.on('close', cleanup)
// If the `end` option is not supplied, `destination.end()` will be
// called when the `end` or `close` events are received.
// @ts-expect-error `_isStdio` is available on `std{err,out}`
if (!destination._isStdio && (!options || options.end !== false)) {
emitter.on('end', onend)
}
destination.on('error', onerror)
destination.on('close', cleanup)
destination.emit('pipe', emitter)
return destination
/**
* End destination stream.
*
* @returns {undefined}
* Nothing.
*/
function onend() {
if (destination.end) {
destination.end()
}
}
/**
* Handle data.
*
* @param {string} chunk
* Data.
* @returns {undefined}
* Nothing.
*/
function ondata(chunk) {
if (destination.writable) {
destination.write(chunk)
}
}
/**
* Clean listeners.
*
* @returns {undefined}
* Nothing.
*/
function cleanup() {
emitter.removeListener('data', ondata)
emitter.removeListener('end', onend)
emitter.removeListener('error', onerror)
emitter.removeListener('end', cleanup)
emitter.removeListener('close', cleanup)
destination.removeListener('error', onerror)
destination.removeListener('close', cleanup)
}
/**
* Close dangling pipes and handle unheard errors.
*
* @param {Error | null | undefined} [error]
* Error, if any.
* @returns {undefined}
* Nothing.
*/
function onerror(error) {
cleanup()
if (!emitter.listenerCount('error')) {
throw error // Unhandled stream error in pipe.
}
}
}
}
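A sketch of driving the stream manually instead of piping into it (the option name follows this package's `Options`; `data` fires once, at `end`, because compiling needs the whole document):

import {stream} from 'micromark/stream'

const markdown = stream({allowDangerousHtml: false})
markdown.on('data', function (html) {
  console.log(html) // The complete compiled HTML, emitted once.
})
markdown.write('# hi\n')
markdown.end('*world*')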