Refactor routing in App component to enhance navigation and improve error handling by integrating dynamic routes and updating the NotFound route.

becarta
2025-05-23 12:43:00 +02:00
parent f40db0f5c9
commit a544759a3b
11127 changed files with 1647032 additions and 0 deletions

16
node_modules/micromark/lib/compile.d.ts generated vendored Normal file

@@ -0,0 +1,16 @@
/**
* @param {CompileOptions | null | undefined} [options]
* @returns {Compile}
*/
export function compile(options?: CompileOptions | null | undefined): Compile;
export type Media = {
image?: boolean | undefined;
labelId?: string | undefined;
label?: string | undefined;
referenceId?: string | undefined;
destination?: string | undefined;
title?: string | undefined;
};
import type { CompileOptions } from 'micromark-util-types';
import type { Compile } from 'micromark-util-types';
//# sourceMappingURL=compile.d.ts.map
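
The declaration above means `compile(options)` returns a `Compile` function: a finished micromark event array in, an HTML string out. A minimal sketch of that shape, assuming a module sitting next to these lib files (the relative import path and the `events` placeholder are illustrative):

import { compile } from './compile.js';

// `compile(options)` builds the HTML compiler once; the returned function
// can then be applied to any event array produced by the parser.
const toHtml = compile({ allowDangerousHtml: false });

// `events` would be the Array<Event> coming out of parse.js and
// postprocess.js further down in this diff:
// const html = toHtml(events);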

1
node_modules/micromark/lib/compile.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"compile.d.ts","sourceRoot":"","sources":["compile.js"],"names":[],"mappings":"AA6DA;;;GAGG;AACH,kCAHW,cAAc,GAAG,IAAI,GAAG,SAAS,GAC/B,OAAO,CAgkCnB;;YA/lCa,OAAO,GAAG,SAAS;cACnB,MAAM,GAAG,SAAS;YAClB,MAAM,GAAG,SAAS;kBAClB,MAAM,GAAG,SAAS;kBAClB,MAAM,GAAG,SAAS;YAClB,MAAM,GAAG,SAAS;;oCAVtB,sBAAsB;6BAAtB,sBAAsB"}

1060
node_modules/micromark/lib/compile.js generated vendored Normal file

File diff suppressed because it is too large.

73
node_modules/micromark/lib/constructs.d.ts generated vendored Normal file

@@ -0,0 +1,73 @@
/** @satisfies {Extension['document']} */
export const document: {
42: import("micromark-util-types").Construct;
43: import("micromark-util-types").Construct;
45: import("micromark-util-types").Construct;
48: import("micromark-util-types").Construct;
49: import("micromark-util-types").Construct;
50: import("micromark-util-types").Construct;
51: import("micromark-util-types").Construct;
52: import("micromark-util-types").Construct;
53: import("micromark-util-types").Construct;
54: import("micromark-util-types").Construct;
55: import("micromark-util-types").Construct;
56: import("micromark-util-types").Construct;
57: import("micromark-util-types").Construct;
62: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['contentInitial']} */
export const contentInitial: {
91: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['flowInitial']} */
export const flowInitial: {
[-2]: import("micromark-util-types").Construct;
[-1]: import("micromark-util-types").Construct;
32: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['flow']} */
export const flow: {
35: import("micromark-util-types").Construct;
42: import("micromark-util-types").Construct;
45: import("micromark-util-types").Construct[];
60: import("micromark-util-types").Construct;
61: import("micromark-util-types").Construct;
95: import("micromark-util-types").Construct;
96: import("micromark-util-types").Construct;
126: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['string']} */
export const string: {
38: import("micromark-util-types").Construct;
92: import("micromark-util-types").Construct;
};
/** @satisfies {Extension['text']} */
export const text: {
[-5]: import("micromark-util-types").Construct;
[-4]: import("micromark-util-types").Construct;
[-3]: import("micromark-util-types").Construct;
33: import("micromark-util-types").Construct;
38: import("micromark-util-types").Construct;
42: import("micromark-util-types").Construct;
60: import("micromark-util-types").Construct[];
91: import("micromark-util-types").Construct;
92: import("micromark-util-types").Construct[];
93: import("micromark-util-types").Construct;
95: import("micromark-util-types").Construct;
96: import("micromark-util-types").Construct;
};
export namespace insideSpan {
let _null: (import("micromark-util-types").Construct | {
resolveAll: import("micromark-util-types").Resolver;
})[];
export { _null as null };
}
export namespace attentionMarkers {
let _null_1: (42 | 95)[];
export { _null_1 as null };
}
export namespace disable {
let _null_2: never[];
export { _null_2 as null };
}
//# sourceMappingURL=constructs.d.ts.map

1
node_modules/micromark/lib/constructs.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"constructs.d.ts","sourceRoot":"","sources":["constructs.js"],"names":[],"mappings":"AA6BA,yCAAyC;AACzC;;;;;;;;;;;;;;;EAeC;AAED,+CAA+C;AAC/C;;EAEC;AAED,4CAA4C;AAC5C;;;;EAIC;AAED,qCAAqC;AACrC;;;;;;;;;EASC;AAED,uCAAuC;AACvC;;;EAGC;AAED,qCAAqC;AACrC;;;;;;;;;;;;;EAaC"}

85
node_modules/micromark/lib/constructs.js generated vendored Normal file

@@ -0,0 +1,85 @@
/**
* @import {Extension} from 'micromark-util-types'
*/
import { attention, autolink, blockQuote, characterEscape, characterReference, codeFenced, codeIndented, codeText, definition, hardBreakEscape, headingAtx, htmlFlow, htmlText, labelEnd, labelStartImage, labelStartLink, lineEnding, list, setextUnderline, thematicBreak } from 'micromark-core-commonmark';
import { resolver as resolveText } from './initialize/text.js';
/** @satisfies {Extension['document']} */
export const document = {
[42]: list,
[43]: list,
[45]: list,
[48]: list,
[49]: list,
[50]: list,
[51]: list,
[52]: list,
[53]: list,
[54]: list,
[55]: list,
[56]: list,
[57]: list,
[62]: blockQuote
};
/** @satisfies {Extension['contentInitial']} */
export const contentInitial = {
[91]: definition
};
/** @satisfies {Extension['flowInitial']} */
export const flowInitial = {
[-2]: codeIndented,
[-1]: codeIndented,
[32]: codeIndented
};
/** @satisfies {Extension['flow']} */
export const flow = {
[35]: headingAtx,
[42]: thematicBreak,
[45]: [setextUnderline, thematicBreak],
[60]: htmlFlow,
[61]: setextUnderline,
[95]: thematicBreak,
[96]: codeFenced,
[126]: codeFenced
};
/** @satisfies {Extension['string']} */
export const string = {
[38]: characterReference,
[92]: characterEscape
};
/** @satisfies {Extension['text']} */
export const text = {
[-5]: lineEnding,
[-4]: lineEnding,
[-3]: lineEnding,
[33]: labelStartImage,
[38]: characterReference,
[42]: attention,
[60]: [autolink, htmlText],
[91]: labelStartLink,
[92]: [hardBreakEscape, characterEscape],
[93]: labelEnd,
[95]: attention,
[96]: codeText
};
/** @satisfies {Extension['insideSpan']} */
export const insideSpan = {
null: [attention, resolveText]
};
/** @satisfies {Extension['attentionMarkers']} */
export const attentionMarkers = {
null: [42, 95]
};
/** @satisfies {Extension['disable']} */
export const disable = {
null: []
};
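
For readers skimming these tables: the numeric keys are plain character codes (the `Code` values the tokenizer receives), and the negative keys are micromark's virtual whitespace codes produced by `preprocess.js`. A small sketch, assuming a relative import next to this file:

import { document, flow } from './constructs.js';

console.log('>'.charCodeAt(0)); // 62, so document[62] is the blockQuote construct
console.log('#'.charCodeAt(0)); // 35, so flow[35] is the headingAtx construct
console.log('*'.charCodeAt(0)); // 42, so document[42] is list, flow[42] is thematicBreak

// Negative keys are virtual codes from preprocess.js:
// -5 carriage return, -4 line feed, -3 CRLF, -2 horizontal tab, -1 virtual space.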

46
node_modules/micromark/lib/create-tokenizer.d.ts generated vendored Normal file

@@ -0,0 +1,46 @@
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* Parser.
* @param {InitialConstruct} initialize
* Construct.
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* Point (optional).
* @returns {TokenizeContext}
* Context.
*/
export function createTokenizer(parser: ParseContext, initialize: InitialConstruct, from?: Omit<Point, "_bufferIndex" | "_index"> | undefined): TokenizeContext;
/**
* Restore the state.
*/
export type Restore = () => undefined;
/**
* Info.
*/
export type Info = {
/**
* Restore.
*/
restore: Restore;
/**
* From.
*/
from: number;
};
/**
* Handle a successful run.
*/
export type ReturnHandle = (construct: Construct, info: Info) => undefined;
import type { ParseContext } from 'micromark-util-types';
import type { InitialConstruct } from 'micromark-util-types';
import type { Point } from 'micromark-util-types';
import type { TokenizeContext } from 'micromark-util-types';
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=create-tokenizer.d.ts.map

1
node_modules/micromark/lib/create-tokenizer.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"create-tokenizer.d.ts","sourceRoot":"","sources":["create-tokenizer.js"],"names":[],"mappings":"AAgDA;;;;;;;;;;;;;;;;;GAiBG;AACH,wCATW,YAAY,cAEZ,gBAAgB,SAEhB,IAAI,CAAC,KAAK,EAAE,cAAc,GAAG,QAAQ,CAAC,GAAG,SAAS,GAEhD,eAAe,CAwhB3B;;;;4BApkBY,SAAS;;;;;;;;aAKR,OAAO;;;;UAEP,MAAM;;;;;uCAKT,SAAS,QAET,IAAI,KAEF,SAAS;kCAtBZ,sBAAsB;sCAAtB,sBAAsB;2BAAtB,sBAAsB;qCAAtB,sBAAsB;+BAAtB,sBAAsB"}

611
node_modules/micromark/lib/create-tokenizer.js generated vendored Normal file

@@ -0,0 +1,611 @@
/**
* @import {
* Chunk,
* Code,
* ConstructRecord,
* Construct,
* Effects,
* InitialConstruct,
* ParseContext,
* Point,
* State,
* TokenizeContext,
* Token
* } from 'micromark-util-types'
*/
/**
* @callback Restore
* Restore the state.
* @returns {undefined}
* Nothing.
*
* @typedef Info
* Info.
* @property {Restore} restore
* Restore.
* @property {number} from
* From.
*
* @callback ReturnHandle
* Handle a successful run.
* @param {Construct} construct
* Construct.
* @param {Info} info
* Info.
* @returns {undefined}
* Nothing.
*/
import { markdownLineEnding } from 'micromark-util-character';
import { push, splice } from 'micromark-util-chunked';
import { resolveAll } from 'micromark-util-resolve-all';
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* Parser.
* @param {InitialConstruct} initialize
* Construct.
* @param {Omit<Point, '_bufferIndex' | '_index'> | undefined} [from]
* Point (optional).
* @returns {TokenizeContext}
* Context.
*/
export function createTokenizer(parser, initialize, from) {
/** @type {Point} */
let point = {
_bufferIndex: -1,
_index: 0,
line: from && from.line || 1,
column: from && from.column || 1,
offset: from && from.offset || 0
};
/** @type {Record<string, number>} */
const columnStart = {};
/** @type {Array<Construct>} */
const resolveAllConstructs = [];
/** @type {Array<Chunk>} */
let chunks = [];
/** @type {Array<Token>} */
let stack = [];
/** @type {boolean | undefined} */
let consumed = true;
/**
* Tools used for tokenizing.
*
* @type {Effects}
*/
const effects = {
attempt: constructFactory(onsuccessfulconstruct),
check: constructFactory(onsuccessfulcheck),
consume,
enter,
exit,
interrupt: constructFactory(onsuccessfulcheck, {
interrupt: true
})
};
/**
* State and tools for resolving and serializing.
*
* @type {TokenizeContext}
*/
const context = {
code: null,
containerState: {},
defineSkip,
events: [],
now,
parser,
previous: null,
sliceSerialize,
sliceStream,
write
};
/**
* The state function.
*
* @type {State | undefined}
*/
let state = initialize.tokenize.call(context, effects);
/**
* Track which character we expect to be consumed, to catch bugs.
*
* @type {Code}
*/
let expectedCode;
if (initialize.resolveAll) {
resolveAllConstructs.push(initialize);
}
return context;
/** @type {TokenizeContext['write']} */
function write(slice) {
chunks = push(chunks, slice);
main();
// Exit if we're not done, resolve might change stuff.
if (chunks[chunks.length - 1] !== null) {
return [];
}
addResult(initialize, 0);
// Otherwise, resolve, and exit.
context.events = resolveAll(resolveAllConstructs, context.events, context);
return context.events;
}
//
// Tools.
//
/** @type {TokenizeContext['sliceSerialize']} */
function sliceSerialize(token, expandTabs) {
return serializeChunks(sliceStream(token), expandTabs);
}
/** @type {TokenizeContext['sliceStream']} */
function sliceStream(token) {
return sliceChunks(chunks, token);
}
/** @type {TokenizeContext['now']} */
function now() {
// This is a hot path, so we clone manually instead of `Object.assign({}, point)`
const {
_bufferIndex,
_index,
line,
column,
offset
} = point;
return {
_bufferIndex,
_index,
line,
column,
offset
};
}
/** @type {TokenizeContext['defineSkip']} */
function defineSkip(value) {
columnStart[value.line] = value.column;
accountForPotentialSkip();
}
//
// State management.
//
/**
* Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
* `consume`).
* Here is where we walk through the chunks, which either include strings of
* several characters, or numerical character codes.
* The reason to do this in a loop instead of a call is so the stack can
* drain.
*
* @returns {undefined}
* Nothing.
*/
function main() {
/** @type {number} */
let chunkIndex;
while (point._index < chunks.length) {
const chunk = chunks[point._index];
// If we're in a buffer chunk, loop through it.
if (typeof chunk === 'string') {
chunkIndex = point._index;
if (point._bufferIndex < 0) {
point._bufferIndex = 0;
}
while (point._index === chunkIndex && point._bufferIndex < chunk.length) {
go(chunk.charCodeAt(point._bufferIndex));
}
} else {
go(chunk);
}
}
}
/**
* Deal with one code.
*
* @param {Code} code
* Code.
* @returns {undefined}
* Nothing.
*/
function go(code) {
consumed = undefined;
expectedCode = code;
state = state(code);
}
/** @type {Effects['consume']} */
function consume(code) {
if (markdownLineEnding(code)) {
point.line++;
point.column = 1;
point.offset += code === -3 ? 2 : 1;
accountForPotentialSkip();
} else if (code !== -1) {
point.column++;
point.offset++;
}
// Not in a string chunk.
if (point._bufferIndex < 0) {
point._index++;
} else {
point._bufferIndex++;
// At end of string chunk.
if (point._bufferIndex ===
// Points w/ non-negative `_bufferIndex` reference
// strings.
/** @type {string} */
chunks[point._index].length) {
point._bufferIndex = -1;
point._index++;
}
}
// Expose the previous character.
context.previous = code;
// Mark as consumed.
consumed = true;
}
/** @type {Effects['enter']} */
function enter(type, fields) {
/** @type {Token} */
// @ts-expect-error Patch instead of assign required fields to help GC.
const token = fields || {};
token.type = type;
token.start = now();
context.events.push(['enter', token, context]);
stack.push(token);
return token;
}
/** @type {Effects['exit']} */
function exit(type) {
const token = stack.pop();
token.end = now();
context.events.push(['exit', token, context]);
return token;
}
/**
* Use results.
*
* @type {ReturnHandle}
*/
function onsuccessfulconstruct(construct, info) {
addResult(construct, info.from);
}
/**
* Discard results.
*
* @type {ReturnHandle}
*/
function onsuccessfulcheck(_, info) {
info.restore();
}
/**
* Factory to attempt/check/interrupt.
*
* @param {ReturnHandle} onreturn
* Callback.
* @param {{interrupt?: boolean | undefined} | undefined} [fields]
* Fields.
*/
function constructFactory(onreturn, fields) {
return hook;
/**
* Handle either an object mapping codes to constructs, a list of
* constructs, or a single construct.
*
* @param {Array<Construct> | ConstructRecord | Construct} constructs
* Constructs.
* @param {State} returnState
* State.
* @param {State | undefined} [bogusState]
* State.
* @returns {State}
* State.
*/
function hook(constructs, returnState, bogusState) {
/** @type {ReadonlyArray<Construct>} */
let listOfConstructs;
/** @type {number} */
let constructIndex;
/** @type {Construct} */
let currentConstruct;
/** @type {Info} */
let info;
return Array.isArray(constructs) ? /* c8 ignore next 1 */
handleListOfConstructs(constructs) : 'tokenize' in constructs ?
// Looks like a construct.
handleListOfConstructs([(/** @type {Construct} */constructs)]) : handleMapOfConstructs(constructs);
/**
* Handle a map of constructs.
*
* @param {ConstructRecord} map
* Constructs.
* @returns {State}
* State.
*/
function handleMapOfConstructs(map) {
return start;
/** @type {State} */
function start(code) {
const left = code !== null && map[code];
const all = code !== null && map.null;
const list = [
// To do: add more extension tests.
/* c8 ignore next 2 */
...(Array.isArray(left) ? left : left ? [left] : []), ...(Array.isArray(all) ? all : all ? [all] : [])];
return handleListOfConstructs(list)(code);
}
}
/**
* Handle a list of constructs.
*
* @param {ReadonlyArray<Construct>} list
* Constructs.
* @returns {State}
* State.
*/
function handleListOfConstructs(list) {
listOfConstructs = list;
constructIndex = 0;
if (list.length === 0) {
return bogusState;
}
return handleConstruct(list[constructIndex]);
}
/**
* Handle a single construct.
*
* @param {Construct} construct
* Construct.
* @returns {State}
* State.
*/
function handleConstruct(construct) {
return start;
/** @type {State} */
function start(code) {
// To do: not needed to store if there is no bogus state, probably?
// Currently doesn't work because `inspect` in document does a check
// w/o a bogus, which doesn't make sense. But it does seem to help perf
// by not storing.
info = store();
currentConstruct = construct;
if (!construct.partial) {
context.currentConstruct = construct;
}
// Always populated by defaults.
if (construct.name && context.parser.constructs.disable.null.includes(construct.name)) {
return nok(code);
}
return construct.tokenize.call(
// If we do have fields, create an object w/ `context` as its
// prototype.
// This allows a “live binding”, which is needed for `interrupt`.
fields ? Object.assign(Object.create(context), fields) : context, effects, ok, nok)(code);
}
}
/** @type {State} */
function ok(code) {
consumed = true;
onreturn(currentConstruct, info);
return returnState;
}
/** @type {State} */
function nok(code) {
consumed = true;
info.restore();
if (++constructIndex < listOfConstructs.length) {
return handleConstruct(listOfConstructs[constructIndex]);
}
return bogusState;
}
}
}
/**
* @param {Construct} construct
* Construct.
* @param {number} from
* From.
* @returns {undefined}
* Nothing.
*/
function addResult(construct, from) {
if (construct.resolveAll && !resolveAllConstructs.includes(construct)) {
resolveAllConstructs.push(construct);
}
if (construct.resolve) {
splice(context.events, from, context.events.length - from, construct.resolve(context.events.slice(from), context));
}
if (construct.resolveTo) {
context.events = construct.resolveTo(context.events, context);
}
}
/**
* Store state.
*
* @returns {Info}
* Info.
*/
function store() {
const startPoint = now();
const startPrevious = context.previous;
const startCurrentConstruct = context.currentConstruct;
const startEventsIndex = context.events.length;
const startStack = Array.from(stack);
return {
from: startEventsIndex,
restore
};
/**
* Restore state.
*
* @returns {undefined}
* Nothing.
*/
function restore() {
point = startPoint;
context.previous = startPrevious;
context.currentConstruct = startCurrentConstruct;
context.events.length = startEventsIndex;
stack = startStack;
accountForPotentialSkip();
}
}
/**
* Move the current point a bit forward in the line when it's on a column
* skip.
*
* @returns {undefined}
* Nothing.
*/
function accountForPotentialSkip() {
if (point.line in columnStart && point.column < 2) {
point.column = columnStart[point.line];
point.offset += columnStart[point.line] - 1;
}
}
}
/**
* Get the chunks from a slice of chunks in the range of a token.
*
* @param {ReadonlyArray<Chunk>} chunks
* Chunks.
* @param {Pick<Token, 'end' | 'start'>} token
* Token.
* @returns {Array<Chunk>}
* Chunks.
*/
function sliceChunks(chunks, token) {
const startIndex = token.start._index;
const startBufferIndex = token.start._bufferIndex;
const endIndex = token.end._index;
const endBufferIndex = token.end._bufferIndex;
/** @type {Array<Chunk>} */
let view;
if (startIndex === endIndex) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)];
} else {
view = chunks.slice(startIndex, endIndex);
if (startBufferIndex > -1) {
const head = view[0];
if (typeof head === 'string') {
view[0] = head.slice(startBufferIndex);
/* c8 ignore next 4 -- used to be used, no longer */
} else {
view.shift();
}
}
if (endBufferIndex > 0) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view.push(chunks[endIndex].slice(0, endBufferIndex));
}
}
return view;
}
/**
* Get the string value of a slice of chunks.
*
* @param {ReadonlyArray<Chunk>} chunks
* Chunks.
* @param {boolean | undefined} [expandTabs=false]
* Whether to expand tabs (default: `false`).
* @returns {string}
* Result.
*/
function serializeChunks(chunks, expandTabs) {
let index = -1;
/** @type {Array<string>} */
const result = [];
/** @type {boolean | undefined} */
let atTab;
while (++index < chunks.length) {
const chunk = chunks[index];
/** @type {string} */
let value;
if (typeof chunk === 'string') {
value = chunk;
} else switch (chunk) {
case -5:
{
value = "\r";
break;
}
case -4:
{
value = "\n";
break;
}
case -3:
{
value = "\r" + "\n";
break;
}
case -2:
{
value = expandTabs ? " " : "\t";
break;
}
case -1:
{
if (!expandTabs && atTab) continue;
value = " ";
break;
}
default:
{
// Currently only replacement character.
value = String.fromCharCode(chunk);
}
}
atTab = chunk === -2;
result.push(value);
}
return result.join('');
}
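
To make the control flow above concrete, here is a sketch of how a tokenizer created by `createTokenizer` is normally driven: the parser (`parse.js`, below) hands out tokenizers, chunks come from `preprocess.js`, and a trailing `null` chunk tells `write` the input is complete. The relative import paths and sample input are assumptions:

import { parse } from './parse.js';
import { postprocess } from './postprocess.js';
import { preprocess } from './preprocess.js';

const parser = parse();
// parser.document() calls createTokenizer(parser, documentInitializer).
const tokenizer = parser.document();

// write() returns [] until a `null` chunk arrives; preprocess() with
// `end` set to true appends that `null`, so resolved events come back here.
const events = tokenizer.write(preprocess()('# hi\n', undefined, true));
const done = postprocess(events); // flat list of ['enter' | 'exit', token, context] tuples
console.log(done.length);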

4
node_modules/micromark/lib/initialize/content.d.ts generated vendored Normal file

@@ -0,0 +1,4 @@
/** @type {InitialConstruct} */
export const content: InitialConstruct;
import type { InitialConstruct } from 'micromark-util-types';
//# sourceMappingURL=content.d.ts.map

1
node_modules/micromark/lib/initialize/content.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"content.d.ts","sourceRoot":"","sources":["content.js"],"names":[],"mappings":"AAeA,+BAA+B;AAC/B,sBADW,gBAAgB,CACyB;sCAT1C,sBAAsB"}

79
node_modules/micromark/lib/initialize/content.js generated vendored Normal file

@@ -0,0 +1,79 @@
/**
* @import {
* InitialConstruct,
* Initializer,
* State,
* TokenizeContext,
* Token
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding } from 'micromark-util-character';
/** @type {InitialConstruct} */
export const content = {
tokenize: initializeContent
};
/**
* @this {TokenizeContext}
* Context.
* @type {Initializer}
* Content.
*/
function initializeContent(effects) {
const contentStart = effects.attempt(this.parser.constructs.contentInitial, afterContentStartConstruct, paragraphInitial);
/** @type {Token} */
let previous;
return contentStart;
/** @type {State} */
function afterContentStartConstruct(code) {
if (code === null) {
effects.consume(code);
return;
}
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return factorySpace(effects, contentStart, "linePrefix");
}
/** @type {State} */
function paragraphInitial(code) {
effects.enter("paragraph");
return lineStart(code);
}
/** @type {State} */
function lineStart(code) {
const token = effects.enter("chunkText", {
contentType: "text",
previous
});
if (previous) {
previous.next = token;
}
previous = token;
return data(code);
}
/** @type {State} */
function data(code) {
if (code === null) {
effects.exit("chunkText");
effects.exit("paragraph");
effects.consume(code);
return;
}
if (markdownLineEnding(code)) {
effects.consume(code);
effects.exit("chunkText");
return lineStart;
}
// Data.
effects.consume(code);
return data;
}
}

10
node_modules/micromark/lib/initialize/document.d.ts generated vendored Normal file

@@ -0,0 +1,10 @@
/** @type {InitialConstruct} */
export const document: InitialConstruct;
/**
* Construct and its state.
*/
export type StackItem = [Construct, ContainerState];
import type { InitialConstruct } from 'micromark-util-types';
import type { Construct } from 'micromark-util-types';
import type { ContainerState } from 'micromark-util-types';
//# sourceMappingURL=document.d.ts.map

1
node_modules/micromark/lib/initialize/document.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"document.d.ts","sourceRoot":"","sources":["document.js"],"names":[],"mappings":"AAyBA,+BAA+B;AAC/B,uBADW,gBAAgB,CAC2B;;;;wBAXzC,CAAC,SAAS,EAAE,cAAc,CAAC;sCAJ9B,sBAAsB;+BAAtB,sBAAsB;oCAAtB,sBAAsB"}

362
node_modules/micromark/lib/initialize/document.js generated vendored Normal file

@@ -0,0 +1,362 @@
/**
* @import {
* Construct,
* ContainerState,
* InitialConstruct,
* Initializer,
* Point,
* State,
* TokenizeContext,
* Tokenizer,
* Token
* } from 'micromark-util-types'
*/
/**
* @typedef {[Construct, ContainerState]} StackItem
* Construct and its state.
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding } from 'micromark-util-character';
import { splice } from 'micromark-util-chunked';
/** @type {InitialConstruct} */
export const document = {
tokenize: initializeDocument
};
/** @type {Construct} */
const containerConstruct = {
tokenize: tokenizeContainer
};
/**
* @this {TokenizeContext}
* Self.
* @type {Initializer}
* Initializer.
*/
function initializeDocument(effects) {
const self = this;
/** @type {Array<StackItem>} */
const stack = [];
let continued = 0;
/** @type {TokenizeContext | undefined} */
let childFlow;
/** @type {Token | undefined} */
let childToken;
/** @type {number} */
let lineStartOffset;
return start;
/** @type {State} */
function start(code) {
// First we iterate through the open blocks, starting with the root
// document, and descending through last children down to the last open
// block.
// Each block imposes a condition that the line must satisfy if the block is
// to remain open.
// For example, a block quote requires a `>` character.
// A paragraph requires a non-blank line.
// In this phase we may match all or just some of the open blocks.
// But we cannot close unmatched blocks yet, because we may have a lazy
// continuation line.
if (continued < stack.length) {
const item = stack[continued];
self.containerState = item[1];
return effects.attempt(item[0].continuation, documentContinue, checkNewContainers)(code);
}
// Done.
return checkNewContainers(code);
}
/** @type {State} */
function documentContinue(code) {
continued++;
// Note: this field is called `_closeFlow` but it also closes containers.
// Perhaps a good idea to rename it but it's already used in the wild by
// extensions.
if (self.containerState._closeFlow) {
self.containerState._closeFlow = undefined;
if (childFlow) {
closeFlow();
}
// Note: this algorithm for moving events around is similar to the
// algorithm when dealing with lazy lines in `writeToChild`.
const indexBeforeExits = self.events.length;
let indexBeforeFlow = indexBeforeExits;
/** @type {Point | undefined} */
let point;
// Find the flow chunk.
while (indexBeforeFlow--) {
if (self.events[indexBeforeFlow][0] === 'exit' && self.events[indexBeforeFlow][1].type === "chunkFlow") {
point = self.events[indexBeforeFlow][1].end;
break;
}
}
exitContainers(continued);
// Fix positions.
let index = indexBeforeExits;
while (index < self.events.length) {
self.events[index][1].end = {
...point
};
index++;
}
// Inject the exits earlier (they're still also at the end).
splice(self.events, indexBeforeFlow + 1, 0, self.events.slice(indexBeforeExits));
// Discard the duplicate exits.
self.events.length = index;
return checkNewContainers(code);
}
return start(code);
}
/** @type {State} */
function checkNewContainers(code) {
// Next, after consuming the continuation markers for existing blocks, we
// look for new block starts (e.g. `>` for a block quote).
// If we encounter a new block start, we close any blocks unmatched in
// step 1 before creating the new block as a child of the last matched
// block.
if (continued === stack.length) {
// No need to `check` whether there's a container, as `exitContainers`
// would be moot.
// We can instead immediately `attempt` to parse one.
if (!childFlow) {
return documentContinued(code);
}
// If we have concrete content, such as block HTML or fenced code,
// we can't have containers “pierce” into them, so we can immediately
// start.
if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
return flowStart(code);
}
// If we do have flow, it could still be a blank line,
// but we'd be interrupting it w/ a new container if there's a current
// construct.
// To do: next major: remove `_gfmTableDynamicInterruptHack` (no longer
// needed in micromark-extension-gfm-table@1.0.6).
self.interrupt = Boolean(childFlow.currentConstruct && !childFlow._gfmTableDynamicInterruptHack);
}
// Check if there is a new container.
self.containerState = {};
return effects.check(containerConstruct, thereIsANewContainer, thereIsNoNewContainer)(code);
}
/** @type {State} */
function thereIsANewContainer(code) {
if (childFlow) closeFlow();
exitContainers(continued);
return documentContinued(code);
}
/** @type {State} */
function thereIsNoNewContainer(code) {
self.parser.lazy[self.now().line] = continued !== stack.length;
lineStartOffset = self.now().offset;
return flowStart(code);
}
/** @type {State} */
function documentContinued(code) {
// Try new containers.
self.containerState = {};
return effects.attempt(containerConstruct, containerContinue, flowStart)(code);
}
/** @type {State} */
function containerContinue(code) {
continued++;
stack.push([self.currentConstruct, self.containerState]);
// Try another.
return documentContinued(code);
}
/** @type {State} */
function flowStart(code) {
if (code === null) {
if (childFlow) closeFlow();
exitContainers(0);
effects.consume(code);
return;
}
childFlow = childFlow || self.parser.flow(self.now());
effects.enter("chunkFlow", {
_tokenizer: childFlow,
contentType: "flow",
previous: childToken
});
return flowContinue(code);
}
/** @type {State} */
function flowContinue(code) {
if (code === null) {
writeToChild(effects.exit("chunkFlow"), true);
exitContainers(0);
effects.consume(code);
return;
}
if (markdownLineEnding(code)) {
effects.consume(code);
writeToChild(effects.exit("chunkFlow"));
// Get ready for the next line.
continued = 0;
self.interrupt = undefined;
return start;
}
effects.consume(code);
return flowContinue;
}
/**
* @param {Token} token
* Token.
* @param {boolean | undefined} [endOfFile]
* Whether the token is at the end of the file (default: `false`).
* @returns {undefined}
* Nothing.
*/
function writeToChild(token, endOfFile) {
const stream = self.sliceStream(token);
if (endOfFile) stream.push(null);
token.previous = childToken;
if (childToken) childToken.next = token;
childToken = token;
childFlow.defineSkip(token.start);
childFlow.write(stream);
// Alright, so we just added a lazy line:
//
// ```markdown
// > a
// b.
//
// Or:
//
// > ~~~c
// d
//
// Or:
//
// > | e |
// f
// ```
//
// The construct in the second example (fenced code) does not accept lazy
// lines, so it marked itself as done at the end of its first line, and
// then the content construct parses `d`.
// Most constructs in markdown match on the first line: if the first line
// forms a construct, a non-lazy line can't “unmake” it.
//
// The construct in the third example is potentially a GFM table, and
// those are *weird*.
// It *could* be a table, from the first line, if the following line
// matches a condition.
// In this case, that second line is lazy, which “unmakes” the first line
// and turns the whole into one content block.
//
// We've now parsed the non-lazy and the lazy line, and can figure out
// whether the lazy line started a new flow block.
// If it did, we exit the current containers between the two flow blocks.
if (self.parser.lazy[token.start.line]) {
let index = childFlow.events.length;
while (index--) {
if (
// The token starts before the line ending…
childFlow.events[index][1].start.offset < lineStartOffset && (
// …and either is not ended yet…
!childFlow.events[index][1].end ||
// …or ends after it.
childFlow.events[index][1].end.offset > lineStartOffset)) {
// Exit: there's still something open, which means it's a lazy line
// part of something.
return;
}
}
// Note: this algorithm for moving events around is similar to the
// algorithm when closing flow in `documentContinue`.
const indexBeforeExits = self.events.length;
let indexBeforeFlow = indexBeforeExits;
/** @type {boolean | undefined} */
let seen;
/** @type {Point | undefined} */
let point;
// Find the previous chunk (the one before the lazy line).
while (indexBeforeFlow--) {
if (self.events[indexBeforeFlow][0] === 'exit' && self.events[indexBeforeFlow][1].type === "chunkFlow") {
if (seen) {
point = self.events[indexBeforeFlow][1].end;
break;
}
seen = true;
}
}
exitContainers(continued);
// Fix positions.
index = indexBeforeExits;
while (index < self.events.length) {
self.events[index][1].end = {
...point
};
index++;
}
// Inject the exits earlier (they're still also at the end).
splice(self.events, indexBeforeFlow + 1, 0, self.events.slice(indexBeforeExits));
// Discard the duplicate exits.
self.events.length = index;
}
}
/**
* @param {number} size
* Size.
* @returns {undefined}
* Nothing.
*/
function exitContainers(size) {
let index = stack.length;
// Exit open containers.
while (index-- > size) {
const entry = stack[index];
self.containerState = entry[1];
entry[0].exit.call(self, effects);
}
stack.length = size;
}
function closeFlow() {
childFlow.write([null]);
childToken = undefined;
childFlow = undefined;
self.containerState._closeFlow = undefined;
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
* Tokenizer.
*/
function tokenizeContainer(effects, ok, nok) {
// Always populated by defaults.
return factorySpace(effects, effects.attempt(this.parser.constructs.document, ok, nok), "linePrefix", this.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4);
}
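
The long comment in `writeToChild` is easier to follow with input: a worked example of the lazy-line cases it describes, assuming the package's top-level `micromark` export.

import { micromark } from 'micromark';

// "> a" followed by a lazy "b": the second line has no ">" marker but still
// continues the paragraph inside the block quote.
console.log(micromark('> a\nb'));
// roughly: <blockquote><p>a\nb</p></blockquote> (`b` stays inside the quote).

// Fenced code does not accept lazy lines, so the lazy "d" here is not
// swallowed by the code block (the second case in the comment above).
console.log(micromark('> ```c\nd'));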

4
node_modules/micromark/lib/initialize/flow.d.ts generated vendored Normal file

@@ -0,0 +1,4 @@
/** @type {InitialConstruct} */
export const flow: InitialConstruct;
import type { InitialConstruct } from 'micromark-util-types';
//# sourceMappingURL=flow.d.ts.map

1
node_modules/micromark/lib/initialize/flow.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"flow.d.ts","sourceRoot":"","sources":["flow.js"],"names":[],"mappings":"AAeA,+BAA+B;AAC/B,mBADW,gBAAgB,CACmB;sCAVpC,sBAAsB"}

58
node_modules/micromark/lib/initialize/flow.js generated vendored Normal file

@@ -0,0 +1,58 @@
/**
* @import {
* InitialConstruct,
* Initializer,
* State,
* TokenizeContext
* } from 'micromark-util-types'
*/
import { blankLine, content } from 'micromark-core-commonmark';
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding } from 'micromark-util-character';
/** @type {InitialConstruct} */
export const flow = {
tokenize: initializeFlow
};
/**
* @this {TokenizeContext}
* Self.
* @type {Initializer}
* Initializer.
*/
function initializeFlow(effects) {
const self = this;
const initial = effects.attempt(
// Try to parse a blank line.
blankLine, atBlankEnding,
// Try to parse initial flow (essentially, only code).
effects.attempt(this.parser.constructs.flowInitial, afterConstruct, factorySpace(effects, effects.attempt(this.parser.constructs.flow, afterConstruct, effects.attempt(content, afterConstruct)), "linePrefix")));
return initial;
/** @type {State} */
function atBlankEnding(code) {
if (code === null) {
effects.consume(code);
return;
}
effects.enter("lineEndingBlank");
effects.consume(code);
effects.exit("lineEndingBlank");
self.currentConstruct = undefined;
return initial;
}
/** @type {State} */
function afterConstruct(code) {
if (code === null) {
effects.consume(code);
return;
}
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
self.currentConstruct = undefined;
return initial;
}
}

8
node_modules/micromark/lib/initialize/text.d.ts generated vendored Normal file

@@ -0,0 +1,8 @@
export namespace resolver {
let resolveAll: Resolver;
}
export const string: InitialConstruct;
export const text: InitialConstruct;
import type { Resolver } from 'micromark-util-types';
import type { InitialConstruct } from 'micromark-util-types';
//# sourceMappingURL=text.d.ts.map

1
node_modules/micromark/lib/initialize/text.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"text.d.ts","sourceRoot":"","sources":["text.js"],"names":[],"mappings":";;;AAeA,sCAAiD;AACjD,oCAA6C;8BARnC,sBAAsB;sCAAtB,sBAAsB"}

212
node_modules/micromark/lib/initialize/text.js generated vendored Normal file

@@ -0,0 +1,212 @@
/**
* @import {
* Code,
* InitialConstruct,
* Initializer,
* Resolver,
* State,
* TokenizeContext
* } from 'micromark-util-types'
*/
export const resolver = {
resolveAll: createResolver()
};
export const string = initializeFactory('string');
export const text = initializeFactory('text');
/**
* @param {'string' | 'text'} field
* Field.
* @returns {InitialConstruct}
* Construct.
*/
function initializeFactory(field) {
return {
resolveAll: createResolver(field === 'text' ? resolveAllLineSuffixes : undefined),
tokenize: initializeText
};
/**
* @this {TokenizeContext}
* Context.
* @type {Initializer}
*/
function initializeText(effects) {
const self = this;
const constructs = this.parser.constructs[field];
const text = effects.attempt(constructs, start, notText);
return start;
/** @type {State} */
function start(code) {
return atBreak(code) ? text(code) : notText(code);
}
/** @type {State} */
function notText(code) {
if (code === null) {
effects.consume(code);
return;
}
effects.enter("data");
effects.consume(code);
return data;
}
/** @type {State} */
function data(code) {
if (atBreak(code)) {
effects.exit("data");
return text(code);
}
// Data.
effects.consume(code);
return data;
}
/**
* @param {Code} code
* Code.
* @returns {boolean}
* Whether the code is a break.
*/
function atBreak(code) {
if (code === null) {
return true;
}
const list = constructs[code];
let index = -1;
if (list) {
// Always populated by defaults.
while (++index < list.length) {
const item = list[index];
if (!item.previous || item.previous.call(self, self.previous)) {
return true;
}
}
}
return false;
}
}
}
/**
* @param {Resolver | undefined} [extraResolver]
* Resolver.
* @returns {Resolver}
* Resolver.
*/
function createResolver(extraResolver) {
return resolveAllText;
/** @type {Resolver} */
function resolveAllText(events, context) {
let index = -1;
/** @type {number | undefined} */
let enter;
// A rather boring computation (to merge adjacent `data` events) which
// improves mm performance by 29%.
while (++index <= events.length) {
if (enter === undefined) {
if (events[index] && events[index][1].type === "data") {
enter = index;
index++;
}
} else if (!events[index] || events[index][1].type !== "data") {
// Don't do anything if there is one data token.
if (index !== enter + 2) {
events[enter][1].end = events[index - 1][1].end;
events.splice(enter + 2, index - enter - 2);
index = enter + 2;
}
enter = undefined;
}
}
return extraResolver ? extraResolver(events, context) : events;
}
}
/**
* A rather ugly set of instructions which again looks at chunks in the input
* stream.
* The reason to do this here is that it is *much* faster to parse in reverse.
* And that we can't hook into `null` to split the line suffix before an EOF.
* To do: figure out if we can make this into a clean utility, or even in core.
* As it will be useful for GFM's literal autolink extension (and maybe even
* tables?)
*
* @type {Resolver}
*/
function resolveAllLineSuffixes(events, context) {
let eventIndex = 0; // Skip first.
while (++eventIndex <= events.length) {
if ((eventIndex === events.length || events[eventIndex][1].type === "lineEnding") && events[eventIndex - 1][1].type === "data") {
const data = events[eventIndex - 1][1];
const chunks = context.sliceStream(data);
let index = chunks.length;
let bufferIndex = -1;
let size = 0;
/** @type {boolean | undefined} */
let tabs;
while (index--) {
const chunk = chunks[index];
if (typeof chunk === 'string') {
bufferIndex = chunk.length;
while (chunk.charCodeAt(bufferIndex - 1) === 32) {
size++;
bufferIndex--;
}
if (bufferIndex) break;
bufferIndex = -1;
}
// Number
else if (chunk === -2) {
tabs = true;
size++;
} else if (chunk === -1) {
// Empty
} else {
// Replacement character, exit.
index++;
break;
}
}
// Allow final trailing whitespace.
if (context._contentTypeTextTrailing && eventIndex === events.length) {
size = 0;
}
if (size) {
const token = {
type: eventIndex === events.length || tabs || size < 2 ? "lineSuffix" : "hardBreakTrailing",
start: {
_bufferIndex: index ? bufferIndex : data.start._bufferIndex + bufferIndex,
_index: data.start._index + index,
line: data.end.line,
column: data.end.column - size,
offset: data.end.offset - size
},
end: {
...data.end
}
};
data.end = {
...token.start
};
if (data.start.offset === data.end.offset) {
Object.assign(data, token);
} else {
events.splice(eventIndex, 0, ['enter', token, context], ['exit', token, context]);
eventIndex += 2;
}
}
eventIndex++;
}
}
return events;
}
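
What `resolveAllLineSuffixes` classifies is easiest to see from output: trailing whitespace before a line ending becomes either a plain `lineSuffix` or, for two or more spaces, a `hardBreakTrailing`. A small example, assuming the package's top-level `micromark` export:

import { micromark } from 'micromark';

// One trailing space: only a lineSuffix token, rendered as a soft break.
console.log(micromark('a \nb')); // '<p>a\nb</p>'

// Two or more trailing spaces: hardBreakTrailing, rendered as <br />.
console.log(micromark('a  \nb')); // '<p>a<br />\nb</p>'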

10
node_modules/micromark/lib/parse.d.ts generated vendored Normal file

@@ -0,0 +1,10 @@
/**
* @param {ParseOptions | null | undefined} [options]
* Configuration (optional).
* @returns {ParseContext}
* Parser.
*/
export function parse(options?: ParseOptions | null | undefined): ParseContext;
import type { ParseOptions } from 'micromark-util-types';
import type { ParseContext } from 'micromark-util-types';
//# sourceMappingURL=parse.d.ts.map

1
node_modules/micromark/lib/parse.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"parse.d.ts","sourceRoot":"","sources":["parse.js"],"names":[],"mappings":"AAkBA;;;;;GAKG;AACH,gCALW,YAAY,GAAG,IAAI,GAAG,SAAS,GAE7B,YAAY,CAoCxB;kCAlDS,sBAAsB;kCAAtB,sBAAsB"}

56
node_modules/micromark/lib/parse.js generated vendored Normal file

@@ -0,0 +1,56 @@
/**
* @import {
* Create,
* FullNormalizedExtension,
* InitialConstruct,
* ParseContext,
* ParseOptions
* } from 'micromark-util-types'
*/
import { combineExtensions } from 'micromark-util-combine-extensions';
import { content } from './initialize/content.js';
import { document } from './initialize/document.js';
import { flow } from './initialize/flow.js';
import { string, text } from './initialize/text.js';
import * as defaultConstructs from './constructs.js';
import { createTokenizer } from './create-tokenizer.js';
/**
* @param {ParseOptions | null | undefined} [options]
* Configuration (optional).
* @returns {ParseContext}
* Parser.
*/
export function parse(options) {
const settings = options || {};
const constructs = /** @type {FullNormalizedExtension} */
combineExtensions([defaultConstructs, ...(settings.extensions || [])]);
/** @type {ParseContext} */
const parser = {
constructs,
content: create(content),
defined: [],
document: create(document),
flow: create(flow),
lazy: {},
string: create(string),
text: create(text)
};
return parser;
/**
* @param {InitialConstruct} initial
* Construct to start with.
* @returns {Create}
* Create a tokenizer.
*/
function create(initial) {
return creator;
/** @type {Create} */
function creator(from) {
return createTokenizer(parser, initial, from);
}
}
}
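
Taken together, the files in this directory form the parse, postprocess, and compile pipeline; the sketch below mirrors what the package entry point does (relative import paths and the sample input are assumptions):

import { compile } from './compile.js';
import { parse } from './parse.js';
import { postprocess } from './postprocess.js';
import { preprocess } from './preprocess.js';

/** Turn markdown into HTML with the vendored lib files (sketch only). */
function render(value, options) {
  return compile(options)(
    postprocess(
      parse(options).document().write(preprocess()(value, undefined, true))
    )
  );
}

console.log(render('*hi*')); // '<p><em>hi</em></p>'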

9
node_modules/micromark/lib/postprocess.d.ts generated vendored Normal file

@@ -0,0 +1,9 @@
/**
* @param {Array<Event>} events
* Events.
* @returns {Array<Event>}
* Events.
*/
export function postprocess(events: Array<Event>): Array<Event>;
import type { Event } from 'micromark-util-types';
//# sourceMappingURL=postprocess.d.ts.map

1
node_modules/micromark/lib/postprocess.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"postprocess.d.ts","sourceRoot":"","sources":["postprocess.js"],"names":[],"mappings":"AAMA;;;;;GAKG;AACH,oCALW,KAAK,CAAC,KAAK,CAAC,GAEV,KAAK,CAAC,KAAK,CAAC,CASxB;2BAjBuB,sBAAsB"}

18
node_modules/micromark/lib/postprocess.js generated vendored Normal file

@@ -0,0 +1,18 @@
/**
* @import {Event} from 'micromark-util-types'
*/
import { subtokenize } from 'micromark-util-subtokenize';
/**
* @param {Array<Event>} events
* Events.
* @returns {Array<Event>}
* Events.
*/
export function postprocess(events) {
while (!subtokenize(events)) {
// Empty
}
return events;
}

13
node_modules/micromark/lib/preprocess.d.ts generated vendored Normal file

@@ -0,0 +1,13 @@
/**
* @returns {Preprocessor}
* Preprocess a value.
*/
export function preprocess(): Preprocessor;
/**
* Preprocess a value.
*/
export type Preprocessor = (value: Value, encoding?: Encoding | null | undefined, end?: boolean | null | undefined) => Array<Chunk>;
import type { Value } from 'micromark-util-types';
import type { Encoding } from 'micromark-util-types';
import type { Chunk } from 'micromark-util-types';
//# sourceMappingURL=preprocess.d.ts.map

1
node_modules/micromark/lib/preprocess.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"preprocess.d.ts","sourceRoot":"","sources":["preprocess.js"],"names":[],"mappings":"AAqBA;;;GAGG;AACH,8BAHa,YAAY,CAsHxB;;;;mCArIU,KAAK,aAEL,QAAQ,GAAG,IAAI,GAAG,SAAS,QAE3B,OAAO,GAAG,IAAI,GAAG,SAAS,KAExB,KAAK,CAAC,KAAK,CAAC;2BAZsB,sBAAsB;8BAAtB,sBAAsB;2BAAtB,sBAAsB"}

115
node_modules/micromark/lib/preprocess.js generated vendored Normal file

@@ -0,0 +1,115 @@
/**
* @import {Chunk, Code, Encoding, Value} from 'micromark-util-types'
*/
/**
* @callback Preprocessor
* Preprocess a value.
* @param {Value} value
* Value.
* @param {Encoding | null | undefined} [encoding]
* Encoding when `value` is a typed array (optional).
* @param {boolean | null | undefined} [end=false]
* Whether this is the last chunk (default: `false`).
* @returns {Array<Chunk>}
* Chunks.
*/
const search = /[\0\t\n\r]/g;
/**
* @returns {Preprocessor}
* Preprocess a value.
*/
export function preprocess() {
let column = 1;
let buffer = '';
/** @type {boolean | undefined} */
let start = true;
/** @type {boolean | undefined} */
let atCarriageReturn;
return preprocessor;
/** @type {Preprocessor} */
// eslint-disable-next-line complexity
function preprocessor(value, encoding, end) {
/** @type {Array<Chunk>} */
const chunks = [];
/** @type {RegExpMatchArray | null} */
let match;
/** @type {number} */
let next;
/** @type {number} */
let startPosition;
/** @type {number} */
let endPosition;
/** @type {Code} */
let code;
value = buffer + (typeof value === 'string' ? value.toString() : new TextDecoder(encoding || undefined).decode(value));
startPosition = 0;
buffer = '';
if (start) {
// To do: `markdown-rs` actually parses BOMs (byte order mark).
if (value.charCodeAt(0) === 65279) {
startPosition++;
}
start = undefined;
}
while (startPosition < value.length) {
search.lastIndex = startPosition;
match = search.exec(value);
endPosition = match && match.index !== undefined ? match.index : value.length;
code = value.charCodeAt(endPosition);
if (!match) {
buffer = value.slice(startPosition);
break;
}
if (code === 10 && startPosition === endPosition && atCarriageReturn) {
chunks.push(-3);
atCarriageReturn = undefined;
} else {
if (atCarriageReturn) {
chunks.push(-5);
atCarriageReturn = undefined;
}
if (startPosition < endPosition) {
chunks.push(value.slice(startPosition, endPosition));
column += endPosition - startPosition;
}
switch (code) {
case 0:
{
chunks.push(65533);
column++;
break;
}
case 9:
{
next = Math.ceil(column / 4) * 4;
chunks.push(-2);
while (column++ < next) chunks.push(-1);
break;
}
case 10:
{
chunks.push(-4);
column = 1;
break;
}
default:
{
atCarriageReturn = true;
column = 1;
}
}
}
startPosition = endPosition + 1;
}
if (end) {
if (atCarriageReturn) chunks.push(-5);
if (buffer) chunks.push(buffer);
chunks.push(null);
}
return chunks;
}
}
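
A worked example of the chunk encoding this file produces; the input string is illustrative and the result follows the code above:

import { preprocess } from './preprocess.js';

console.log(preprocess()('a\tb\r\nc', undefined, true));
// -> ['a', -2, -1, -1, 'b', -3, 'c', null]
//    'a'          text before the tab
//    -2, -1, -1   the tab plus two virtual spaces (the next character lands on column 5)
//    'b'          text before the line ending
//    -3           the CR + LF pair collapsed into a single code
//    'c'          text held in `buffer` until `end` is true
//    null         end-of-stream marker (pushed only because `end` is true)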