Refactor routing in App component to enhance navigation and improve error handling by integrating dynamic routes and updating the NotFound route.

becarta
2025-05-23 12:43:00 +02:00
parent f40db0f5c9
commit a544759a3b
11127 changed files with 1647032 additions and 0 deletions


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const attention: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=attention.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"attention.d.ts","sourceRoot":"","sources":["attention.js"],"names":[],"mappings":"AAoBA,wBAAwB;AACxB,wBADW,SAAS,CAKnB;+BAdS,sBAAsB"}

node_modules/micromark-core-commonmark/lib/attention.js (generated, vendored, 241 lines)

@@ -0,0 +1,241 @@
/**
* @import {
* Code,
* Construct,
* Event,
* Point,
* Resolver,
* State,
* TokenizeContext,
* Tokenizer,
* Token
* } from 'micromark-util-types'
*/
import { push, splice } from 'micromark-util-chunked';
import { classifyCharacter } from 'micromark-util-classify-character';
import { resolveAll } from 'micromark-util-resolve-all';
/** @type {Construct} */
export const attention = {
name: 'attention',
resolveAll: resolveAllAttention,
tokenize: tokenizeAttention
};
/**
* Take all events and resolve attention to emphasis or strong.
*
* @type {Resolver}
*/
// eslint-disable-next-line complexity
function resolveAllAttention(events, context) {
let index = -1;
/** @type {number} */
let open;
/** @type {Token} */
let group;
/** @type {Token} */
let text;
/** @type {Token} */
let openingSequence;
/** @type {Token} */
let closingSequence;
/** @type {number} */
let use;
/** @type {Array<Event>} */
let nextEvents;
/** @type {number} */
let offset;
// Walk through all events.
//
// Note: performance of this is fine on an mb of normal markdown, but it's
// a bottleneck for malicious stuff.
while (++index < events.length) {
// Find a token that can close.
if (events[index][0] === 'enter' && events[index][1].type === 'attentionSequence' && events[index][1]._close) {
open = index;
// Now walk back to find an opener.
while (open--) {
// Find a token that can open the closer.
if (events[open][0] === 'exit' && events[open][1].type === 'attentionSequence' && events[open][1]._open &&
// If the markers are the same:
context.sliceSerialize(events[open][1]).charCodeAt(0) === context.sliceSerialize(events[index][1]).charCodeAt(0)) {
// If the opening can close or the closing can open,
// and the close size *is not* a multiple of three,
// but the sum of the opening and closing size *is* multiple of three,
// then don't match.
if ((events[open][1]._close || events[index][1]._open) && (events[index][1].end.offset - events[index][1].start.offset) % 3 && !((events[open][1].end.offset - events[open][1].start.offset + events[index][1].end.offset - events[index][1].start.offset) % 3)) {
continue;
}
// Number of markers to use from the sequence.
use = events[open][1].end.offset - events[open][1].start.offset > 1 && events[index][1].end.offset - events[index][1].start.offset > 1 ? 2 : 1;
const start = {
...events[open][1].end
};
const end = {
...events[index][1].start
};
movePoint(start, -use);
movePoint(end, use);
openingSequence = {
type: use > 1 ? "strongSequence" : "emphasisSequence",
start,
end: {
...events[open][1].end
}
};
closingSequence = {
type: use > 1 ? "strongSequence" : "emphasisSequence",
start: {
...events[index][1].start
},
end
};
text = {
type: use > 1 ? "strongText" : "emphasisText",
start: {
...events[open][1].end
},
end: {
...events[index][1].start
}
};
group = {
type: use > 1 ? "strong" : "emphasis",
start: {
...openingSequence.start
},
end: {
...closingSequence.end
}
};
events[open][1].end = {
...openingSequence.start
};
events[index][1].start = {
...closingSequence.end
};
nextEvents = [];
// If there are more markers in the opening, add them before.
if (events[open][1].end.offset - events[open][1].start.offset) {
nextEvents = push(nextEvents, [['enter', events[open][1], context], ['exit', events[open][1], context]]);
}
// Opening.
nextEvents = push(nextEvents, [['enter', group, context], ['enter', openingSequence, context], ['exit', openingSequence, context], ['enter', text, context]]);
// Always populated by defaults.
// Between.
nextEvents = push(nextEvents, resolveAll(context.parser.constructs.insideSpan.null, events.slice(open + 1, index), context));
// Closing.
nextEvents = push(nextEvents, [['exit', text, context], ['enter', closingSequence, context], ['exit', closingSequence, context], ['exit', group, context]]);
// If there are more markers in the closing, add them after.
if (events[index][1].end.offset - events[index][1].start.offset) {
offset = 2;
nextEvents = push(nextEvents, [['enter', events[index][1], context], ['exit', events[index][1], context]]);
} else {
offset = 0;
}
splice(events, open - 1, index - open + 3, nextEvents);
index = open + nextEvents.length - offset - 2;
break;
}
}
}
}
// Remove remaining sequences.
index = -1;
while (++index < events.length) {
if (events[index][1].type === 'attentionSequence') {
events[index][1].type = 'data';
}
}
return events;
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeAttention(effects, ok) {
const attentionMarkers = this.parser.constructs.attentionMarkers.null;
const previous = this.previous;
const before = classifyCharacter(previous);
/** @type {NonNullable<Code>} */
let marker;
return start;
/**
* Before a sequence.
*
* ```markdown
* > | **
* ^
* ```
*
* @type {State}
*/
function start(code) {
marker = code;
effects.enter('attentionSequence');
return inside(code);
}
/**
* In a sequence.
*
* ```markdown
* > | **
* ^^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === marker) {
effects.consume(code);
return inside;
}
const token = effects.exit('attentionSequence');
// To do: next major: move this to resolver, just like `markdown-rs`.
const after = classifyCharacter(code);
// Always populated by defaults.
const open = !after || after === 2 && before || attentionMarkers.includes(code);
const close = !before || before === 2 && after || attentionMarkers.includes(previous);
token._open = Boolean(marker === 42 ? open : open && (before || !close));
token._close = Boolean(marker === 42 ? close : close && (after || !open));
return ok(code);
}
}
/**
* Move a point a bit.
*
* Note: `move` only works inside lines! It's not possible to move past other
* chunks (replacement characters, tabs, or line endings).
*
* @param {Point} point
* Point.
* @param {number} offset
* Amount to move.
* @returns {undefined}
* Nothing.
*/
function movePoint(point, offset) {
point.column += offset;
point.offset += offset;
point._bufferIndex += offset;
}
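
The resolver above pairs `*`/`_` runs into emphasis and strong per CommonMark's attention rules (including the multiple-of-three restriction noted in the comments). A minimal sketch of the observable behaviour through the public `micromark` compiler, assuming that package is available alongside these vendored internals:

import {micromark} from 'micromark';

// Attention sequences are paired by the resolver above into emphasis/strong.
console.log(micromark('*a* and **b**'));
// expected (CommonMark): <p><em>a</em> and <strong>b</strong></p>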


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const autolink: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=autolink.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"autolink.d.ts","sourceRoot":"","sources":["autolink.js"],"names":[],"mappings":"AAkBA,wBAAwB;AACxB,uBADW,SAAS,CACkD;+BAb5D,sBAAsB"}

node_modules/micromark-core-commonmark/lib/autolink.js (generated, vendored, 233 lines)

@@ -0,0 +1,233 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { asciiAlphanumeric, asciiAlpha, asciiAtext, asciiControl } from 'micromark-util-character';
/** @type {Construct} */
export const autolink = {
name: 'autolink',
tokenize: tokenizeAutolink
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeAutolink(effects, ok, nok) {
let size = 0;
return start;
/**
* Start of an autolink.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("autolink");
effects.enter("autolinkMarker");
effects.consume(code);
effects.exit("autolinkMarker");
effects.enter("autolinkProtocol");
return open;
}
/**
* After `<`, at protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function open(code) {
if (asciiAlpha(code)) {
effects.consume(code);
return schemeOrEmailAtext;
}
if (code === 64) {
return nok(code);
}
return emailAtext(code);
}
/**
* At second byte of protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function schemeOrEmailAtext(code) {
// ASCII alphanumeric and `+`, `-`, and `.`.
if (code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) {
// Count the previous alphabetical from `open` too.
size = 1;
return schemeInsideOrEmailAtext(code);
}
return emailAtext(code);
}
/**
* In ambiguous protocol or atext.
*
* ```markdown
* > | a<https://example.com>b
* ^
* > | a<user@example.com>b
* ^
* ```
*
* @type {State}
*/
function schemeInsideOrEmailAtext(code) {
if (code === 58) {
effects.consume(code);
size = 0;
return urlInside;
}
// ASCII alphanumeric and `+`, `-`, and `.`.
if ((code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) && size++ < 32) {
effects.consume(code);
return schemeInsideOrEmailAtext;
}
size = 0;
return emailAtext(code);
}
/**
* After protocol, in URL.
*
* ```markdown
* > | a<https://example.com>b
* ^
* ```
*
* @type {State}
*/
function urlInside(code) {
if (code === 62) {
effects.exit("autolinkProtocol");
effects.enter("autolinkMarker");
effects.consume(code);
effects.exit("autolinkMarker");
effects.exit("autolink");
return ok;
}
// ASCII control, space, or `<`.
if (code === null || code === 32 || code === 60 || asciiControl(code)) {
return nok(code);
}
effects.consume(code);
return urlInside;
}
/**
* In email atext.
*
* ```markdown
* > | a<user.name@example.com>b
* ^
* ```
*
* @type {State}
*/
function emailAtext(code) {
if (code === 64) {
effects.consume(code);
return emailAtSignOrDot;
}
if (asciiAtext(code)) {
effects.consume(code);
return emailAtext;
}
return nok(code);
}
/**
* In label, after at-sign or dot.
*
* ```markdown
* > | a<user.name@example.com>b
* ^ ^
* ```
*
* @type {State}
*/
function emailAtSignOrDot(code) {
return asciiAlphanumeric(code) ? emailLabel(code) : nok(code);
}
/**
* In label, where `.` and `>` are allowed.
*
* ```markdown
* > | a<user.name@example.com>b
* ^
* ```
*
* @type {State}
*/
function emailLabel(code) {
if (code === 46) {
effects.consume(code);
size = 0;
return emailAtSignOrDot;
}
if (code === 62) {
// Exit, then change the token type.
effects.exit("autolinkProtocol").type = "autolinkEmail";
effects.enter("autolinkMarker");
effects.consume(code);
effects.exit("autolinkMarker");
effects.exit("autolink");
return ok;
}
return emailValue(code);
}
/**
* In label, where `.` and `>` are *not* allowed.
*
* Though, this is also used in `emailLabel` to parse other values.
*
* ```markdown
* > | a<user.name@ex-ample.com>b
* ^
* ```
*
* @type {State}
*/
function emailValue(code) {
// ASCII alphanumeric or `-`.
if ((code === 45 || asciiAlphanumeric(code)) && size++ < 63) {
const next = code === 45 ? emailValue : emailLabel;
effects.consume(code);
return next;
}
return nok(code);
}
}
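
The tokenizer above accepts `<scheme:…>` URL autolinks (2–32 character scheme) and `<user@host>` email autolinks (labels capped at 63 characters). A hedged usage sketch via the public `micromark` API, which is assumed to be installed in this tree:

import {micromark} from 'micromark';

console.log(micromark('<https://example.com> and <user@example.com>'));
// expected: both become <a> elements; the second gets a `mailto:` href, e.g.
// <p><a href="https://example.com">https://example.com</a> and <a href="mailto:user@example.com">user@example.com</a></p>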


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const blankLine: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=blank-line.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"blank-line.d.ts","sourceRoot":"","sources":["blank-line.js"],"names":[],"mappings":"AAaA,wBAAwB;AACxB,wBADW,SAAS,CACiD;+BAR3D,sBAAsB"}


@@ -0,0 +1,61 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding, markdownSpace } from 'micromark-util-character';
/** @type {Construct} */
export const blankLine = {
partial: true,
tokenize: tokenizeBlankLine
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeBlankLine(effects, ok, nok) {
return start;
/**
* Start of blank line.
*
* > 👉 **Note**: `␠` represents a space character.
*
* ```markdown
* > | ␠␠␊
* ^
* > | ␊
* ^
* ```
*
* @type {State}
*/
function start(code) {
return markdownSpace(code) ? factorySpace(effects, after, "linePrefix")(code) : after(code);
}
/**
* At eof/eol, after optional whitespace.
*
* > 👉 **Note**: `␠` represents a space character.
*
* ```markdown
* > | ␠␠␊
* ^
* > | ␊
* ^
* ```
*
* @type {State}
*/
function after(code) {
return code === null || markdownLineEnding(code) ? ok(code) : nok(code);
}
}
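
This partial construct only checks that a line holds nothing but optional whitespace; other constructs use it to decide where flow content ends. A small illustration through `micromark` (an assumption, not part of this file):

import {micromark} from 'micromark';

// The blank line between 'a' and 'b' closes the first paragraph.
console.log(micromark('a\n\nb'));
// expected: <p>a</p>\n<p>b</p>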


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const blockQuote: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=block-quote.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"block-quote.d.ts","sourceRoot":"","sources":["block-quote.js"],"names":[],"mappings":"AAeA,wBAAwB;AACxB,yBADW,SAAS,CAMnB;+BAdS,sBAAsB"}


@@ -0,0 +1,143 @@
/**
* @import {
* Construct,
* Exiter,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownSpace } from 'micromark-util-character';
/** @type {Construct} */
export const blockQuote = {
continuation: {
tokenize: tokenizeBlockQuoteContinuation
},
exit,
name: 'blockQuote',
tokenize: tokenizeBlockQuoteStart
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeBlockQuoteStart(effects, ok, nok) {
const self = this;
return start;
/**
* Start of block quote.
*
* ```markdown
* > | > a
* ^
* ```
*
* @type {State}
*/
function start(code) {
if (code === 62) {
const state = self.containerState;
if (!state.open) {
effects.enter("blockQuote", {
_container: true
});
state.open = true;
}
effects.enter("blockQuotePrefix");
effects.enter("blockQuoteMarker");
effects.consume(code);
effects.exit("blockQuoteMarker");
return after;
}
return nok(code);
}
/**
* After `>`, before optional whitespace.
*
* ```markdown
* > | > a
* ^
* ```
*
* @type {State}
*/
function after(code) {
if (markdownSpace(code)) {
effects.enter("blockQuotePrefixWhitespace");
effects.consume(code);
effects.exit("blockQuotePrefixWhitespace");
effects.exit("blockQuotePrefix");
return ok;
}
effects.exit("blockQuotePrefix");
return ok(code);
}
}
/**
* Start of block quote continuation.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeBlockQuoteContinuation(effects, ok, nok) {
const self = this;
return contStart;
/**
* Start of block quote continuation.
*
* Also used to parse the first block quote opening.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @type {State}
*/
function contStart(code) {
if (markdownSpace(code)) {
// Always populated by defaults.
return factorySpace(effects, contBefore, "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code);
}
return contBefore(code);
}
/**
* At `>`, after optional whitespace.
*
* Also used to parse the first block quote opening.
*
* ```markdown
* | > a
* > | > b
* ^
* ```
*
* @type {State}
*/
function contBefore(code) {
return effects.attempt(blockQuote, ok, nok)(code);
}
}
/** @type {Exiter} */
function exit(effects) {
effects.exit("blockQuote");
}
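
Each `>` marker (plus one optional space) opens or continues a block quote; the continuation tokenizer re-attempts the same construct on later lines. A sketch of the effect through the public compiler (assumed available):

import {micromark} from 'micromark';

console.log(micromark('> a\n> b'));
// expected: <blockquote>\n<p>a\nb</p>\n</blockquote>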


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const characterEscape: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=character-escape.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"character-escape.d.ts","sourceRoot":"","sources":["character-escape.js"],"names":[],"mappings":"AAaA,wBAAwB;AACxB,8BADW,SAAS,CAInB;+BAXS,sBAAsB"}


@@ -0,0 +1,64 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { asciiPunctuation } from 'micromark-util-character';
/** @type {Construct} */
export const characterEscape = {
name: 'characterEscape',
tokenize: tokenizeCharacterEscape
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeCharacterEscape(effects, ok, nok) {
return start;
/**
* Start of character escape.
*
* ```markdown
* > | a\*b
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("characterEscape");
effects.enter("escapeMarker");
effects.consume(code);
effects.exit("escapeMarker");
return inside;
}
/**
* After `\`, at punctuation.
*
* ```markdown
* > | a\*b
* ^
* ```
*
* @type {State}
*/
function inside(code) {
// ASCII punctuation.
if (asciiPunctuation(code)) {
effects.enter("characterEscapeValue");
effects.consume(code);
effects.exit("characterEscapeValue");
effects.exit("characterEscape");
return ok;
}
return nok(code);
}
}
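
A backslash escapes ASCII punctuation only; before anything else it stays literal. For example (sketch, assuming `micromark` is installed):

import {micromark} from 'micromark';

console.log(micromark('a\\*b and a\\b'));
// expected: <p>a*b and a\b</p> (the first backslash escapes `*`, the second is kept)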


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const characterReference: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=character-reference.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"character-reference.d.ts","sourceRoot":"","sources":["character-reference.js"],"names":[],"mappings":"AAmBA,wBAAwB;AACxB,iCADW,SAAS,CAInB;+BAhBS,sBAAsB"}


@@ -0,0 +1,149 @@
/**
* @import {
* Code,
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { decodeNamedCharacterReference } from 'decode-named-character-reference';
import { asciiAlphanumeric, asciiDigit, asciiHexDigit } from 'micromark-util-character';
/** @type {Construct} */
export const characterReference = {
name: 'characterReference',
tokenize: tokenizeCharacterReference
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeCharacterReference(effects, ok, nok) {
const self = this;
let size = 0;
/** @type {number} */
let max;
/** @type {(code: Code) => boolean} */
let test;
return start;
/**
* Start of character reference.
*
* ```markdown
* > | a&amp;b
* ^
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("characterReference");
effects.enter("characterReferenceMarker");
effects.consume(code);
effects.exit("characterReferenceMarker");
return open;
}
/**
* After `&`, at `#` for numeric references or alphanumeric for named
* references.
*
* ```markdown
* > | a&amp;b
* ^
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function open(code) {
if (code === 35) {
effects.enter("characterReferenceMarkerNumeric");
effects.consume(code);
effects.exit("characterReferenceMarkerNumeric");
return numeric;
}
effects.enter("characterReferenceValue");
max = 31;
test = asciiAlphanumeric;
return value(code);
}
/**
* After `#`, at `x` for hexadecimals or digit for decimals.
*
* ```markdown
* > | a&#123;b
* ^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function numeric(code) {
if (code === 88 || code === 120) {
effects.enter("characterReferenceMarkerHexadecimal");
effects.consume(code);
effects.exit("characterReferenceMarkerHexadecimal");
effects.enter("characterReferenceValue");
max = 6;
test = asciiHexDigit;
return value;
}
effects.enter("characterReferenceValue");
max = 7;
test = asciiDigit;
return value(code);
}
/**
* After markers (`&#x`, `&#`, or `&`), in value, before `;`.
*
* The character reference kind defines what and how many characters are
* allowed.
*
* ```markdown
* > | a&amp;b
* ^^^
* > | a&#123;b
* ^^^
* > | a&#x9;b
* ^
* ```
*
* @type {State}
*/
function value(code) {
if (code === 59 && size) {
const token = effects.exit("characterReferenceValue");
if (test === asciiAlphanumeric && !decodeNamedCharacterReference(self.sliceSerialize(token))) {
return nok(code);
}
// To do: `markdown-rs` uses a different name:
// `CharacterReferenceMarkerSemi`.
effects.enter("characterReferenceMarker");
effects.consume(code);
effects.exit("characterReferenceMarker");
effects.exit("characterReference");
return ok;
}
if (test(code) && size++ < max) {
effects.consume(code);
return value;
}
return nok(code);
}
}
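
Named references must decode via `decode-named-character-reference`, while numeric references are capped at 7 decimal or 6 hexadecimal digits. A quick sketch of the effect (again via the assumed `micromark` package):

import {micromark} from 'micromark';

console.log(micromark('&amp; &#35; &#x41; &unknownref;'));
// expected: <p>&amp; # A &amp;unknownref;</p> (the unknown name is left as literal text)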


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const codeFenced: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=code-fenced.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"code-fenced.d.ts","sourceRoot":"","sources":["code-fenced.js"],"names":[],"mappings":"AAqBA,wBAAwB;AACxB,yBADW,SAAS,CAKnB;+BAnBS,sBAAsB"}


@@ -0,0 +1,460 @@
/**
* @import {
* Code,
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding, markdownSpace } from 'micromark-util-character';
/** @type {Construct} */
const nonLazyContinuation = {
partial: true,
tokenize: tokenizeNonLazyContinuation
};
/** @type {Construct} */
export const codeFenced = {
concrete: true,
name: 'codeFenced',
tokenize: tokenizeCodeFenced
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeCodeFenced(effects, ok, nok) {
const self = this;
/** @type {Construct} */
const closeStart = {
partial: true,
tokenize: tokenizeCloseStart
};
let initialPrefix = 0;
let sizeOpen = 0;
/** @type {NonNullable<Code>} */
let marker;
return start;
/**
* Start of code.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse whitespace like `markdown-rs`.
return beforeSequenceOpen(code);
}
/**
* In opening fence, after prefix, at sequence.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function beforeSequenceOpen(code) {
const tail = self.events[self.events.length - 1];
initialPrefix = tail && tail[1].type === "linePrefix" ? tail[2].sliceSerialize(tail[1], true).length : 0;
marker = code;
effects.enter("codeFenced");
effects.enter("codeFencedFence");
effects.enter("codeFencedFenceSequence");
return sequenceOpen(code);
}
/**
* In opening fence sequence.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === marker) {
sizeOpen++;
effects.consume(code);
return sequenceOpen;
}
if (sizeOpen < 3) {
return nok(code);
}
effects.exit("codeFencedFenceSequence");
return markdownSpace(code) ? factorySpace(effects, infoBefore, "whitespace")(code) : infoBefore(code);
}
/**
* In opening fence, after the sequence (and optional whitespace), before info.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function infoBefore(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("codeFencedFence");
return self.interrupt ? ok(code) : effects.check(nonLazyContinuation, atNonLazyBreak, after)(code);
}
effects.enter("codeFencedFenceInfo");
effects.enter("chunkString", {
contentType: "string"
});
return info(code);
}
/**
* In info.
*
* ```markdown
* > | ~~~js
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function info(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("chunkString");
effects.exit("codeFencedFenceInfo");
return infoBefore(code);
}
if (markdownSpace(code)) {
effects.exit("chunkString");
effects.exit("codeFencedFenceInfo");
return factorySpace(effects, metaBefore, "whitespace")(code);
}
if (code === 96 && code === marker) {
return nok(code);
}
effects.consume(code);
return info;
}
/**
* In opening fence, after info and whitespace, before meta.
*
* ```markdown
* > | ~~~js eval
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function metaBefore(code) {
if (code === null || markdownLineEnding(code)) {
return infoBefore(code);
}
effects.enter("codeFencedFenceMeta");
effects.enter("chunkString", {
contentType: "string"
});
return meta(code);
}
/**
* In meta.
*
* ```markdown
* > | ~~~js eval
* ^
* | alert(1)
* | ~~~
* ```
*
* @type {State}
*/
function meta(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("chunkString");
effects.exit("codeFencedFenceMeta");
return infoBefore(code);
}
if (code === 96 && code === marker) {
return nok(code);
}
effects.consume(code);
return meta;
}
/**
* At eol/eof in code, before a non-lazy closing fence or content.
*
* ```markdown
* > | ~~~js
* ^
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function atNonLazyBreak(code) {
return effects.attempt(closeStart, after, contentBefore)(code);
}
/**
* Before code content, not a closing fence, at eol.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function contentBefore(code) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return contentStart;
}
/**
* Before code content, not a closing fence.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function contentStart(code) {
return initialPrefix > 0 && markdownSpace(code) ? factorySpace(effects, beforeContentChunk, "linePrefix", initialPrefix + 1)(code) : beforeContentChunk(code);
}
/**
* Before code content, after optional prefix.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^
* | ~~~
* ```
*
* @type {State}
*/
function beforeContentChunk(code) {
if (code === null || markdownLineEnding(code)) {
return effects.check(nonLazyContinuation, atNonLazyBreak, after)(code);
}
effects.enter("codeFlowValue");
return contentChunk(code);
}
/**
* In code content.
*
* ```markdown
* | ~~~js
* > | alert(1)
* ^^^^^^^^
* | ~~~
* ```
*
* @type {State}
*/
function contentChunk(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("codeFlowValue");
return beforeContentChunk(code);
}
effects.consume(code);
return contentChunk;
}
/**
* After code.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function after(code) {
effects.exit("codeFenced");
return ok(code);
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeCloseStart(effects, ok, nok) {
let size = 0;
return startBefore;
/**
*
*
* @type {State}
*/
function startBefore(code) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return start;
}
/**
* Before closing fence, at optional whitespace.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function start(code) {
// Always populated by defaults.
// To do: `enter` here or in next state?
effects.enter("codeFencedFence");
return markdownSpace(code) ? factorySpace(effects, beforeSequenceClose, "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code) : beforeSequenceClose(code);
}
/**
* In closing fence, after optional whitespace, at sequence.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function beforeSequenceClose(code) {
if (code === marker) {
effects.enter("codeFencedFenceSequence");
return sequenceClose(code);
}
return nok(code);
}
/**
* In closing fence sequence.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function sequenceClose(code) {
if (code === marker) {
size++;
effects.consume(code);
return sequenceClose;
}
if (size >= sizeOpen) {
effects.exit("codeFencedFenceSequence");
return markdownSpace(code) ? factorySpace(effects, sequenceCloseAfter, "whitespace")(code) : sequenceCloseAfter(code);
}
return nok(code);
}
/**
* After closing fence sequence, after optional whitespace.
*
* ```markdown
* | ~~~js
* | alert(1)
* > | ~~~
* ^
* ```
*
* @type {State}
*/
function sequenceCloseAfter(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("codeFencedFence");
return ok(code);
}
return nok(code);
}
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeNonLazyContinuation(effects, ok, nok) {
const self = this;
return start;
/**
*
*
* @type {State}
*/
function start(code) {
if (code === null) {
return nok(code);
}
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return lineStart;
}
/**
*
*
* @type {State}
*/
function lineStart(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code);
}
}
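
An opening fence of three or more backticks or tildes, an optional info string, and a closing fence at least as long delimit fenced code. Illustrative usage (assuming the `micromark` package):

import {micromark} from 'micromark';

console.log(micromark('```js\nalert(1)\n```'));
// expected: <pre><code class="language-js">alert(1)\n</code></pre>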


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const codeIndented: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=code-indented.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"code-indented.d.ts","sourceRoot":"","sources":["code-indented.js"],"names":[],"mappings":"AAcA,wBAAwB;AACxB,2BADW,SAAS,CAInB;+BAZS,sBAAsB"}


@@ -0,0 +1,177 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding, markdownSpace } from 'micromark-util-character';
/** @type {Construct} */
export const codeIndented = {
name: 'codeIndented',
tokenize: tokenizeCodeIndented
};
/** @type {Construct} */
const furtherStart = {
partial: true,
tokenize: tokenizeFurtherStart
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeCodeIndented(effects, ok, nok) {
const self = this;
return start;
/**
* Start of code (indented).
*
* > **Parsing note**: it is not needed to check if this first line is a
* > filled line (that it has a non-whitespace character), because blank lines
* > are parsed already, so we never run into that.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: manually check if interrupting like `markdown-rs`.
effects.enter("codeIndented");
// To do: use an improved `space_or_tab` function like `markdown-rs`,
// so that we can drop the next state.
return factorySpace(effects, afterPrefix, "linePrefix", 4 + 1)(code);
}
/**
* At start, after 1 or 4 spaces.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function afterPrefix(code) {
const tail = self.events[self.events.length - 1];
return tail && tail[1].type === "linePrefix" && tail[2].sliceSerialize(tail[1], true).length >= 4 ? atBreak(code) : nok(code);
}
/**
* At a break.
*
* ```markdown
* > | aaa
* ^ ^
* ```
*
* @type {State}
*/
function atBreak(code) {
if (code === null) {
return after(code);
}
if (markdownLineEnding(code)) {
return effects.attempt(furtherStart, atBreak, after)(code);
}
effects.enter("codeFlowValue");
return inside(code);
}
/**
* In code content.
*
* ```markdown
* > | aaa
* ^^^^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("codeFlowValue");
return atBreak(code);
}
effects.consume(code);
return inside;
}
/** @type {State} */
function after(code) {
effects.exit("codeIndented");
// To do: allow interrupting like `markdown-rs`.
// Feel free to interrupt.
// tokenizer.interrupt = false
return ok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeFurtherStart(effects, ok, nok) {
const self = this;
return furtherStart;
/**
* At eol, trying to parse another indent.
*
* ```markdown
* > | aaa
* ^
* | bbb
* ```
*
* @type {State}
*/
function furtherStart(code) {
// To do: improve `lazy` / `pierce` handling.
// If this is a lazy line, it can't be code.
if (self.parser.lazy[self.now().line]) {
return nok(code);
}
if (markdownLineEnding(code)) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return furtherStart;
}
// To do: the code here in `micromark-js` is a bit different from
// `markdown-rs` because there it can attempt spaces.
// We can't yet.
//
// To do: use an improved `space_or_tab` function like `markdown-rs`,
// so that we can drop the next state.
return factorySpace(effects, afterPrefix, "linePrefix", 4 + 1)(code);
}
/**
* At start, after 1 or 4 spaces.
*
* ```markdown
* > | aaa
* ^
* ```
*
* @type {State}
*/
function afterPrefix(code) {
const tail = self.events[self.events.length - 1];
return tail && tail[1].type === "linePrefix" && tail[2].sliceSerialize(tail[1], true).length >= 4 ? ok(code) : markdownLineEnding(code) ? furtherStart(code) : nok(code);
}
}
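
Lines indented by four or more spaces (when not interrupting a paragraph and not lazy) form indented code. Sketch with the public compiler, assumed to be installed:

import {micromark} from 'micromark';

console.log(micromark('    a\n    b'));
// expected: <pre><code>a\nb\n</code></pre>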


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const codeText: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=code-text.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"code-text.d.ts","sourceRoot":"","sources":["code-text.js"],"names":[],"mappings":"AAgBA,wBAAwB;AACxB,uBADW,SAAS,CAMnB;+BAbS,sBAAsB"}

node_modules/micromark-core-commonmark/lib/code-text.js (generated, vendored, 225 lines)

@@ -0,0 +1,225 @@
/**
* @import {
* Construct,
* Previous,
* Resolver,
* State,
* TokenizeContext,
* Tokenizer,
* Token
* } from 'micromark-util-types'
*/
import { markdownLineEnding } from 'micromark-util-character';
/** @type {Construct} */
export const codeText = {
name: 'codeText',
previous,
resolve: resolveCodeText,
tokenize: tokenizeCodeText
};
// To do: next major: don't resolve, like `markdown-rs`.
/** @type {Resolver} */
function resolveCodeText(events) {
let tailExitIndex = events.length - 4;
let headEnterIndex = 3;
/** @type {number} */
let index;
/** @type {number | undefined} */
let enter;
// If we start and end with an EOL or a space.
if ((events[headEnterIndex][1].type === "lineEnding" || events[headEnterIndex][1].type === 'space') && (events[tailExitIndex][1].type === "lineEnding" || events[tailExitIndex][1].type === 'space')) {
index = headEnterIndex;
// And we have data.
while (++index < tailExitIndex) {
if (events[index][1].type === "codeTextData") {
// Then we have padding.
events[headEnterIndex][1].type = "codeTextPadding";
events[tailExitIndex][1].type = "codeTextPadding";
headEnterIndex += 2;
tailExitIndex -= 2;
break;
}
}
}
// Merge adjacent spaces and data.
index = headEnterIndex - 1;
tailExitIndex++;
while (++index <= tailExitIndex) {
if (enter === undefined) {
if (index !== tailExitIndex && events[index][1].type !== "lineEnding") {
enter = index;
}
} else if (index === tailExitIndex || events[index][1].type === "lineEnding") {
events[enter][1].type = "codeTextData";
if (index !== enter + 2) {
events[enter][1].end = events[index - 1][1].end;
events.splice(enter + 2, index - enter - 2);
tailExitIndex -= index - enter - 2;
index = enter + 2;
}
enter = undefined;
}
}
return events;
}
/**
* @this {TokenizeContext}
* Context.
* @type {Previous}
*/
function previous(code) {
// If there is a previous code, there will always be a tail.
return code !== 96 || this.events[this.events.length - 1][1].type === "characterEscape";
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeCodeText(effects, ok, nok) {
const self = this;
let sizeOpen = 0;
/** @type {number} */
let size;
/** @type {Token} */
let token;
return start;
/**
* Start of code (text).
*
* ```markdown
* > | `a`
* ^
* > | \`a`
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("codeText");
effects.enter("codeTextSequence");
return sequenceOpen(code);
}
/**
* In opening sequence.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === 96) {
effects.consume(code);
sizeOpen++;
return sequenceOpen;
}
effects.exit("codeTextSequence");
return between(code);
}
/**
* Between something and something else.
*
* ```markdown
* > | `a`
* ^^
* ```
*
* @type {State}
*/
function between(code) {
// EOF.
if (code === null) {
return nok(code);
}
// To do: next major: don't do spaces in resolve, but when compiling,
// like `markdown-rs`.
// Tabs don't work, and virtual spaces don't make sense.
if (code === 32) {
effects.enter('space');
effects.consume(code);
effects.exit('space');
return between;
}
// Closing fence? Could also be data.
if (code === 96) {
token = effects.enter("codeTextSequence");
size = 0;
return sequenceClose(code);
}
if (markdownLineEnding(code)) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return between;
}
// Data.
effects.enter("codeTextData");
return data(code);
}
/**
* In data.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function data(code) {
if (code === null || code === 32 || code === 96 || markdownLineEnding(code)) {
effects.exit("codeTextData");
return between(code);
}
effects.consume(code);
return data;
}
/**
* In closing sequence.
*
* ```markdown
* > | `a`
* ^
* ```
*
* @type {State}
*/
function sequenceClose(code) {
// More.
if (code === 96) {
effects.consume(code);
size++;
return sequenceClose;
}
// Done!
if (size === sizeOpen) {
effects.exit("codeTextSequence");
effects.exit("codeText");
return ok(code);
}
// More or less accents: mark as data.
token.type = "codeTextData";
return data(code);
}
}
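
Backtick runs of equal length delimit code text; the resolver above merges data and marks a single leading and trailing space as padding. For instance (hedged sketch using the public `micromark` compiler):

import {micromark} from 'micromark';

console.log(micromark('`a` and `` `b` ``'));
// expected: <p><code>a</code> and <code>`b`</code></p>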


@@ -0,0 +1,7 @@
/**
* No name because it must not be turned off.
* @type {Construct}
*/
export const content: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=content.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"content.d.ts","sourceRoot":"","sources":["content.js"],"names":[],"mappings":"AAiBA;;;GAGG;AACH,sBAFU,SAAS,CAEwD;+BAbjE,sBAAsB"}

node_modules/micromark-core-commonmark/lib/content.js (generated, vendored, 163 lines)

@@ -0,0 +1,163 @@
/**
* @import {
* Construct,
* Resolver,
* State,
* TokenizeContext,
* Tokenizer,
* Token
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding } from 'micromark-util-character';
import { subtokenize } from 'micromark-util-subtokenize';
/**
* No name because it must not be turned off.
* @type {Construct}
*/
export const content = {
resolve: resolveContent,
tokenize: tokenizeContent
};
/** @type {Construct} */
const continuationConstruct = {
partial: true,
tokenize: tokenizeContinuation
};
/**
* Content is transparent: it's parsed right now. That way, definitions are also
* parsed right now: before text in paragraphs (specifically, media) are parsed.
*
* @type {Resolver}
*/
function resolveContent(events) {
subtokenize(events);
return events;
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeContent(effects, ok) {
/** @type {Token | undefined} */
let previous;
return chunkStart;
/**
* Before a content chunk.
*
* ```markdown
* > | abc
* ^
* ```
*
* @type {State}
*/
function chunkStart(code) {
effects.enter("content");
previous = effects.enter("chunkContent", {
contentType: "content"
});
return chunkInside(code);
}
/**
* In a content chunk.
*
* ```markdown
* > | abc
* ^^^
* ```
*
* @type {State}
*/
function chunkInside(code) {
if (code === null) {
return contentEnd(code);
}
// To do: in `markdown-rs`, each line is parsed on its own, and everything
// is stitched together resolving.
if (markdownLineEnding(code)) {
return effects.check(continuationConstruct, contentContinue, contentEnd)(code);
}
// Data.
effects.consume(code);
return chunkInside;
}
/**
*
*
* @type {State}
*/
function contentEnd(code) {
effects.exit("chunkContent");
effects.exit("content");
return ok(code);
}
/**
*
*
* @type {State}
*/
function contentContinue(code) {
effects.consume(code);
effects.exit("chunkContent");
previous.next = effects.enter("chunkContent", {
contentType: "content",
previous
});
previous = previous.next;
return chunkInside;
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeContinuation(effects, ok, nok) {
const self = this;
return startLookahead;
/**
*
*
* @type {State}
*/
function startLookahead(code) {
effects.exit("chunkContent");
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return factorySpace(effects, prefixed, "linePrefix");
}
/**
*
*
* @type {State}
*/
function prefixed(code) {
if (code === null || markdownLineEnding(code)) {
return nok(code);
}
// Always populated by defaults.
const tail = self.events[self.events.length - 1];
if (!self.parser.constructs.disable.null.includes('codeIndented') && tail && tail[1].type === "linePrefix" && tail[2].sliceSerialize(tail[1], true).length >= 4) {
return ok(code);
}
return effects.interrupt(self.parser.constructs.flow, nok, ok)(code);
}
}
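
Because content is resolved immediately, definitions become known before paragraph text is compiled, so a reference may precede its definition. A sketch of that effect (via the assumed `micromark` package):

import {micromark} from 'micromark';

console.log(micromark('[a]\n\n[a]: /b'));
// expected: <p><a href="/b">a</a></p>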


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const definition: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=definition.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"definition.d.ts","sourceRoot":"","sources":["definition.js"],"names":[],"mappings":"AAuBA,wBAAwB;AACxB,yBADW,SAAS,CACwD;+BAlBlE,sBAAsB"}


@@ -0,0 +1,254 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factoryDestination } from 'micromark-factory-destination';
import { factoryLabel } from 'micromark-factory-label';
import { factorySpace } from 'micromark-factory-space';
import { factoryTitle } from 'micromark-factory-title';
import { factoryWhitespace } from 'micromark-factory-whitespace';
import { markdownLineEndingOrSpace, markdownLineEnding, markdownSpace } from 'micromark-util-character';
import { normalizeIdentifier } from 'micromark-util-normalize-identifier';
/** @type {Construct} */
export const definition = {
name: 'definition',
tokenize: tokenizeDefinition
};
/** @type {Construct} */
const titleBefore = {
partial: true,
tokenize: tokenizeTitleBefore
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeDefinition(effects, ok, nok) {
const self = this;
/** @type {string} */
let identifier;
return start;
/**
* At start of a definition.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function start(code) {
// Do not interrupt paragraphs (but do follow definitions).
// To do: do `interrupt` the way `markdown-rs` does.
// To do: parse whitespace the way `markdown-rs` does.
effects.enter("definition");
return before(code);
}
/**
* After optional whitespace, at `[`.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function before(code) {
// To do: parse whitespace the way `markdown-rs` does.
return factoryLabel.call(self, effects, labelAfter,
// Note: we don't need to reset the way `markdown-rs` does.
nok, "definitionLabel", "definitionLabelMarker", "definitionLabelString")(code);
}
/**
* After label.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function labelAfter(code) {
identifier = normalizeIdentifier(self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1));
if (code === 58) {
effects.enter("definitionMarker");
effects.consume(code);
effects.exit("definitionMarker");
return markerAfter;
}
return nok(code);
}
/**
* After marker.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function markerAfter(code) {
// Note: whitespace is optional.
return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, destinationBefore)(code) : destinationBefore(code);
}
/**
* Before destination.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function destinationBefore(code) {
return factoryDestination(effects, destinationAfter,
// Note: we don't need to reset the way `markdown-rs` does.
nok, "definitionDestination", "definitionDestinationLiteral", "definitionDestinationLiteralMarker", "definitionDestinationRaw", "definitionDestinationString")(code);
}
/**
* After destination.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function destinationAfter(code) {
return effects.attempt(titleBefore, after, after)(code);
}
/**
* After definition.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function after(code) {
return markdownSpace(code) ? factorySpace(effects, afterWhitespace, "whitespace")(code) : afterWhitespace(code);
}
/**
* After definition, after optional whitespace.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function afterWhitespace(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("definition");
// Note: we don't care about uniqueness.
// It's likely that that doesn't happen very frequently.
// It is more likely that it wastes precious time.
self.parser.defined.push(identifier);
// To do: `markdown-rs` interrupt.
// // You'd be interrupting.
// tokenizer.interrupt = true
return ok(code);
}
return nok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeTitleBefore(effects, ok, nok) {
return titleBefore;
/**
* After destination, at whitespace.
*
* ```markdown
* > | [a]: b
* ^
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleBefore(code) {
return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, beforeMarker)(code) : nok(code);
}
/**
* At title.
*
* ```markdown
* | [a]: b
* > | "c"
* ^
* ```
*
* @type {State}
*/
function beforeMarker(code) {
return factoryTitle(effects, titleAfter, nok, "definitionTitle", "definitionTitleMarker", "definitionTitleString")(code);
}
/**
* After title.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleAfter(code) {
return markdownSpace(code) ? factorySpace(effects, titleAfterOptionalWhitespace, "whitespace")(code) : titleAfterOptionalWhitespace(code);
}
/**
* After title, after optional whitespace.
*
* ```markdown
* > | [a]: b "c"
* ^
* ```
*
* @type {State}
*/
function titleAfterOptionalWhitespace(code) {
return code === null || markdownLineEnding(code) ? ok(code) : nok(code);
}
}
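
A definition is a label, a colon, a destination, and an optional title; it produces no output itself but registers the normalized identifier on `parser.defined`. Example through `micromark` (assumed available):

import {micromark} from 'micromark';

console.log(micromark('[a]: /b "c"\n\n[a]'));
// expected: <p><a href="/b" title="c">a</a></p>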


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const hardBreakEscape: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=hard-break-escape.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"hard-break-escape.d.ts","sourceRoot":"","sources":["hard-break-escape.js"],"names":[],"mappings":"AAaA,wBAAwB;AACxB,8BADW,SAAS,CAInB;+BAXS,sBAAsB"}


@@ -0,0 +1,60 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { markdownLineEnding } from 'micromark-util-character';
/** @type {Construct} */
export const hardBreakEscape = {
name: 'hardBreakEscape',
tokenize: tokenizeHardBreakEscape
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeHardBreakEscape(effects, ok, nok) {
return start;
/**
* Start of a hard break (escape).
*
* ```markdown
* > | a\
* ^
* | b
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("hardBreakEscape");
effects.consume(code);
return after;
}
/**
* After `\`, at eol.
*
* ```markdown
* > | a\
* ^
* | b
* ```
*
* @type {State}
*/
function after(code) {
if (markdownLineEnding(code)) {
effects.exit("hardBreakEscape");
return ok(code);
}
return nok(code);
}
}
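
A backslash immediately before a line ending inside a paragraph becomes a hard break. Sketch (assuming the `micromark` package):

import {micromark} from 'micromark';

console.log(micromark('a\\\nb'));
// expected: <p>a<br />\nb</p>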


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const headingAtx: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=heading-atx.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"heading-atx.d.ts","sourceRoot":"","sources":["heading-atx.js"],"names":[],"mappings":"AAqBA,wBAAwB;AACxB,yBADW,SAAS,CAKnB;+BAlBS,sBAAsB"}


@@ -0,0 +1,195 @@
/**
* @import {
* Construct,
* Resolver,
* State,
* TokenizeContext,
* Tokenizer,
* Token
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEndingOrSpace, markdownLineEnding, markdownSpace } from 'micromark-util-character';
import { splice } from 'micromark-util-chunked';
/** @type {Construct} */
export const headingAtx = {
name: 'headingAtx',
resolve: resolveHeadingAtx,
tokenize: tokenizeHeadingAtx
};
/** @type {Resolver} */
function resolveHeadingAtx(events, context) {
let contentEnd = events.length - 2;
let contentStart = 3;
/** @type {Token} */
let content;
/** @type {Token} */
let text;
// Prefix whitespace, part of the opening.
if (events[contentStart][1].type === "whitespace") {
contentStart += 2;
}
// Suffix whitespace, part of the closing.
if (contentEnd - 2 > contentStart && events[contentEnd][1].type === "whitespace") {
contentEnd -= 2;
}
if (events[contentEnd][1].type === "atxHeadingSequence" && (contentStart === contentEnd - 1 || contentEnd - 4 > contentStart && events[contentEnd - 2][1].type === "whitespace")) {
contentEnd -= contentStart + 1 === contentEnd ? 2 : 4;
}
if (contentEnd > contentStart) {
content = {
type: "atxHeadingText",
start: events[contentStart][1].start,
end: events[contentEnd][1].end
};
text = {
type: "chunkText",
start: events[contentStart][1].start,
end: events[contentEnd][1].end,
contentType: "text"
};
splice(events, contentStart, contentEnd - contentStart + 1, [['enter', content, context], ['enter', text, context], ['exit', text, context], ['exit', content, context]]);
}
return events;
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeHeadingAtx(effects, ok, nok) {
let size = 0;
return start;
/**
* Start of a heading (atx).
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse indent like `markdown-rs`.
effects.enter("atxHeading");
return before(code);
}
/**
* After optional whitespace, at `#`.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function before(code) {
effects.enter("atxHeadingSequence");
return sequenceOpen(code);
}
/**
* In opening sequence.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function sequenceOpen(code) {
if (code === 35 && size++ < 6) {
effects.consume(code);
return sequenceOpen;
}
// Always at least one `#`.
if (code === null || markdownLineEndingOrSpace(code)) {
effects.exit("atxHeadingSequence");
return atBreak(code);
}
return nok(code);
}
/**
* After something, before something else.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function atBreak(code) {
if (code === 35) {
effects.enter("atxHeadingSequence");
return sequenceFurther(code);
}
if (code === null || markdownLineEnding(code)) {
effects.exit("atxHeading");
// To do: interrupt like `markdown-rs`.
// // Feel free to interrupt.
// tokenizer.interrupt = false
return ok(code);
}
if (markdownSpace(code)) {
return factorySpace(effects, atBreak, "whitespace")(code);
}
// To do: generate `data` tokens, add the `text` token later.
// Needs edit map, see: `markdown.rs`.
effects.enter("atxHeadingText");
return data(code);
}
/**
* In further sequence (after whitespace).
*
* Could be normal “visible” hashes in the heading or a final sequence.
*
* ```markdown
* > | ## aa ##
* ^
* ```
*
* @type {State}
*/
function sequenceFurther(code) {
if (code === 35) {
effects.consume(code);
return sequenceFurther;
}
effects.exit("atxHeadingSequence");
return atBreak(code);
}
/**
* In text.
*
* ```markdown
* > | ## aa
* ^
* ```
*
* @type {State}
*/
function data(code) {
if (code === null || code === 35 || markdownLineEndingOrSpace(code)) {
effects.exit("atxHeadingText");
return atBreak(code);
}
effects.consume(code);
return data;
}
}
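
One to six `#` characters, whitespace, heading text, and an optional closing sequence make an ATX heading; the resolver wraps the text in a single `chunkText`. Sketch of the observable result (again via the assumed `micromark` compiler):

import {micromark} from 'micromark';

console.log(micromark('## aa ##'));
// expected: <h2>aa</h2>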


@@ -0,0 +1,4 @@
/** @type {Construct} */
export const htmlFlow: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=html-flow.d.ts.map


@@ -0,0 +1 @@
{"version":3,"file":"html-flow.d.ts","sourceRoot":"","sources":["html-flow.js"],"names":[],"mappings":"AAuBA,wBAAwB;AACxB,uBADW,SAAS,CAMnB;+BArBS,sBAAsB"}

node_modules/micromark-core-commonmark/lib/html-flow.js (generated, vendored, 876 lines)

@@ -0,0 +1,876 @@
/**
* @import {
* Code,
* Construct,
* Resolver,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { asciiAlphanumeric, asciiAlpha, markdownLineEndingOrSpace, markdownLineEnding, markdownSpace } from 'micromark-util-character';
import { htmlBlockNames, htmlRawNames } from 'micromark-util-html-tag-name';
import { blankLine } from './blank-line.js';
/** @type {Construct} */
export const htmlFlow = {
concrete: true,
name: 'htmlFlow',
resolveTo: resolveToHtmlFlow,
tokenize: tokenizeHtmlFlow
};
/** @type {Construct} */
const blankLineBefore = {
partial: true,
tokenize: tokenizeBlankLineBefore
};
const nonLazyContinuationStart = {
partial: true,
tokenize: tokenizeNonLazyContinuationStart
};
/** @type {Resolver} */
function resolveToHtmlFlow(events) {
let index = events.length;
while (index--) {
if (events[index][0] === 'enter' && events[index][1].type === "htmlFlow") {
break;
}
}
if (index > 1 && events[index - 2][1].type === "linePrefix") {
// Add the prefix start to the HTML token.
events[index][1].start = events[index - 2][1].start;
// Add the prefix start to the HTML line token.
events[index + 1][1].start = events[index - 2][1].start;
// Remove the line prefix.
events.splice(index - 2, 2);
}
return events;
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeHtmlFlow(effects, ok, nok) {
const self = this;
/** @type {number} */
let marker;
/** @type {boolean} */
let closingTag;
/** @type {string} */
let buffer;
/** @type {number} */
let index;
/** @type {Code} */
let markerB;
return start;
/**
* Start of HTML (flow).
*
* ```markdown
* > | <x />
* ^
* ```
*
* @type {State}
*/
function start(code) {
// To do: parse indent like `markdown-rs`.
return before(code);
}
/**
* At `<`, after optional whitespace.
*
* ```markdown
* > | <x />
* ^
* ```
*
* @type {State}
*/
function before(code) {
effects.enter("htmlFlow");
effects.enter("htmlFlowData");
effects.consume(code);
return open;
}
/**
* After `<`, at tag name or other stuff.
*
* ```markdown
* > | <x />
* ^
* > | <!doctype>
* ^
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function open(code) {
if (code === 33) {
effects.consume(code);
return declarationOpen;
}
if (code === 47) {
effects.consume(code);
closingTag = true;
return tagCloseStart;
}
if (code === 63) {
effects.consume(code);
marker = 3;
// To do:
// tokenizer.concrete = true
// To do: use `markdown-rs` style interrupt.
// While we're in an instruction instead of a declaration, we're on a `?`
// right now, so we do need to search for `>`, similar to declarations.
return self.interrupt ? ok : continuationDeclarationInside;
}
// ASCII alphabetical.
if (asciiAlpha(code)) {
// Always the case.
effects.consume(code);
buffer = String.fromCharCode(code);
return tagName;
}
return nok(code);
}
/**
* After `<!`, at declaration, comment, or CDATA.
*
* ```markdown
* > | <!doctype>
* ^
* > | <!--xxx-->
* ^
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function declarationOpen(code) {
if (code === 45) {
effects.consume(code);
marker = 2;
return commentOpenInside;
}
if (code === 91) {
effects.consume(code);
marker = 5;
index = 0;
return cdataOpenInside;
}
// ASCII alphabetical.
if (asciiAlpha(code)) {
effects.consume(code);
marker = 4;
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuationDeclarationInside;
}
return nok(code);
}
/**
* After `<!-`, inside a comment, at another `-`.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function commentOpenInside(code) {
if (code === 45) {
effects.consume(code);
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuationDeclarationInside;
}
return nok(code);
}
/**
* After `<![`, inside CDATA, expecting `CDATA[`.
*
* ```markdown
* > | <![CDATA[>&<]]>
* ^^^^^^
* ```
*
* @type {State}
*/
function cdataOpenInside(code) {
const value = "CDATA[";
if (code === value.charCodeAt(index++)) {
effects.consume(code);
if (index === value.length) {
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuation;
}
return cdataOpenInside;
}
return nok(code);
}
/**
* After `</`, in closing tag, at tag name.
*
* ```markdown
* > | </x>
* ^
* ```
*
* @type {State}
*/
function tagCloseStart(code) {
if (asciiAlpha(code)) {
// Always the case.
effects.consume(code);
buffer = String.fromCharCode(code);
return tagName;
}
return nok(code);
}
/**
* In tag name.
*
* ```markdown
* > | <ab>
* ^^
* > | </ab>
* ^^
* ```
*
* @type {State}
*/
function tagName(code) {
if (code === null || code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
const slash = code === 47;
const name = buffer.toLowerCase();
if (!slash && !closingTag && htmlRawNames.includes(name)) {
marker = 1;
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok(code) : continuation(code);
}
if (htmlBlockNames.includes(buffer.toLowerCase())) {
marker = 6;
if (slash) {
effects.consume(code);
return basicSelfClosing;
}
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok(code) : continuation(code);
}
marker = 7;
// Do not support complete HTML when interrupting.
return self.interrupt && !self.parser.lazy[self.now().line] ? nok(code) : closingTag ? completeClosingTagAfter(code) : completeAttributeNameBefore(code);
}
// ASCII alphanumerical and `-`.
if (code === 45 || asciiAlphanumeric(code)) {
effects.consume(code);
buffer += String.fromCharCode(code);
return tagName;
}
return nok(code);
}
/**
* After closing slash of a basic tag name.
*
* ```markdown
* > | <div/>
* ^
* ```
*
* @type {State}
*/
function basicSelfClosing(code) {
if (code === 62) {
effects.consume(code);
// // Do not form containers.
// tokenizer.concrete = true
return self.interrupt ? ok : continuation;
}
return nok(code);
}
/**
* After closing slash of a complete tag name.
*
* ```markdown
* > | <x/>
* ^
* ```
*
* @type {State}
*/
function completeClosingTagAfter(code) {
if (markdownSpace(code)) {
effects.consume(code);
return completeClosingTagAfter;
}
return completeEnd(code);
}
/**
* At an attribute name.
*
* At first, this state is used after a complete tag name, after whitespace,
* where it expects optional attributes or the end of the tag.
* It is also reused after attributes, when expecting more optional
* attributes.
*
* ```markdown
* > | <a />
* ^
* > | <a :b>
* ^
* > | <a _b>
* ^
* > | <a b>
* ^
* > | <a >
* ^
* ```
*
* @type {State}
*/
function completeAttributeNameBefore(code) {
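    // Code 47 is `/`: a self-closing complete tag.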
if (code === 47) {
effects.consume(code);
return completeEnd;
}
    // ASCII alphabetical and `:` and `_`.
if (code === 58 || code === 95 || asciiAlpha(code)) {
effects.consume(code);
return completeAttributeName;
}
if (markdownSpace(code)) {
effects.consume(code);
return completeAttributeNameBefore;
}
return completeEnd(code);
}
/**
* In attribute name.
*
* ```markdown
* > | <a :b>
* ^
* > | <a _b>
* ^
* > | <a b>
* ^
* ```
*
* @type {State}
*/
function completeAttributeName(code) {
// ASCII alphanumerical and `-`, `.`, `:`, and `_`.
if (code === 45 || code === 46 || code === 58 || code === 95 || asciiAlphanumeric(code)) {
effects.consume(code);
return completeAttributeName;
}
return completeAttributeNameAfter(code);
}
/**
* After attribute name, at an optional initializer, the end of the tag, or
* whitespace.
*
* ```markdown
* > | <a b>
* ^
* > | <a b=c>
* ^
* ```
*
* @type {State}
*/
function completeAttributeNameAfter(code) {
if (code === 61) {
effects.consume(code);
return completeAttributeValueBefore;
}
if (markdownSpace(code)) {
effects.consume(code);
return completeAttributeNameAfter;
}
return completeAttributeNameBefore(code);
}
/**
* Before unquoted, double quoted, or single quoted attribute value, allowing
* whitespace.
*
* ```markdown
* > | <a b=c>
* ^
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueBefore(code) {
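    // Attribute values cannot start with `<` (60), `=` (61), `>` (62), or a
    // grave accent (96); codes 34 and 39 are `"` and `'` for quoted values.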
if (code === null || code === 60 || code === 61 || code === 62 || code === 96) {
return nok(code);
}
if (code === 34 || code === 39) {
effects.consume(code);
markerB = code;
return completeAttributeValueQuoted;
}
if (markdownSpace(code)) {
effects.consume(code);
return completeAttributeValueBefore;
}
return completeAttributeValueUnquoted(code);
}
/**
* In double or single quoted attribute value.
*
* ```markdown
* > | <a b="c">
* ^
* > | <a b='c'>
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueQuoted(code) {
if (code === markerB) {
effects.consume(code);
markerB = null;
return completeAttributeValueQuotedAfter;
}
if (code === null || markdownLineEnding(code)) {
return nok(code);
}
effects.consume(code);
return completeAttributeValueQuoted;
}
/**
* In unquoted attribute value.
*
* ```markdown
* > | <a b=c>
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueUnquoted(code) {
if (code === null || code === 34 || code === 39 || code === 47 || code === 60 || code === 61 || code === 62 || code === 96 || markdownLineEndingOrSpace(code)) {
return completeAttributeNameAfter(code);
}
effects.consume(code);
return completeAttributeValueUnquoted;
}
/**
* After double or single quoted attribute value, before whitespace or the
* end of the tag.
*
* ```markdown
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeAttributeValueQuotedAfter(code) {
if (code === 47 || code === 62 || markdownSpace(code)) {
return completeAttributeNameBefore(code);
}
return nok(code);
}
/**
   * In certain circumstances of a complete tag where only a `>` is allowed.
*
* ```markdown
* > | <a b="c">
* ^
* ```
*
* @type {State}
*/
function completeEnd(code) {
if (code === 62) {
effects.consume(code);
return completeAfter;
}
return nok(code);
}
/**
* After `>` in a complete tag.
*
* ```markdown
* > | <x>
* ^
* ```
*
* @type {State}
*/
function completeAfter(code) {
if (code === null || markdownLineEnding(code)) {
// // Do not form containers.
// tokenizer.concrete = true
return continuation(code);
}
if (markdownSpace(code)) {
effects.consume(code);
return completeAfter;
}
return nok(code);
}
/**
* In continuation of any HTML kind.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function continuation(code) {
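    // Each kind has its own closing condition: comments (2) end at `-->`, raw
    // tags (1) at a closing raw tag, declarations (4) at `>`, instructions (3)
    // at `?>`, CDATA (5) at `]]>`, and basic (6) and complete (7) blocks at a
    // blank line.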
if (code === 45 && marker === 2) {
effects.consume(code);
return continuationCommentInside;
}
if (code === 60 && marker === 1) {
effects.consume(code);
return continuationRawTagOpen;
}
if (code === 62 && marker === 4) {
effects.consume(code);
return continuationClose;
}
if (code === 63 && marker === 3) {
effects.consume(code);
return continuationDeclarationInside;
}
if (code === 93 && marker === 5) {
effects.consume(code);
return continuationCdataInside;
}
if (markdownLineEnding(code) && (marker === 6 || marker === 7)) {
effects.exit("htmlFlowData");
return effects.check(blankLineBefore, continuationAfter, continuationStart)(code);
}
if (code === null || markdownLineEnding(code)) {
effects.exit("htmlFlowData");
return continuationStart(code);
}
effects.consume(code);
return continuation;
}
/**
* In continuation, at eol.
*
* ```markdown
* > | <x>
* ^
* | asd
* ```
*
* @type {State}
*/
function continuationStart(code) {
return effects.check(nonLazyContinuationStart, continuationStartNonLazy, continuationAfter)(code);
}
/**
* In continuation, at eol, before non-lazy content.
*
* ```markdown
* > | <x>
* ^
* | asd
* ```
*
* @type {State}
*/
function continuationStartNonLazy(code) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return continuationBefore;
}
/**
* In continuation, before non-lazy content.
*
* ```markdown
* | <x>
* > | asd
* ^
* ```
*
* @type {State}
*/
function continuationBefore(code) {
if (code === null || markdownLineEnding(code)) {
return continuationStart(code);
}
effects.enter("htmlFlowData");
return continuation(code);
}
/**
* In comment continuation, after one `-`, expecting another.
*
* ```markdown
* > | <!--xxx-->
* ^
* ```
*
* @type {State}
*/
function continuationCommentInside(code) {
if (code === 45) {
effects.consume(code);
return continuationDeclarationInside;
}
return continuation(code);
}
/**
* In raw continuation, after `<`, at `/`.
*
* ```markdown
* > | <script>console.log(1)</script>
* ^
* ```
*
* @type {State}
*/
function continuationRawTagOpen(code) {
if (code === 47) {
effects.consume(code);
buffer = '';
return continuationRawEndTag;
}
return continuation(code);
}
/**
* In raw continuation, after `</`, in a raw tag name.
*
* ```markdown
* > | <script>console.log(1)</script>
* ^^^^^^
* ```
*
* @type {State}
*/
function continuationRawEndTag(code) {
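    // The longest raw tag name (`textarea`) is 8 characters, hence the cap on
    // `buffer`.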
if (code === 62) {
const name = buffer.toLowerCase();
if (htmlRawNames.includes(name)) {
effects.consume(code);
return continuationClose;
}
return continuation(code);
}
if (asciiAlpha(code) && buffer.length < 8) {
// Always the case.
effects.consume(code);
buffer += String.fromCharCode(code);
return continuationRawEndTag;
}
return continuation(code);
}
/**
* In cdata continuation, after `]`, expecting `]>`.
*
* ```markdown
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function continuationCdataInside(code) {
if (code === 93) {
effects.consume(code);
return continuationDeclarationInside;
}
return continuation(code);
}
/**
* In declaration or instruction continuation, at `>`.
*
* ```markdown
* > | <!-->
* ^
* > | <?>
* ^
* > | <!q>
* ^
* > | <!--ab-->
* ^
* > | <![CDATA[>&<]]>
* ^
* ```
*
* @type {State}
*/
function continuationDeclarationInside(code) {
if (code === 62) {
effects.consume(code);
return continuationClose;
}
// More dashes.
if (code === 45 && marker === 2) {
effects.consume(code);
return continuationDeclarationInside;
}
return continuation(code);
}
/**
* In closed continuation: everything we get until the eol/eof is part of it.
*
* ```markdown
* > | <!doctype>
* ^
* ```
*
* @type {State}
*/
function continuationClose(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("htmlFlowData");
return continuationAfter(code);
}
effects.consume(code);
return continuationClose;
}
/**
* Done.
*
* ```markdown
* > | <!doctype>
* ^
* ```
*
* @type {State}
*/
function continuationAfter(code) {
effects.exit("htmlFlow");
// // Feel free to interrupt.
// tokenizer.interrupt = false
// // No longer concrete.
// tokenizer.concrete = false
return ok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeNonLazyContinuationStart(effects, ok, nok) {
const self = this;
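  // An HTML (flow) block cannot be continued on a lazy line (a lazy
  // continuation of a container such as a block quote), so such lines end it.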
return start;
/**
* At eol, before continuation.
*
* ```markdown
* > | * ```js
* ^
* | b
* ```
*
* @type {State}
*/
function start(code) {
if (markdownLineEnding(code)) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return after;
}
return nok(code);
}
/**
* A continuation.
*
* ```markdown
* | * ```js
* > | b
* ^
* ```
*
* @type {State}
*/
function after(code) {
return self.parser.lazy[self.now().line] ? nok(code) : ok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeBlankLineBefore(effects, ok, nok) {
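  // Used for kinds 6 and 7, which end when the line after the eol is blank.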
return start;
/**
* Before eol, expecting blank line.
*
* ```markdown
* > | <div>
* ^
* |
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return effects.attempt(blankLine, ok, nok);
}
}

View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const htmlText: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=html-text.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"html-text.d.ts","sourceRoot":"","sources":["html-text.js"],"names":[],"mappings":"AAqBA,wBAAwB;AACxB,uBADW,SAAS,CACkD;+BAf5D,sBAAsB"}

678
node_modules/micromark-core-commonmark/lib/html-text.js generated vendored Normal file
View File

@@ -0,0 +1,678 @@
/**
* @import {
* Code,
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { asciiAlphanumeric, asciiAlpha, markdownLineEndingOrSpace, markdownLineEnding, markdownSpace } from 'micromark-util-character';
/** @type {Construct} */
export const htmlText = {
name: 'htmlText',
tokenize: tokenizeHtmlText
};
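// Rough usage sketch (assuming the standard `micromark` entry point): this
// construct handles inline tags, comments, instructions, declarations, and
// CDATA in text, for example:
//
//   import {micromark} from 'micromark'
//   micromark('a <b>c</b>', {allowDangerousHtml: true})
//   // should yield '<p>a <b>c</b></p>'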
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeHtmlText(effects, ok, nok) {
const self = this;
/** @type {NonNullable<Code> | undefined} */
let marker;
/** @type {number} */
let index;
/** @type {State} */
let returnState;
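  // When a line ending shows up inside a construct, `returnState` remembers
  // where to resume after the eol and optional indentation are consumed.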
return start;
/**
* Start of HTML (text).
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("htmlText");
effects.enter("htmlTextData");
effects.consume(code);
return open;
}
/**
* After `<`, at tag name or other stuff.
*
* ```markdown
* > | a <b> c
* ^
* > | a <!doctype> c
* ^
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function open(code) {
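    // Code 33 is `!` (declaration, comment, or CDATA), 47 is `/` (closing
    // tag), and 63 is `?` (instruction).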
if (code === 33) {
effects.consume(code);
return declarationOpen;
}
if (code === 47) {
effects.consume(code);
return tagCloseStart;
}
if (code === 63) {
effects.consume(code);
return instruction;
}
// ASCII alphabetical.
if (asciiAlpha(code)) {
effects.consume(code);
return tagOpen;
}
return nok(code);
}
/**
* After `<!`, at declaration, comment, or CDATA.
*
* ```markdown
* > | a <!doctype> c
* ^
* > | a <!--b--> c
* ^
* > | a <![CDATA[>&<]]> c
* ^
* ```
*
* @type {State}
*/
function declarationOpen(code) {
if (code === 45) {
effects.consume(code);
return commentOpenInside;
}
if (code === 91) {
effects.consume(code);
index = 0;
return cdataOpenInside;
}
if (asciiAlpha(code)) {
effects.consume(code);
return declaration;
}
return nok(code);
}
/**
* In a comment, after `<!-`, at another `-`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentOpenInside(code) {
if (code === 45) {
effects.consume(code);
return commentEnd;
}
return nok(code);
}
/**
* In comment.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function comment(code) {
if (code === null) {
return nok(code);
}
if (code === 45) {
effects.consume(code);
return commentClose;
}
if (markdownLineEnding(code)) {
returnState = comment;
return lineEndingBefore(code);
}
effects.consume(code);
return comment;
}
/**
* In comment, after `-`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentClose(code) {
if (code === 45) {
effects.consume(code);
return commentEnd;
}
return comment(code);
}
/**
* In comment, after `--`.
*
* ```markdown
* > | a <!--b--> c
* ^
* ```
*
* @type {State}
*/
function commentEnd(code) {
return code === 62 ? end(code) : code === 45 ? commentClose(code) : comment(code);
}
/**
* After `<![`, in CDATA, expecting `CDATA[`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^^^^^^
* ```
*
* @type {State}
*/
function cdataOpenInside(code) {
const value = "CDATA[";
if (code === value.charCodeAt(index++)) {
effects.consume(code);
return index === value.length ? cdata : cdataOpenInside;
}
return nok(code);
}
/**
* In CDATA.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^^^
* ```
*
* @type {State}
*/
function cdata(code) {
if (code === null) {
return nok(code);
}
if (code === 93) {
effects.consume(code);
return cdataClose;
}
if (markdownLineEnding(code)) {
returnState = cdata;
return lineEndingBefore(code);
}
effects.consume(code);
return cdata;
}
/**
* In CDATA, after `]`, at another `]`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^
* ```
*
* @type {State}
*/
function cdataClose(code) {
if (code === 93) {
effects.consume(code);
return cdataEnd;
}
return cdata(code);
}
/**
* In CDATA, after `]]`, at `>`.
*
* ```markdown
* > | a <![CDATA[>&<]]> b
* ^
* ```
*
* @type {State}
*/
function cdataEnd(code) {
if (code === 62) {
return end(code);
}
if (code === 93) {
effects.consume(code);
return cdataEnd;
}
return cdata(code);
}
/**
* In declaration.
*
* ```markdown
* > | a <!b> c
* ^
* ```
*
* @type {State}
*/
function declaration(code) {
if (code === null || code === 62) {
return end(code);
}
if (markdownLineEnding(code)) {
returnState = declaration;
return lineEndingBefore(code);
}
effects.consume(code);
return declaration;
}
/**
* In instruction.
*
* ```markdown
* > | a <?b?> c
* ^
* ```
*
* @type {State}
*/
function instruction(code) {
if (code === null) {
return nok(code);
}
if (code === 63) {
effects.consume(code);
return instructionClose;
}
if (markdownLineEnding(code)) {
returnState = instruction;
return lineEndingBefore(code);
}
effects.consume(code);
return instruction;
}
/**
* In instruction, after `?`, at `>`.
*
* ```markdown
* > | a <?b?> c
* ^
* ```
*
* @type {State}
*/
function instructionClose(code) {
return code === 62 ? end(code) : instruction(code);
}
/**
* After `</`, in closing tag, at tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagCloseStart(code) {
// ASCII alphabetical.
if (asciiAlpha(code)) {
effects.consume(code);
return tagClose;
}
return nok(code);
}
/**
* After `</x`, in a tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagClose(code) {
// ASCII alphanumerical and `-`.
if (code === 45 || asciiAlphanumeric(code)) {
effects.consume(code);
return tagClose;
}
return tagCloseBetween(code);
}
/**
* In closing tag, after tag name.
*
* ```markdown
* > | a </b> c
* ^
* ```
*
* @type {State}
*/
function tagCloseBetween(code) {
if (markdownLineEnding(code)) {
returnState = tagCloseBetween;
return lineEndingBefore(code);
}
if (markdownSpace(code)) {
effects.consume(code);
return tagCloseBetween;
}
return end(code);
}
/**
* After `<x`, in opening tag name.
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function tagOpen(code) {
// ASCII alphanumerical and `-`.
if (code === 45 || asciiAlphanumeric(code)) {
effects.consume(code);
return tagOpen;
}
if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
return tagOpenBetween(code);
}
return nok(code);
}
/**
* In opening tag, after tag name.
*
* ```markdown
* > | a <b> c
* ^
* ```
*
* @type {State}
*/
function tagOpenBetween(code) {
if (code === 47) {
effects.consume(code);
return end;
}
// ASCII alphabetical and `:` and `_`.
if (code === 58 || code === 95 || asciiAlpha(code)) {
effects.consume(code);
return tagOpenAttributeName;
}
if (markdownLineEnding(code)) {
returnState = tagOpenBetween;
return lineEndingBefore(code);
}
if (markdownSpace(code)) {
effects.consume(code);
return tagOpenBetween;
}
return end(code);
}
/**
* In attribute name.
*
* ```markdown
* > | a <b c> d
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeName(code) {
    // ASCII alphanumerical and `-`, `.`, `:`, and `_`.
if (code === 45 || code === 46 || code === 58 || code === 95 || asciiAlphanumeric(code)) {
effects.consume(code);
return tagOpenAttributeName;
}
return tagOpenAttributeNameAfter(code);
}
/**
* After attribute name, before initializer, the end of the tag, or
* whitespace.
*
* ```markdown
* > | a <b c> d
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeNameAfter(code) {
if (code === 61) {
effects.consume(code);
return tagOpenAttributeValueBefore;
}
if (markdownLineEnding(code)) {
returnState = tagOpenAttributeNameAfter;
return lineEndingBefore(code);
}
if (markdownSpace(code)) {
effects.consume(code);
return tagOpenAttributeNameAfter;
}
return tagOpenBetween(code);
}
/**
* Before unquoted, double quoted, or single quoted attribute value, allowing
* whitespace.
*
* ```markdown
* > | a <b c=d> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueBefore(code) {
if (code === null || code === 60 || code === 61 || code === 62 || code === 96) {
return nok(code);
}
if (code === 34 || code === 39) {
effects.consume(code);
marker = code;
return tagOpenAttributeValueQuoted;
}
if (markdownLineEnding(code)) {
returnState = tagOpenAttributeValueBefore;
return lineEndingBefore(code);
}
if (markdownSpace(code)) {
effects.consume(code);
return tagOpenAttributeValueBefore;
}
effects.consume(code);
return tagOpenAttributeValueUnquoted;
}
/**
* In double or single quoted attribute value.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueQuoted(code) {
if (code === marker) {
effects.consume(code);
marker = undefined;
return tagOpenAttributeValueQuotedAfter;
}
if (code === null) {
return nok(code);
}
if (markdownLineEnding(code)) {
returnState = tagOpenAttributeValueQuoted;
return lineEndingBefore(code);
}
effects.consume(code);
return tagOpenAttributeValueQuoted;
}
/**
* In unquoted attribute value.
*
* ```markdown
* > | a <b c=d> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueUnquoted(code) {
if (code === null || code === 34 || code === 39 || code === 60 || code === 61 || code === 96) {
return nok(code);
}
if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
return tagOpenBetween(code);
}
effects.consume(code);
return tagOpenAttributeValueUnquoted;
}
/**
* After double or single quoted attribute value, before whitespace or the end
* of the tag.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function tagOpenAttributeValueQuotedAfter(code) {
if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {
return tagOpenBetween(code);
}
return nok(code);
}
/**
   * In certain circumstances of a tag where only a `>` is allowed.
*
* ```markdown
* > | a <b c="d"> e
* ^
* ```
*
* @type {State}
*/
function end(code) {
if (code === 62) {
effects.consume(code);
effects.exit("htmlTextData");
effects.exit("htmlText");
return ok;
}
return nok(code);
}
/**
* At eol.
*
* > 👉 **Note**: we cant have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* > | a <!--a
* ^
* | b-->
* ```
*
* @type {State}
*/
function lineEndingBefore(code) {
effects.exit("htmlTextData");
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return lineEndingAfter;
}
/**
* After eol, at optional whitespace.
*
* > 👉 **Note**: we cant have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* | a <!--a
* > | b-->
* ^
* ```
*
* @type {State}
*/
function lineEndingAfter(code) {
// Always populated by defaults.
return markdownSpace(code) ? factorySpace(effects, lineEndingAfterPrefix, "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code) : lineEndingAfterPrefix(code);
}
/**
* After eol, after optional whitespace.
*
* > 👉 **Note**: we cant have blank lines in text, so no need to worry about
* > empty tokens.
*
* ```markdown
* | a <!--a
* > | b-->
* ^
* ```
*
* @type {State}
*/
function lineEndingAfterPrefix(code) {
effects.enter("htmlTextData");
return returnState(code);
}
}

View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const labelEnd: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=label-end.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"label-end.d.ts","sourceRoot":"","sources":["label-end.js"],"names":[],"mappings":"AAuBA,wBAAwB;AACxB,uBADW,SAAS,CAMnB;+BApBS,sBAAsB"}

560
node_modules/micromark-core-commonmark/lib/label-end.js generated vendored Normal file
View File

@@ -0,0 +1,560 @@
/**
* @import {
* Construct,
* Event,
* Resolver,
* State,
* TokenizeContext,
* Tokenizer,
* Token
* } from 'micromark-util-types'
*/
import { factoryDestination } from 'micromark-factory-destination';
import { factoryLabel } from 'micromark-factory-label';
import { factoryTitle } from 'micromark-factory-title';
import { factoryWhitespace } from 'micromark-factory-whitespace';
import { markdownLineEndingOrSpace } from 'micromark-util-character';
import { push, splice } from 'micromark-util-chunked';
import { normalizeIdentifier } from 'micromark-util-normalize-identifier';
import { resolveAll } from 'micromark-util-resolve-all';
/** @type {Construct} */
export const labelEnd = {
name: 'labelEnd',
resolveAll: resolveAllLabelEnd,
resolveTo: resolveToLabelEnd,
tokenize: tokenizeLabelEnd
};
/** @type {Construct} */
const resourceConstruct = {
tokenize: tokenizeResource
};
/** @type {Construct} */
const referenceFullConstruct = {
tokenize: tokenizeReferenceFull
};
/** @type {Construct} */
const referenceCollapsedConstruct = {
tokenize: tokenizeReferenceCollapsed
};
/** @type {Resolver} */
function resolveAllLabelEnd(events) {
let index = -1;
/** @type {Array<Event>} */
const newEvents = [];
while (++index < events.length) {
const token = events[index][1];
newEvents.push(events[index]);
if (token.type === "labelImage" || token.type === "labelLink" || token.type === "labelEnd") {
// Remove the marker.
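      // (4 skips the image and label marker events of `![`, 2 skips the
      // label marker events of `[` or `]`.)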
const offset = token.type === "labelImage" ? 4 : 2;
token.type = "data";
index += offset;
}
}
  // If no events were dropped above, `newEvents` is identical to `events`
  // and the copy can be skipped.
if (events.length !== newEvents.length) {
splice(events, 0, events.length, newEvents);
}
return events;
}
/** @type {Resolver} */
function resolveToLabelEnd(events, context) {
let index = events.length;
let offset = 0;
/** @type {Token} */
let token;
/** @type {number | undefined} */
let open;
/** @type {number | undefined} */
let close;
/** @type {Array<Event>} */
let media;
// Find an opening.
while (index--) {
token = events[index][1];
if (open) {
// If we see another link, or inactive link label, weve been here before.
if (token.type === "link" || token.type === "labelLink" && token._inactive) {
break;
}
// Mark other link openings as inactive, as we cant have links in
// links.
if (events[index][0] === 'enter' && token.type === "labelLink") {
token._inactive = true;
}
} else if (close) {
if (events[index][0] === 'enter' && (token.type === "labelImage" || token.type === "labelLink") && !token._balanced) {
open = index;
if (token.type !== "labelLink") {
offset = 2;
break;
}
}
} else if (token.type === "labelEnd") {
close = index;
}
}
const group = {
type: events[open][1].type === "labelLink" ? "link" : "image",
start: {
...events[open][1].start
},
end: {
...events[events.length - 1][1].end
}
};
const label = {
type: "label",
start: {
...events[open][1].start
},
end: {
...events[close][1].end
}
};
const text = {
type: "labelText",
start: {
...events[open + offset + 2][1].end
},
end: {
...events[close - 2][1].start
}
};
media = [['enter', group, context], ['enter', label, context]];
// Opening marker.
media = push(media, events.slice(open + 1, open + offset + 3));
// Text open.
media = push(media, [['enter', text, context]]);
// Always populated by defaults.
// Between.
media = push(media, resolveAll(context.parser.constructs.insideSpan.null, events.slice(open + offset + 4, close - 3), context));
// Text close, marker close, label close.
media = push(media, [['exit', text, context], events[close - 2], events[close - 1], ['exit', label, context]]);
// Reference, resource, or so.
media = push(media, events.slice(close + 1));
// Media close.
media = push(media, [['exit', group, context]]);
splice(events, open, events.length, media);
return events;
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeLabelEnd(effects, ok, nok) {
const self = this;
let index = self.events.length;
/** @type {Token} */
let labelStart;
/** @type {boolean} */
let defined;
// Find an opening.
while (index--) {
if ((self.events[index][1].type === "labelImage" || self.events[index][1].type === "labelLink") && !self.events[index][1]._balanced) {
labelStart = self.events[index][1];
break;
}
}
return start;
/**
* Start of label end.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ```
*
* @type {State}
*/
function start(code) {
// If there is not an okay opening.
if (!labelStart) {
return nok(code);
}
// If the corresponding label (link) start is marked as inactive,
// it means wed be wrapping a link, like this:
//
// ```markdown
// > | a [b [c](d) e](f) g.
// ^
// ```
//
// We cant have that, so its just balanced brackets.
if (labelStart._inactive) {
return labelEndNok(code);
}
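    // Whether the label text (from the label start to here) matches a known
    // definition identifier; shortcut and collapsed references only work when
    // it does.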
defined = self.parser.defined.includes(normalizeIdentifier(self.sliceSerialize({
start: labelStart.end,
end: self.now()
})));
effects.enter("labelEnd");
effects.enter("labelMarker");
effects.consume(code);
effects.exit("labelMarker");
effects.exit("labelEnd");
return after;
}
/**
* After `]`.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function after(code) {
// Note: `markdown-rs` also parses GFM footnotes here, which for us is in
// an extension.
// Resource (`[asd](fgh)`)?
if (code === 40) {
return effects.attempt(resourceConstruct, labelEndOk, defined ? labelEndOk : labelEndNok)(code);
}
// Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference?
if (code === 91) {
return effects.attempt(referenceFullConstruct, labelEndOk, defined ? referenceNotFull : labelEndNok)(code);
}
// Shortcut (`[asd]`) reference?
return defined ? labelEndOk(code) : labelEndNok(code);
}
/**
* After `]`, at `[`, but not at a full reference.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function referenceNotFull(code) {
return effects.attempt(referenceCollapsedConstruct, labelEndOk, labelEndNok)(code);
}
/**
* Done, we found something.
*
* ```markdown
* > | [a](b) c
* ^
* > | [a][b] c
* ^
* > | [a][] b
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function labelEndOk(code) {
// Note: `markdown-rs` does a bunch of stuff here.
return ok(code);
}
/**
* Done, its nothing.
*
* There was an okay opening, but we didnt match anything.
*
* ```markdown
* > | [a](b c
* ^
* > | [a][b c
* ^
* > | [a] b
* ^
* ```
*
* @type {State}
*/
function labelEndNok(code) {
labelStart._balanced = true;
return nok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeResource(effects, ok, nok) {
return resourceStart;
/**
* At a resource.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceStart(code) {
effects.enter("resource");
effects.enter("resourceMarker");
effects.consume(code);
effects.exit("resourceMarker");
return resourceBefore;
}
/**
* In resource, after `(`, at optional whitespace.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceBefore(code) {
return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, resourceOpen)(code) : resourceOpen(code);
}
/**
* In resource, after optional whitespace, at `)` or a destination.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceOpen(code) {
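    // Code 41 is `)` (an empty resource); otherwise parse a destination,
    // where the trailing `32` caps how deeply unescaped parentheses can nest
    // in a raw destination.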
if (code === 41) {
return resourceEnd(code);
}
return factoryDestination(effects, resourceDestinationAfter, resourceDestinationMissing, "resourceDestination", "resourceDestinationLiteral", "resourceDestinationLiteralMarker", "resourceDestinationRaw", "resourceDestinationString", 32)(code);
}
/**
* In resource, after destination, at optional whitespace.
*
* ```markdown
* > | [a](b) c
* ^
* ```
*
* @type {State}
*/
function resourceDestinationAfter(code) {
return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, resourceBetween)(code) : resourceEnd(code);
}
/**
* At invalid destination.
*
* ```markdown
* > | [a](<<) b
* ^
* ```
*
* @type {State}
*/
function resourceDestinationMissing(code) {
return nok(code);
}
/**
* In resource, after destination and whitespace, at `(` or title.
*
* ```markdown
* > | [a](b ) c
* ^
* ```
*
* @type {State}
*/
function resourceBetween(code) {
if (code === 34 || code === 39 || code === 40) {
return factoryTitle(effects, resourceTitleAfter, nok, "resourceTitle", "resourceTitleMarker", "resourceTitleString")(code);
}
return resourceEnd(code);
}
/**
* In resource, after title, at optional whitespace.
*
* ```markdown
* > | [a](b "c") d
* ^
* ```
*
* @type {State}
*/
function resourceTitleAfter(code) {
return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, resourceEnd)(code) : resourceEnd(code);
}
/**
* In resource, at `)`.
*
* ```markdown
* > | [a](b) d
* ^
* ```
*
* @type {State}
*/
function resourceEnd(code) {
if (code === 41) {
effects.enter("resourceMarker");
effects.consume(code);
effects.exit("resourceMarker");
effects.exit("resource");
return ok;
}
return nok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeReferenceFull(effects, ok, nok) {
const self = this;
return referenceFull;
/**
* In a reference (full), at the `[`.
*
* ```markdown
* > | [a][b] d
* ^
* ```
*
* @type {State}
*/
function referenceFull(code) {
return factoryLabel.call(self, effects, referenceFullAfter, referenceFullMissing, "reference", "referenceMarker", "referenceString")(code);
}
/**
* In a reference (full), after `]`.
*
* ```markdown
* > | [a][b] d
* ^
* ```
*
* @type {State}
*/
function referenceFullAfter(code) {
return self.parser.defined.includes(normalizeIdentifier(self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1))) ? ok(code) : nok(code);
}
/**
* In reference (full) that was missing.
*
* ```markdown
* > | [a][b d
* ^
* ```
*
* @type {State}
*/
function referenceFullMissing(code) {
return nok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeReferenceCollapsed(effects, ok, nok) {
return referenceCollapsedStart;
/**
* In reference (collapsed), at `[`.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] d
* ^
* ```
*
* @type {State}
*/
function referenceCollapsedStart(code) {
// We only attempt a collapsed label if theres a `[`.
effects.enter("reference");
effects.enter("referenceMarker");
effects.consume(code);
effects.exit("referenceMarker");
return referenceCollapsedOpen;
}
/**
* In reference (collapsed), at `]`.
*
* > 👉 **Note**: we only get here if the label is defined.
*
* ```markdown
* > | [a][] d
* ^
* ```
*
* @type {State}
*/
function referenceCollapsedOpen(code) {
if (code === 93) {
effects.enter("referenceMarker");
effects.consume(code);
effects.exit("referenceMarker");
effects.exit("reference");
return ok;
}
return nok(code);
}
}

View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const labelStartImage: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=label-start-image.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"label-start-image.d.ts","sourceRoot":"","sources":["label-start-image.js"],"names":[],"mappings":"AAaA,wBAAwB;AACxB,8BADW,SAAS,CAKnB;+BAZS,sBAAsB"}

View File

@@ -0,0 +1,102 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { labelEnd } from './label-end.js';
/** @type {Construct} */
export const labelStartImage = {
name: 'labelStartImage',
resolveAll: labelEnd.resolveAll,
tokenize: tokenizeLabelStartImage
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeLabelStartImage(effects, ok, nok) {
const self = this;
return start;
/**
* Start of label (image) start.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("labelImage");
effects.enter("labelImageMarker");
effects.consume(code);
effects.exit("labelImageMarker");
return open;
}
/**
* After `!`, at `[`.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
* @type {State}
*/
function open(code) {
if (code === 91) {
effects.enter("labelMarker");
effects.consume(code);
effects.exit("labelMarker");
effects.exit("labelImage");
return after;
}
return nok(code);
}
/**
* After `![`.
*
* ```markdown
* > | a ![b] c
* ^
* ```
*
   * This is needed because, when GFM footnotes are enabled, images never
* form when started with a `^`.
* Instead, links form:
*
* ```markdown
* ![^a](b)
*
* ![^a][b]
*
* [b]: c
* ```
*
* ```html
   * <p>!<a href="b">^a</a></p>
   * <p>!<a href="c">^a</a></p>
* ```
*
* @type {State}
*/
function after(code) {
    // To do: use a new field to do this; this is still needed for
    // `micromark-extension-gfm-footnote`, but the `label-start-link`
    // behavior isnt.
// Hidden footnotes hook.
/* c8 ignore next 3 */
return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs ? nok(code) : ok(code);
}
}

View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const labelStartLink: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=label-start-link.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"label-start-link.d.ts","sourceRoot":"","sources":["label-start-link.js"],"names":[],"mappings":"AAaA,wBAAwB;AACxB,6BADW,SAAS,CAKnB;+BAZS,sBAAsB"}

View File

@@ -0,0 +1,55 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { labelEnd } from './label-end.js';
/** @type {Construct} */
export const labelStartLink = {
name: 'labelStartLink',
resolveAll: labelEnd.resolveAll,
tokenize: tokenizeLabelStartLink
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeLabelStartLink(effects, ok, nok) {
const self = this;
return start;
/**
* Start of label (link) start.
*
* ```markdown
* > | a [b] c
* ^
* ```
*
* @type {State}
*/
function start(code) {
effects.enter("labelLink");
effects.enter("labelMarker");
effects.consume(code);
effects.exit("labelMarker");
effects.exit("labelLink");
return after;
}
/** @type {State} */
function after(code) {
// To do: this isnt needed in `micromark-extension-gfm-footnote`,
// remove.
// Hidden footnotes hook.
/* c8 ignore next 3 */
return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs ? nok(code) : ok(code);
}
}

View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const lineEnding: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=line-ending.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"line-ending.d.ts","sourceRoot":"","sources":["line-ending.js"],"names":[],"mappings":"AAcA,wBAAwB;AACxB,yBADW,SAAS,CACwD;+BATlE,sBAAsB"}

View File

@@ -0,0 +1,33 @@
/**
* @import {
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding } from 'micromark-util-character';
/** @type {Construct} */
export const lineEnding = {
name: 'lineEnding',
tokenize: tokenizeLineEnding
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeLineEnding(effects, ok) {
return start;
/** @type {State} */
function start(code) {
effects.enter("lineEnding");
effects.consume(code);
effects.exit("lineEnding");
return factorySpace(effects, ok, "linePrefix");
}
}

4
node_modules/micromark-core-commonmark/lib/list.d.ts generated vendored Normal file
View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const list: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=list.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"list.d.ts","sourceRoot":"","sources":["list.js"],"names":[],"mappings":"AAkBA,wBAAwB;AACxB,mBADW,SAAS,CAMnB;+BAhBS,sBAAsB"}

213
node_modules/micromark-core-commonmark/lib/list.js generated vendored Normal file
View File

@@ -0,0 +1,213 @@
/**
* @import {
* Code,
* Construct,
* Exiter,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { asciiDigit, markdownSpace } from 'micromark-util-character';
import { blankLine } from './blank-line.js';
import { thematicBreak } from './thematic-break.js';
/** @type {Construct} */
export const list = {
continuation: {
tokenize: tokenizeListContinuation
},
exit: tokenizeListEnd,
name: 'list',
tokenize: tokenizeListStart
};
/** @type {Construct} */
const listItemPrefixWhitespaceConstruct = {
partial: true,
tokenize: tokenizeListItemPrefixWhitespace
};
/** @type {Construct} */
const indentConstruct = {
partial: true,
tokenize: tokenizeIndent
};
// To do: `markdown-rs` parses list items on their own and later stitches them
// together.
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeListStart(effects, ok, nok) {
const self = this;
const tail = self.events[self.events.length - 1];
let initialSize = tail && tail[1].type === "linePrefix" ? tail[2].sliceSerialize(tail[1], true).length : 0;
let size = 0;
return start;
/** @type {State} */
function start(code) {
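    // Codes 42, 43, and 45 are the bullets `*`, `+`, and `-` (`*` and `-` are
    // first checked against a thematic break); digits start ordered lists,
    // and 49 (`1`) is the only value allowed when interrupting.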
const kind = self.containerState.type || (code === 42 || code === 43 || code === 45 ? "listUnordered" : "listOrdered");
if (kind === "listUnordered" ? !self.containerState.marker || code === self.containerState.marker : asciiDigit(code)) {
if (!self.containerState.type) {
self.containerState.type = kind;
effects.enter(kind, {
_container: true
});
}
if (kind === "listUnordered") {
effects.enter("listItemPrefix");
return code === 42 || code === 45 ? effects.check(thematicBreak, nok, atMarker)(code) : atMarker(code);
}
if (!self.interrupt || code === 49) {
effects.enter("listItemPrefix");
effects.enter("listItemValue");
return inside(code);
}
}
return nok(code);
}
/** @type {State} */
function inside(code) {
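    // Ordered list values are capped at 9 digits; `.` (46) or `)` (41) ends
    // the value, and an interrupting item must be exactly `1`.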
if (asciiDigit(code) && ++size < 10) {
effects.consume(code);
return inside;
}
if ((!self.interrupt || size < 2) && (self.containerState.marker ? code === self.containerState.marker : code === 41 || code === 46)) {
effects.exit("listItemValue");
return atMarker(code);
}
return nok(code);
}
/**
* @type {State}
**/
function atMarker(code) {
effects.enter("listItemMarker");
effects.consume(code);
effects.exit("listItemMarker");
self.containerState.marker = self.containerState.marker || code;
return effects.check(blankLine,
// Cant be empty when interrupting.
self.interrupt ? nok : onBlank, effects.attempt(listItemPrefixWhitespaceConstruct, endOfPrefix, otherPrefix));
}
/** @type {State} */
function onBlank(code) {
self.containerState.initialBlankLine = true;
initialSize++;
return endOfPrefix(code);
}
/** @type {State} */
function otherPrefix(code) {
if (markdownSpace(code)) {
effects.enter("listItemPrefixWhitespace");
effects.consume(code);
effects.exit("listItemPrefixWhitespace");
return endOfPrefix;
}
return nok(code);
}
/** @type {State} */
function endOfPrefix(code) {
self.containerState.size = initialSize + self.sliceSerialize(effects.exit("listItemPrefix"), true).length;
return ok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeListContinuation(effects, ok, nok) {
const self = this;
self.containerState._closeFlow = undefined;
return effects.check(blankLine, onBlank, notBlank);
/** @type {State} */
function onBlank(code) {
self.containerState.furtherBlankLines = self.containerState.furtherBlankLines || self.containerState.initialBlankLine;
// We have a blank line.
// Still, try to consume at most the items size.
return factorySpace(effects, ok, "listItemIndent", self.containerState.size + 1)(code);
}
/** @type {State} */
function notBlank(code) {
if (self.containerState.furtherBlankLines || !markdownSpace(code)) {
self.containerState.furtherBlankLines = undefined;
self.containerState.initialBlankLine = undefined;
return notInCurrentItem(code);
}
self.containerState.furtherBlankLines = undefined;
self.containerState.initialBlankLine = undefined;
return effects.attempt(indentConstruct, ok, notInCurrentItem)(code);
}
/** @type {State} */
function notInCurrentItem(code) {
// While we do continue, we signal that the flow should be closed.
self.containerState._closeFlow = true;
// As were closing flow, were no longer interrupting.
self.interrupt = undefined;
// Always populated by defaults.
return factorySpace(effects, effects.attempt(list, ok, nok), "linePrefix", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeIndent(effects, ok, nok) {
const self = this;
return factorySpace(effects, afterPrefix, "listItemIndent", self.containerState.size + 1);
/** @type {State} */
function afterPrefix(code) {
const tail = self.events[self.events.length - 1];
return tail && tail[1].type === "listItemIndent" && tail[2].sliceSerialize(tail[1], true).length === self.containerState.size ? ok(code) : nok(code);
}
}
/**
* @this {TokenizeContext}
* Context.
* @type {Exiter}
*/
function tokenizeListEnd(effects) {
effects.exit(this.containerState.type);
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeListItemPrefixWhitespace(effects, ok, nok) {
const self = this;
// Always populated by defaults.
return factorySpace(effects, afterPrefix, "listItemPrefixWhitespace", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4 + 1);
/** @type {State} */
function afterPrefix(code) {
const tail = self.events[self.events.length - 1];
return !markdownSpace(code) && tail && tail[1].type === "listItemPrefixWhitespace" ? ok(code) : nok(code);
}
}

View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const setextUnderline: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=setext-underline.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"setext-underline.d.ts","sourceRoot":"","sources":["setext-underline.js"],"names":[],"mappings":"AAgBA,wBAAwB;AACxB,8BADW,SAAS,CAKnB;+BAbS,sBAAsB"}

View File

@@ -0,0 +1,185 @@
/**
* @import {
* Code,
* Construct,
* Resolver,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding, markdownSpace } from 'micromark-util-character';
/** @type {Construct} */
export const setextUnderline = {
name: 'setextUnderline',
resolveTo: resolveToSetextUnderline,
tokenize: tokenizeSetextUnderline
};
/** @type {Resolver} */
function resolveToSetextUnderline(events, context) {
// To do: resolve like `markdown-rs`.
let index = events.length;
/** @type {number | undefined} */
let content;
/** @type {number | undefined} */
let text;
/** @type {number | undefined} */
let definition;
// Find the opening of the content.
// Itll always exist: we dont tokenize if it isnt there.
while (index--) {
if (events[index][0] === 'enter') {
if (events[index][1].type === "content") {
content = index;
break;
}
if (events[index][1].type === "paragraph") {
text = index;
}
}
// Exit
else {
if (events[index][1].type === "content") {
// Remove the content end (if needed well add it later)
events.splice(index, 1);
}
if (!definition && events[index][1].type === "definition") {
definition = index;
}
}
}
const heading = {
type: "setextHeading",
start: {
...events[content][1].start
},
end: {
...events[events.length - 1][1].end
}
};
// Change the paragraph to setext heading text.
events[text][1].type = "setextHeadingText";
// If we have definitions in the content, well keep on having content,
  // but we need to move it.
if (definition) {
events.splice(text, 0, ['enter', heading, context]);
events.splice(definition + 1, 0, ['exit', events[content][1], context]);
events[content][1].end = {
...events[definition][1].end
};
} else {
events[content][1] = heading;
}
// Add the heading exit at the end.
events.push(['exit', heading, context]);
return events;
}
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeSetextUnderline(effects, ok, nok) {
const self = this;
/** @type {NonNullable<Code>} */
let marker;
return start;
/**
* At start of heading (setext) underline.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function start(code) {
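    // The construct is only tried at `-` (45) or `=` (61); the whole
    // underline must repeat that marker.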
let index = self.events.length;
/** @type {boolean | undefined} */
let paragraph;
// Find an opening.
while (index--) {
// Skip enter/exit of line ending, line prefix, and content.
// We can now either have a definition or a paragraph.
if (self.events[index][1].type !== "lineEnding" && self.events[index][1].type !== "linePrefix" && self.events[index][1].type !== "content") {
paragraph = self.events[index][1].type === "paragraph";
break;
}
}
// To do: handle lazy/pierce like `markdown-rs`.
// To do: parse indent like `markdown-rs`.
if (!self.parser.lazy[self.now().line] && (self.interrupt || paragraph)) {
effects.enter("setextHeadingLine");
marker = code;
return before(code);
}
return nok(code);
}
/**
* After optional whitespace, at `-` or `=`.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function before(code) {
effects.enter("setextHeadingLineSequence");
return inside(code);
}
/**
* In sequence.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function inside(code) {
if (code === marker) {
effects.consume(code);
return inside;
}
effects.exit("setextHeadingLineSequence");
return markdownSpace(code) ? factorySpace(effects, after, "lineSuffix")(code) : after(code);
}
/**
* After sequence, after optional whitespace.
*
* ```markdown
* | aa
* > | ==
* ^
* ```
*
* @type {State}
*/
function after(code) {
if (code === null || markdownLineEnding(code)) {
effects.exit("setextHeadingLine");
return ok(code);
}
return nok(code);
}
}

View File

@@ -0,0 +1,4 @@
/** @type {Construct} */
export const thematicBreak: Construct;
import type { Construct } from 'micromark-util-types';
//# sourceMappingURL=thematic-break.d.ts.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"thematic-break.d.ts","sourceRoot":"","sources":["thematic-break.js"],"names":[],"mappings":"AAeA,wBAAwB;AACxB,4BADW,SAAS,CAInB;+BAZS,sBAAsB"}

View File

@@ -0,0 +1,102 @@
/**
* @import {
* Code,
* Construct,
* State,
* TokenizeContext,
* Tokenizer
* } from 'micromark-util-types'
*/
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding, markdownSpace } from 'micromark-util-character';
/** @type {Construct} */
export const thematicBreak = {
name: 'thematicBreak',
tokenize: tokenizeThematicBreak
};
/**
* @this {TokenizeContext}
* Context.
* @type {Tokenizer}
*/
function tokenizeThematicBreak(effects, ok, nok) {
let size = 0;
/** @type {NonNullable<Code>} */
let marker;
return start;
/**
* Start of thematic break.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function start(code) {
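    // Thematic break markers are `*` (42), `-` (45), or `_` (95); at least
    // three are needed, optionally separated by spaces or tabs.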
effects.enter("thematicBreak");
// To do: parse indent like `markdown-rs`.
return before(code);
}
/**
* After optional whitespace, at marker.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function before(code) {
marker = code;
return atBreak(code);
}
/**
* After something, before something else.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function atBreak(code) {
if (code === marker) {
effects.enter("thematicBreakSequence");
return sequence(code);
}
if (size >= 3 && (code === null || markdownLineEnding(code))) {
effects.exit("thematicBreak");
return ok(code);
}
return nok(code);
}
/**
* In sequence.
*
* ```markdown
* > | ***
* ^
* ```
*
* @type {State}
*/
function sequence(code) {
if (code === marker) {
effects.consume(code);
size++;
return sequence;
}
effects.exit("thematicBreakSequence");
return markdownSpace(code) ? factorySpace(effects, atBreak, "whitespace")(code) : atBreak(code);
}
}