Refactor routing in App component to enhance navigation and improve error handling by integrating dynamic routes and updating the NotFound route.

This commit is contained in:
becarta
2025-05-23 12:43:00 +02:00
parent f40db0f5c9
commit a544759a3b
11127 changed files with 1647032 additions and 0 deletions

43
node_modules/hast-util-raw/index.d.ts generated vendored Normal file
View File

@@ -0,0 +1,43 @@
import type {VFile} from 'vfile'
export {raw} from './lib/index.js'
/**
 * Configuration.
 *
 * Passed as the optional second argument to `raw`.
 */
export interface Options {
  /**
   * Corresponding virtual file representing the input document (optional).
   */
  file?: VFile | null | undefined
  /**
   * List of custom hast node types to pass through (as in, keep) (optional).
   *
   * If the passed through nodes have children, those children are expected to
   * be hast again and will be handled.
   */
  passThrough?: Array<string> | null | undefined
  /**
   * Whether to disallow irregular tags in `raw` nodes according to GFM
   * tagfilter
   * (default: `false`).
   *
   * This affects the following tags,
   * grouped by their kind:
   *
   * * `RAWTEXT`: `iframe`, `noembed`, `noframes`, `style`, `xmp`
   * * `RCDATA`: `textarea`, `title`
   * * `SCRIPT_DATA`: `script`
   * * `PLAINTEXT`: `plaintext`
   *
   * When you know that you do not want authors to write these tags,
   * you can enable this option to prevent their use from running amok.
   *
   * See:
   * [*Disallowed Raw HTML* in
   * `cmark-gfm`](https://github.github.com/gfm/#disallowed-raw-html-extension-).
   */
  tagfilter?: boolean | null | undefined
}

2
node_modules/hast-util-raw/index.js generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Note: types exposed from `index.d.ts`.
export {raw} from './lib/index.js'

54
node_modules/hast-util-raw/lib/index.d.ts generated vendored Normal file
View File

@@ -0,0 +1,54 @@
/**
 * Pass a hast tree through an HTML parser, which will fix nesting, and turn
 * raw nodes into actual nodes.
 *
 * @param {Nodes} tree
 *   Original hast tree to transform.
 * @param {Options | null | undefined} [options]
 *   Configuration (optional).
 * @returns {Nodes}
 *   Parsed again tree.
 */
export function raw(tree: Nodes, options?: Options | null | undefined): Nodes;
/**
 * Info passed around about the current state.
 */
export type State = {
  /**
   * Add a hast node to the parser.
   */
  handle: (node: Nodes) => undefined;
  /**
   * User configuration.
   */
  options: Options;
  /**
   * Current parser.
   */
  parser: Parser<DefaultTreeAdapterMap>;
  /**
   * Whether there are stitches.
   */
  stitches: boolean;
};
/**
 * Custom comment-like value we pass through parse5, which contains a
 * replacement node that we'll swap back in afterwards.
 */
export type Stitch = {
  /**
   * Node type.
   */
  type: "comment";
  /**
   * Replacement value.
   */
  value: {
    stitch: Nodes;
  };
};
import type { Nodes } from 'hast';
import type { Options } from 'hast-util-raw';
import { Parser } from 'parse5';
import type { DefaultTreeAdapterMap } from 'parse5';
//# sourceMappingURL=index.d.ts.map

1
node_modules/hast-util-raw/lib/index.d.ts.map generated vendored Normal file
View File

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["index.js"],"names":[],"mappings":"AAyDA;;;;;;;;;;GAUG;AACH,0BAPW,KAAK,YAEL,OAAO,GAAG,IAAI,GAAG,SAAS,GAExB,KAAK,CAuDjB;;;;;;;;YA7Ga,CAAC,IAAI,EAAE,KAAK,KAAK,SAAS;;;;aAE1B,OAAO;;;;YAEP,MAAM,CAAC,qBAAqB,CAAC;;;;cAE7B,OAAO;;;;;;;;;;UAQP,SAAS;;;;WAET;QAAC,MAAM,EAAE,KAAK,CAAA;KAAC;;2BAzB+C,MAAM;6BADxD,eAAe;uBAkCQ,QAAQ;2CA/BF,QAAQ"}

682
node_modules/hast-util-raw/lib/index.js generated vendored Normal file
View File

@@ -0,0 +1,682 @@
/**
* @import {Options} from 'hast-util-raw'
* @import {Comment, Doctype, Element, Nodes, RootContent, Root, Text} from 'hast'
* @import {Raw} from 'mdast-util-to-hast'
* @import {DefaultTreeAdapterMap, ParserOptions} from 'parse5'
* @import {Point} from 'unist'
*/
/**
* @typedef State
* Info passed around about the current state.
* @property {(node: Nodes) => undefined} handle
* Add a hast node to the parser.
* @property {Options} options
* User configuration.
* @property {Parser<DefaultTreeAdapterMap>} parser
* Current parser.
* @property {boolean} stitches
* Whether there are stitches.
*/
/**
* @typedef Stitch
* Custom comment-like value we pass through parse5, which contains a
 * replacement node that we'll swap back in afterwards.
* @property {'comment'} type
* Node type.
* @property {{stitch: Nodes}} value
* Replacement value.
*/
import structuredClone from '@ungap/structured-clone'
import {fromParse5} from 'hast-util-from-parse5'
import {toParse5} from 'hast-util-to-parse5'
import {htmlVoidElements} from 'html-void-elements'
import {Parser, Token, TokenizerMode, html} from 'parse5'
import {pointEnd, pointStart} from 'unist-util-position'
import {visit} from 'unist-util-visit'
import {webNamespaces} from 'web-namespaces'
import {zwitch} from 'zwitch'
// Matches the tags disallowed by the GFM tagfilter extension; used in
// `handleRaw` (when `options.tagfilter` is on) to turn their `<` into `&lt;`.
const gfmTagfilterExpression =
  /<(\/?)(iframe|noembed|noframes|plaintext|script|style|textarea|title|xmp)(?=[\t\n\f\r />])/gi
// Node types associated with MDX.
// <https://github.com/mdx-js/mdx/blob/8a56312/packages/mdx/lib/node-types.js>
const knownMdxNames = new Set([
  'mdxFlowExpression',
  'mdxJsxFlowElement',
  'mdxJsxTextElement',
  'mdxTextExpression',
  'mdxjsEsm'
])
/** @type {ParserOptions<DefaultTreeAdapterMap>} */
// Collect positional info; parse with scripting disabled.
const parseOptions = {sourceCodeLocationInfo: true, scriptingEnabled: false}
/**
 * Pass a hast tree through an HTML parser, which will fix nesting, and turn
 * raw nodes into actual nodes.
 *
 * @param {Nodes} tree
 *   Original hast tree to transform.
 * @param {Options | null | undefined} [options]
 *   Configuration (optional).
 * @returns {Nodes}
 *   Parsed again tree.
 */
export function raw(tree, options) {
  // Whole documents (doctype or `<html>` at the top) use a document parser;
  // everything else is parsed as a fragment.
  const document = documentMode(tree)
  /** @type {(node: Nodes, state: State) => undefined} */
  const one = zwitch('type', {
    handlers: {root, element, text, comment, doctype, raw: handleRaw},
    unknown
  })
  /** @type {State} */
  const state = {
    parser: document
      ? new Parser(parseOptions)
      : Parser.getFragmentParser(undefined, parseOptions),
    handle(node) {
      one(node, state)
    },
    stitches: false,
    options: options || {}
  }
  // Feed the whole tree to the parser, then flush anything still pending in
  // the tokenizer.
  one(tree, state)
  resetTokenizer(state, pointStart())
  const p5 = document ? state.parser.document : state.parser.getFragment()
  const result = fromParse5(p5, {
    // To do: support `space`?
    file: state.options.file
  })
  // Swap the comment-shaped placeholders (see `stitch`) back for the real
  // passed-through nodes.
  if (state.stitches) {
    visit(result, 'comment', function (node, index, parent) {
      const stitch = /** @type {Stitch} */ (/** @type {unknown} */ (node))
      if (stitch.value.stitch && parent && index !== undefined) {
        /** @type {Array<RootContent>} */
        const siblings = parent.children
        // @ts-expect-error: assume the stitch is allowed.
        siblings[index] = stitch.value.stitch
        return index
      }
    })
  }
  // Unpack if possible and when not given a `root`.
  if (
    result.type === 'root' &&
    result.children.length === 1 &&
    result.children[0].type === tree.type
  ) {
    return result.children[0]
  }
  return result
}
/**
 * Transform a list of nodes by handing each one to the parser.
 *
 * @param {Array<RootContent>} nodes
 *   hast content.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function all(nodes, state) {
  /* istanbul ignore else - invalid nodes, see rehypejs/rehype-raw#7. */
  if (nodes) {
    for (const node of nodes) {
      state.handle(node)
    }
  }
}
/**
 * Transform a root.
 *
 * @param {Root} node
 *   hast root node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function root(node, state) {
  // A root emits no tags of its own: just feed its children to the parser.
  all(node.children, state)
}
/**
 * Transform an element.
 *
 * @param {Element} node
 *   hast element node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function element(node, state) {
  // Emit the element as a token stream: open tag, contents, close tag.
  // `endTag` skips the close tag for HTML void elements.
  startTag(node, state)
  all(node.children, state)
  endTag(node, state)
}
/**
 * Transform a text.
 *
 * @param {Text} node
 *   hast text node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function text(node, state) {
  // Allow `DATA` through `PLAINTEXT`,
  // but when hanging in a tag for example,
  // switch back to `DATA`.
  // Note: `State` is not exposed by `parse5`, so these numbers are fragile.
  // See: <https://github.com/inikulin/parse5/blob/46cba43/packages/parse5/lib/tokenizer/index.ts#L58>
  if (state.parser.tokenizer.state > 4) {
    state.parser.tokenizer.state = 0
  }
  /** @type {Token.CharacterToken} */
  const token = {
    type: Token.TokenType.CHARACTER,
    chars: node.value,
    location: createParse5Location(node)
  }
  resetTokenizer(state, pointStart(node))
  // Act as if the tokenizer emitted this character token and let the parser
  // process it directly (see `startTag` for why).
  // @ts-expect-error: private.
  state.parser.currentToken = token
  // @ts-expect-error: private.
  state.parser._processToken(state.parser.currentToken)
}
/**
 * Transform a doctype.
 *
 * @param {Doctype} node
 *   hast doctype node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function doctype(node, state) {
  // hast doctypes carry no name or ids: always emit the HTML5 `html` doctype.
  /** @type {Token.DoctypeToken} */
  const token = {
    type: Token.TokenType.DOCTYPE,
    name: 'html',
    forceQuirks: false,
    publicId: '',
    systemId: '',
    location: createParse5Location(node)
  }
  resetTokenizer(state, pointStart(node))
  // Act as if the tokenizer emitted this token (see `startTag` for why).
  // @ts-expect-error: private.
  state.parser.currentToken = token
  // @ts-expect-error: private.
  state.parser._processToken(state.parser.currentToken)
}
/**
 * Transform a passed-through node into a comment-shaped placeholder (a
 * stitch), to be swapped back in by `raw` after reparsing.
 *
 * @param {Nodes} node
 *   unknown node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function stitch(node, state) {
  // Mark that there are stitches, so we need to walk the tree and revert them.
  state.stitches = true
  /** @type {Nodes} */
  const clone = cloneWithoutChildren(node)
  // Recurse, because to somewhat handle `[<x>]</x>` (where `[]` denotes the
  // passed through node).
  if ('children' in node && 'children' in clone) {
    // Root in root out.
    const fakeRoot = /** @type {Root} */ (
      raw({type: 'root', children: node.children}, state.options)
    )
    clone.children = fakeRoot.children
  }
  // Hack: `value` is supposed to be a string, but as none of the tools
  // (`parse5` or `hast-util-from-parse5`) looks at it, we can pass nodes
  // through.
  comment({type: 'comment', value: {stitch: clone}}, state)
}
/**
 * Transform a comment (or stitch).
 *
 * @param {Comment | Stitch} node
 *   hast comment node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function comment(node, state) {
  // For stitches, `value` is actually a `{stitch: Nodes}` object (see
  // `stitch`), not a string.
  /** @type {string} */
  // @ts-expect-error: we pass stitches through.
  const data = node.value
  /** @type {Token.CommentToken} */
  const token = {
    type: Token.TokenType.COMMENT,
    data,
    location: createParse5Location(node)
  }
  resetTokenizer(state, pointStart(node))
  // @ts-expect-error: private.
  state.parser.currentToken = token
  // @ts-expect-error: private.
  state.parser._processToken(state.parser.currentToken)
}
/**
 * Transform a raw node: write its string value into the tokenizer at the
 * node's position and run the parsing loop over it.
 *
 * @param {Raw} node
 *   hast raw node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function handleRaw(node, state) {
  // Reset preprocessor:
  // See: <https://github.com/inikulin/parse5/blob/6f7ca60/packages/parse5/lib/tokenizer/preprocessor.ts#L18-L31>.
  state.parser.tokenizer.preprocessor.html = ''
  state.parser.tokenizer.preprocessor.pos = -1
  // @ts-expect-error: private.
  // type-coverage:ignore-next-line
  state.parser.tokenizer.preprocessor.lastGapPos = -2
  // @ts-expect-error: private.
  // type-coverage:ignore-next-line
  state.parser.tokenizer.preprocessor.gapStack = []
  // @ts-expect-error: private.
  // type-coverage:ignore-next-line
  state.parser.tokenizer.preprocessor.skipNextNewLine = false
  state.parser.tokenizer.preprocessor.lastChunkWritten = false
  state.parser.tokenizer.preprocessor.endOfChunkHit = false
  // @ts-expect-error: private.
  // type-coverage:ignore-next-line
  state.parser.tokenizer.preprocessor.isEol = false
  // Now pass `node.value` (filtered first when `tagfilter` is on).
  setPoint(state, pointStart(node))
  state.parser.tokenizer.write(
    state.options.tagfilter
      ? node.value.replace(gfmTagfilterExpression, '&lt;$1$2')
      : node.value,
    false
  )
  // @ts-expect-error: private.
  state.parser.tokenizer._runParsingLoop()
  // Character references hang, so if we ended there, we need to flush
  // those too.
  // We reset the preprocessor as if the document ends here.
  // Then one single call to the relevant state does the trick, parse5
  // consumes the whole token.
  // Note: `State` is not exposed by `parse5`, so these numbers are fragile.
  // See: <https://github.com/inikulin/parse5/blob/46cba43/packages/parse5/lib/tokenizer/index.ts#L58>
  // Note: a change to `parse5`, which breaks this, was merged but not released.
  // Investigate when it is.
  // To do: remove next major.
  /* c8 ignore next 12 -- removed in <https://github.com/inikulin/parse5/pull/897> */
  if (
    state.parser.tokenizer.state === 72 /* NAMED_CHARACTER_REFERENCE */ ||
    // @ts-expect-error: removed.
    state.parser.tokenizer.state === 78 /* NUMERIC_CHARACTER_REFERENCE_END */
  ) {
    state.parser.tokenizer.preprocessor.lastChunkWritten = true
    /** @type {number} */
    // @ts-expect-error: private.
    const cp = state.parser.tokenizer._consume()
    // @ts-expect-error: private.
    state.parser.tokenizer._callState(cp)
  }
}
/**
 * Handle an otherwise-unknown node: pass it through when listed in
 * `options.passThrough`, otherwise crash.
 *
 * @param {unknown} node_
 *   unknown node.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Never.
 */
function unknown(node_, state) {
  const node = /** @type {Nodes} */ (node_)
  if (
    state.options.passThrough &&
    state.options.passThrough.includes(node.type)
  ) {
    stitch(node, state)
  } else {
    let extra = ''
    // Give a more helpful message for MDX nodes, which commonly end up here.
    if (knownMdxNames.has(node.type)) {
      extra =
        ". It looks like you are using MDX nodes with `hast-util-raw` (or `rehype-raw`). If you use this because you are using remark or rehype plugins that inject `'html'` nodes, then please raise an issue with that plugin, as its a bad and slow idea. If you use this because you are using markdown syntax, then you have to configure this utility (or plugin) to pass through these nodes (see `passThrough` in docs), but you can also migrate to use the MDX syntax"
    }
    throw new Error('Cannot compile `' + node.type + '` node' + extra)
  }
}
/**
 * Reset the tokenizer of a parser.
 *
 * @param {State} state
 *   Info passed around about the current state.
 * @param {Point | undefined} point
 *   Point.
 * @returns {undefined}
 *   Nothing.
 */
function resetTokenizer(state, point) {
  setPoint(state, point)
  // Process final characters if they're still there after hibernating.
  /** @type {Token.CharacterToken} */
  // @ts-expect-error: private.
  const token = state.parser.tokenizer.currentCharacterToken
  if (token && token.location) {
    token.location.endLine = state.parser.tokenizer.preprocessor.line
    token.location.endCol = state.parser.tokenizer.preprocessor.col + 1
    token.location.endOffset = state.parser.tokenizer.preprocessor.offset + 1
    // @ts-expect-error: private.
    state.parser.currentToken = token
    // @ts-expect-error: private.
    state.parser._processToken(state.parser.currentToken)
  }
  // Reset tokenizer:
  // See: <https://github.com/inikulin/parse5/blob/6f7ca60/packages/parse5/lib/tokenizer/index.ts#L187-L223>.
  // Especially putting it back in the `data` state is useful: some elements,
  // like textareas and iframes, change the state.
  // See GH-7.
  // But also if broken HTML is in `raw`, and then a correct element is given.
  // See GH-11.
  // @ts-expect-error: private.
  state.parser.tokenizer.paused = false
  // @ts-expect-error: private.
  state.parser.tokenizer.inLoop = false
  // Note: don't reset `state`, `inForeignNode`, or `lastStartTagName`, we
  // manually update those when needed.
  state.parser.tokenizer.active = false
  // @ts-expect-error: private.
  state.parser.tokenizer.returnState = TokenizerMode.DATA
  // @ts-expect-error: private.
  state.parser.tokenizer.charRefCode = -1
  // @ts-expect-error: private.
  state.parser.tokenizer.consumedAfterSnapshot = -1
  // @ts-expect-error: private.
  state.parser.tokenizer.currentLocation = null
  // @ts-expect-error: private.
  state.parser.tokenizer.currentCharacterToken = null
  // @ts-expect-error: private.
  state.parser.tokenizer.currentToken = null
  // @ts-expect-error: private.
  state.parser.tokenizer.currentAttr = {name: '', value: ''}
}
/**
 * Set current location.
 *
 * @param {State} state
 *   Info passed around about the current state.
 * @param {Point | undefined} point
 *   Point.
 * @returns {undefined}
 *   Nothing.
 */
function setPoint(state, point) {
  // Only act when the point carries a usable offset; otherwise leave the
  // tokenizer location untouched.
  if (point && point.offset !== undefined) {
    /** @type {Token.Location} */
    const location = {
      startLine: point.line,
      startCol: point.column,
      startOffset: point.offset,
      endLine: -1,
      endCol: -1,
      endOffset: -1
    }
    // @ts-expect-error: private.
    // type-coverage:ignore-next-line
    state.parser.tokenizer.preprocessor.lineStartPos = -point.column + 1 // Looks weird, but ensures we get correct positional info.
    state.parser.tokenizer.preprocessor.droppedBufferSize = point.offset
    state.parser.tokenizer.preprocessor.line = point.line
    // @ts-expect-error: private.
    state.parser.tokenizer.currentLocation = location
  }
}
/**
 * Emit a start tag.
 *
 * @param {Element} node
 *   Element.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function startTag(node, state) {
  const tagName = node.tagName.toLowerCase()
  // Ignore tags if we're in plain text.
  if (state.parser.tokenizer.state === TokenizerMode.PLAINTEXT) return
  resetTokenizer(state, pointStart(node))
  // Determine the namespace: inherit from the current open element, and
  // switch to the SVG namespace when entering `<svg>` from HTML.
  const current = state.parser.openElements.current
  let ns = 'namespaceURI' in current ? current.namespaceURI : webNamespaces.html
  if (ns === webNamespaces.html && tagName === 'svg') {
    ns = webNamespaces.svg
  }
  const result = toParse5(
    // Shallow clone to not delve into `children`: we only need the attributes.
    {...node, children: []},
    {space: ns === webNamespaces.svg ? 'svg' : 'html'}
  )
  /** @type {Token.TagToken} */
  const tag = {
    type: Token.TokenType.START_TAG,
    tagName,
    tagID: html.getTagID(tagName),
    // We always send start and end tags.
    selfClosing: false,
    ackSelfClosing: false,
    // Always element.
    /* c8 ignore next */
    attrs: 'attrs' in result ? result.attrs : [],
    location: createParse5Location(node)
  }
  // The HTML parsing algorithm works by doing half of the state management in
  // the tokenizer and half in the parser.
  // We can't use the tokenizer here, as we don't have strings.
  // So we act *as if* the tokenizer emits tokens:
  // @ts-expect-error: private.
  state.parser.currentToken = tag
  // @ts-expect-error: private.
  state.parser._processToken(state.parser.currentToken)
  // …but then we still need a bunch of work that the tokenizer would normally
  // do, such as:
  // Set a tag name, similar to how the tokenizer would do it.
  state.parser.tokenizer.lastStartTagName = tagName
  // `inForeignNode` is correctly set by the parser.
}
/**
 * Emit an end tag.
 *
 * @param {Element} node
 *   Element.
 * @param {State} state
 *   Info passed around about the current state.
 * @returns {undefined}
 *   Nothing.
 */
function endTag(node, state) {
  const tagName = node.tagName.toLowerCase()
  // Do not emit closing tags for HTML void elements.
  if (
    !state.parser.tokenizer.inForeignNode &&
    htmlVoidElements.includes(tagName)
  ) {
    return
  }
  // Ignore tags if we're in plain text.
  if (state.parser.tokenizer.state === TokenizerMode.PLAINTEXT) return
  resetTokenizer(state, pointEnd(node))
  /** @type {Token.TagToken} */
  const tag = {
    type: Token.TokenType.END_TAG,
    tagName,
    tagID: html.getTagID(tagName),
    selfClosing: false,
    ackSelfClosing: false,
    attrs: [],
    location: createParse5Location(node)
  }
  // The HTML parsing algorithm works by doing half of the state management in
  // the tokenizer and half in the parser.
  // We can't use the tokenizer here, as we don't have strings.
  // So we act *as if* the tokenizer emits tokens:
  // @ts-expect-error: private.
  state.parser.currentToken = tag
  // @ts-expect-error: private.
  state.parser._processToken(state.parser.currentToken)
  // …but then we still need a bunch of work that the tokenizer would normally
  // do, such as:
  // Switch back to the data state after alternative states that don't accept
  // tags:
  if (
    // Current element is closed.
    tagName === state.parser.tokenizer.lastStartTagName &&
    // `<textarea>` and `<title>`
    (state.parser.tokenizer.state === TokenizerMode.RCDATA ||
      // `<iframe>`, `<noembed>`, `<noframes>`, `<style>`, `<xmp>`
      state.parser.tokenizer.state === TokenizerMode.RAWTEXT ||
      // `<script>`
      state.parser.tokenizer.state === TokenizerMode.SCRIPT_DATA)
    // Note: `<plaintext>` not needed, as it's the last element.
  ) {
    state.parser.tokenizer.state = TokenizerMode.DATA
  }
}
/**
 * Check if `node` represents a whole document or a fragment.
 *
 * @param {Nodes} node
 *   hast node.
 * @returns {boolean}
 *   Whether this represents a whole document or a fragment.
 */
function documentMode(node) {
  // For roots, inspect the first child; otherwise inspect the node itself.
  const candidate = node.type === 'root' ? node.children[0] : node
  if (!candidate) return false
  if (candidate.type === 'doctype') return true
  return (
    candidate.type === 'element' && candidate.tagName.toLowerCase() === 'html'
  )
}
/**
 * Get a `parse5` location from a node.
 *
 * @param {Nodes | Stitch} node
 *   hast node.
 * @returns {Token.Location}
 *   `parse5` location.
 */
function createParse5Location(node) {
  // Fall back to all-`undefined` fields when the node has no position.
  const start = pointStart(node) || {
    line: undefined,
    column: undefined,
    offset: undefined
  }
  const end = pointEnd(node) || {
    line: undefined,
    column: undefined,
    offset: undefined
  }
  /** @type {Record<keyof Token.Location, number | undefined>} */
  const location = {
    startLine: start.line,
    startCol: start.column,
    startOffset: start.offset,
    endLine: end.line,
    endCol: end.column,
    endOffset: end.offset
  }
  // @ts-expect-error: unist point values can be `undefined` in hast, which
  // `parse5` types don't want.
  return location
}
/**
 * Deep-clone a node, dropping its children (when it has any).
 *
 * @template {Nodes} NodeType
 *   Node type.
 * @param {NodeType} node
 *   Node to clone.
 * @returns {NodeType}
 *   Cloned node, without children.
 */
function cloneWithoutChildren(node) {
  if ('children' in node) {
    return structuredClone({...node, children: []})
  }
  return structuredClone(node)
}

22
node_modules/hast-util-raw/license generated vendored Normal file
View File

@@ -0,0 +1,22 @@
(The MIT License)
Copyright (c) Titus Wormer <tituswormer@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

123
node_modules/hast-util-raw/package.json generated vendored Normal file
View File

@@ -0,0 +1,123 @@
{
"author": "Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)",
"bugs": "https://github.com/syntax-tree/hast-util-raw/issues",
"contributors": [
"Christian Murphy <christian.murphy.42@gmail.com>",
"Titus Wormer <tituswormer@gmail.com> (https://wooorm.com)"
],
"dependencies": {
"@types/hast": "^3.0.0",
"@types/unist": "^3.0.0",
"@ungap/structured-clone": "^1.0.0",
"hast-util-from-parse5": "^8.0.0",
"hast-util-to-parse5": "^8.0.0",
"html-void-elements": "^3.0.0",
"mdast-util-to-hast": "^13.0.0",
"parse5": "^7.0.0",
"unist-util-position": "^5.0.0",
"unist-util-visit": "^5.0.0",
"vfile": "^6.0.0",
"web-namespaces": "^2.0.0",
"zwitch": "^2.0.0"
},
"description": "hast utility to reparse a tree",
"devDependencies": {
"@types/node": "^22.0.0",
"@types/ungap__structured-clone": "^1.0.0",
"c8": "^10.0.0",
"hast-util-to-html": "^9.0.0",
"hastscript": "^9.0.0",
"mdast-util-from-markdown": "^2.0.0",
"prettier": "^3.0.0",
"remark-api": "^1.0.0",
"remark-cli": "^12.0.0",
"remark-preset-wooorm": "^10.0.0",
"type-coverage": "^2.0.0",
"typescript": "^5.0.0",
"unist-builder": "^4.0.0",
"xo": "^0.59.0"
},
"exports": "./index.js",
"files": [
"index.d.ts.map",
"index.d.ts",
"index.js",
"lib/"
],
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
},
"keywords": [
"hast-util",
"hast",
"html",
"unist",
"utility",
"util",
"parse",
"raw"
],
"license": "MIT",
"name": "hast-util-raw",
"prettier": {
"bracketSpacing": false,
"semi": false,
"singleQuote": true,
"tabWidth": 2,
"trailingComma": "none",
"useTabs": false
},
"remarkConfig": {
"plugins": [
"remark-preset-wooorm",
"remark-api"
]
},
"repository": "syntax-tree/hast-util-raw",
"scripts": {
"build": "tsc --build --clean && tsc --build && type-coverage",
"format": "remark . -qfo && prettier . -w --log-level warn && xo --fix",
"test-api": "node --conditions development test.js",
"test-coverage": "c8 --100 --reporter lcov npm run test-api",
"test": "npm run build && npm run format && npm run test-coverage"
},
"sideEffects": false,
"typeCoverage": {
"atLeast": 100,
"strict": true
},
"type": "module",
"version": "9.1.0",
"xo": {
"overrides": [
{
"files": [
"**/*.ts"
],
"rules": {
"@typescript-eslint/array-type": [
"error",
{
"default": "generic"
}
],
"@typescript-eslint/ban-types": [
"error",
{
"extendDefaults": true
}
],
"@typescript-eslint/consistent-type-definitions": [
"error",
"interface"
]
}
}
],
"prettier": true,
"rules": {
"unicorn/prefer-string-replace-all": "off"
}
}
}

288
node_modules/hast-util-raw/readme.md generated vendored Normal file
View File

@@ -0,0 +1,288 @@
# hast-util-raw
[![Build][build-badge]][build]
[![Coverage][coverage-badge]][coverage]
[![Downloads][downloads-badge]][downloads]
[![Size][size-badge]][size]
[![Sponsors][sponsors-badge]][collective]
[![Backers][backers-badge]][collective]
[![Chat][chat-badge]][chat]
[hast][] utility to parse the tree and semistandard `raw` nodes (strings of
HTML) again, keeping positional info okay.
## Contents
* [What is this?](#what-is-this)
* [When should I use this?](#when-should-i-use-this)
* [Install](#install)
* [Use](#use)
* [API](#api)
* [`Options`](#options)
* [`raw(tree, options)`](#rawtree-options)
* [Types](#types)
* [Compatibility](#compatibility)
* [Security](#security)
* [Related](#related)
* [Contribute](#contribute)
* [License](#license)
## What is this?
This package is a utility to parse a document again.
It passes each node and embedded raw HTML through an HTML parser
([`parse5`][parse5]), to recreate a tree exactly as how a browser would parse
it, while keeping the original data and positional info intact.
## When should I use this?
This utility is particularly useful when coming from markdown and wanting to
support HTML embedded inside that markdown (which requires passing
`allowDangerousHtml: true` to [`mdast-util-to-hast`][mdast-util-to-hast]).
Markdown dictates how, say, a list item or emphasis can be parsed.
We can use that to turn the markdown syntax tree into an HTML syntax tree.
But markdown also dictates that things that look like HTML, are passed through
untouched, even when it just looks like XML but doesn't really make sense, so we
can't normally use these strings of "HTML" to create an HTML syntax tree.
This utility can.
It can be used to take those strings of HTML and include them into the syntax
tree as actual nodes.
If your final result is HTML and you trust content, then “strings” are fine
(you can pass `allowDangerousHtml: true` to `hast-util-to-html`, which passes
HTML through untouched).
But there are two main cases where a proper syntax tree is preferred:
* hast utilities need a proper syntax tree as they operate on actual nodes to
inspect or transform things, they cant operate on strings of HTML
* other output formats (React, MDX, etc) need actual nodes and cant handle
strings of HTML
The plugin [`rehype-raw`][rehype-raw] wraps this utility at a higher-level
(easier) abstraction.
## Install
This package is [ESM only][esm].
In Node.js (version 16+), install with [npm][]:
```sh
npm install hast-util-raw
```
In Deno with [`esm.sh`][esmsh]:
```js
import {raw} from 'https://esm.sh/hast-util-raw@9'
```
In browsers with [`esm.sh`][esmsh]:
```html
<script type="module">
import {raw} from 'https://esm.sh/hast-util-raw@9?bundle'
</script>
```
## Use
```js
import {h} from 'hastscript'
import {raw} from 'hast-util-raw'
const tree = h('div', [h('h1', ['Foo ', h('h2', 'Bar'), ' Baz'])])
const reformatted = raw(tree)
console.log(reformatted)
```
Yields:
```js
{ type: 'element',
tagName: 'div',
properties: {},
children:
[ { type: 'element',
tagName: 'h1',
properties: {},
children: [Object] },
{ type: 'element',
tagName: 'h2',
properties: {},
children: [Object] },
{ type: 'text', value: ' Baz' } ] }
```
## API
### `Options`
Configuration.
###### Fields
* `file?` (`VFile | null | undefined`)
— corresponding virtual file representing the input document (optional)
* `passThrough?` (`Array<string> | null | undefined`)
List of custom hast node types to pass through (as in, keep) (optional).
If the passed through nodes have children, those children are expected to
be hast again and will be handled.
* `tagfilter?` (`boolean | null | undefined`)
Whether to disallow irregular tags in `raw` nodes according to GFM
tagfilter
(default: `false`).
This affects the following tags,
grouped by their kind:
* `RAWTEXT`: `iframe`, `noembed`, `noframes`, `style`, `xmp`
* `RCDATA`: `textarea`, `title`
* `SCRIPT_DATA`: `script`
* `PLAINTEXT`: `plaintext`
When you know that you do not want authors to write these tags,
you can enable this option to prevent their use from running amok.
See:
[*Disallowed Raw HTML* in
`cmark-gfm`](https://github.github.com/gfm/#disallowed-raw-html-extension-).
### `raw(tree, options)`
Pass a hast tree through an HTML parser, which will fix nesting, and turn
raw nodes into actual nodes.
###### Parameters
* `tree` (`Root | RootContent`)
— original hast tree to transform
* `options?` (`Options | null | undefined`)
— configuration (optional)
###### Returns
Parsed again tree (`Root | RootContent`).
## Types
This package is fully typed with [TypeScript][].
It exports the additional type [`Options`][api-options].
The `Raw` node type is registered by and exposed from
[`mdast-util-to-hast`][mdast-util-to-hast].
## Compatibility
Projects maintained by the unified collective are compatible with maintained
versions of Node.js.
When we cut a new major release, we drop support for unmaintained versions of
Node.
This means we try to keep the current release line, `hast-util-raw@^9`,
compatible with Node.js 16.
## Security
Use of `hast-util-raw` can open you up to a [cross-site scripting (XSS)][xss]
attack as `raw` nodes are unsafe.
The following example shows how a raw node is used to inject a script that runs
when loaded in a browser.
```js
raw(u('root', [u('raw', '<script>alert(1)</script>')]))
```
Yields:
```html
<script>alert(1)</script>
```
Either do not use this utility in combination with user input, or use
[`hast-util-sanitize`][hast-util-sanitize].
## Related
* [`mdast-util-to-hast`][mdast-util-to-hast]
— transform mdast to hast
* [`rehype-raw`](https://github.com/rehypejs/rehype-raw)
— rehype plugin
## Contribute
See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for
ways to get started.
See [`support.md`][support] for ways to get help.
This project has a [code of conduct][coc].
By interacting with this repository, organization, or community you agree to
abide by its terms.
## License
[MIT][license] © [Titus Wormer][author]
<!-- Definitions -->
[build-badge]: https://github.com/syntax-tree/hast-util-raw/workflows/main/badge.svg
[build]: https://github.com/syntax-tree/hast-util-raw/actions
[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/hast-util-raw.svg
[coverage]: https://codecov.io/github/syntax-tree/hast-util-raw
[downloads-badge]: https://img.shields.io/npm/dm/hast-util-raw.svg
[downloads]: https://www.npmjs.com/package/hast-util-raw
[size-badge]: https://img.shields.io/badge/dynamic/json?label=minzipped%20size&query=$.size.compressedSize&url=https://deno.bundlejs.com/?q=hast-util-raw
[size]: https://bundlejs.com/?q=hast-util-raw
[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg
[backers-badge]: https://opencollective.com/unified/backers/badge.svg
[collective]: https://opencollective.com/unified
[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg
[chat]: https://github.com/syntax-tree/unist/discussions
[npm]: https://docs.npmjs.com/cli/install
[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c
[esmsh]: https://esm.sh
[typescript]: https://www.typescriptlang.org
[license]: license
[author]: https://wooorm.com
[health]: https://github.com/syntax-tree/.github
[contributing]: https://github.com/syntax-tree/.github/blob/main/contributing.md
[support]: https://github.com/syntax-tree/.github/blob/main/support.md
[coc]: https://github.com/syntax-tree/.github/blob/main/code-of-conduct.md
[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting
[hast]: https://github.com/syntax-tree/hast
[mdast-util-to-hast]: https://github.com/syntax-tree/mdast-util-to-hast
[hast-util-sanitize]: https://github.com/syntax-tree/hast-util-sanitize
[rehype-raw]: https://github.com/rehypejs/rehype-raw
[parse5]: https://github.com/inikulin/parse5
[api-options]: #options