full site update

2025-07-24 18:46:24 +02:00
parent bfe2b90d8d
commit 37a6e0ab31
6912 changed files with 540482 additions and 361712 deletions

8
node_modules/unicode-trie/LICENSE generated vendored Normal file

@@ -0,0 +1,8 @@
Copyright 2018
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

77
node_modules/unicode-trie/README.md generated vendored Normal file

@@ -0,0 +1,77 @@
# unicode-trie
A data structure for fast Unicode character metadata lookup, ported from ICU
## Background
When implementing many Unicode algorithms such as text segmentation,
normalization, bidi processing, etc., fast access to character metadata
is crucial to good performance. There are over a million code points in the
Unicode standard, many of which produce the same result when looked up,
so an array or hash table is not appropriate - those data structures are
fast but would require a lot of memory. The data is generally
grouped in ranges, so you could do a binary search, but that is not
fast enough for some applications.
The [International Components for Unicode](http://site.icu-project.org) (ICU) project
came up with a data structure based on a [Trie](http://en.wikipedia.org/wiki/Trie) that provides fast access
to Unicode metadata. The range data is precompiled to a serialized
and flattened trie, which is then used at runtime to look up the necessary
data. According to my own tests, this is generally at least 50% faster
than binary search, with not too much additional memory required.
## Installation

```
npm install unicode-trie
```
## Building a Trie
Unicode Tries are generally precompiled from data in the Unicode database
for faster runtime performance. To build a Unicode Trie, use the
`UnicodeTrieBuilder` class.
```js
const UnicodeTrieBuilder = require('unicode-trie/builder');
const fs = require('fs');
// create a trie
let t = new UnicodeTrieBuilder();
// optional parameters for the default value and the error value
// if not provided, both are set to 0
t = new UnicodeTrieBuilder(10, 999);
// set individual values and ranges
t.set(0x4567, 99);
t.setRange(0x40, 0xe7, 0x1234);
// you can look up a value if you like
t.get(0x4567); // => 99
// get a compiled trie (returns a UnicodeTrie object)
const trie = t.freeze();
// write compressed trie to a binary file
fs.writeFileSync('data.trie', t.toBuffer());
```
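For reference, `toBuffer()` writes a 12-byte little-endian header
(`highStart`, `errorValue`, and the uncompressed data length), followed by
the trie data deflated twice (see the format notes in `builder.js`). A
minimal sketch that reads the header back from the file written above:

```js
const fs = require('fs');

// decode the 12-byte header of the serialized trie
const buf = fs.readFileSync('data.trie');
console.log(buf.readUInt32LE(0)); // highStart
console.log(buf.readUInt32LE(4)); // errorValue
console.log(buf.readUInt32LE(8)); // uncompressed length of the trie data
```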
## Using a precompiled Trie
Once you've built a precompiled trie, you can load it into the
`UnicodeTrie` class, which is a read-only representation of the
trie. From there, you can look up values.
```js
const UnicodeTrie = require('unicode-trie');
const fs = require('fs');
// load serialized trie from binary file
const data = fs.readFileSync('data.trie');
const trie = new UnicodeTrie(data);
// look up a value
trie.get(0x4567); // => 99
```
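The `UnicodeTrie` constructor also accepts a plain `Uint8Array` (the header
is then read with a `DataView`), so a precompiled trie can be loaded outside
Node as well. A sketch, assuming the file built above is served from a
hypothetical `/data.trie` URL and that a bundler resolves the `require`:

```js
const UnicodeTrie = require('unicode-trie');

// fetch the serialized trie and parse it from a Uint8Array
fetch('/data.trie')
  .then(res => res.arrayBuffer())
  .then(ab => {
    const trie = new UnicodeTrie(new Uint8Array(ab));
    console.log(trie.get(0x4567)); // => 99
  });
```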
## License
MIT

966
node_modules/unicode-trie/builder.js generated vendored Normal file

@@ -0,0 +1,966 @@
const UnicodeTrie = require('./');
const pako = require('pako');
const { swap32LE } = require('./swap');
// Shift size for getting the index-1 table offset.
const SHIFT_1 = 6 + 5;
// Shift size for getting the index-2 table offset.
const SHIFT_2 = 5;
// Difference between the two shift sizes,
// for getting an index-1 offset from an index-2 offset. 6=11-5
const SHIFT_1_2 = SHIFT_1 - SHIFT_2;
// Number of index-1 entries for the BMP. 32=0x20
// This part of the index-1 table is omitted from the serialized form.
const OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> SHIFT_1;
// Number of code points per index-1 table entry. 2048=0x800
const CP_PER_INDEX_1_ENTRY = 1 << SHIFT_1;
// Number of entries in an index-2 block. 64=0x40
const INDEX_2_BLOCK_LENGTH = 1 << SHIFT_1_2;
// Mask for getting the lower bits for the in-index-2-block offset.
const INDEX_2_MASK = INDEX_2_BLOCK_LENGTH - 1;
// Number of entries in a data block. 32=0x20
const DATA_BLOCK_LENGTH = 1 << SHIFT_2;
// Mask for getting the lower bits for the in-data-block offset.
const DATA_MASK = DATA_BLOCK_LENGTH - 1;
// Shift size for shifting left the index array values.
// Increases possible data size with 16-bit index values at the cost
// of compactability.
// This requires data blocks to be aligned by DATA_GRANULARITY.
const INDEX_SHIFT = 2;
// The alignment size of a data block. Also the granularity for compaction.
const DATA_GRANULARITY = 1 << INDEX_SHIFT;
// The BMP part of the index-2 table is fixed and linear and starts at offset 0.
// Length=2048=0x800=0x10000>>SHIFT_2.
const INDEX_2_OFFSET = 0;
// The part of the index-2 table for U+D800..U+DBFF stores values for
// lead surrogate code _units_ not code _points_.
// Values for lead surrogate code _points_ are indexed with this portion of the table.
// Length=32=0x20=0x400>>SHIFT_2. (There are 1024=0x400 lead surrogates.)
const LSCP_INDEX_2_OFFSET = 0x10000 >> SHIFT_2;
const LSCP_INDEX_2_LENGTH = 0x400 >> SHIFT_2;
// Count the lengths of both BMP pieces. 2080=0x820
const INDEX_2_BMP_LENGTH = LSCP_INDEX_2_OFFSET + LSCP_INDEX_2_LENGTH;
// The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
// Length 32=0x20 for lead bytes C0..DF, regardless of SHIFT_2.
const UTF8_2B_INDEX_2_OFFSET = INDEX_2_BMP_LENGTH;
const UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6; // U+0800 is the first code point after 2-byte UTF-8
// The index-1 table, only used for supplementary code points, at offset 2112=0x840.
// Variable length, for code points up to highStart, where the last single-value range starts.
// Maximum length 512=0x200=0x100000>>SHIFT_1.
// (For 0x100000 supplementary code points U+10000..U+10ffff.)
//
// The part of the index-2 table for supplementary code points starts
// after this index-1 table.
//
// Both the index-1 table and the following part of the index-2 table
// are omitted completely if there is only BMP data.
const INDEX_1_OFFSET = UTF8_2B_INDEX_2_OFFSET + UTF8_2B_INDEX_2_LENGTH;
const MAX_INDEX_1_LENGTH = 0x100000 >> SHIFT_1;
// The illegal-UTF-8 data block follows the ASCII block, at offset 128=0x80.
// Used with linear access for single bytes 0..0xbf for simple error handling.
// Length 64=0x40, not DATA_BLOCK_LENGTH.
const BAD_UTF8_DATA_OFFSET = 0x80;
// The start of non-linear-ASCII data blocks, at offset 192=0xc0.
const DATA_START_OFFSET = 0xc0;
// The null data block.
// Length 64=0x40 even if DATA_BLOCK_LENGTH is smaller,
// to work with 6-bit trail bytes from 2-byte UTF-8.
const DATA_NULL_OFFSET = DATA_START_OFFSET;
// The start of allocated data blocks.
const NEW_DATA_START_OFFSET = DATA_NULL_OFFSET + 0x40;
// The start of data blocks for U+0800 and above.
// Below, compaction uses a block length of 64 for 2-byte UTF-8.
// From here on, compaction uses DATA_BLOCK_LENGTH.
// Data values for 0x780 code points beyond ASCII.
const DATA_0800_OFFSET = NEW_DATA_START_OFFSET + 0x780;
// Start with allocation of 16k data entries.
const INITIAL_DATA_LENGTH = 1 << 14;
// Grow about 8x each time.
const MEDIUM_DATA_LENGTH = 1 << 17;
// Maximum length of the runtime data array.
// Limited by 16-bit index values that are left-shifted by INDEX_SHIFT,
// and by uint16_t UTrie2Header.shiftedDataLength.
const MAX_DATA_LENGTH_RUNTIME = 0xffff << INDEX_SHIFT;
const INDEX_1_LENGTH = 0x110000 >> SHIFT_1;
// Maximum length of the build-time data array.
// One entry per 0x110000 code points, plus the illegal-UTF-8 block and the null block,
// plus values for the 0x400 surrogate code units.
const MAX_DATA_LENGTH_BUILDTIME = 0x110000 + 0x40 + 0x40 + 0x400;
// At build time, leave a gap in the index-2 table,
// at least as long as the maximum lengths of the 2-byte UTF-8 index-2 table
// and the supplementary index-1 table.
// Round up to INDEX_2_BLOCK_LENGTH for proper compacting.
const INDEX_GAP_OFFSET = INDEX_2_BMP_LENGTH;
const INDEX_GAP_LENGTH = ((UTF8_2B_INDEX_2_LENGTH + MAX_INDEX_1_LENGTH) + INDEX_2_MASK) & ~INDEX_2_MASK;
// Maximum length of the build-time index-2 array.
// Maximum number of Unicode code points (0x110000) shifted right by SHIFT_2,
// plus the part of the index-2 table for lead surrogate code points,
// plus the build-time index gap,
// plus the null index-2 block.
const MAX_INDEX_2_LENGTH = (0x110000 >> SHIFT_2) + LSCP_INDEX_2_LENGTH + INDEX_GAP_LENGTH + INDEX_2_BLOCK_LENGTH;
// The null index-2 block, following the gap in the index-2 table.
const INDEX_2_NULL_OFFSET = INDEX_GAP_OFFSET + INDEX_GAP_LENGTH;
// The start of allocated index-2 blocks.
const INDEX_2_START_OFFSET = INDEX_2_NULL_OFFSET + INDEX_2_BLOCK_LENGTH;
// Maximum length of the runtime index array.
// Limited by its own 16-bit index values, and by uint16_t UTrie2Header.indexLength.
// (The actual maximum length is lower,
// (0x110000>>SHIFT_2)+UTF8_2B_INDEX_2_LENGTH+MAX_INDEX_1_LENGTH.)
const MAX_INDEX_LENGTH = 0xffff;
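// For orientation, the two-level lookup these constants drive (a sketch;
// the runtime versions live in get() below and in index.js):
//   i1 = c >> SHIFT_1                                   // index-1 entry
//   i2 = index1[i1] + ((c >> SHIFT_2) & INDEX_2_MASK)   // index-2 entry
//   value = data[index2[i2] + (c & DATA_MASK)]          // entry in data block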
const equal_int = (a, s, t, length) => {
for (let i = 0; i < length; i++) {
if (a[s + i] !== a[t + i]) {
return false;
}
}
return true;
};
class UnicodeTrieBuilder {
constructor(initialValue, errorValue) {
let i, j;
if (initialValue == null) {
initialValue = 0;
}
this.initialValue = initialValue;
if (errorValue == null) {
errorValue = 0;
}
this.errorValue = errorValue;
this.index1 = new Int32Array(INDEX_1_LENGTH);
this.index2 = new Int32Array(MAX_INDEX_2_LENGTH);
this.highStart = 0x110000;
this.data = new Uint32Array(INITIAL_DATA_LENGTH);
this.dataCapacity = INITIAL_DATA_LENGTH;
this.firstFreeBlock = 0;
this.isCompacted = false;
// Multi-purpose per-data-block table.
//
// Before compacting:
//
// Per-data-block reference counters/free-block list.
// 0: unused
// >0: reference counter (number of index-2 entries pointing here)
// <0: next free data block in free-block list
//
// While compacting:
//
// Map of adjusted indexes, used in compactData() and compactIndex2().
// Maps from original indexes to new ones.
this.map = new Int32Array(MAX_DATA_LENGTH_BUILDTIME >> SHIFT_2);
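// For example, map[block >> SHIFT_2] === 2 means two index-2 entries point
// at the data block starting at `block`; a negative entry stores the negated
// offset of the next free block (see _allocDataBlock/_releaseDataBlock).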
for (i = 0; i < 0x80; i++) {
this.data[i] = this.initialValue;
}
for (; i < 0xc0; i++) {
this.data[i] = this.errorValue;
}
for (i = DATA_NULL_OFFSET; i < NEW_DATA_START_OFFSET; i++) {
this.data[i] = this.initialValue;
}
this.dataNullOffset = DATA_NULL_OFFSET;
this.dataLength = NEW_DATA_START_OFFSET;
// set the index-2 indexes for the 2=0x80>>SHIFT_2 ASCII data blocks
i = 0;
for (j = 0; j < 0x80; j += DATA_BLOCK_LENGTH) {
this.index2[i] = j;
this.map[i++] = 1;
}
// reference counts for the bad-UTF-8-data block
for (; j < 0xc0; j += DATA_BLOCK_LENGTH) {
this.map[i++] = 0;
}
// Reference counts for the null data block: all blocks except for the ASCII blocks.
// Plus 1 so that we don't drop this block during compaction.
// Plus as many as needed for lead surrogate code points.
// i==newTrie->dataNullOffset
this.map[i++] = ((0x110000 >> SHIFT_2) - (0x80 >> SHIFT_2)) + 1 + LSCP_INDEX_2_LENGTH;
j += DATA_BLOCK_LENGTH;
for (; j < NEW_DATA_START_OFFSET; j += DATA_BLOCK_LENGTH) {
this.map[i++] = 0;
}
// set the remaining indexes in the BMP index-2 block
// to the null data block
for (i = 0x80 >> SHIFT_2; i < INDEX_2_BMP_LENGTH; i++) {
this.index2[i] = DATA_NULL_OFFSET;
}
// Fill the index gap with impossible values so that compaction
// does not overlap other index-2 blocks with the gap.
for (i = 0; i < INDEX_GAP_LENGTH; i++) {
this.index2[INDEX_GAP_OFFSET + i] = -1;
}
// set the indexes in the null index-2 block
for (i = 0; i < INDEX_2_BLOCK_LENGTH; i++) {
this.index2[INDEX_2_NULL_OFFSET + i] = DATA_NULL_OFFSET;
}
this.index2NullOffset = INDEX_2_NULL_OFFSET;
this.index2Length = INDEX_2_START_OFFSET;
// set the index-1 indexes for the linear index-2 block
j = 0;
for (i = 0; i < OMITTED_BMP_INDEX_1_LENGTH; i++) {
this.index1[i] = j;
j += INDEX_2_BLOCK_LENGTH;
}
// set the remaining index-1 indexes to the null index-2 block
for (; i < INDEX_1_LENGTH; i++) {
this.index1[i] = INDEX_2_NULL_OFFSET;
}
// Preallocate and reset data for U+0080..U+07ff,
// for 2-byte UTF-8 which will be compacted in 64-blocks
// even if DATA_BLOCK_LENGTH is smaller.
for (i = 0x80; i < 0x800; i += DATA_BLOCK_LENGTH) {
this.set(i, this.initialValue);
}
}
set(codePoint, value) {
if ((codePoint < 0) || (codePoint > 0x10ffff)) {
throw new Error('Invalid code point');
}
if (this.isCompacted) {
throw new Error('Already compacted');
}
const block = this._getDataBlock(codePoint, true);
this.data[block + (codePoint & DATA_MASK)] = value;
return this;
}
setRange(start, end, value, overwrite) {
let block, repeatBlock;
if (overwrite == null) {
overwrite = true;
}
if ((start > 0x10ffff) || (end > 0x10ffff) || (start > end)) {
throw new Error('Invalid code point');
}
if (this.isCompacted) {
throw new Error('Already compacted');
}
if (!overwrite && (value === this.initialValue)) {
return this; // nothing to do
}
let limit = end + 1;
if ((start & DATA_MASK) !== 0) {
// set partial block at [start..following block boundary)
block = this._getDataBlock(start, true);
const nextStart = (start + DATA_BLOCK_LENGTH) & ~DATA_MASK;
if (nextStart <= limit) {
this._fillBlock(block, start & DATA_MASK, DATA_BLOCK_LENGTH, value, this.initialValue, overwrite);
start = nextStart;
} else {
this._fillBlock(block, start & DATA_MASK, limit & DATA_MASK, value, this.initialValue, overwrite);
return this;
}
}
// number of positions in the last, partial block
const rest = limit & DATA_MASK;
// round down limit to a block boundary
limit &= ~DATA_MASK;
// iterate over all-value blocks
if (value === this.initialValue) {
repeatBlock = this.dataNullOffset;
} else {
repeatBlock = -1;
}
while (start < limit) {
let setRepeatBlock = false;
if ((value === this.initialValue) && this._isInNullBlock(start, true)) {
start += DATA_BLOCK_LENGTH; // nothing to do
continue;
}
// get index value
let i2 = this._getIndex2Block(start, true);
i2 += (start >> SHIFT_2) & INDEX_2_MASK;
block = this.index2[i2];
if (this._isWritableBlock(block)) {
// already allocated
if (overwrite && (block >= DATA_0800_OFFSET)) {
// We overwrite all values, and it's not a
// protected (ASCII-linear or 2-byte UTF-8) block:
// replace with the repeatBlock.
setRepeatBlock = true;
} else {
// protected block: just write the values into this block
this._fillBlock(block, 0, DATA_BLOCK_LENGTH, value, this.initialValue, overwrite);
}
} else if ((this.data[block] !== value) && (overwrite || (block === this.dataNullOffset))) {
// Set the repeatBlock instead of the null block or previous repeat block:
//
// If !isWritableBlock() then all entries in the block have the same value
// because it's the null block or a range block (the repeatBlock from a previous
// call to utrie2_setRange32()).
// No other blocks are used multiple times before compacting.
//
// The null block is the only non-writable block with the initialValue because
// of the repeatBlock initialization above. (If value==initialValue, then
// the repeatBlock will be the null data block.)
//
// We set our repeatBlock if the desired value differs from the block's value,
// and if we overwrite any data or if the data is all initial values
// (which is the same as the block being the null block, see above).
setRepeatBlock = true;
}
if (setRepeatBlock) {
if (repeatBlock >= 0) {
this._setIndex2Entry(i2, repeatBlock);
} else {
// create and set and fill the repeatBlock
repeatBlock = this._getDataBlock(start, true);
this._writeBlock(repeatBlock, value);
}
}
start += DATA_BLOCK_LENGTH;
}
if (rest > 0) {
// set partial block at [last block boundary..limit)
block = this._getDataBlock(start, true);
this._fillBlock(block, 0, rest, value, this.initialValue, overwrite);
}
return this;
}
get(c, fromLSCP) {
let i2;
if (fromLSCP == null) {
fromLSCP = true;
}
if ((c < 0) || (c > 0x10ffff)) {
return this.errorValue;
}
if ((c >= this.highStart) && (!((c >= 0xd800) && (c < 0xdc00)) || fromLSCP)) {
return this.data[this.dataLength - DATA_GRANULARITY];
}
if (((c >= 0xd800) && (c < 0xdc00)) && fromLSCP) {
i2 = (LSCP_INDEX_2_OFFSET - (0xd800 >> SHIFT_2)) + (c >> SHIFT_2);
} else {
i2 = this.index1[c >> SHIFT_1] + ((c >> SHIFT_2) & INDEX_2_MASK);
}
const block = this.index2[i2];
return this.data[block + (c & DATA_MASK)];
}
_isInNullBlock(c, forLSCP) {
let i2;
if (((c & 0xfffffc00) === 0xd800) && forLSCP) {
i2 = (LSCP_INDEX_2_OFFSET - (0xd800 >> SHIFT_2)) + (c >> SHIFT_2);
} else {
i2 = this.index1[c >> SHIFT_1] + ((c >> SHIFT_2) & INDEX_2_MASK);
}
const block = this.index2[i2];
return block === this.dataNullOffset;
}
_allocIndex2Block() {
const newBlock = this.index2Length;
const newTop = newBlock + INDEX_2_BLOCK_LENGTH;
if (newTop > this.index2.length) {
// Should never occur.
// Either MAX_INDEX_2_LENGTH is incorrect,
// or the code writes more values than should be possible.
throw new Error("Internal error in Trie2 creation.");
}
this.index2Length = newTop;
this.index2.set(this.index2.subarray(this.index2NullOffset, this.index2NullOffset + INDEX_2_BLOCK_LENGTH), newBlock);
return newBlock;
}
_getIndex2Block(c, forLSCP) {
if ((c >= 0xd800) && (c < 0xdc00) && forLSCP) {
return LSCP_INDEX_2_OFFSET;
}
const i1 = c >> SHIFT_1;
let i2 = this.index1[i1];
if (i2 === this.index2NullOffset) {
i2 = this._allocIndex2Block();
this.index1[i1] = i2;
}
return i2;
}
_isWritableBlock(block) {
return (block !== this.dataNullOffset) && (this.map[block >> SHIFT_2] === 1);
}
_allocDataBlock(copyBlock) {
let newBlock;
if (this.firstFreeBlock !== 0) {
// get the first free block
newBlock = this.firstFreeBlock;
this.firstFreeBlock = -this.map[newBlock >> SHIFT_2];
} else {
// get a new block from the high end
newBlock = this.dataLength;
const newTop = newBlock + DATA_BLOCK_LENGTH;
if (newTop > this.dataCapacity) {
// out of memory in the data array
let capacity;
if (this.dataCapacity < MEDIUM_DATA_LENGTH) {
capacity = MEDIUM_DATA_LENGTH;
} else if (this.dataCapacity < MAX_DATA_LENGTH_BUILDTIME) {
capacity = MAX_DATA_LENGTH_BUILDTIME;
} else {
// Should never occur.
// Either MAX_DATA_LENGTH_BUILDTIME is incorrect,
// or the code writes more values than should be possible.
throw new Error("Internal error in Trie2 creation.");
}
const newData = new Uint32Array(capacity);
newData.set(this.data.subarray(0, this.dataLength));
this.data = newData;
this.dataCapacity = capacity;
}
this.dataLength = newTop;
}
this.data.set(this.data.subarray(copyBlock, copyBlock + DATA_BLOCK_LENGTH), newBlock);
this.map[newBlock >> SHIFT_2] = 0;
return newBlock;
}
_releaseDataBlock(block) {
// put this block at the front of the free-block chain
this.map[block >> SHIFT_2] = -this.firstFreeBlock;
this.firstFreeBlock = block;
}
_setIndex2Entry(i2, block) {
++this.map[block >> SHIFT_2]; // increment first, in case block == oldBlock!
const oldBlock = this.index2[i2];
if (--this.map[oldBlock >> SHIFT_2] === 0) {
this._releaseDataBlock(oldBlock);
}
this.index2[i2] = block;
}
_getDataBlock(c, forLSCP) {
let i2 = this._getIndex2Block(c, forLSCP);
i2 += (c >> SHIFT_2) & INDEX_2_MASK;
const oldBlock = this.index2[i2];
if (this._isWritableBlock(oldBlock)) {
return oldBlock;
}
// allocate a new data block
const newBlock = this._allocDataBlock(oldBlock);
this._setIndex2Entry(i2, newBlock);
return newBlock;
}
_fillBlock(block, start, limit, value, initialValue, overwrite) {
let i;
if (overwrite) {
for (i = block + start; i < block + limit; i++) {
this.data[i] = value;
}
} else {
for (i = block + start; i < block + limit; i++) {
if (this.data[i] === initialValue) {
this.data[i] = value;
}
}
}
}
_writeBlock(block, value) {
const limit = block + DATA_BLOCK_LENGTH;
while (block < limit) {
this.data[block++] = value;
}
}
_findHighStart(highValue) {
let prevBlock, prevI2Block;
const data32 = this.data;
const { initialValue } = this;
const { index2NullOffset } = this;
const nullBlock = this.dataNullOffset;
// set variables for previous range
if (highValue === initialValue) {
prevI2Block = index2NullOffset;
prevBlock = nullBlock;
} else {
prevI2Block = -1;
prevBlock = -1;
}
const prev = 0x110000;
// enumerate index-2 blocks
let i1 = INDEX_1_LENGTH;
let c = prev;
while (c > 0) {
const i2Block = this.index1[--i1];
if (i2Block === prevI2Block) {
// the index-2 block is the same as the previous one, and filled with highValue
c -= CP_PER_INDEX_1_ENTRY;
continue;
}
prevI2Block = i2Block;
if (i2Block === index2NullOffset) {
// this is the null index-2 block
if (highValue !== initialValue) {
return c;
}
c -= CP_PER_INDEX_1_ENTRY;
} else {
// enumerate data blocks for one index-2 block
let i2 = INDEX_2_BLOCK_LENGTH;
while (i2 > 0) {
const block = this.index2[i2Block + --i2];
if (block === prevBlock) {
// the block is the same as the previous one, and filled with highValue
c -= DATA_BLOCK_LENGTH;
continue;
}
prevBlock = block;
if (block === nullBlock) {
// this is the null data block
if (highValue !== initialValue) {
return c;
}
c -= DATA_BLOCK_LENGTH;
} else {
let j = DATA_BLOCK_LENGTH;
while (j > 0) {
const value = data32[block + --j];
if (value !== highValue) {
return c;
}
--c;
}
}
}
}
}
// deliver last range
return 0;
}
_findSameDataBlock(dataLength, otherBlock, blockLength) {
// ensure that we do not even partially get past dataLength
dataLength -= blockLength;
let block = 0;
while (block <= dataLength) {
if (equal_int(this.data, block, otherBlock, blockLength)) {
return block;
}
block += DATA_GRANULARITY;
}
return -1;
}
_findSameIndex2Block(index2Length, otherBlock) {
// ensure that we do not even partially get past index2Length
index2Length -= INDEX_2_BLOCK_LENGTH;
for (let block = 0; block <= index2Length; block++) {
if (equal_int(this.index2, block, otherBlock, INDEX_2_BLOCK_LENGTH)) {
return block;
}
}
return -1;
}
_compactData() {
// do not compact linear-ASCII data
let newStart = DATA_START_OFFSET;
let start = 0;
let i = 0;
while (start < newStart) {
this.map[i++] = start;
start += DATA_BLOCK_LENGTH;
}
// Start with a block length of 64 for 2-byte UTF-8,
// then switch to DATA_BLOCK_LENGTH.
let blockLength = 64;
let blockCount = blockLength >> SHIFT_2;
start = newStart;
while (start < this.dataLength) {
// start: index of first entry of current block
// newStart: index where the current block is to be moved
// (right after current end of already-compacted data)
var mapIndex, movedStart;
if (start === DATA_0800_OFFSET) {
blockLength = DATA_BLOCK_LENGTH;
blockCount = 1;
}
// skip blocks that are not used
if (this.map[start >> SHIFT_2] <= 0) {
// advance start to the next block
start += blockLength;
// leave newStart with the previous block!
continue;
}
// search for an identical block
if ((movedStart = this._findSameDataBlock(newStart, start, blockLength)) >= 0) {
// found an identical block, set the other block's index value for the current block
mapIndex = start >> SHIFT_2;
for (i = blockCount; i > 0; i--) {
this.map[mapIndex++] = movedStart;
movedStart += DATA_BLOCK_LENGTH;
}
// advance start to the next block
start += blockLength;
// leave newStart with the previous block!
continue;
}
// see if the beginning of this block can be overlapped with the end of the previous block
// look for maximum overlap (modulo granularity) with the previous, adjacent block
let overlap = blockLength - DATA_GRANULARITY;
while ((overlap > 0) && !equal_int(this.data, (newStart - overlap), start, overlap)) {
overlap -= DATA_GRANULARITY;
}
if ((overlap > 0) || (newStart < start)) {
// some overlap, or just move the whole block
movedStart = newStart - overlap;
mapIndex = start >> SHIFT_2;
for (i = blockCount; i > 0; i--) {
this.map[mapIndex++] = movedStart;
movedStart += DATA_BLOCK_LENGTH;
}
// move the non-overlapping indexes to their new positions
start += overlap;
for (i = blockLength - overlap; i > 0; i--) {
this.data[newStart++] = this.data[start++];
}
} else { // no overlap && newStart==start
mapIndex = start >> SHIFT_2;
for (i = blockCount; i > 0; i--) {
this.map[mapIndex++] = start;
start += DATA_BLOCK_LENGTH;
}
newStart = start;
}
}
// now adjust the index-2 table
i = 0;
while (i < this.index2Length) {
// Gap indexes are invalid (-1). Skip over the gap.
if (i === INDEX_GAP_OFFSET) {
i += INDEX_GAP_LENGTH;
}
this.index2[i] = this.map[this.index2[i] >> SHIFT_2];
++i;
}
this.dataNullOffset = this.map[this.dataNullOffset >> SHIFT_2];
// ensure dataLength alignment
while ((newStart & (DATA_GRANULARITY - 1)) !== 0) {
this.data[newStart++] = this.initialValue;
}
this.dataLength = newStart;
}
_compactIndex2() {
// do not compact linear-BMP index-2 blocks
let newStart = INDEX_2_BMP_LENGTH;
let start = 0;
let i = 0;
while (start < newStart) {
this.map[i++] = start;
start += INDEX_2_BLOCK_LENGTH;
}
// Reduce the index table gap to what will be needed at runtime.
newStart += UTF8_2B_INDEX_2_LENGTH + ((this.highStart - 0x10000) >> SHIFT_1);
start = INDEX_2_NULL_OFFSET;
while (start < this.index2Length) {
// start: index of first entry of current block
// newStart: index where the current block is to be moved
// (right after current end of already-compacted data)
// search for an identical block
var movedStart;
if ((movedStart = this._findSameIndex2Block(newStart, start)) >= 0) {
// found an identical block, set the other block's index value for the current block
this.map[start >> SHIFT_1_2] = movedStart;
// advance start to the next block
start += INDEX_2_BLOCK_LENGTH;
// leave newStart with the previous block!
continue;
}
// see if the beginning of this block can be overlapped with the end of the previous block
// look for maximum overlap with the previous, adjacent block
let overlap = INDEX_2_BLOCK_LENGTH - 1;
while ((overlap > 0) && !equal_int(this.index2, (newStart - overlap), start, overlap)) {
--overlap;
}
if ((overlap > 0) || (newStart < start)) {
// some overlap, or just move the whole block
this.map[start >> SHIFT_1_2] = newStart - overlap;
// move the non-overlapping indexes to their new positions
start += overlap;
for (i = INDEX_2_BLOCK_LENGTH - overlap; i > 0; i--) {
this.index2[newStart++] = this.index2[start++];
}
} else { // no overlap && newStart==start
this.map[start >> SHIFT_1_2] = start;
start += INDEX_2_BLOCK_LENGTH;
newStart = start;
}
}
// now adjust the index-1 table
for (i = 0; i < INDEX_1_LENGTH; i++) {
this.index1[i] = this.map[this.index1[i] >> SHIFT_1_2];
}
this.index2NullOffset = this.map[this.index2NullOffset >> SHIFT_1_2];
// Ensure data table alignment:
// Needs to be granularity-aligned for 16-bit trie
// (so that dataMove will be down-shiftable),
// and 2-aligned for uint32_t data.
// Arbitrary value: 0x3fffc not possible for real data.
while ((newStart & ((DATA_GRANULARITY - 1) | 1)) !== 0) {
this.index2[newStart++] = 0x0000ffff << INDEX_SHIFT;
}
this.index2Length = newStart;
}
_compact() {
// find highStart and round it up
let highValue = this.get(0x10ffff);
let highStart = this._findHighStart(highValue);
highStart = (highStart + (CP_PER_INDEX_1_ENTRY - 1)) & ~(CP_PER_INDEX_1_ENTRY - 1);
if (highStart === 0x110000) {
highValue = this.errorValue;
}
// Set trie->highStart only after utrie2_get32(trie, highStart).
// Otherwise utrie2_get32(trie, highStart) would try to read the highValue.
this.highStart = highStart;
if (this.highStart < 0x110000) {
// Blank out [highStart..10ffff] to release associated data blocks.
const suppHighStart = this.highStart <= 0x10000 ? 0x10000 : this.highStart;
this.setRange(suppHighStart, 0x10ffff, this.initialValue, true);
}
this._compactData();
if (this.highStart > 0x10000) {
this._compactIndex2();
}
// Store the highValue in the data array and round up the dataLength.
// Must be done after compactData() because that assumes that dataLength
// is a multiple of DATA_BLOCK_LENGTH.
this.data[this.dataLength++] = highValue;
while ((this.dataLength & (DATA_GRANULARITY - 1)) !== 0) {
this.data[this.dataLength++] = this.initialValue;
}
this.isCompacted = true;
}
freeze() {
let allIndexesLength, i;
if (!this.isCompacted) {
this._compact();
}
if (this.highStart <= 0x10000) {
allIndexesLength = INDEX_1_OFFSET;
} else {
allIndexesLength = this.index2Length;
}
const dataMove = allIndexesLength;
// are indexLength and dataLength within limits?
if ((allIndexesLength > MAX_INDEX_LENGTH) || // for unshifted indexLength
((dataMove + this.dataNullOffset) > 0xffff) || // for unshifted dataNullOffset
((dataMove + DATA_0800_OFFSET) > 0xffff) || // for unshifted 2-byte UTF-8 index-2 values
((dataMove + this.dataLength) > MAX_DATA_LENGTH_RUNTIME)) { // for shiftedDataLength
throw new Error("Trie data is too large.");
}
// calculate the sizes of, and allocate, the index and data arrays
const indexLength = allIndexesLength + this.dataLength;
const data = new Int32Array(indexLength);
// write the index-2 array values shifted right by INDEX_SHIFT, after adding dataMove
let destIdx = 0;
for (i = 0; i < INDEX_2_BMP_LENGTH; i++) {
data[destIdx++] = ((this.index2[i] + dataMove) >> INDEX_SHIFT);
}
// write UTF-8 2-byte index-2 values, not right-shifted
for (i = 0; i < 0xc2 - 0xc0; i++) { // C0..C1
data[destIdx++] = (dataMove + BAD_UTF8_DATA_OFFSET);
}
for (; i < 0xe0 - 0xc0; i++) { // C2..DF
data[destIdx++] = (dataMove + this.index2[i << (6 - SHIFT_2)]);
}
if (this.highStart > 0x10000) {
const index1Length = (this.highStart - 0x10000) >> SHIFT_1;
const index2Offset = INDEX_2_BMP_LENGTH + UTF8_2B_INDEX_2_LENGTH + index1Length;
// write 16-bit index-1 values for supplementary code points
for (i = 0; i < index1Length; i++) {
data[destIdx++] = (INDEX_2_OFFSET + this.index1[i + OMITTED_BMP_INDEX_1_LENGTH]);
}
// write the index-2 array values for supplementary code points,
// shifted right by INDEX_SHIFT, after adding dataMove
for (i = 0; i < this.index2Length - index2Offset; i++) {
data[destIdx++] = ((dataMove + this.index2[index2Offset + i]) >> INDEX_SHIFT);
}
}
// write 16-bit data values
for (i = 0; i < this.dataLength; i++) {
data[destIdx++] = this.data[i];
}
const dest = new UnicodeTrie({
data,
highStart: this.highStart,
errorValue: this.errorValue
});
return dest;
}
// Generates a Buffer containing the serialized and compressed trie.
// Trie data is compressed twice using the deflate algorithm to minimize file size.
// Format:
// uint32_t highStart;
// uint32_t errorValue;
// uint32_t uncompressedDataLength;
// uint8_t trieData[dataLength];
toBuffer() {
const trie = this.freeze();
const data = new Uint8Array(trie.data.buffer);
// swap bytes to little-endian
swap32LE(data);
let compressed = pako.deflateRaw(data);
compressed = pako.deflateRaw(compressed);
const buf = Buffer.alloc(compressed.length + 12);
buf.writeUInt32LE(trie.highStart, 0);
buf.writeUInt32LE(trie.errorValue, 4);
buf.writeUInt32LE(data.length, 8);
for (let i = 0; i < compressed.length; i++) {
const b = compressed[i];
buf[i + 12] = b;
}
return buf;
}
}
module.exports = UnicodeTrieBuilder;

136
node_modules/unicode-trie/index.js generated vendored Normal file

@@ -0,0 +1,136 @@
const inflate = require('tiny-inflate');
const { swap32LE } = require('./swap');
// Shift size for getting the index-1 table offset.
const SHIFT_1 = 6 + 5;
// Shift size for getting the index-2 table offset.
const SHIFT_2 = 5;
// Difference between the two shift sizes,
// for getting an index-1 offset from an index-2 offset. 6=11-5
const SHIFT_1_2 = SHIFT_1 - SHIFT_2;
// Number of index-1 entries for the BMP. 32=0x20
// This part of the index-1 table is omitted from the serialized form.
const OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> SHIFT_1;
// Number of entries in an index-2 block. 64=0x40
const INDEX_2_BLOCK_LENGTH = 1 << SHIFT_1_2;
// Mask for getting the lower bits for the in-index-2-block offset.
const INDEX_2_MASK = INDEX_2_BLOCK_LENGTH - 1;
// Shift size for shifting left the index array values.
// Increases possible data size with 16-bit index values at the cost
// of compactability.
// This requires data blocks to be aligned by DATA_GRANULARITY.
const INDEX_SHIFT = 2;
// Number of entries in a data block. 32=0x20
const DATA_BLOCK_LENGTH = 1 << SHIFT_2;
// Mask for getting the lower bits for the in-data-block offset.
const DATA_MASK = DATA_BLOCK_LENGTH - 1;
// The part of the index-2 table for U+D800..U+DBFF stores values for
// lead surrogate code _units_ not code _points_.
// Values for lead surrogate code _points_ are indexed with this portion of the table.
// Length=32=0x20=0x400>>SHIFT_2. (There are 1024=0x400 lead surrogates.)
const LSCP_INDEX_2_OFFSET = 0x10000 >> SHIFT_2;
const LSCP_INDEX_2_LENGTH = 0x400 >> SHIFT_2;
// Count the lengths of both BMP pieces. 2080=0x820
const INDEX_2_BMP_LENGTH = LSCP_INDEX_2_OFFSET + LSCP_INDEX_2_LENGTH;
// The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
// Length 32=0x20 for lead bytes C0..DF, regardless of SHIFT_2.
const UTF8_2B_INDEX_2_OFFSET = INDEX_2_BMP_LENGTH;
const UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6; // U+0800 is the first code point after 2-byte UTF-8
// The index-1 table, only used for supplementary code points, at offset 2112=0x840.
// Variable length, for code points up to highStart, where the last single-value range starts.
// Maximum length 512=0x200=0x100000>>SHIFT_1.
// (For 0x100000 supplementary code points U+10000..U+10ffff.)
//
// The part of the index-2 table for supplementary code points starts
// after this index-1 table.
//
// Both the index-1 table and the following part of the index-2 table
// are omitted completely if there is only BMP data.
const INDEX_1_OFFSET = UTF8_2B_INDEX_2_OFFSET + UTF8_2B_INDEX_2_LENGTH;
// The alignment size of a data block. Also the granularity for compaction.
const DATA_GRANULARITY = 1 << INDEX_SHIFT;
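// Rough layout of the flattened array that get() walks below, derived from
// the offsets above:
//   [0, 0x820)      BMP index-2 table (the lead-surrogate section at 0x800)
//   [0x820, 0x840)  index-2 values for 2-byte UTF-8 lead bytes C0..DF
//   [0x840, ...)    index-1 table and supplementary index-2 blocks, present
//                   only when highStart > 0x10000
//   followed by the data values; the fallback value for code points at or
//   above highStart sits DATA_GRANULARITY entries from the end of the array.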
class UnicodeTrie {
constructor(data) {
const isBuffer = (typeof data.readUInt32BE === 'function') && (typeof data.slice === 'function');
if (isBuffer || data instanceof Uint8Array) {
// read binary format
let uncompressedLength;
if (isBuffer) {
this.highStart = data.readUInt32LE(0);
this.errorValue = data.readUInt32LE(4);
uncompressedLength = data.readUInt32LE(8);
data = data.slice(12);
} else {
const view = new DataView(data.buffer);
this.highStart = view.getUint32(0, true);
this.errorValue = view.getUint32(4, true);
uncompressedLength = view.getUint32(8, true);
data = data.subarray(12);
}
// double inflate the actual trie data
data = inflate(data, new Uint8Array(uncompressedLength));
data = inflate(data, new Uint8Array(uncompressedLength));
// swap bytes from little-endian
swap32LE(data);
this.data = new Uint32Array(data.buffer);
} else {
// pre-parsed data
({ data: this.data, highStart: this.highStart, errorValue: this.errorValue } = data);
}
}
get(codePoint) {
let index;
if ((codePoint < 0) || (codePoint > 0x10ffff)) {
return this.errorValue;
}
if ((codePoint < 0xd800) || ((codePoint > 0xdbff) && (codePoint <= 0xffff))) {
// Ordinary BMP code point, excluding leading surrogates.
// BMP uses a single level lookup. BMP index starts at offset 0 in the index.
// data is stored in the index array itself.
index = (this.data[codePoint >> SHIFT_2] << INDEX_SHIFT) + (codePoint & DATA_MASK);
return this.data[index];
}
if (codePoint <= 0xffff) {
// Lead surrogate code point. A separate index section is stored for
// lead surrogate code units and code points.
// The main index has the code unit data.
// For this function, we need the code point data.
index = (this.data[LSCP_INDEX_2_OFFSET + ((codePoint - 0xd800) >> SHIFT_2)] << INDEX_SHIFT) + (codePoint & DATA_MASK);
return this.data[index];
}
if (codePoint < this.highStart) {
// Supplemental code point, use two-level lookup.
index = this.data[(INDEX_1_OFFSET - OMITTED_BMP_INDEX_1_LENGTH) + (codePoint >> SHIFT_1)];
index = this.data[index + ((codePoint >> SHIFT_2) & INDEX_2_MASK)];
index = (index << INDEX_SHIFT) + (codePoint & DATA_MASK);
return this.data[index];
}
return this.data[this.data.length - DATA_GRANULARITY];
}
}
module.exports = UnicodeTrie;

27
node_modules/unicode-trie/package.json generated vendored Normal file

@@ -0,0 +1,27 @@
{
"name": "unicode-trie",
"version": "2.0.0",
"description": "Unicode Trie data structure for fast character metadata lookup, ported from ICU",
"devDependencies": {
"mocha": "^6.1.4",
"nyc": "^14.1.1"
},
"scripts": {
"test": "mocha",
"coverage": "nyc mocha"
},
"repository": {
"type": "git",
"url": "git://github.com/devongovett/unicode-trie.git"
},
"author": "Devon Govett <devongovett@gmail.com>",
"license": "MIT",
"bugs": {
"url": "https://github.com/devongovett/unicode-trie/issues"
},
"homepage": "https://github.com/devongovett/unicode-trie",
"dependencies": {
"pako": "^0.2.5",
"tiny-inflate": "^1.0.0"
}
}

25
node_modules/unicode-trie/swap.js generated vendored Normal file

@@ -0,0 +1,25 @@
const isBigEndian = (new Uint8Array(new Uint32Array([0x12345678]).buffer)[0] === 0x12);
const swap = (b, n, m) => {
let i = b[n];
b[n] = b[m];
b[m] = i;
};
const swap32 = array => {
const len = array.length;
for (let i = 0; i < len; i += 4) {
swap(array, i, i + 3);
swap(array, i + 1, i + 2);
}
};
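// e.g. the bytes [0x12, 0x34, 0x56, 0x78] become [0x78, 0x56, 0x34, 0x12]:
// each 32-bit word is reversed in place.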
const swap32LE = array => {
if (isBigEndian) {
swap32(array);
}
};
module.exports = {
swap32LE: swap32LE
};

1
node_modules/unicode-trie/test/mocha.opts generated vendored Normal file

@@ -0,0 +1 @@
--reporter spec

243
node_modules/unicode-trie/test/test.js generated vendored Normal file

@@ -0,0 +1,243 @@
const assert = require('assert');
const UnicodeTrieBuilder = require('../builder');
const UnicodeTrie = require('../');
describe('unicode trie', () => {
it('set', () => {
const trie = new UnicodeTrieBuilder(10, 666);
trie.set(0x4567, 99);
assert.equal(trie.get(0x4566), 10);
assert.equal(trie.get(0x4567), 99);
assert.equal(trie.get(-1), 666);
assert.equal(trie.get(0x110000), 666);
});
it('set -> compacted trie', () => {
const t = new UnicodeTrieBuilder(10, 666);
t.set(0x4567, 99);
const trie = t.freeze();
assert.equal(trie.get(0x4566), 10);
assert.equal(trie.get(0x4567), 99);
assert.equal(trie.get(-1), 666);
assert.equal(trie.get(0x110000), 666);
});
it('setRange', () => {
const trie = new UnicodeTrieBuilder(10, 666);
trie.setRange(13, 6666, 7788, false);
trie.setRange(6000, 7000, 9900, true);
assert.equal(trie.get(12), 10);
assert.equal(trie.get(13), 7788);
assert.equal(trie.get(5999), 7788);
assert.equal(trie.get(6000), 9900);
assert.equal(trie.get(7000), 9900);
assert.equal(trie.get(7001), 10);
assert.equal(trie.get(0x110000), 666);
});
it('setRange -> compacted trie', () => {
const t = new UnicodeTrieBuilder(10, 666);
t.setRange(13, 6666, 7788, false);
t.setRange(6000, 7000, 9900, true);
const trie = t.freeze();
assert.equal(trie.get(12), 10);
assert.equal(trie.get(13), 7788);
assert.equal(trie.get(5999), 7788);
assert.equal(trie.get(6000), 9900);
assert.equal(trie.get(7000), 9900);
assert.equal(trie.get(7001), 10);
assert.equal(trie.get(0x110000), 666);
});
it('toBuffer written in little-endian', () => {
const trie = new UnicodeTrieBuilder();
trie.set(0x4567, 99);
const buf = trie.toBuffer();
const bufferExpected = Buffer.from([0, 72, 0, 0, 0, 0, 0, 0, 128, 36, 0, 0, 123, 123, 206, 144, 235, 128, 2, 143, 67, 96, 225, 171, 23, 55, 54, 38, 231, 47, 44, 127, 233, 90, 109, 194, 92, 246, 126, 197, 131, 223, 31, 56, 102, 78, 154, 20, 108, 117, 88, 244, 93, 192, 190, 218, 229, 156, 12, 107, 86, 235, 125, 96, 102, 0, 129, 15, 239, 109, 219, 204, 58, 151, 92, 52, 126, 152, 198, 14, 0]);
assert.equal(buf.toString('hex'), bufferExpected.toString('hex'));
});
it('should work with compressed serialization format', () => {
const t = new UnicodeTrieBuilder(10, 666);
t.setRange(13, 6666, 7788, false);
t.setRange(6000, 7000, 9900, true);
const buf = t.toBuffer();
const trie = new UnicodeTrie(buf);
assert.equal(trie.get(12), 10);
assert.equal(trie.get(13), 7788);
assert.equal(trie.get(5999), 7788);
assert.equal(trie.get(6000), 9900);
assert.equal(trie.get(7000), 9900);
assert.equal(trie.get(7001), 10);
assert.equal(trie.get(0x110000), 666);
});
const rangeTests = [
{
ranges: [
[ 0, 0, 0, 0 ],
[ 0, 0x40, 0, 0 ],
[ 0x40, 0xe7, 0x1234, 0 ],
[ 0xe7, 0x3400, 0, 0 ],
[ 0x3400, 0x9fa6, 0x6162, 0 ],
[ 0x9fa6, 0xda9e, 0x3132, 0 ],
[ 0xdada, 0xeeee, 0x87ff, 0 ],
[ 0xeeee, 0x11111, 1, 0 ],
[ 0x11111, 0x44444, 0x6162, 0 ],
[ 0x44444, 0x60003, 0, 0 ],
[ 0xf0003, 0xf0004, 0xf, 0 ],
[ 0xf0004, 0xf0006, 0x10, 0 ],
[ 0xf0006, 0xf0007, 0x11, 0 ],
[ 0xf0007, 0xf0040, 0x12, 0 ],
[ 0xf0040, 0x110000, 0, 0 ]
],
check: [
[ 0, 0 ],
[ 0x40, 0 ],
[ 0xe7, 0x1234 ],
[ 0x3400, 0 ],
[ 0x9fa6, 0x6162 ],
[ 0xda9e, 0x3132 ],
[ 0xdada, 0 ],
[ 0xeeee, 0x87ff ],
[ 0x11111, 1 ],
[ 0x44444, 0x6162 ],
[ 0xf0003, 0 ],
[ 0xf0004, 0xf ],
[ 0xf0006, 0x10 ],
[ 0xf0007, 0x11 ],
[ 0xf0040, 0x12 ],
[ 0x110000, 0 ]
]
},
{
// set some interesting overlapping ranges
ranges: [
[ 0, 0, 0, 0 ],
[ 0x21, 0x7f, 0x5555, 1 ],
[ 0x2f800, 0x2fedc, 0x7a, 1 ],
[ 0x72, 0xdd, 3, 1 ],
[ 0xdd, 0xde, 4, 0 ],
[ 0x201, 0x240, 6, 1 ], // 3 consecutive blocks with the same pattern but
[ 0x241, 0x280, 6, 1 ], // discontiguous value ranges, testing utrie2_enum()
[ 0x281, 0x2c0, 6, 1 ],
[ 0x2f987, 0x2fa98, 5, 1 ],
[ 0x2f777, 0x2f883, 0, 1 ],
[ 0x2f900, 0x2ffaa, 1, 0 ],
[ 0x2ffaa, 0x2ffab, 2, 1 ],
[ 0x2ffbb, 0x2ffc0, 7, 1 ]
],
check: [
[ 0, 0 ],
[ 0x21, 0 ],
[ 0x72, 0x5555 ],
[ 0xdd, 3 ],
[ 0xde, 4 ],
[ 0x201, 0 ],
[ 0x240, 6 ],
[ 0x241, 0 ],
[ 0x280, 6 ],
[ 0x281, 0 ],
[ 0x2c0, 6 ],
[ 0x2f883, 0 ],
[ 0x2f987, 0x7a ],
[ 0x2fa98, 5 ],
[ 0x2fedc, 0x7a ],
[ 0x2ffaa, 1 ],
[ 0x2ffab, 2 ],
[ 0x2ffbb, 0 ],
[ 0x2ffc0, 7 ],
[ 0x110000, 0 ]
]
},
{
// use a non-zero initial value
ranges: [
[ 0, 0, 9, 0 ], // non-zero initial value.
[ 0x31, 0xa4, 1, 0 ],
[ 0x3400, 0x6789, 2, 0 ],
[ 0x8000, 0x89ab, 9, 1 ],
[ 0x9000, 0xa000, 4, 1 ],
[ 0xabcd, 0xbcde, 3, 1 ],
[ 0x55555, 0x110000, 6, 1 ], // highStart<U+ffff with non-initialValue
[ 0xcccc, 0x55555, 6, 1 ]
],
check: [
[ 0, 9 ], // non-zero initialValue
[ 0x31, 9 ],
[ 0xa4, 1 ],
[ 0x3400, 9 ],
[ 0x6789, 2 ],
[ 0x9000, 9 ],
[ 0xa000, 4 ],
[ 0xabcd, 9 ],
[ 0xbcde, 3 ],
[ 0xcccc, 9 ],
[ 0x110000, 6 ]
]
},
{
// empty or single-value tries, testing highStart==0
ranges: [
[ 0, 0, 3, 0 ] // Only the element with the initial value.
],
check: [
[ 0, 3 ],
[ 0x110000, 3 ]
]
},
{
ranges: [
[ 0, 0, 3, 0 ], // Initial value = 3
[ 0, 0x110000, 5, 1 ]
],
check: [
[ 0, 3 ],
[ 0x110000, 5 ]
]
}
];
it('should pass range tests', () => {
const result = [];
for (let test of rangeTests) {
let initialValue = 0;
let errorValue = 0x0bad;
let i = 0;
if (test.ranges[i][1] < 0) {
errorValue = test.ranges[i][2];
i++;
}
initialValue = test.ranges[i++][2];
var trie = new UnicodeTrieBuilder(initialValue, errorValue);
for (let range of test.ranges.slice(i)) {
trie.setRange(range[0], range[1] - 1, range[2], range[3] !== 0);
}
var frozen = trie.freeze();
var start = 0;
result.push(test.check.map((check) => {
let end;
const result1 = [];
for (end = check[0]; start < end; start++) {
assert.equal(trie.get(start), check[1]);
result1.push(assert.equal(frozen.get(start), check[1]));
}
return result1;
}));
}
});
});