repo_id (string, 15–86 chars) | file_path (string, 28–180 chars) | content (string, 1–1.75M chars) | __index_level_0__ (int64, 0–0)
---|---|---|---|
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/package-lock.json | {
"name": "tokenizers",
"version": "0.13.3",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"@babel/code-frame": {
"version": "7.12.11",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.11.tgz",
"integrity": "sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==",
"dev": true,
"requires": {
"@babel/highlight": "^7.10.4"
}
},
"@babel/compat-data": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.15.0.tgz",
"integrity": "sha512-0NqAC1IJE0S0+lL1SWFMxMkz1pKCNCjI4tr2Zx4LJSXxCLAdr6KyArnY+sno5m3yH9g737ygOyPABDsnXkpxiA==",
"dev": true
},
"@babel/core": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/core/-/core-7.15.0.tgz",
"integrity": "sha512-tXtmTminrze5HEUPn/a0JtOzzfp0nk+UEXQ/tqIJo3WDGypl/2OFQEMll/zSFU8f/lfmfLXvTaORHF3cfXIQMw==",
"dev": true,
"requires": {
"@babel/code-frame": "^7.14.5",
"@babel/generator": "^7.15.0",
"@babel/helper-compilation-targets": "^7.15.0",
"@babel/helper-module-transforms": "^7.15.0",
"@babel/helpers": "^7.14.8",
"@babel/parser": "^7.15.0",
"@babel/template": "^7.14.5",
"@babel/traverse": "^7.15.0",
"@babel/types": "^7.15.0",
"convert-source-map": "^1.7.0",
"debug": "^4.1.0",
"gensync": "^1.0.0-beta.2",
"json5": "^2.1.2",
"semver": "^6.3.0",
"source-map": "^0.5.0"
},
"dependencies": {
"@babel/code-frame": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz",
"integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==",
"dev": true,
"requires": {
"@babel/highlight": "^7.14.5"
}
},
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"dev": true
},
"source-map": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
"integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
"dev": true
}
}
},
"@babel/generator": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.15.0.tgz",
"integrity": "sha512-eKl4XdMrbpYvuB505KTta4AV9g+wWzmVBW69tX0H2NwKVKd2YJbKgyK6M8j/rgLbmHOYJn6rUklV677nOyJrEQ==",
"dev": true,
"requires": {
"@babel/types": "^7.15.0",
"jsesc": "^2.5.1",
"source-map": "^0.5.0"
},
"dependencies": {
"source-map": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
"integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
"dev": true
}
}
},
"@babel/helper-compilation-targets": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.15.0.tgz",
"integrity": "sha512-h+/9t0ncd4jfZ8wsdAsoIxSa61qhBYlycXiHWqJaQBCXAhDCMbPRSMTGnZIkkmt1u4ag+UQmuqcILwqKzZ4N2A==",
"dev": true,
"requires": {
"@babel/compat-data": "^7.15.0",
"@babel/helper-validator-option": "^7.14.5",
"browserslist": "^4.16.6",
"semver": "^6.3.0"
},
"dependencies": {
"semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"dev": true
}
}
},
"@babel/helper-function-name": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.14.5.tgz",
"integrity": "sha512-Gjna0AsXWfFvrAuX+VKcN/aNNWonizBj39yGwUzVDVTlMYJMK2Wp6xdpy72mfArFq5uK+NOuexfzZlzI1z9+AQ==",
"dev": true,
"requires": {
"@babel/helper-get-function-arity": "^7.14.5",
"@babel/template": "^7.14.5",
"@babel/types": "^7.14.5"
}
},
"@babel/helper-get-function-arity": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.14.5.tgz",
"integrity": "sha512-I1Db4Shst5lewOM4V+ZKJzQ0JGGaZ6VY1jYvMghRjqs6DWgxLCIyFt30GlnKkfUeFLpJt2vzbMVEXVSXlIFYUg==",
"dev": true,
"requires": {
"@babel/types": "^7.14.5"
}
},
"@babel/helper-hoist-variables": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.14.5.tgz",
"integrity": "sha512-R1PXiz31Uc0Vxy4OEOm07x0oSjKAdPPCh3tPivn/Eo8cvz6gveAeuyUUPB21Hoiif0uoPQSSdhIPS3352nvdyQ==",
"dev": true,
"requires": {
"@babel/types": "^7.14.5"
}
},
"@babel/helper-member-expression-to-functions": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.15.0.tgz",
"integrity": "sha512-Jq8H8U2kYiafuj2xMTPQwkTBnEEdGKpT35lJEQsRRjnG0LW3neucsaMWLgKcwu3OHKNeYugfw+Z20BXBSEs2Lg==",
"dev": true,
"requires": {
"@babel/types": "^7.15.0"
}
},
"@babel/helper-module-imports": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.14.5.tgz",
"integrity": "sha512-SwrNHu5QWS84XlHwGYPDtCxcA0hrSlL2yhWYLgeOc0w7ccOl2qv4s/nARI0aYZW+bSwAL5CukeXA47B/1NKcnQ==",
"dev": true,
"requires": {
"@babel/types": "^7.14.5"
}
},
"@babel/helper-module-transforms": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.15.0.tgz",
"integrity": "sha512-RkGiW5Rer7fpXv9m1B3iHIFDZdItnO2/BLfWVW/9q7+KqQSDY5kUfQEbzdXM1MVhJGcugKV7kRrNVzNxmk7NBg==",
"dev": true,
"requires": {
"@babel/helper-module-imports": "^7.14.5",
"@babel/helper-replace-supers": "^7.15.0",
"@babel/helper-simple-access": "^7.14.8",
"@babel/helper-split-export-declaration": "^7.14.5",
"@babel/helper-validator-identifier": "^7.14.9",
"@babel/template": "^7.14.5",
"@babel/traverse": "^7.15.0",
"@babel/types": "^7.15.0"
}
},
"@babel/helper-optimise-call-expression": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.14.5.tgz",
"integrity": "sha512-IqiLIrODUOdnPU9/F8ib1Fx2ohlgDhxnIDU7OEVi+kAbEZcyiF7BLU8W6PfvPi9LzztjS7kcbzbmL7oG8kD6VA==",
"dev": true,
"requires": {
"@babel/types": "^7.14.5"
}
},
"@babel/helper-plugin-utils": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.14.5.tgz",
"integrity": "sha512-/37qQCE3K0vvZKwoK4XU/irIJQdIfCJuhU5eKnNxpFDsOkgFaUAwbv+RYw6eYgsC0E4hS7r5KqGULUogqui0fQ==",
"dev": true
},
"@babel/helper-replace-supers": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.15.0.tgz",
"integrity": "sha512-6O+eWrhx+HEra/uJnifCwhwMd6Bp5+ZfZeJwbqUTuqkhIT6YcRhiZCOOFChRypOIe0cV46kFrRBlm+t5vHCEaA==",
"dev": true,
"requires": {
"@babel/helper-member-expression-to-functions": "^7.15.0",
"@babel/helper-optimise-call-expression": "^7.14.5",
"@babel/traverse": "^7.15.0",
"@babel/types": "^7.15.0"
}
},
"@babel/helper-simple-access": {
"version": "7.14.8",
"resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.14.8.tgz",
"integrity": "sha512-TrFN4RHh9gnWEU+s7JloIho2T76GPwRHhdzOWLqTrMnlas8T9O7ec+oEDNsRXndOmru9ymH9DFrEOxpzPoSbdg==",
"dev": true,
"requires": {
"@babel/types": "^7.14.8"
}
},
"@babel/helper-split-export-declaration": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.14.5.tgz",
"integrity": "sha512-hprxVPu6e5Kdp2puZUmvOGjaLv9TCe58E/Fl6hRq4YiVQxIcNvuq6uTM2r1mT/oPskuS9CgR+I94sqAYv0NGKA==",
"dev": true,
"requires": {
"@babel/types": "^7.14.5"
}
},
"@babel/helper-validator-identifier": {
"version": "7.14.9",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.9.tgz",
"integrity": "sha512-pQYxPY0UP6IHISRitNe8bsijHex4TWZXi2HwKVsjPiltzlhse2znVcm9Ace510VT1kxIHjGJCZZQBX2gJDbo0g==",
"dev": true
},
"@babel/helper-validator-option": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.14.5.tgz",
"integrity": "sha512-OX8D5eeX4XwcroVW45NMvoYaIuFI+GQpA2a8Gi+X/U/cDUIRsV37qQfF905F0htTRCREQIB4KqPeaveRJUl3Ow==",
"dev": true
},
"@babel/helpers": {
"version": "7.15.3",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.15.3.tgz",
"integrity": "sha512-HwJiz52XaS96lX+28Tnbu31VeFSQJGOeKHJeaEPQlTl7PnlhFElWPj8tUXtqFIzeN86XxXoBr+WFAyK2PPVz6g==",
"dev": true,
"requires": {
"@babel/template": "^7.14.5",
"@babel/traverse": "^7.15.0",
"@babel/types": "^7.15.0"
}
},
"@babel/highlight": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.5.tgz",
"integrity": "sha512-qf9u2WFWVV0MppaL877j2dBtQIDgmidgjGk5VIMw3OadXvYaXn66U1BFlH2t4+t3i+8PhedppRv+i40ABzd+gg==",
"dev": true,
"requires": {
"@babel/helper-validator-identifier": "^7.14.5",
"chalk": "^2.0.0",
"js-tokens": "^4.0.0"
},
"dependencies": {
"chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"requires": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
}
},
"has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
"dev": true
},
"supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"requires": {
"has-flag": "^3.0.0"
}
}
}
},
"@babel/parser": {
"version": "7.15.3",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.15.3.tgz",
"integrity": "sha512-O0L6v/HvqbdJawj0iBEfVQMc3/6WP+AeOsovsIgBFyJaG+W2w7eqvZB7puddATmWuARlm1SX7DwxJ/JJUnDpEA==",
"dev": true
},
"@babel/plugin-syntax-async-generators": {
"version": "7.8.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
"integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.8.0"
}
},
"@babel/plugin-syntax-bigint": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz",
"integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.8.0"
}
},
"@babel/plugin-syntax-class-properties": {
"version": "7.12.13",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
"integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.12.13"
}
},
"@babel/plugin-syntax-import-meta": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
"integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.10.4"
}
},
"@babel/plugin-syntax-json-strings": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
"integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.8.0"
}
},
"@babel/plugin-syntax-logical-assignment-operators": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
"integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.10.4"
}
},
"@babel/plugin-syntax-nullish-coalescing-operator": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
"integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.8.0"
}
},
"@babel/plugin-syntax-numeric-separator": {
"version": "7.10.4",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
"integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.10.4"
}
},
"@babel/plugin-syntax-object-rest-spread": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
"integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.8.0"
}
},
"@babel/plugin-syntax-optional-catch-binding": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
"integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.8.0"
}
},
"@babel/plugin-syntax-optional-chaining": {
"version": "7.8.3",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
"integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.8.0"
}
},
"@babel/plugin-syntax-top-level-await": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
"integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.14.5"
}
},
"@babel/template": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.14.5.tgz",
"integrity": "sha512-6Z3Po85sfxRGachLULUhOmvAaOo7xCvqGQtxINai2mEGPFm6pQ4z5QInFnUrRpfoSV60BnjyF5F3c+15fxFV1g==",
"dev": true,
"requires": {
"@babel/code-frame": "^7.14.5",
"@babel/parser": "^7.14.5",
"@babel/types": "^7.14.5"
},
"dependencies": {
"@babel/code-frame": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz",
"integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==",
"dev": true,
"requires": {
"@babel/highlight": "^7.14.5"
}
}
}
},
"@babel/traverse": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.15.0.tgz",
"integrity": "sha512-392d8BN0C9eVxVWd8H6x9WfipgVH5IaIoLp23334Sc1vbKKWINnvwRpb4us0xtPaCumlwbTtIYNA0Dv/32sVFw==",
"dev": true,
"requires": {
"@babel/code-frame": "^7.14.5",
"@babel/generator": "^7.15.0",
"@babel/helper-function-name": "^7.14.5",
"@babel/helper-hoist-variables": "^7.14.5",
"@babel/helper-split-export-declaration": "^7.14.5",
"@babel/parser": "^7.15.0",
"@babel/types": "^7.15.0",
"debug": "^4.1.0",
"globals": "^11.1.0"
},
"dependencies": {
"@babel/code-frame": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz",
"integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==",
"dev": true,
"requires": {
"@babel/highlight": "^7.14.5"
}
},
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"globals": {
"version": "11.12.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
"integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
"dev": true
}
}
},
"@babel/types": {
"version": "7.15.0",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.15.0.tgz",
"integrity": "sha512-OBvfqnllOIdX4ojTHpwZbpvz4j3EWyjkZEdmjH0/cgsd6QOdSgU8rLSk6ard/pcW7rlmjdVSX/AWOaORR1uNOQ==",
"dev": true,
"requires": {
"@babel/helper-validator-identifier": "^7.14.9",
"to-fast-properties": "^2.0.0"
}
},
"@bcoe/v8-coverage": {
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
"integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
"dev": true
},
"@cnakazawa/watch": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/@cnakazawa/watch/-/watch-1.0.4.tgz",
"integrity": "sha512-v9kIhKwjeZThiWrLmj0y17CWoyddASLj9O2yvbZkbvw/N3rWOYy9zkV66ursAoVr0mV15bL8g0c4QZUE6cdDoQ==",
"dev": true,
"requires": {
"exec-sh": "^0.3.2",
"minimist": "^1.2.0"
}
},
"@eslint/eslintrc": {
"version": "0.4.3",
"resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz",
"integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==",
"dev": true,
"requires": {
"ajv": "^6.12.4",
"debug": "^4.1.1",
"espree": "^7.3.0",
"globals": "^13.9.0",
"ignore": "^4.0.6",
"import-fresh": "^3.2.1",
"js-yaml": "^3.13.1",
"minimatch": "^3.0.4",
"strip-json-comments": "^3.1.1"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"dev": true
}
}
},
"@humanwhocodes/config-array": {
"version": "0.5.0",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz",
"integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==",
"dev": true,
"requires": {
"@humanwhocodes/object-schema": "^1.2.0",
"debug": "^4.1.1",
"minimatch": "^3.0.4"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
}
}
},
"@humanwhocodes/object-schema": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.0.tgz",
"integrity": "sha512-wdppn25U8z/2yiaT6YGquE6X8sSv7hNMWSXYSSU1jGv/yd6XqjXgTDJ8KP4NgjTXfJ3GbRjeeb8RTV7a/VpM+w==",
"dev": true
},
"@istanbuljs/load-nyc-config": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
"integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==",
"dev": true,
"requires": {
"camelcase": "^5.3.1",
"find-up": "^4.1.0",
"get-package-type": "^0.1.0",
"js-yaml": "^3.13.1",
"resolve-from": "^5.0.0"
},
"dependencies": {
"resolve-from": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
"integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
"dev": true
}
}
},
"@istanbuljs/schema": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
"integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==",
"dev": true
},
"@jest/console": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/console/-/console-26.6.2.tgz",
"integrity": "sha512-IY1R2i2aLsLr7Id3S6p2BA82GNWryt4oSvEXLAKc+L2zdi89dSkE8xC1C+0kpATG4JhBJREnQOH7/zmccM2B0g==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"@types/node": "*",
"chalk": "^4.0.0",
"jest-message-util": "^26.6.2",
"jest-util": "^26.6.2",
"slash": "^3.0.0"
}
},
"@jest/core": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/@jest/core/-/core-26.6.3.tgz",
"integrity": "sha512-xvV1kKbhfUqFVuZ8Cyo+JPpipAHHAV3kcDBftiduK8EICXmTFddryy3P7NfZt8Pv37rA9nEJBKCCkglCPt/Xjw==",
"dev": true,
"requires": {
"@jest/console": "^26.6.2",
"@jest/reporters": "^26.6.2",
"@jest/test-result": "^26.6.2",
"@jest/transform": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/node": "*",
"ansi-escapes": "^4.2.1",
"chalk": "^4.0.0",
"exit": "^0.1.2",
"graceful-fs": "^4.2.4",
"jest-changed-files": "^26.6.2",
"jest-config": "^26.6.3",
"jest-haste-map": "^26.6.2",
"jest-message-util": "^26.6.2",
"jest-regex-util": "^26.0.0",
"jest-resolve": "^26.6.2",
"jest-resolve-dependencies": "^26.6.3",
"jest-runner": "^26.6.3",
"jest-runtime": "^26.6.3",
"jest-snapshot": "^26.6.2",
"jest-util": "^26.6.2",
"jest-validate": "^26.6.2",
"jest-watcher": "^26.6.2",
"micromatch": "^4.0.2",
"p-each-series": "^2.1.0",
"rimraf": "^3.0.0",
"slash": "^3.0.0",
"strip-ansi": "^6.0.0"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"requires": {
"glob": "^7.1.3"
}
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
}
}
},
"@jest/environment": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/environment/-/environment-26.6.2.tgz",
"integrity": "sha512-nFy+fHl28zUrRsCeMB61VDThV1pVTtlEokBRgqPrcT1JNq4yRNIyTHfyht6PqtUvY9IsuLGTrbG8kPXjSZIZwA==",
"dev": true,
"requires": {
"@jest/fake-timers": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/node": "*",
"jest-mock": "^26.6.2"
}
},
"@jest/fake-timers": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-26.6.2.tgz",
"integrity": "sha512-14Uleatt7jdzefLPYM3KLcnUl1ZNikaKq34enpb5XG9i81JpppDb5muZvonvKyrl7ftEHkKS5L5/eB/kxJ+bvA==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"@sinonjs/fake-timers": "^6.0.1",
"@types/node": "*",
"jest-message-util": "^26.6.2",
"jest-mock": "^26.6.2",
"jest-util": "^26.6.2"
}
},
"@jest/globals": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/globals/-/globals-26.6.2.tgz",
"integrity": "sha512-85Ltnm7HlB/KesBUuALwQ68YTU72w9H2xW9FjZ1eL1U3lhtefjjl5c2MiUbpXt/i6LaPRvoOFJ22yCBSfQ0JIA==",
"dev": true,
"requires": {
"@jest/environment": "^26.6.2",
"@jest/types": "^26.6.2",
"expect": "^26.6.2"
}
},
"@jest/reporters": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-26.6.2.tgz",
"integrity": "sha512-h2bW53APG4HvkOnVMo8q3QXa6pcaNt1HkwVsOPMBV6LD/q9oSpxNSYZQYkAnjdMjrJ86UuYeLo+aEZClV6opnw==",
"dev": true,
"requires": {
"@bcoe/v8-coverage": "^0.2.3",
"@jest/console": "^26.6.2",
"@jest/test-result": "^26.6.2",
"@jest/transform": "^26.6.2",
"@jest/types": "^26.6.2",
"chalk": "^4.0.0",
"collect-v8-coverage": "^1.0.0",
"exit": "^0.1.2",
"glob": "^7.1.2",
"graceful-fs": "^4.2.4",
"istanbul-lib-coverage": "^3.0.0",
"istanbul-lib-instrument": "^4.0.3",
"istanbul-lib-report": "^3.0.0",
"istanbul-lib-source-maps": "^4.0.0",
"istanbul-reports": "^3.0.2",
"jest-haste-map": "^26.6.2",
"jest-resolve": "^26.6.2",
"jest-util": "^26.6.2",
"jest-worker": "^26.6.2",
"node-notifier": "^8.0.0",
"slash": "^3.0.0",
"source-map": "^0.6.0",
"string-length": "^4.0.1",
"terminal-link": "^2.0.0",
"v8-to-istanbul": "^7.0.0"
}
},
"@jest/source-map": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-26.6.2.tgz",
"integrity": "sha512-YwYcCwAnNmOVsZ8mr3GfnzdXDAl4LaenZP5z+G0c8bzC9/dugL8zRmxZzdoTl4IaS3CryS1uWnROLPFmb6lVvA==",
"dev": true,
"requires": {
"callsites": "^3.0.0",
"graceful-fs": "^4.2.4",
"source-map": "^0.6.0"
}
},
"@jest/test-result": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-26.6.2.tgz",
"integrity": "sha512-5O7H5c/7YlojphYNrK02LlDIV2GNPYisKwHm2QTKjNZeEzezCbwYs9swJySv2UfPMyZ0VdsmMv7jIlD/IKYQpQ==",
"dev": true,
"requires": {
"@jest/console": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/istanbul-lib-coverage": "^2.0.0",
"collect-v8-coverage": "^1.0.0"
}
},
"@jest/test-sequencer": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-26.6.3.tgz",
"integrity": "sha512-YHlVIjP5nfEyjlrSr8t/YdNfU/1XEt7c5b4OxcXCjyRhjzLYu/rO69/WHPuYcbCWkz8kAeZVZp2N2+IOLLEPGw==",
"dev": true,
"requires": {
"@jest/test-result": "^26.6.2",
"graceful-fs": "^4.2.4",
"jest-haste-map": "^26.6.2",
"jest-runner": "^26.6.3",
"jest-runtime": "^26.6.3"
}
},
"@jest/transform": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/transform/-/transform-26.6.2.tgz",
"integrity": "sha512-E9JjhUgNzvuQ+vVAL21vlyfy12gP0GhazGgJC4h6qUt1jSdUXGWJ1wfu/X7Sd8etSgxV4ovT1pb9v5D6QW4XgA==",
"dev": true,
"requires": {
"@babel/core": "^7.1.0",
"@jest/types": "^26.6.2",
"babel-plugin-istanbul": "^6.0.0",
"chalk": "^4.0.0",
"convert-source-map": "^1.4.0",
"fast-json-stable-stringify": "^2.0.0",
"graceful-fs": "^4.2.4",
"jest-haste-map": "^26.6.2",
"jest-regex-util": "^26.0.0",
"jest-util": "^26.6.2",
"micromatch": "^4.0.2",
"pirates": "^4.0.1",
"slash": "^3.0.0",
"source-map": "^0.6.1",
"write-file-atomic": "^3.0.0"
}
},
"@jest/types": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/@jest/types/-/types-26.6.2.tgz",
"integrity": "sha512-fC6QCp7Sc5sX6g8Tvbmj4XUTbyrik0akgRy03yjXbQaBWWNWGE7SGtJk98m0N8nzegD/7SggrUlivxo5ax4KWQ==",
"dev": true,
"requires": {
"@types/istanbul-lib-coverage": "^2.0.0",
"@types/istanbul-reports": "^3.0.0",
"@types/node": "*",
"@types/yargs": "^15.0.0",
"chalk": "^4.0.0"
}
},
"@sinonjs/commons": {
"version": "1.8.3",
"resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-1.8.3.tgz",
"integrity": "sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ==",
"dev": true,
"requires": {
"type-detect": "4.0.8"
}
},
"@sinonjs/fake-timers": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz",
"integrity": "sha512-MZPUxrmFubI36XS1DI3qmI0YdN1gks62JtFZvxR67ljjSNCeK6U08Zx4msEWOXuofgqUt6zPHSi1H9fbjR/NRA==",
"dev": true,
"requires": {
"@sinonjs/commons": "^1.7.0"
}
},
"@tootallnate/once": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz",
"integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==",
"dev": true
},
"@types/babel__core": {
"version": "7.1.15",
"resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.1.15.tgz",
"integrity": "sha512-bxlMKPDbY8x5h6HBwVzEOk2C8fb6SLfYQ5Jw3uBYuYF1lfWk/kbLd81la82vrIkBb0l+JdmrZaDikPrNxpS/Ew==",
"dev": true,
"requires": {
"@babel/parser": "^7.1.0",
"@babel/types": "^7.0.0",
"@types/babel__generator": "*",
"@types/babel__template": "*",
"@types/babel__traverse": "*"
}
},
"@types/babel__generator": {
"version": "7.6.3",
"resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.3.tgz",
"integrity": "sha512-/GWCmzJWqV7diQW54smJZzWbSFf4QYtF71WCKhcx6Ru/tFyQIY2eiiITcCAeuPbNSvT9YCGkVMqqvSk2Z0mXiA==",
"dev": true,
"requires": {
"@babel/types": "^7.0.0"
}
},
"@types/babel__template": {
"version": "7.4.1",
"resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz",
"integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==",
"dev": true,
"requires": {
"@babel/parser": "^7.1.0",
"@babel/types": "^7.0.0"
}
},
"@types/babel__traverse": {
"version": "7.14.2",
"resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.14.2.tgz",
"integrity": "sha512-K2waXdXBi2302XUdcHcR1jCeU0LL4TD9HRs/gk0N2Xvrht+G/BfJa4QObBQZfhMdxiCpV3COl5Nfq4uKTeTnJA==",
"dev": true,
"requires": {
"@babel/types": "^7.3.0"
}
},
"@types/eslint-visitor-keys": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/@types/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz",
"integrity": "sha512-OCutwjDZ4aFS6PB1UZ988C4YgwlBHJd6wCeQqaLdmadZ/7e+w79+hbMUFC1QXDNCmdyoRfAFdm0RypzwR+Qpag==",
"dev": true
},
"@types/graceful-fs": {
"version": "4.1.5",
"resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.5.tgz",
"integrity": "sha512-anKkLmZZ+xm4p8JWBf4hElkM4XR+EZeA2M9BAkkTldmcyDY4mbdIJnRghDJH3Ov5ooY7/UAoENtmdMSkaAd7Cw==",
"dev": true,
"requires": {
"@types/node": "*"
}
},
"@types/istanbul-lib-coverage": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.3.tgz",
"integrity": "sha512-sz7iLqvVUg1gIedBOvlkxPlc8/uVzyS5OwGz1cKjXzkl3FpL3al0crU8YGU1WoHkxn0Wxbw5tyi6hvzJKNzFsw==",
"dev": true
},
"@types/istanbul-lib-report": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz",
"integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==",
"dev": true,
"requires": {
"@types/istanbul-lib-coverage": "*"
}
},
"@types/istanbul-reports": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz",
"integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==",
"dev": true,
"requires": {
"@types/istanbul-lib-report": "*"
}
},
"@types/jest": {
"version": "26.0.24",
"resolved": "https://registry.npmjs.org/@types/jest/-/jest-26.0.24.tgz",
"integrity": "sha512-E/X5Vib8BWqZNRlDxj9vYXhsDwPYbPINqKF9BsnSoon4RQ0D9moEuLD8txgyypFLH7J4+Lho9Nr/c8H0Fi+17w==",
"dev": true,
"requires": {
"jest-diff": "^26.0.0",
"pretty-format": "^26.0.0"
}
},
"@types/json-schema": {
"version": "7.0.9",
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.9.tgz",
"integrity": "sha512-qcUXuemtEu+E5wZSJHNxUXeCZhAfXKQ41D+duX+VYPde7xyEVZci+/oXKJL13tnRs9lR2pr4fod59GT6/X1/yQ==",
"dev": true
},
"@types/node": {
"version": "13.13.52",
"resolved": "https://registry.npmjs.org/@types/node/-/node-13.13.52.tgz",
"integrity": "sha512-s3nugnZumCC//n4moGGe6tkNMyYEdaDBitVjwPxXmR5lnMG5dHePinH2EdxkG3Rh1ghFHHixAG4NJhpJW1rthQ=="
},
"@types/normalize-package-data": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz",
"integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==",
"dev": true
},
"@types/prettier": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/@types/prettier/-/prettier-2.3.2.tgz",
"integrity": "sha512-eI5Yrz3Qv4KPUa/nSIAi0h+qX0XyewOliug5F2QAtuRg6Kjg6jfmxe1GIwoIRhZspD1A0RP8ANrPwvEXXtRFog==",
"dev": true
},
"@types/stack-utils": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz",
"integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==",
"dev": true
},
"@types/yargs": {
"version": "15.0.14",
"resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-15.0.14.tgz",
"integrity": "sha512-yEJzHoxf6SyQGhBhIYGXQDSCkJjB6HohDShto7m8vaKg9Yp0Yn8+71J9eakh2bnPg6BfsH9PRMhiRTZnd4eXGQ==",
"dev": true,
"requires": {
"@types/yargs-parser": "*"
}
},
"@types/yargs-parser": {
"version": "20.2.1",
"resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-20.2.1.tgz",
"integrity": "sha512-7tFImggNeNBVMsn0vLrpn1H1uPrUBdnARPTpZoitY37ZrdJREzf7I16tMrlK3hen349gr1NYh8CmZQa7CTG6Aw==",
"dev": true
},
"@typescript-eslint/eslint-plugin": {
"version": "3.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-3.10.1.tgz",
"integrity": "sha512-PQg0emRtzZFWq6PxBcdxRH3QIQiyFO3WCVpRL3fgj5oQS3CDs3AeAKfv4DxNhzn8ITdNJGJ4D3Qw8eAJf3lXeQ==",
"dev": true,
"requires": {
"@typescript-eslint/experimental-utils": "3.10.1",
"debug": "^4.1.1",
"functional-red-black-tree": "^1.0.1",
"regexpp": "^3.0.0",
"semver": "^7.3.2",
"tsutils": "^3.17.1"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"@typescript-eslint/experimental-utils": {
"version": "3.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-3.10.1.tgz",
"integrity": "sha512-DewqIgscDzmAfd5nOGe4zm6Bl7PKtMG2Ad0KG8CUZAHlXfAKTF9Ol5PXhiMh39yRL2ChRH1cuuUGOcVyyrhQIw==",
"dev": true,
"requires": {
"@types/json-schema": "^7.0.3",
"@typescript-eslint/types": "3.10.1",
"@typescript-eslint/typescript-estree": "3.10.1",
"eslint-scope": "^5.0.0",
"eslint-utils": "^2.0.0"
}
},
"@typescript-eslint/parser": {
"version": "3.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-3.10.1.tgz",
"integrity": "sha512-Ug1RcWcrJP02hmtaXVS3axPPTTPnZjupqhgj+NnZ6BCkwSImWk/283347+x9wN+lqOdK9Eo3vsyiyDHgsmiEJw==",
"dev": true,
"requires": {
"@types/eslint-visitor-keys": "^1.0.0",
"@typescript-eslint/experimental-utils": "3.10.1",
"@typescript-eslint/types": "3.10.1",
"@typescript-eslint/typescript-estree": "3.10.1",
"eslint-visitor-keys": "^1.1.0"
}
},
"@typescript-eslint/types": {
"version": "3.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-3.10.1.tgz",
"integrity": "sha512-+3+FCUJIahE9q0lDi1WleYzjCwJs5hIsbugIgnbB+dSCYUxl8L6PwmsyOPFZde2hc1DlTo/xnkOgiTLSyAbHiQ==",
"dev": true
},
"@typescript-eslint/typescript-estree": {
"version": "3.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-3.10.1.tgz",
"integrity": "sha512-QbcXOuq6WYvnB3XPsZpIwztBoquEYLXh2MtwVU+kO8jgYCiv4G5xrSP/1wg4tkvrEE+esZVquIPX/dxPlePk1w==",
"dev": true,
"requires": {
"@typescript-eslint/types": "3.10.1",
"@typescript-eslint/visitor-keys": "3.10.1",
"debug": "^4.1.1",
"glob": "^7.1.6",
"is-glob": "^4.0.1",
"lodash": "^4.17.15",
"semver": "^7.3.2",
"tsutils": "^3.17.1"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"@typescript-eslint/visitor-keys": {
"version": "3.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-3.10.1.tgz",
"integrity": "sha512-9JgC82AaQeglebjZMgYR5wgmfUdUc+EitGUUMW8u2nDckaeimzW+VsoLV6FoimPv2id3VQzfjwBxEMVz08ameQ==",
"dev": true,
"requires": {
"eslint-visitor-keys": "^1.1.0"
}
},
"abab": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/abab/-/abab-2.0.5.tgz",
"integrity": "sha512-9IK9EadsbHo6jLWIpxpR6pL0sazTXV6+SQv25ZB+F7Bj9mJNaOc4nCRabwd5M/JwmUa8idz6Eci6eKfJryPs6Q==",
"dev": true
},
"abbrev": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
"integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q=="
},
"abs": {
"version": "1.3.14",
"resolved": "https://registry.npmjs.org/abs/-/abs-1.3.14.tgz",
"integrity": "sha512-PrS26IzwKLWwuURpiKl8wRmJ2KdR/azaVrLEBWG/TALwT20Y7qjtYp1qcMLHA4206hBHY5phv3w4pjf9NPv4Vw==",
"requires": {
"ul": "^5.0.0"
}
},
"acorn": {
"version": "7.4.1",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
"integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==",
"dev": true
},
"acorn-globals": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-6.0.0.tgz",
"integrity": "sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg==",
"dev": true,
"requires": {
"acorn": "^7.1.1",
"acorn-walk": "^7.1.1"
}
},
"acorn-jsx": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
"integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
"dev": true
},
"acorn-walk": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-7.2.0.tgz",
"integrity": "sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA==",
"dev": true
},
"agent-base": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
"integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
"dev": true,
"requires": {
"debug": "4"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
}
}
},
"ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dev": true,
"requires": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
}
},
"ansi-colors": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz",
"integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==",
"dev": true
},
"ansi-escapes": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
"integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
"dev": true,
"requires": {
"type-fest": "^0.21.3"
},
"dependencies": {
"type-fest": {
"version": "0.21.3",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
"integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
"dev": true
}
}
},
"ansi-regex": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
"integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8="
},
"ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dev": true,
"requires": {
"color-convert": "^1.9.0"
}
},
"anymatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz",
"integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==",
"dev": true,
"requires": {
"normalize-path": "^3.0.0",
"picomatch": "^2.0.4"
}
},
"aproba": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz",
"integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw=="
},
"are-we-there-yet": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz",
"integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==",
"requires": {
"delegates": "^1.0.0",
"readable-stream": "^2.0.6"
}
},
"argparse": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
"dev": true,
"requires": {
"sprintf-js": "~1.0.2"
}
},
"arr-diff": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
"integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=",
"dev": true
},
"arr-flatten": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
"integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==",
"dev": true
},
"arr-union": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
"integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=",
"dev": true
},
"array-back": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/array-back/-/array-back-3.1.0.tgz",
"integrity": "sha512-TkuxA4UCOvxuDK6NZYXCalszEzj+TLszyASooky+i742l9TqsOdYCMJJupxRic61hwquNtppB3hgcuq9SVSH1Q==",
"dev": true
},
"array-unique": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
"integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=",
"dev": true
},
"assign-symbols": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
"integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=",
"dev": true
},
"astral-regex": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz",
"integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==",
"dev": true
},
"asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=",
"dev": true
},
"atob": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
"integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==",
"dev": true
},
"babel-jest": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-26.6.3.tgz",
"integrity": "sha512-pl4Q+GAVOHwvjrck6jKjvmGhnO3jHX/xuB9d27f+EJZ/6k+6nMuPjorrYp7s++bKKdANwzElBWnLWaObvTnaZA==",
"dev": true,
"requires": {
"@jest/transform": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/babel__core": "^7.1.7",
"babel-plugin-istanbul": "^6.0.0",
"babel-preset-jest": "^26.6.2",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.4",
"slash": "^3.0.0"
}
},
"babel-plugin-istanbul": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.0.0.tgz",
"integrity": "sha512-AF55rZXpe7trmEylbaE1Gv54wn6rwU03aptvRoVIGP8YykoSxqdVLV1TfwflBCE/QtHmqtP8SWlTENqbK8GCSQ==",
"dev": true,
"requires": {
"@babel/helper-plugin-utils": "^7.0.0",
"@istanbuljs/load-nyc-config": "^1.0.0",
"@istanbuljs/schema": "^0.1.2",
"istanbul-lib-instrument": "^4.0.0",
"test-exclude": "^6.0.0"
}
},
"babel-plugin-jest-hoist": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-26.6.2.tgz",
"integrity": "sha512-PO9t0697lNTmcEHH69mdtYiOIkkOlj9fySqfO3K1eCcdISevLAE0xY59VLLUj0SoiPiTX/JU2CYFpILydUa5Lw==",
"dev": true,
"requires": {
"@babel/template": "^7.3.3",
"@babel/types": "^7.3.3",
"@types/babel__core": "^7.0.0",
"@types/babel__traverse": "^7.0.6"
}
},
"babel-preset-current-node-syntax": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz",
"integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==",
"dev": true,
"requires": {
"@babel/plugin-syntax-async-generators": "^7.8.4",
"@babel/plugin-syntax-bigint": "^7.8.3",
"@babel/plugin-syntax-class-properties": "^7.8.3",
"@babel/plugin-syntax-import-meta": "^7.8.3",
"@babel/plugin-syntax-json-strings": "^7.8.3",
"@babel/plugin-syntax-logical-assignment-operators": "^7.8.3",
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
"@babel/plugin-syntax-numeric-separator": "^7.8.3",
"@babel/plugin-syntax-object-rest-spread": "^7.8.3",
"@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
"@babel/plugin-syntax-optional-chaining": "^7.8.3",
"@babel/plugin-syntax-top-level-await": "^7.8.3"
}
},
"babel-preset-jest": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-26.6.2.tgz",
"integrity": "sha512-YvdtlVm9t3k777c5NPQIv6cxFFFapys25HiUmuSgHwIZhfifweR5c5Sf5nwE3MAbfu327CYSvps8Yx6ANLyleQ==",
"dev": true,
"requires": {
"babel-plugin-jest-hoist": "^26.6.2",
"babel-preset-current-node-syntax": "^1.0.0"
}
},
"balanced-match": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
"integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c="
},
"base": {
"version": "0.11.2",
"resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
"integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
"dev": true,
"requires": {
"cache-base": "^1.0.1",
"class-utils": "^0.3.5",
"component-emitter": "^1.2.1",
"define-property": "^1.0.0",
"isobject": "^3.0.1",
"mixin-deep": "^1.2.0",
"pascalcase": "^0.1.1"
},
"dependencies": {
"define-property": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
"integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
"dev": true,
"requires": {
"is-descriptor": "^1.0.0"
}
},
"is-accessor-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
"integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-data-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
"integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-descriptor": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
"integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
"dev": true,
"requires": {
"is-accessor-descriptor": "^1.0.0",
"is-data-descriptor": "^1.0.0",
"kind-of": "^6.0.2"
}
}
}
},
"bluebird": {
"version": "3.5.5",
"resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.5.tgz",
"integrity": "sha512-5am6HnnfN+urzt4yfg7IgTbotDjIT/u8AJpEt0sIU9FtXfVeezXAPKswrG+xKUCOYAINpSdgZVDU6QFh+cuH3w=="
},
"brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"braces": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
"dev": true,
"requires": {
"fill-range": "^7.0.1"
}
},
"browser-process-hrtime": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz",
"integrity": "sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==",
"dev": true
},
"browserslist": {
"version": "4.16.8",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.8.tgz",
"integrity": "sha512-sc2m9ohR/49sWEbPj14ZSSZqp+kbi16aLao42Hmn3Z8FpjuMaq2xCA2l4zl9ITfyzvnvyE0hcg62YkIGKxgaNQ==",
"dev": true,
"requires": {
"caniuse-lite": "^1.0.30001251",
"colorette": "^1.3.0",
"electron-to-chromium": "^1.3.811",
"escalade": "^3.1.1",
"node-releases": "^1.1.75"
}
},
"bs-logger": {
"version": "0.2.6",
"resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz",
"integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==",
"dev": true,
"requires": {
"fast-json-stable-stringify": "2.x"
}
},
"bser": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz",
"integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==",
"dev": true,
"requires": {
"node-int64": "^0.4.0"
}
},
"buffer-from": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
"integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
"dev": true
},
"builtins": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/builtins/-/builtins-1.0.3.tgz",
"integrity": "sha1-y5T662HIaWRR2zZTThQi+U8K7og=",
"dev": true
},
"cache-base": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
"integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
"dev": true,
"requires": {
"collection-visit": "^1.0.0",
"component-emitter": "^1.2.1",
"get-value": "^2.0.6",
"has-value": "^1.0.0",
"isobject": "^3.0.1",
"set-value": "^2.0.0",
"to-object-path": "^0.3.0",
"union-value": "^1.0.0",
"unset-value": "^1.0.0"
}
},
"callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"dev": true
},
"camelcase": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
"dev": true
},
"caniuse-lite": {
"version": "1.0.30001252",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001252.tgz",
"integrity": "sha512-I56jhWDGMtdILQORdusxBOH+Nl/KgQSdDmpJezYddnAkVOmnoU8zwjTV9xAjMIYxr0iPreEAVylCGcmHCjfaOw==",
"dev": true
},
"capture-exit": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/capture-exit/-/capture-exit-2.0.0.tgz",
"integrity": "sha512-PiT/hQmTonHhl/HFGN+Lx3JJUznrVYJ3+AQsnthneZbvW7x+f08Tk7yLJTLEOUvBTbduLeeBkxEaYXUOUrRq6g==",
"dev": true,
"requires": {
"rsvp": "^4.8.4"
}
},
"capture-stack-trace": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/capture-stack-trace/-/capture-stack-trace-1.0.2.tgz",
"integrity": "sha512-X/WM2UQs6VMHUtjUDnZTRI+i1crWteJySFzr9UpGoQa4WQffXVTTXuekjl7TjZRlcF2XfjgITT0HxZ9RnxeT0w=="
},
"chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"dev": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
}
}
},
"char-regex": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz",
"integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==",
"dev": true
},
"chardet": {
"version": "0.7.0",
"resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
"integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
"dev": true
},
"chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
"integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
},
"ci-info": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
"integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==",
"dev": true
},
"cjs-module-lexer": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-0.6.0.tgz",
"integrity": "sha512-uc2Vix1frTfnuzxxu1Hp4ktSvM3QaI4oXl4ZUqL1wjTu/BGki9TrCWoqLTg/drR1KwAEarXuRFCG2Svr1GxPFw==",
"dev": true
},
"class-utils": {
"version": "0.3.6",
"resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
"integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
"dev": true,
"requires": {
"arr-union": "^3.1.0",
"define-property": "^0.2.5",
"isobject": "^3.0.0",
"static-extend": "^0.1.1"
},
"dependencies": {
"define-property": {
"version": "0.2.5",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
"integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
"dev": true,
"requires": {
"is-descriptor": "^0.1.0"
}
}
}
},
"cli-cursor": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz",
"integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==",
"dev": true,
"requires": {
"restore-cursor": "^3.1.0"
}
},
"cli-width": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz",
"integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==",
"dev": true
},
"cliui": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz",
"integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==",
"dev": true,
"requires": {
"string-width": "^4.2.0",
"strip-ansi": "^6.0.0",
"wrap-ansi": "^6.2.0"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
},
"string-width": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
"integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.0"
}
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
}
}
},
"co": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
"integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=",
"dev": true
},
"code-point-at": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz",
"integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c="
},
"collect-v8-coverage": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.1.tgz",
"integrity": "sha512-iBPtljfCNcTKNAto0KEtDfZ3qzjJvqE3aTGZsbhjSBlorqpXJlaWWtPO35D+ZImoC3KWejX64o+yPGxhWSTzfg==",
"dev": true
},
"collection-visit": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
"integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
"dev": true,
"requires": {
"map-visit": "^1.0.0",
"object-visit": "^1.0.0"
}
},
"collections": {
"version": "5.1.13",
"resolved": "https://registry.npmjs.org/collections/-/collections-5.1.13.tgz",
"integrity": "sha512-SCb6Qd+d3Z02corWQ7/mqXiXeeTdHvkP6TeFSYfGYdCFp1WrjSNZ3j6y8Y3T/7osGEe0iOcU2g1d346l99m4Lg==",
"requires": {
"weak-map": "~1.0.x"
}
},
"color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dev": true,
"requires": {
"color-name": "1.1.3"
}
},
"color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
"dev": true
},
"colorette": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/colorette/-/colorette-1.3.0.tgz",
"integrity": "sha512-ecORCqbSFP7Wm8Y6lyqMJjexBQqXSF7SSeaTyGGphogUjBlFP9m9o08wy86HL2uB7fMTxtOUzLMk7ogKcxMg1w==",
"dev": true
},
"combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"dev": true,
"requires": {
"delayed-stream": "~1.0.0"
}
},
"command-line-args": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/command-line-args/-/command-line-args-5.2.0.tgz",
"integrity": "sha512-4zqtU1hYsSJzcJBOcNZIbW5Fbk9BkjCp1pZVhQKoRaWL5J7N4XphDLwo8aWwdQpTugxwu+jf9u2ZhkXiqp5Z6A==",
"dev": true,
"requires": {
"array-back": "^3.1.0",
"find-replace": "^3.0.0",
"lodash.camelcase": "^4.3.0",
"typical": "^4.0.0"
}
},
"command-line-commands": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/command-line-commands/-/command-line-commands-3.0.2.tgz",
"integrity": "sha512-ac6PdCtdR6q7S3HN+JiVLIWGHY30PRYIEl2qPo+FuEuzwAUk0UYyimrngrg7FvF/mCr4Jgoqv5ZnHZgads50rw==",
"dev": true,
"requires": {
"array-back": "^4.0.1"
},
"dependencies": {
"array-back": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/array-back/-/array-back-4.0.2.tgz",
"integrity": "sha512-NbdMezxqf94cnNfWLL7V/im0Ub+Anbb0IoZhvzie8+4HJ4nMQuzHuy49FkGYCJK2yAloZ3meiB6AVMClbrI1vg==",
"dev": true
}
}
},
"command-line-usage": {
"version": "6.1.1",
"resolved": "https://registry.npmjs.org/command-line-usage/-/command-line-usage-6.1.1.tgz",
"integrity": "sha512-F59pEuAR9o1SF/bD0dQBDluhpT4jJQNWUHEuVBqpDmCUo6gPjCi+m9fCWnWZVR/oG6cMTUms4h+3NPl74wGXvA==",
"dev": true,
"requires": {
"array-back": "^4.0.1",
"chalk": "^2.4.2",
"table-layout": "^1.0.1",
"typical": "^5.2.0"
},
"dependencies": {
"array-back": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/array-back/-/array-back-4.0.2.tgz",
"integrity": "sha512-NbdMezxqf94cnNfWLL7V/im0Ub+Anbb0IoZhvzie8+4HJ4nMQuzHuy49FkGYCJK2yAloZ3meiB6AVMClbrI1vg==",
"dev": true
},
"chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"requires": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
}
},
"has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
"dev": true
},
"supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"requires": {
"has-flag": "^3.0.0"
}
},
"typical": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/typical/-/typical-5.2.0.tgz",
"integrity": "sha512-dvdQgNDNJo+8B2uBQoqdb11eUCE1JQXhvjC/CZtgvZseVd5TYMXnq0+vuUemXbd/Se29cTaUuPX3YIc2xgbvIg==",
"dev": true
}
}
},
"comment-parser": {
"version": "0.7.6",
"resolved": "https://registry.npmjs.org/comment-parser/-/comment-parser-0.7.6.tgz",
"integrity": "sha512-GKNxVA7/iuTnAqGADlTWX4tkhzxZKXp5fLJqKTlQLHkE65XDUKutZ3BHaJC5IGcper2tT3QRD1xr4o3jNpgXXg==",
"dev": true
},
"component-emitter": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
"integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==",
"dev": true
},
"concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
},
"console-control-strings": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz",
"integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4="
},
"convert-source-map": {
"version": "1.8.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz",
"integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==",
"dev": true,
"requires": {
"safe-buffer": "~5.1.1"
}
},
"copy-descriptor": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
"integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=",
"dev": true
},
"core-util-is": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
},
"create-error-class": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/create-error-class/-/create-error-class-3.0.2.tgz",
"integrity": "sha512-gYTKKexFO3kh200H1Nit76sRwRtOY32vQd3jpAQKpLtZqyNsSQNfI4N7o3eP2wUjV35pTWKRYqFUDBvUha/Pkw==",
"requires": {
"capture-stack-trace": "^1.0.0"
}
},
"cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
"integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
"dev": true,
"requires": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
}
},
"cssom": {
"version": "0.4.4",
"resolved": "https://registry.npmjs.org/cssom/-/cssom-0.4.4.tgz",
"integrity": "sha512-p3pvU7r1MyyqbTk+WbNJIgJjG2VmTIaB10rI93LzVPrmDJKkzKYMtxxyAvQXR/NS6otuzveI7+7BBq3SjBS2mw==",
"dev": true
},
"cssstyle": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz",
"integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==",
"dev": true,
"requires": {
"cssom": "~0.3.6"
},
"dependencies": {
"cssom": {
"version": "0.3.8",
"resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz",
"integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==",
"dev": true
}
}
},
"d": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz",
"integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==",
"requires": {
"es5-ext": "^0.10.50",
"type": "^1.0.1"
},
"dependencies": {
"type": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz",
"integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg=="
}
}
},
"data-urls": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/data-urls/-/data-urls-2.0.0.tgz",
"integrity": "sha512-X5eWTSXO/BJmpdIKCRuKUgSCgAN0OwliVK3yPKbwIWU1Tdw5BRajxlzMidvh+gwko9AfQ9zIj52pzF91Q3YAvQ==",
"dev": true,
"requires": {
"abab": "^2.0.3",
"whatwg-mimetype": "^2.3.0",
"whatwg-url": "^8.0.0"
}
},
"debug": {
"version": "3.2.6",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
"integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
"requires": {
"ms": "^2.1.1"
}
},
"decamelize": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
"integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=",
"dev": true
},
"decimal.js": {
"version": "10.3.1",
"resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.3.1.tgz",
"integrity": "sha512-V0pfhfr8suzyPGOx3nmq4aHqabehUZn6Ch9kyFpV79TGDTWFmHqUqXdabR7QHqxzrYolF4+tVmJhUG4OURg5dQ==",
"dev": true
},
"decode-uri-component": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.2.tgz",
"integrity": "sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==",
"dev": true
},
"deep-extend": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
"integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA=="
},
"deep-is": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz",
"integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=",
"dev": true
},
"deepmerge": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz",
"integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==",
"dev": true
},
"deffy": {
"version": "2.2.4",
"resolved": "https://registry.npmjs.org/deffy/-/deffy-2.2.4.tgz",
"integrity": "sha512-pLc9lsbsWjr6RxmJ2OLyvm+9l4j1yK69h+TML/gUit/t3vTijpkNGh8LioaJYTGO7F25m6HZndADcUOo2PsiUg==",
"requires": {
"typpy": "^2.0.0"
}
},
"define-property": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
"integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
"dev": true,
"requires": {
"is-descriptor": "^1.0.2",
"isobject": "^3.0.1"
},
"dependencies": {
"is-accessor-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
"integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-data-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
"integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-descriptor": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
"integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
"dev": true,
"requires": {
"is-accessor-descriptor": "^1.0.0",
"is-data-descriptor": "^1.0.0",
"kind-of": "^6.0.2"
}
}
}
},
"delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=",
"dev": true
},
"delegates": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
"integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o="
},
"detect-libc": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz",
"integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups="
},
"detect-newline": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz",
"integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==",
"dev": true
},
"diff-sequences": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-26.6.2.tgz",
"integrity": "sha512-Mv/TDa3nZ9sbc5soK+OoA74BsS3mL37yixCvUAQkiuA4Wz6YtwP/K47n2rv2ovzHZvoiQeA5FTQOschKkEwB0Q==",
"dev": true
},
"doctrine": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
"integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
"dev": true,
"requires": {
"esutils": "^2.0.2"
}
},
"domelementtype": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
"integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
},
"domexception": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/domexception/-/domexception-2.0.1.tgz",
"integrity": "sha512-yxJ2mFy/sibVQlu5qHjOkf9J3K6zgmCxgJ94u2EdvDOV09H+32LtRswEcUsmUWN72pVLOEnTSRaIVVzVQgS0dg==",
"dev": true,
"requires": {
"webidl-conversions": "^5.0.0"
},
"dependencies": {
"webidl-conversions": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-5.0.0.tgz",
"integrity": "sha512-VlZwKPCkYKxQgeSbH5EyngOmRp7Ww7I9rQLERETtf5ofd9pGeswWiOtogpEO850jziPRarreGxn5QIiTqpb2wA==",
"dev": true
}
}
},
"domhandler": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.0.3.tgz",
"integrity": "sha512-D8+qeCUo6LpYvBZsmXWunDJ9zOD8mVg6EwZIdgxmnT+xGWRsReF/TwsZ5EzlIJDipxlE6qJh1dXt9oeplGN3Bg==",
"requires": {
"domelementtype": "1"
}
},
"domutils": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/domutils/-/domutils-1.1.6.tgz",
"integrity": "sha512-ZeagMzMKyk9GSFMqV3x3uHgRN36hLpSOF6LIRXmftce0UUqFsAx/azJAJ4Jc+9DYKmwROH5HLOcOu1OPARWwNg==",
"requires": {
"domelementtype": "1"
}
},
"duplexer2": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/duplexer2/-/duplexer2-0.1.4.tgz",
"integrity": "sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==",
"requires": {
"readable-stream": "^2.0.2"
}
},
"electron-to-chromium": {
"version": "1.3.827",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.827.tgz",
"integrity": "sha512-ye+4uQOY/jbjRutMcE/EmOcNwUeo1qo9aKL2tPyb09cU3lmxNeyDF4RWiemmkknW+p29h7dyDqy02higTxc9/A==",
"dev": true
},
"emittery": {
"version": "0.7.2",
"resolved": "https://registry.npmjs.org/emittery/-/emittery-0.7.2.tgz",
"integrity": "sha512-A8OG5SR/ij3SsJdWDJdkkSYUjQdCUx6APQXem0SaEePBSRg4eymGYwBkKo1Y6DU+af/Jn2dBQqDBvjnr9Vi8nQ==",
"dev": true
},
"emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"end-of-stream": {
"version": "1.4.4",
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
"integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
"dev": true,
"requires": {
"once": "^1.4.0"
}
},
"enquirer": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz",
"integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==",
"dev": true,
"requires": {
"ansi-colors": "^4.1.1"
}
},
"err": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/err/-/err-1.1.1.tgz",
"integrity": "sha512-N97Ybd2jJHVQ+Ft3Q5+C2gM3kgygkdeQmEqbN2z15UTVyyEsIwLA1VK39O1DHEJhXbwIFcJLqm6iARNhFANcQA==",
"requires": {
"typpy": "^2.2.0"
}
},
"error-ex": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
"integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
"requires": {
"is-arrayish": "^0.2.1"
}
},
"es5-ext": {
"version": "0.10.62",
"resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.62.tgz",
"integrity": "sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==",
"requires": {
"es6-iterator": "^2.0.3",
"es6-symbol": "^3.1.3",
"next-tick": "^1.1.0"
}
},
"es6-iterator": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz",
"integrity": "sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==",
"requires": {
"d": "1",
"es5-ext": "^0.10.35",
"es6-symbol": "^3.1.1"
}
},
"es6-set": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/es6-set/-/es6-set-0.1.6.tgz",
"integrity": "sha512-TE3LgGLDIBX332jq3ypv6bcOpkLO0AslAQo7p2VqX/1N46YNsvIWgvjojjSEnWEGWMhr1qUbYeTSir5J6mFHOw==",
"requires": {
"d": "^1.0.1",
"es5-ext": "^0.10.62",
"es6-iterator": "~2.0.3",
"es6-symbol": "^3.1.3",
"event-emitter": "^0.3.5",
"type": "^2.7.2"
}
},
"es6-symbol": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz",
"integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==",
"requires": {
"d": "^1.0.1",
"ext": "^1.1.2"
}
},
"escalade": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
"integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
"dev": true
},
"escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
"dev": true
},
"escodegen": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.0.0.tgz",
"integrity": "sha512-mmHKys/C8BFUGI+MAWNcSYoORYLMdPzjrknd2Vc+bUsjN5bXcr8EhrNB+UTqfL1y3I9c4fw2ihgtMPQLBRiQxw==",
"dev": true,
"requires": {
"esprima": "^4.0.1",
"estraverse": "^5.2.0",
"esutils": "^2.0.2",
"optionator": "^0.8.1",
"source-map": "~0.6.1"
},
"dependencies": {
"estraverse": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
"integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==",
"dev": true
},
"levn": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz",
"integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=",
"dev": true,
"requires": {
"prelude-ls": "~1.1.2",
"type-check": "~0.3.2"
}
},
"optionator": {
"version": "0.8.3",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz",
"integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==",
"dev": true,
"requires": {
"deep-is": "~0.1.3",
"fast-levenshtein": "~2.0.6",
"levn": "~0.3.0",
"prelude-ls": "~1.1.2",
"type-check": "~0.3.2",
"word-wrap": "~1.2.3"
}
},
"prelude-ls": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz",
"integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=",
"dev": true
},
"type-check": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz",
"integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=",
"dev": true,
"requires": {
"prelude-ls": "~1.1.2"
}
}
}
},
"eslint": {
"version": "7.32.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-7.32.0.tgz",
"integrity": "sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==",
"dev": true,
"requires": {
"@babel/code-frame": "7.12.11",
"@eslint/eslintrc": "^0.4.3",
"@humanwhocodes/config-array": "^0.5.0",
"ajv": "^6.10.0",
"chalk": "^4.0.0",
"cross-spawn": "^7.0.2",
"debug": "^4.0.1",
"doctrine": "^3.0.0",
"enquirer": "^2.3.5",
"escape-string-regexp": "^4.0.0",
"eslint-scope": "^5.1.1",
"eslint-utils": "^2.1.0",
"eslint-visitor-keys": "^2.0.0",
"espree": "^7.3.1",
"esquery": "^1.4.0",
"esutils": "^2.0.2",
"fast-deep-equal": "^3.1.3",
"file-entry-cache": "^6.0.1",
"functional-red-black-tree": "^1.0.1",
"glob-parent": "^5.1.2",
"globals": "^13.6.0",
"ignore": "^4.0.6",
"import-fresh": "^3.0.0",
"imurmurhash": "^0.1.4",
"is-glob": "^4.0.0",
"js-yaml": "^3.13.1",
"json-stable-stringify-without-jsonify": "^1.0.1",
"levn": "^0.4.1",
"lodash.merge": "^4.6.2",
"minimatch": "^3.0.4",
"natural-compare": "^1.4.0",
"optionator": "^0.9.1",
"progress": "^2.0.0",
"regexpp": "^3.1.0",
"semver": "^7.2.1",
"strip-ansi": "^6.0.0",
"strip-json-comments": "^3.1.0",
"table": "^6.0.9",
"text-table": "^0.2.0",
"v8-compile-cache": "^2.0.3"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"dev": true
},
"eslint-visitor-keys": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz",
"integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==",
"dev": true
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
},
"strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"dev": true
}
}
},
"eslint-config-prettier": {
"version": "6.15.0",
"resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-6.15.0.tgz",
"integrity": "sha512-a1+kOYLR8wMGustcgAjdydMsQ2A/2ipRPwRKUmfYaSxc9ZPcrku080Ctl6zrZzZNs/U82MjSv+qKREkoq3bJaw==",
"dev": true,
"requires": {
"get-stdin": "^6.0.0"
}
},
"eslint-plugin-jest": {
"version": "23.20.0",
"resolved": "https://registry.npmjs.org/eslint-plugin-jest/-/eslint-plugin-jest-23.20.0.tgz",
"integrity": "sha512-+6BGQt85OREevBDWCvhqj1yYA4+BFK4XnRZSGJionuEYmcglMZYLNNBBemwzbqUAckURaHdJSBcjHPyrtypZOw==",
"dev": true,
"requires": {
"@typescript-eslint/experimental-utils": "^2.5.0"
},
"dependencies": {
"@typescript-eslint/experimental-utils": {
"version": "2.34.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-2.34.0.tgz",
"integrity": "sha512-eS6FTkq+wuMJ+sgtuNTtcqavWXqsflWcfBnlYhg/nS4aZ1leewkXGbvBhaapn1q6qf4M71bsR1tez5JTRMuqwA==",
"dev": true,
"requires": {
"@types/json-schema": "^7.0.3",
"@typescript-eslint/typescript-estree": "2.34.0",
"eslint-scope": "^5.0.0",
"eslint-utils": "^2.0.0"
}
},
"@typescript-eslint/typescript-estree": {
"version": "2.34.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-2.34.0.tgz",
"integrity": "sha512-OMAr+nJWKdlVM9LOqCqh3pQQPwxHAN7Du8DR6dmwCrAmxtiXQnhHJ6tBNtf+cggqfo51SG/FCwnKhXCIM7hnVg==",
"dev": true,
"requires": {
"debug": "^4.1.1",
"eslint-visitor-keys": "^1.1.0",
"glob": "^7.1.6",
"is-glob": "^4.0.1",
"lodash": "^4.17.15",
"semver": "^7.3.2",
"tsutils": "^3.17.1"
}
},
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"eslint-plugin-jsdoc": {
"version": "30.7.13",
"resolved": "https://registry.npmjs.org/eslint-plugin-jsdoc/-/eslint-plugin-jsdoc-30.7.13.tgz",
"integrity": "sha512-YM4WIsmurrp0rHX6XiXQppqKB8Ne5ATiZLJe2+/fkp9l9ExXFr43BbAbjZaVrpCT+tuPYOZ8k1MICARHnURUNQ==",
"dev": true,
"requires": {
"comment-parser": "^0.7.6",
"debug": "^4.3.1",
"jsdoctypeparser": "^9.0.0",
"lodash": "^4.17.20",
"regextras": "^0.7.1",
"semver": "^7.3.4",
"spdx-expression-parse": "^3.0.1"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"eslint-plugin-prettier": {
"version": "3.4.1",
"resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-3.4.1.tgz",
"integrity": "sha512-htg25EUYUeIhKHXjOinK4BgCcDwtLHjqaxCDsMy5nbnUMkKFvIhMVCp+5GFUXQ4Nr8lBsPqtGAqBenbpFqAA2g==",
"dev": true,
"requires": {
"prettier-linter-helpers": "^1.0.0"
}
},
"eslint-plugin-simple-import-sort": {
"version": "5.0.3",
"resolved": "https://registry.npmjs.org/eslint-plugin-simple-import-sort/-/eslint-plugin-simple-import-sort-5.0.3.tgz",
"integrity": "sha512-1rf3AWiHeWNCQdAq0iXNnlccnH1UDnelGgrPbjBBHE8d2hXVtOudcmy0vTF4hri3iJ0MKz8jBhmH6lJ0ZWZLHQ==",
"dev": true
},
"eslint-scope": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
"integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
"dev": true,
"requires": {
"esrecurse": "^4.3.0",
"estraverse": "^4.1.1"
}
},
"eslint-utils": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz",
"integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==",
"dev": true,
"requires": {
"eslint-visitor-keys": "^1.1.0"
}
},
"eslint-visitor-keys": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
"integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==",
"dev": true
},
"espree": {
"version": "7.3.1",
"resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz",
"integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==",
"dev": true,
"requires": {
"acorn": "^7.4.0",
"acorn-jsx": "^5.3.1",
"eslint-visitor-keys": "^1.3.0"
}
},
"esprima": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
"integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
"dev": true
},
"esquery": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz",
"integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==",
"dev": true,
"requires": {
"estraverse": "^5.1.0"
},
"dependencies": {
"estraverse": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
"integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==",
"dev": true
}
}
},
"esrecurse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"dev": true,
"requires": {
"estraverse": "^5.2.0"
},
"dependencies": {
"estraverse": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
"integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==",
"dev": true
}
}
},
"estraverse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
"integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
"dev": true
},
"esutils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
"integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
"dev": true
},
"event-emitter": {
"version": "0.3.5",
"resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz",
"integrity": "sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==",
"requires": {
"d": "1",
"es5-ext": "~0.10.14"
}
},
"exec-limiter": {
"version": "3.2.13",
"resolved": "https://registry.npmjs.org/exec-limiter/-/exec-limiter-3.2.13.tgz",
"integrity": "sha512-86Ri699bwiHZVBzTzNj8gspqAhCPchg70zPVWIh3qzUOA1pUMcb272Em3LPk8AE0mS95B9yMJhtqF8vFJAn0dA==",
"requires": {
"limit-it": "^3.0.0",
"typpy": "^2.1.0"
}
},
"exec-sh": {
"version": "0.3.6",
"resolved": "https://registry.npmjs.org/exec-sh/-/exec-sh-0.3.6.tgz",
"integrity": "sha512-nQn+hI3yp+oD0huYhKwvYI32+JFeq+XkNcD1GAo3Y/MjxsfVGmrrzrnzjWiNY6f+pUCP440fThsFh5gZrRAU/w==",
"dev": true
},
"execa": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz",
"integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==",
"dev": true,
"requires": {
"cross-spawn": "^6.0.0",
"get-stream": "^4.0.0",
"is-stream": "^1.1.0",
"npm-run-path": "^2.0.0",
"p-finally": "^1.0.0",
"signal-exit": "^3.0.0",
"strip-eof": "^1.0.0"
},
"dependencies": {
"cross-spawn": {
"version": "6.0.5",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
"integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
"dev": true,
"requires": {
"nice-try": "^1.0.4",
"path-key": "^2.0.1",
"semver": "^5.5.0",
"shebang-command": "^1.2.0",
"which": "^1.2.9"
}
},
"path-key": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
"integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=",
"dev": true
},
"shebang-command": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz",
"integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=",
"dev": true,
"requires": {
"shebang-regex": "^1.0.0"
}
},
"shebang-regex": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz",
"integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=",
"dev": true
},
"which": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
"integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
"dev": true,
"requires": {
"isexe": "^2.0.0"
}
}
}
},
"exit": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
"integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=",
"dev": true
},
"expand-brackets": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
"integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
"dev": true,
"requires": {
"debug": "^2.3.3",
"define-property": "^0.2.5",
"extend-shallow": "^2.0.1",
"posix-character-classes": "^0.1.0",
"regex-not": "^1.0.0",
"snapdragon": "^0.8.1",
"to-regex": "^3.0.1"
},
"dependencies": {
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dev": true,
"requires": {
"ms": "2.0.0"
}
},
"define-property": {
"version": "0.2.5",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
"integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
"dev": true,
"requires": {
"is-descriptor": "^0.1.0"
}
},
"extend-shallow": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
"dev": true,
"requires": {
"is-extendable": "^0.1.0"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
"dev": true
}
}
},
"expect": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/expect/-/expect-26.6.2.tgz",
"integrity": "sha512-9/hlOBkQl2l/PLHJx6JjoDF6xPKcJEsUlWKb23rKE7KzeDqUZKXKNMW27KIue5JMdBV9HgmoJPcc8HtO85t9IA==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"ansi-styles": "^4.0.0",
"jest-get-type": "^26.3.0",
"jest-matcher-utils": "^26.6.2",
"jest-message-util": "^26.6.2",
"jest-regex-util": "^26.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
}
}
},
"ext": {
"version": "1.7.0",
"resolved": "https://registry.npmjs.org/ext/-/ext-1.7.0.tgz",
"integrity": "sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==",
"requires": {
"type": "^2.7.2"
}
},
"extend-shallow": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
"integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
"dev": true,
"requires": {
"assign-symbols": "^1.0.0",
"is-extendable": "^1.0.1"
},
"dependencies": {
"is-extendable": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
"integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
"dev": true,
"requires": {
"is-plain-object": "^2.0.4"
}
}
}
},
"external-editor": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
"integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
"dev": true,
"requires": {
"chardet": "^0.7.0",
"iconv-lite": "^0.4.24",
"tmp": "^0.0.33"
}
},
"extglob": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
"integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
"dev": true,
"requires": {
"array-unique": "^0.3.2",
"define-property": "^1.0.0",
"expand-brackets": "^2.1.4",
"extend-shallow": "^2.0.1",
"fragment-cache": "^0.2.1",
"regex-not": "^1.0.0",
"snapdragon": "^0.8.1",
"to-regex": "^3.0.1"
},
"dependencies": {
"define-property": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
"integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
"dev": true,
"requires": {
"is-descriptor": "^1.0.0"
}
},
"extend-shallow": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
"dev": true,
"requires": {
"is-extendable": "^0.1.0"
}
},
"is-accessor-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
"integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-data-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
"integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-descriptor": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
"integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
"dev": true,
"requires": {
"is-accessor-descriptor": "^1.0.0",
"is-data-descriptor": "^1.0.0",
"kind-of": "^6.0.2"
}
}
}
},
"fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
"dev": true
},
"fast-diff": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.2.0.tgz",
"integrity": "sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==",
"dev": true
},
"fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
"dev": true
},
"fast-levenshtein": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
"integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=",
"dev": true
},
"fb-watchman": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.1.tgz",
"integrity": "sha512-DkPJKQeY6kKwmuMretBhr7G6Vodr7bFwDYTXIkfG1gjvNpaxBTQV3PbXg6bR1c1UP4jPOX0jHUbbHANL9vRjVg==",
"dev": true,
"requires": {
"bser": "2.1.1"
}
},
"figures": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
"integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
"dev": true,
"requires": {
"escape-string-regexp": "^1.0.5"
}
},
"file-entry-cache": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
"integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
"dev": true,
"requires": {
"flat-cache": "^3.0.4"
}
},
"fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
"dev": true,
"requires": {
"to-regex-range": "^5.0.1"
}
},
"find-replace": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/find-replace/-/find-replace-3.0.0.tgz",
"integrity": "sha512-6Tb2myMioCAgv5kfvP5/PkZZ/ntTpVK39fHY7WkWBgvbeE+VHd/tZuZ4mrC+bxh4cfOZeYKVPaJIZtZXV7GNCQ==",
"dev": true,
"requires": {
"array-back": "^3.0.1"
}
},
"find-up": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
"integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
"dev": true,
"requires": {
"locate-path": "^5.0.0",
"path-exists": "^4.0.0"
}
},
"flat-cache": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
"integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
"dev": true,
"requires": {
"flatted": "^3.1.0",
"rimraf": "^3.0.2"
},
"dependencies": {
"rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"requires": {
"glob": "^7.1.3"
}
}
}
},
"flatted": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.2.tgz",
"integrity": "sha512-JaTY/wtrcSyvXJl4IMFHPKyFur1sE9AUqc0QnhOaJ0CxHtAoIV8pYDzeEfAaNEtGkOfq4gr3LBFmdXW5mOQFnA==",
"dev": true
},
"for-in": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
"integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
"dev": true
},
"form-data": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz",
"integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==",
"dev": true,
"requires": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
}
},
"fragment-cache": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
"integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
"dev": true,
"requires": {
"map-cache": "^0.2.2"
}
},
"frb": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/frb/-/frb-4.0.3.tgz",
"integrity": "sha512-ZdRVqQM72pttQpvuae2GCGfoa5Z7zJUc7jGFSBZJhgeiC8lh2OlUwP0lEmRE2xlaFDrAVzTcyxFmuev/M5KzLg==",
"requires": {
"collections": "^5.1.3"
}
},
"fs-minipass": {
"version": "1.2.7",
"resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-1.2.7.tgz",
"integrity": "sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==",
"requires": {
"minipass": "^2.6.0"
}
},
"fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
},
"fsevents": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
"dev": true,
"optional": true
},
"function-bind": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==",
"dev": true
},
"function.name": {
"version": "1.0.13",
"resolved": "https://registry.npmjs.org/function.name/-/function.name-1.0.13.tgz",
"integrity": "sha512-mVrqdoy5npWZyoXl4DxCeuVF6delDcQjVS9aPdvLYlBxtMTZDR2B5GVEQEoM1jJyspCqg3C0v4ABkLE7tp9xFA==",
"requires": {
"noop6": "^1.0.1"
}
},
"functional-red-black-tree": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz",
"integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=",
"dev": true
},
"gauge": {
"version": "2.7.4",
"resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz",
"integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=",
"requires": {
"aproba": "^1.0.3",
"console-control-strings": "^1.0.0",
"has-unicode": "^2.0.0",
"object-assign": "^4.1.0",
"signal-exit": "^3.0.0",
"string-width": "^1.0.1",
"strip-ansi": "^3.0.1",
"wide-align": "^1.1.0"
}
},
"gensync": {
"version": "1.0.0-beta.2",
"resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
"integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
"dev": true
},
"get-caller-file": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
"integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
"dev": true
},
"get-package-type": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
"integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==",
"dev": true
},
"get-stdin": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-6.0.0.tgz",
"integrity": "sha512-jp4tHawyV7+fkkSKyvjuLZswblUtz+SQKzSWnBbii16BuZksJlU1wuBYXY75r+duh/llF1ur6oNwi+2ZzjKZ7g==",
"dev": true
},
"get-stream": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
"integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
"dev": true,
"requires": {
"pump": "^3.0.0"
}
},
"get-value": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
"integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=",
"dev": true
},
"git-config": {
"version": "0.0.7",
"resolved": "https://registry.npmjs.org/git-config/-/git-config-0.0.7.tgz",
"integrity": "sha1-qcij7wendsPXImE1bYtye2IgKyg=",
"dev": true,
"requires": {
"iniparser": "~1.0.5"
}
},
"git-package-json": {
"version": "1.4.10",
"resolved": "https://registry.npmjs.org/git-package-json/-/git-package-json-1.4.10.tgz",
"integrity": "sha512-DRAcvbzd2SxGK7w8OgYfvKqhFliT5keX0lmSmVdgScgf1kkl5tbbo7Pam6uYoCa1liOiipKxQZG8quCtGWl/fA==",
"requires": {
"deffy": "^2.2.1",
"err": "^1.1.1",
"gry": "^5.0.0",
"normalize-package-data": "^2.3.5",
"oargv": "^3.4.1",
"one-by-one": "^3.1.0",
"r-json": "^1.2.1",
"r-package-json": "^1.0.0",
"tmp": "0.0.28"
},
"dependencies": {
"tmp": {
"version": "0.0.28",
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.28.tgz",
"integrity": "sha512-c2mmfiBmND6SOVxzogm1oda0OJ1HZVIk/5n26N59dDTh80MUeavpiCls4PGAdkX1PFkKokLpcf7prSjCeXLsJg==",
"requires": {
"os-tmpdir": "~1.0.1"
}
}
}
},
"git-source": {
"version": "1.1.10",
"resolved": "https://registry.npmjs.org/git-source/-/git-source-1.1.10.tgz",
"integrity": "sha512-XZZ7ZgnLL35oLgM/xjnLYgtlKlxJG0FohC1kWDvGkU7s1VKGXK0pFF/g1itQEwQ3D+uTQzBnzPi8XbqOv7Wc1Q==",
"requires": {
"git-url-parse": "^5.0.1"
}
},
"git-up": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/git-up/-/git-up-1.2.1.tgz",
"integrity": "sha512-SRVN3rOLACva8imc7BFrB6ts5iISWKH1/h/1Z+JZYoUI7UVQM7gQqk4M2yxUENbq2jUUT09NEND5xwP1i7Ktlw==",
"requires": {
"is-ssh": "^1.0.0",
"parse-url": "^1.0.0"
}
},
"git-url-parse": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/git-url-parse/-/git-url-parse-5.0.1.tgz",
"integrity": "sha512-4uSiOgrryNEMBX+gTWogenYRUh2j1D+95STTSEF2RCTgLkfJikl8c7BGr0Bn274hwuxTsbS2/FQ5pVS9FoXegQ==",
"requires": {
"git-up": "^1.0.0"
}
},
"glob": {
"version": "7.1.6",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
"integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.0.4",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
},
"glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"requires": {
"is-glob": "^4.0.1"
}
},
"globals": {
"version": "13.11.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.11.0.tgz",
"integrity": "sha512-08/xrJ7wQjK9kkkRoI3OFUBbLx4f+6x3SGwcPvQ0QH6goFDrOU2oyAWrmh3dJezu65buo+HBMzAMQy6rovVC3g==",
"dev": true,
"requires": {
"type-fest": "^0.20.2"
}
},
"got": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/got/-/got-5.7.1.tgz",
"integrity": "sha512-1qd54GLxvVgzuidFmw9ze9umxS3rzhdBH6Wt6BTYrTQUXTN01vGGYXwzLzYLowNx8HBH3/c7kRyvx90fh13i7Q==",
"requires": {
"create-error-class": "^3.0.1",
"duplexer2": "^0.1.4",
"is-redirect": "^1.0.0",
"is-retry-allowed": "^1.0.0",
"is-stream": "^1.0.0",
"lowercase-keys": "^1.0.0",
"node-status-codes": "^1.0.0",
"object-assign": "^4.0.1",
"parse-json": "^2.1.0",
"pinkie-promise": "^2.0.0",
"read-all-stream": "^3.0.0",
"readable-stream": "^2.0.5",
"timed-out": "^3.0.0",
"unzip-response": "^1.0.2",
"url-parse-lax": "^1.0.0"
},
"dependencies": {
"parse-json": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz",
"integrity": "sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==",
"requires": {
"error-ex": "^1.2.0"
}
}
}
},
"graceful-fs": {
"version": "4.2.8",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.8.tgz",
"integrity": "sha512-qkIilPUYcNhJpd33n0GBXTB1MMPp14TxEsEs0pTrsSVucApsYzW5V+Q8Qxhik6KU3evy+qkAAowTByymK0avdg==",
"dev": true
},
"growly": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/growly/-/growly-1.3.0.tgz",
"integrity": "sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=",
"dev": true,
"optional": true
},
"gry": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/gry/-/gry-5.0.8.tgz",
"integrity": "sha512-meq9ZjYVpLzZh3ojhTg7IMad9grGsx6rUUKHLqPnhLXzJkRQvEL2U3tQpS5/WentYTtHtxkT3Ew/mb10D6F6/g==",
"requires": {
"abs": "^1.2.1",
"exec-limiter": "^3.0.0",
"one-by-one": "^3.0.0",
"ul": "^5.0.0"
}
},
"handlebars": {
"version": "4.7.7",
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz",
"integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==",
"dev": true,
"requires": {
"minimist": "^1.2.5",
"neo-async": "^2.6.0",
"source-map": "^0.6.1",
"uglify-js": "^3.1.4",
"wordwrap": "^1.0.0"
}
},
"has": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
"dev": true,
"requires": {
"function-bind": "^1.1.1"
}
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true
},
"has-unicode": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz",
"integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk="
},
"has-value": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
"integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
"dev": true,
"requires": {
"get-value": "^2.0.6",
"has-values": "^1.0.0",
"isobject": "^3.0.0"
}
},
"has-values": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
"integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
"dev": true,
"requires": {
"is-number": "^3.0.0",
"kind-of": "^4.0.0"
},
"dependencies": {
"is-number": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
"integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
"dev": true,
"requires": {
"kind-of": "^3.0.2"
},
"dependencies": {
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"kind-of": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
"integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"hosted-git-info": {
"version": "2.8.9",
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
"integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw=="
},
"html-encoding-sniffer": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-2.0.1.tgz",
"integrity": "sha512-D5JbOMBIR/TVZkubHT+OyT2705QvogUW4IBn6nHd756OwieSF9aDYFj4dv6HHEVGYbHaLETa3WggZYWWMyy3ZQ==",
"dev": true,
"requires": {
"whatwg-encoding": "^1.0.5"
}
},
"html-escaper": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
"integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
"dev": true
},
"htmlparser2": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.0.5.tgz",
"integrity": "sha512-O6tfy29gD7hjemI+HI0iJJNHH/YCkM44UO+9A7nJfmWhDi6g2rHUC05xnudONt214aqsBLHmdVnGXc2rPB8tsw==",
"requires": {
"domelementtype": "1",
"domhandler": "2.0",
"domutils": "1.1",
"readable-stream": "1.0"
},
"dependencies": {
"isarray": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
"integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ=="
},
"readable-stream": {
"version": "1.0.34",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz",
"integrity": "sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg==",
"requires": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.1",
"isarray": "0.0.1",
"string_decoder": "~0.10.x"
}
},
"string_decoder": {
"version": "0.10.31",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz",
"integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ=="
}
}
},
"http-proxy-agent": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz",
"integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==",
"dev": true,
"requires": {
"@tootallnate/once": "1",
"agent-base": "6",
"debug": "4"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
}
}
},
"https-proxy-agent": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz",
"integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==",
"dev": true,
"requires": {
"agent-base": "6",
"debug": "4"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
}
}
},
"human-signals": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz",
"integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==",
"dev": true
},
"iconv-lite": {
"version": "0.4.24",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
"requires": {
"safer-buffer": ">= 2.1.2 < 3"
}
},
"ignore": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
"integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==",
"dev": true
},
"ignore-walk": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/ignore-walk/-/ignore-walk-3.0.3.tgz",
"integrity": "sha512-m7o6xuOaT1aqheYHKf8W6J5pYH85ZI9w077erOzLje3JsB1gkafkAhHHY19dqjulgIZHFm32Cp5uNZgcQqdJKw==",
"requires": {
"minimatch": "^3.0.4"
}
},
"import-fresh": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
"integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
"dev": true,
"requires": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
}
},
"import-local": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/import-local/-/import-local-3.0.2.tgz",
"integrity": "sha512-vjL3+w0oulAVZ0hBHnxa/Nm5TAurf9YLQJDhqRZyqb+VKGOB6LU8t9H1Nr5CIo16vh9XfJTOoHwU0B71S557gA==",
"dev": true,
"requires": {
"pkg-dir": "^4.2.0",
"resolve-cwd": "^3.0.0"
}
},
"imurmurhash": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
"integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=",
"dev": true
},
"inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
"requires": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"ini": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
"integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="
},
"iniparser": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/iniparser/-/iniparser-1.0.5.tgz",
"integrity": "sha1-g21r7+bfv87gvM8c+fKsxwJ/eD0=",
"dev": true
},
"inquirer": {
"version": "7.3.3",
"resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.3.3.tgz",
"integrity": "sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA==",
"dev": true,
"requires": {
"ansi-escapes": "^4.2.1",
"chalk": "^4.1.0",
"cli-cursor": "^3.1.0",
"cli-width": "^3.0.0",
"external-editor": "^3.0.3",
"figures": "^3.0.0",
"lodash": "^4.17.19",
"mute-stream": "0.0.8",
"run-async": "^2.4.0",
"rxjs": "^6.6.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0",
"through": "^2.3.6"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
},
"string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
}
},
"strip-ansi": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.1"
}
}
}
},
"interpret": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz",
"integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==",
"dev": true
},
"is-accessor-descriptor": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
"integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
"dev": true,
"requires": {
"kind-of": "^3.0.2"
},
"dependencies": {
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"is-arrayish": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
"integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0="
},
"is-buffer": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
"integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==",
"dev": true
},
"is-ci": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
"integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
"dev": true,
"requires": {
"ci-info": "^2.0.0"
}
},
"is-core-module": {
"version": "2.6.0",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.6.0.tgz",
"integrity": "sha512-wShG8vs60jKfPWpF2KZRaAtvt3a20OAn7+IJ6hLPECpSABLcKtFKTTI4ZtH5QcBruBHlq+WsdHWyz0BCZW7svQ==",
"dev": true,
"requires": {
"has": "^1.0.3"
}
},
"is-data-descriptor": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
"integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
"dev": true,
"requires": {
"kind-of": "^3.0.2"
},
"dependencies": {
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"is-descriptor": {
"version": "0.1.6",
"resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
"integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
"dev": true,
"requires": {
"is-accessor-descriptor": "^0.1.6",
"is-data-descriptor": "^0.1.4",
"kind-of": "^5.0.0"
},
"dependencies": {
"kind-of": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
"integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
"dev": true
}
}
},
"is-docker": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
"integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
"dev": true,
"optional": true
},
"is-extendable": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
"integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=",
"dev": true
},
"is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
"dev": true
},
"is-fullwidth-code-point": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz",
"integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=",
"requires": {
"number-is-nan": "^1.0.0"
}
},
"is-generator-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz",
"integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==",
"dev": true
},
"is-glob": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
"integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
"dev": true,
"requires": {
"is-extglob": "^2.1.1"
}
},
"is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true
},
"is-plain-object": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
"integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
"dev": true,
"requires": {
"isobject": "^3.0.1"
}
},
"is-potential-custom-element-name": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz",
"integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==",
"dev": true
},
"is-redirect": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-redirect/-/is-redirect-1.0.0.tgz",
"integrity": "sha512-cr/SlUEe5zOGmzvj9bUyC4LVvkNVAXu4GytXLNMr1pny+a65MpQ9IJzFHD5vi7FyJgb4qt27+eS3TuQnqB+RQw=="
},
"is-retry-allowed": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz",
"integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg=="
},
"is-ssh": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/is-ssh/-/is-ssh-1.4.0.tgz",
"integrity": "sha512-x7+VxdxOdlV3CYpjvRLBv5Lo9OJerlYanjwFrPR9fuGPjCiNiCzFgAWpiLAohSbsnH4ZAys3SBh+hq5rJosxUQ==",
"requires": {
"protocols": "^2.0.1"
}
},
"is-stream": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
"integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ="
},
"is-typedarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
"integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=",
"dev": true
},
"is-windows": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
"integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
"dev": true
},
"is-wsl": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
"integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
"dev": true,
"optional": true,
"requires": {
"is-docker": "^2.0.0"
}
},
"isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
},
"isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
"dev": true
},
"isobject": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
"integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=",
"dev": true
},
"istanbul-lib-coverage": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.0.0.tgz",
"integrity": "sha512-UiUIqxMgRDET6eR+o5HbfRYP1l0hqkWOs7vNxC/mggutCMUIhWMm8gAHb8tHlyfD3/l6rlgNA5cKdDzEAf6hEg==",
"dev": true
},
"istanbul-lib-instrument": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-4.0.3.tgz",
"integrity": "sha512-BXgQl9kf4WTCPCCpmFGoJkz/+uhvm7h7PFKUYxh7qarQd3ER33vHG//qaE8eN25l07YqZPpHXU9I09l/RD5aGQ==",
"dev": true,
"requires": {
"@babel/core": "^7.7.5",
"@istanbuljs/schema": "^0.1.2",
"istanbul-lib-coverage": "^3.0.0",
"semver": "^6.3.0"
},
"dependencies": {
"semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"dev": true
}
}
},
"istanbul-lib-report": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz",
"integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==",
"dev": true,
"requires": {
"istanbul-lib-coverage": "^3.0.0",
"make-dir": "^3.0.0",
"supports-color": "^7.1.0"
}
},
"istanbul-lib-source-maps": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.0.tgz",
"integrity": "sha512-c16LpFRkR8vQXyHZ5nLpY35JZtzj1PQY1iZmesUbf1FZHbIupcWfjgOXBY9YHkLEQ6puz1u4Dgj6qmU/DisrZg==",
"dev": true,
"requires": {
"debug": "^4.1.1",
"istanbul-lib-coverage": "^3.0.0",
"source-map": "^0.6.1"
},
"dependencies": {
"debug": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz",
"integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
}
}
},
"istanbul-reports": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.0.2.tgz",
"integrity": "sha512-9tZvz7AiR3PEDNGiV9vIouQ/EAcqMXFmkcA1CDFTwOB98OZVDL0PH9glHotf5Ugp6GCOTypfzGWI/OqjWNCRUw==",
"dev": true,
"requires": {
"html-escaper": "^2.0.0",
"istanbul-lib-report": "^3.0.0"
}
},
"iterate-object": {
"version": "1.3.4",
"resolved": "https://registry.npmjs.org/iterate-object/-/iterate-object-1.3.4.tgz",
"integrity": "sha512-4dG1D1x/7g8PwHS9aK6QV5V94+ZvyP4+d19qDv43EzImmrndysIl4prmJ1hWWIGCqrZHyaHBm6BSEWHOLnpoNw=="
},
"jest": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/jest/-/jest-26.6.3.tgz",
"integrity": "sha512-lGS5PXGAzR4RF7V5+XObhqz2KZIDUA1yD0DG6pBVmy10eh0ZIXQImRuzocsI/N2XZ1GrLFwTS27In2i2jlpq1Q==",
"dev": true,
"requires": {
"@jest/core": "^26.6.3",
"import-local": "^3.0.2",
"jest-cli": "^26.6.3"
},
"dependencies": {
"jest-cli": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-26.6.3.tgz",
"integrity": "sha512-GF9noBSa9t08pSyl3CY4frMrqp+aQXFGFkf5hEPbh/pIUFYWMK6ZLTfbmadxJVcJrdRoChlWQsA2VkJcDFK8hg==",
"dev": true,
"requires": {
"@jest/core": "^26.6.3",
"@jest/test-result": "^26.6.2",
"@jest/types": "^26.6.2",
"chalk": "^4.0.0",
"exit": "^0.1.2",
"graceful-fs": "^4.2.4",
"import-local": "^3.0.2",
"is-ci": "^2.0.0",
"jest-config": "^26.6.3",
"jest-util": "^26.6.2",
"jest-validate": "^26.6.2",
"prompts": "^2.0.1",
"yargs": "^15.4.1"
}
}
}
},
"jest-changed-files": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-26.6.2.tgz",
"integrity": "sha512-fDS7szLcY9sCtIip8Fjry9oGf3I2ht/QT21bAHm5Dmf0mD4X3ReNUf17y+bO6fR8WgbIZTlbyG1ak/53cbRzKQ==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"execa": "^4.0.0",
"throat": "^5.0.0"
},
"dependencies": {
"execa": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz",
"integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==",
"dev": true,
"requires": {
"cross-spawn": "^7.0.0",
"get-stream": "^5.0.0",
"human-signals": "^1.1.1",
"is-stream": "^2.0.0",
"merge-stream": "^2.0.0",
"npm-run-path": "^4.0.0",
"onetime": "^5.1.0",
"signal-exit": "^3.0.2",
"strip-final-newline": "^2.0.0"
}
},
"get-stream": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
"integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==",
"dev": true,
"requires": {
"pump": "^3.0.0"
}
},
"is-stream": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
"integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
"dev": true
},
"npm-run-path": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
"integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
"dev": true,
"requires": {
"path-key": "^3.0.0"
}
}
}
},
"jest-config": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/jest-config/-/jest-config-26.6.3.tgz",
"integrity": "sha512-t5qdIj/bCj2j7NFVHb2nFB4aUdfucDn3JRKgrZnplb8nieAirAzRSHP8uDEd+qV6ygzg9Pz4YG7UTJf94LPSyg==",
"dev": true,
"requires": {
"@babel/core": "^7.1.0",
"@jest/test-sequencer": "^26.6.3",
"@jest/types": "^26.6.2",
"babel-jest": "^26.6.3",
"chalk": "^4.0.0",
"deepmerge": "^4.2.2",
"glob": "^7.1.1",
"graceful-fs": "^4.2.4",
"jest-environment-jsdom": "^26.6.2",
"jest-environment-node": "^26.6.2",
"jest-get-type": "^26.3.0",
"jest-jasmine2": "^26.6.3",
"jest-regex-util": "^26.0.0",
"jest-resolve": "^26.6.2",
"jest-util": "^26.6.2",
"jest-validate": "^26.6.2",
"micromatch": "^4.0.2",
"pretty-format": "^26.6.2"
}
},
"jest-diff": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-26.6.2.tgz",
"integrity": "sha512-6m+9Z3Gv9wN0WFVasqjCL/06+EFCMTqDEUl/b87HYK2rAPTyfz4ZIuSlPhY51PIQRWx5TaxeF1qmXKe9gfN3sA==",
"dev": true,
"requires": {
"chalk": "^4.0.0",
"diff-sequences": "^26.6.2",
"jest-get-type": "^26.3.0",
"pretty-format": "^26.6.2"
}
},
"jest-docblock": {
"version": "26.0.0",
"resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-26.0.0.tgz",
"integrity": "sha512-RDZ4Iz3QbtRWycd8bUEPxQsTlYazfYn/h5R65Fc6gOfwozFhoImx+affzky/FFBuqISPTqjXomoIGJVKBWoo0w==",
"dev": true,
"requires": {
"detect-newline": "^3.0.0"
}
},
"jest-each": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-each/-/jest-each-26.6.2.tgz",
"integrity": "sha512-Mer/f0KaATbjl8MCJ+0GEpNdqmnVmDYqCTJYTvoo7rqmRiDllmp2AYN+06F93nXcY3ur9ShIjS+CO/uD+BbH4A==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"chalk": "^4.0.0",
"jest-get-type": "^26.3.0",
"jest-util": "^26.6.2",
"pretty-format": "^26.6.2"
}
},
"jest-environment-jsdom": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-26.6.2.tgz",
"integrity": "sha512-jgPqCruTlt3Kwqg5/WVFyHIOJHsiAvhcp2qiR2QQstuG9yWox5+iHpU3ZrcBxW14T4fe5Z68jAfLRh7joCSP2Q==",
"dev": true,
"requires": {
"@jest/environment": "^26.6.2",
"@jest/fake-timers": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/node": "*",
"jest-mock": "^26.6.2",
"jest-util": "^26.6.2",
"jsdom": "^16.4.0"
}
},
"jest-environment-node": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-26.6.2.tgz",
"integrity": "sha512-zhtMio3Exty18dy8ee8eJ9kjnRyZC1N4C1Nt/VShN1apyXc8rWGtJ9lI7vqiWcyyXS4BVSEn9lxAM2D+07/Tag==",
"dev": true,
"requires": {
"@jest/environment": "^26.6.2",
"@jest/fake-timers": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/node": "*",
"jest-mock": "^26.6.2",
"jest-util": "^26.6.2"
}
},
"jest-get-type": {
"version": "26.3.0",
"resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-26.3.0.tgz",
"integrity": "sha512-TpfaviN1R2pQWkIihlfEanwOXK0zcxrKEE4MlU6Tn7keoXdN6/3gK/xl0yEh8DOunn5pOVGKf8hB4R9gVh04ig==",
"dev": true
},
"jest-haste-map": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-26.6.2.tgz",
"integrity": "sha512-easWIJXIw71B2RdR8kgqpjQrbMRWQBgiBwXYEhtGUTaX+doCjBheluShdDMeR8IMfJiTqH4+zfhtg29apJf/8w==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"@types/graceful-fs": "^4.1.2",
"@types/node": "*",
"anymatch": "^3.0.3",
"fb-watchman": "^2.0.0",
"fsevents": "^2.1.2",
"graceful-fs": "^4.2.4",
"jest-regex-util": "^26.0.0",
"jest-serializer": "^26.6.2",
"jest-util": "^26.6.2",
"jest-worker": "^26.6.2",
"micromatch": "^4.0.2",
"sane": "^4.0.3",
"walker": "^1.0.7"
}
},
"jest-jasmine2": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/jest-jasmine2/-/jest-jasmine2-26.6.3.tgz",
"integrity": "sha512-kPKUrQtc8aYwBV7CqBg5pu+tmYXlvFlSFYn18ev4gPFtrRzB15N2gW/Roew3187q2w2eHuu0MU9TJz6w0/nPEg==",
"dev": true,
"requires": {
"@babel/traverse": "^7.1.0",
"@jest/environment": "^26.6.2",
"@jest/source-map": "^26.6.2",
"@jest/test-result": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/node": "*",
"chalk": "^4.0.0",
"co": "^4.6.0",
"expect": "^26.6.2",
"is-generator-fn": "^2.0.0",
"jest-each": "^26.6.2",
"jest-matcher-utils": "^26.6.2",
"jest-message-util": "^26.6.2",
"jest-runtime": "^26.6.3",
"jest-snapshot": "^26.6.2",
"jest-util": "^26.6.2",
"pretty-format": "^26.6.2",
"throat": "^5.0.0"
}
},
"jest-leak-detector": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-26.6.2.tgz",
"integrity": "sha512-i4xlXpsVSMeKvg2cEKdfhh0H39qlJlP5Ex1yQxwF9ubahboQYMgTtz5oML35AVA3B4Eu+YsmwaiKVev9KCvLxg==",
"dev": true,
"requires": {
"jest-get-type": "^26.3.0",
"pretty-format": "^26.6.2"
}
},
"jest-matcher-utils": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-26.6.2.tgz",
"integrity": "sha512-llnc8vQgYcNqDrqRDXWwMr9i7rS5XFiCwvh6DTP7Jqa2mqpcCBBlpCbn+trkG0KNhPu/h8rzyBkriOtBstvWhw==",
"dev": true,
"requires": {
"chalk": "^4.0.0",
"jest-diff": "^26.6.2",
"jest-get-type": "^26.3.0",
"pretty-format": "^26.6.2"
}
},
"jest-message-util": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-26.6.2.tgz",
"integrity": "sha512-rGiLePzQ3AzwUshu2+Rn+UMFk0pHN58sOG+IaJbk5Jxuqo3NYO1U2/MIR4S1sKgsoYSXSzdtSa0TgrmtUwEbmA==",
"dev": true,
"requires": {
"@babel/code-frame": "^7.0.0",
"@jest/types": "^26.6.2",
"@types/stack-utils": "^2.0.0",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.4",
"micromatch": "^4.0.2",
"pretty-format": "^26.6.2",
"slash": "^3.0.0",
"stack-utils": "^2.0.2"
}
},
"jest-mock": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-26.6.2.tgz",
"integrity": "sha512-YyFjePHHp1LzpzYcmgqkJ0nm0gg/lJx2aZFzFy1S6eUqNjXsOqTK10zNRff2dNfssgokjkG65OlWNcIlgd3zew==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"@types/node": "*"
}
},
"jest-pnp-resolver": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.2.tgz",
"integrity": "sha512-olV41bKSMm8BdnuMsewT4jqlZ8+3TCARAXjZGT9jcoSnrfUnRCqnMoF9XEeoWjbzObpqF9dRhHQj0Xb9QdF6/w==",
"dev": true
},
"jest-regex-util": {
"version": "26.0.0",
"resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-26.0.0.tgz",
"integrity": "sha512-Gv3ZIs/nA48/Zvjrl34bf+oD76JHiGDUxNOVgUjh3j890sblXryjY4rss71fPtD/njchl6PSE2hIhvyWa1eT0A==",
"dev": true
},
"jest-resolve": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-26.6.2.tgz",
"integrity": "sha512-sOxsZOq25mT1wRsfHcbtkInS+Ek7Q8jCHUB0ZUTP0tc/c41QHriU/NunqMfCUWsL4H3MHpvQD4QR9kSYhS7UvQ==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.4",
"jest-pnp-resolver": "^1.2.2",
"jest-util": "^26.6.2",
"read-pkg-up": "^7.0.1",
"resolve": "^1.18.1",
"slash": "^3.0.0"
},
"dependencies": {
"resolve": {
"version": "1.20.0",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz",
"integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==",
"dev": true,
"requires": {
"is-core-module": "^2.2.0",
"path-parse": "^1.0.6"
}
}
}
},
"jest-resolve-dependencies": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-26.6.3.tgz",
"integrity": "sha512-pVwUjJkxbhe4RY8QEWzN3vns2kqyuldKpxlxJlzEYfKSvY6/bMvxoFrYYzUO1Gx28yKWN37qyV7rIoIp2h8fTg==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"jest-regex-util": "^26.0.0",
"jest-snapshot": "^26.6.2"
}
},
"jest-runner": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-26.6.3.tgz",
"integrity": "sha512-atgKpRHnaA2OvByG/HpGA4g6CSPS/1LK0jK3gATJAoptC1ojltpmVlYC3TYgdmGp+GLuhzpH30Gvs36szSL2JQ==",
"dev": true,
"requires": {
"@jest/console": "^26.6.2",
"@jest/environment": "^26.6.2",
"@jest/test-result": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/node": "*",
"chalk": "^4.0.0",
"emittery": "^0.7.1",
"exit": "^0.1.2",
"graceful-fs": "^4.2.4",
"jest-config": "^26.6.3",
"jest-docblock": "^26.0.0",
"jest-haste-map": "^26.6.2",
"jest-leak-detector": "^26.6.2",
"jest-message-util": "^26.6.2",
"jest-resolve": "^26.6.2",
"jest-runtime": "^26.6.3",
"jest-util": "^26.6.2",
"jest-worker": "^26.6.2",
"source-map-support": "^0.5.6",
"throat": "^5.0.0"
}
},
"jest-runtime": {
"version": "26.6.3",
"resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-26.6.3.tgz",
"integrity": "sha512-lrzyR3N8sacTAMeonbqpnSka1dHNux2uk0qqDXVkMv2c/A3wYnvQ4EXuI013Y6+gSKSCxdaczvf4HF0mVXHRdw==",
"dev": true,
"requires": {
"@jest/console": "^26.6.2",
"@jest/environment": "^26.6.2",
"@jest/fake-timers": "^26.6.2",
"@jest/globals": "^26.6.2",
"@jest/source-map": "^26.6.2",
"@jest/test-result": "^26.6.2",
"@jest/transform": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/yargs": "^15.0.0",
"chalk": "^4.0.0",
"cjs-module-lexer": "^0.6.0",
"collect-v8-coverage": "^1.0.0",
"exit": "^0.1.2",
"glob": "^7.1.3",
"graceful-fs": "^4.2.4",
"jest-config": "^26.6.3",
"jest-haste-map": "^26.6.2",
"jest-message-util": "^26.6.2",
"jest-mock": "^26.6.2",
"jest-regex-util": "^26.0.0",
"jest-resolve": "^26.6.2",
"jest-snapshot": "^26.6.2",
"jest-util": "^26.6.2",
"jest-validate": "^26.6.2",
"slash": "^3.0.0",
"strip-bom": "^4.0.0",
"yargs": "^15.4.1"
}
},
"jest-serializer": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-serializer/-/jest-serializer-26.6.2.tgz",
"integrity": "sha512-S5wqyz0DXnNJPd/xfIzZ5Xnp1HrJWBczg8mMfMpN78OJ5eDxXyf+Ygld9wX1DnUWbIbhM1YDY95NjR4CBXkb2g==",
"dev": true,
"requires": {
"@types/node": "*",
"graceful-fs": "^4.2.4"
}
},
"jest-snapshot": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-26.6.2.tgz",
"integrity": "sha512-OLhxz05EzUtsAmOMzuupt1lHYXCNib0ECyuZ/PZOx9TrZcC8vL0x+DUG3TL+GLX3yHG45e6YGjIm0XwDc3q3og==",
"dev": true,
"requires": {
"@babel/types": "^7.0.0",
"@jest/types": "^26.6.2",
"@types/babel__traverse": "^7.0.4",
"@types/prettier": "^2.0.0",
"chalk": "^4.0.0",
"expect": "^26.6.2",
"graceful-fs": "^4.2.4",
"jest-diff": "^26.6.2",
"jest-get-type": "^26.3.0",
"jest-haste-map": "^26.6.2",
"jest-matcher-utils": "^26.6.2",
"jest-message-util": "^26.6.2",
"jest-resolve": "^26.6.2",
"natural-compare": "^1.4.0",
"pretty-format": "^26.6.2",
"semver": "^7.3.2"
},
"dependencies": {
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"jest-util": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-util/-/jest-util-26.6.2.tgz",
"integrity": "sha512-MDW0fKfsn0OI7MS7Euz6h8HNDXVQ0gaM9uW6RjfDmd1DAFcaxX9OqIakHIqhbnmF08Cf2DLDG+ulq8YQQ0Lp0Q==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"@types/node": "*",
"chalk": "^4.0.0",
"graceful-fs": "^4.2.4",
"is-ci": "^2.0.0",
"micromatch": "^4.0.2"
}
},
"jest-validate": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-26.6.2.tgz",
"integrity": "sha512-NEYZ9Aeyj0i5rQqbq+tpIOom0YS1u2MVu6+euBsvpgIme+FOfRmoC4R5p0JiAUpaFvFy24xgrpMknarR/93XjQ==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"camelcase": "^6.0.0",
"chalk": "^4.0.0",
"jest-get-type": "^26.3.0",
"leven": "^3.1.0",
"pretty-format": "^26.6.2"
},
"dependencies": {
"camelcase": {
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz",
"integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==",
"dev": true
}
}
},
"jest-watcher": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-26.6.2.tgz",
"integrity": "sha512-WKJob0P/Em2csiVthsI68p6aGKTIcsfjH9Gsx1f0A3Italz43e3ho0geSAVsmj09RWOELP1AZ/DXyJgOgDKxXQ==",
"dev": true,
"requires": {
"@jest/test-result": "^26.6.2",
"@jest/types": "^26.6.2",
"@types/node": "*",
"ansi-escapes": "^4.2.1",
"chalk": "^4.0.0",
"jest-util": "^26.6.2",
"string-length": "^4.0.1"
}
},
"jest-worker": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-26.6.2.tgz",
"integrity": "sha512-KWYVV1c4i+jbMpaBC+U++4Va0cp8OisU185o73T1vo99hqi7w8tSJfUXYswwqqrjzwxa6KpRK54WhPvwf5w6PQ==",
"dev": true,
"requires": {
"@types/node": "*",
"merge-stream": "^2.0.0",
"supports-color": "^7.0.0"
}
},
"js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
"dev": true
},
"js-yaml": {
"version": "3.14.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
"integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
"dev": true,
"requires": {
"argparse": "^1.0.7",
"esprima": "^4.0.0"
}
},
"jsdoctypeparser": {
"version": "9.0.0",
"resolved": "https://registry.npmjs.org/jsdoctypeparser/-/jsdoctypeparser-9.0.0.tgz",
"integrity": "sha512-jrTA2jJIL6/DAEILBEh2/w9QxCuwmvNXIry39Ay/HVfhE3o2yVV0U44blYkqdHA/OKloJEqvJy0xU+GSdE2SIw==",
"dev": true
},
"jsdom": {
"version": "16.7.0",
"resolved": "https://registry.npmjs.org/jsdom/-/jsdom-16.7.0.tgz",
"integrity": "sha512-u9Smc2G1USStM+s/x1ru5Sxrl6mPYCbByG1U/hUmqaVsm4tbNyS7CicOSRyuGQYZhTu0h84qkZZQ/I+dzizSVw==",
"dev": true,
"requires": {
"abab": "^2.0.5",
"acorn": "^8.2.4",
"acorn-globals": "^6.0.0",
"cssom": "^0.4.4",
"cssstyle": "^2.3.0",
"data-urls": "^2.0.0",
"decimal.js": "^10.2.1",
"domexception": "^2.0.1",
"escodegen": "^2.0.0",
"form-data": "^3.0.0",
"html-encoding-sniffer": "^2.0.1",
"http-proxy-agent": "^4.0.1",
"https-proxy-agent": "^5.0.0",
"is-potential-custom-element-name": "^1.0.1",
"nwsapi": "^2.2.0",
"parse5": "6.0.1",
"saxes": "^5.0.1",
"symbol-tree": "^3.2.4",
"tough-cookie": "^4.0.0",
"w3c-hr-time": "^1.0.2",
"w3c-xmlserializer": "^2.0.0",
"webidl-conversions": "^6.1.0",
"whatwg-encoding": "^1.0.5",
"whatwg-mimetype": "^2.3.0",
"whatwg-url": "^8.5.0",
"ws": "^7.4.6",
"xml-name-validator": "^3.0.0"
},
"dependencies": {
"acorn": {
"version": "8.4.1",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.4.1.tgz",
"integrity": "sha512-asabaBSkEKosYKMITunzX177CXxQ4Q8BSSzMTKD+FefUhipQC70gfW5SiUDhYQ3vk8G+81HqQk7Fv9OXwwn9KA==",
"dev": true
}
}
},
"jsesc": {
"version": "2.5.2",
"resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
"integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
"dev": true
},
"json-parse-even-better-errors": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
"integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
"dev": true
},
"json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
"dev": true
},
"json-stable-stringify-without-jsonify": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
"integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=",
"dev": true
},
"json5": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
"integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
"dev": true
},
"kind-of": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
"integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
"dev": true
},
"kleur": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
"integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==",
"dev": true
},
"leven": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
"integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
"dev": true
},
"levn": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
"integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
"dev": true,
"requires": {
"prelude-ls": "^1.2.1",
"type-check": "~0.4.0"
}
},
"limit-it": {
"version": "3.2.10",
"resolved": "https://registry.npmjs.org/limit-it/-/limit-it-3.2.10.tgz",
"integrity": "sha512-T0NK99pHnkimldr1WUqvbGV1oWDku/xC9J/OqzJFsV1jeOS6Bwl8W7vkeQIBqwiON9dTALws+rX/XPMQqWerDQ==",
"requires": {
"typpy": "^2.0.0"
}
},
"lines-and-columns": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz",
"integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=",
"dev": true
},
"locate-path": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
"integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
"dev": true,
"requires": {
"p-locate": "^4.1.0"
}
},
"lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
"dev": true
},
"lodash.camelcase": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz",
"integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY="
},
"lodash.clonedeep": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz",
"integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=",
"dev": true
},
"lodash.kebabcase": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz",
"integrity": "sha512-N8XRTIMMqqDgSy4VLKPnJ/+hpGZN+PHQiJnSenYqPaVV/NCqEogTnAdZLQiGKhxX+JCs8waWq2t1XHWKOmlY8g=="
},
"lodash.merge": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
"integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
"dev": true
},
"lodash.snakecase": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz",
"integrity": "sha512-QZ1d4xoBHYUeuouhEq3lk3Uq7ldgyFXGBhg04+oRLnIz8o9T65Eh+8YdroUwn846zchkA9yDsDl5CVVaV2nqYw=="
},
"lodash.trim": {
"version": "4.5.1",
"resolved": "https://registry.npmjs.org/lodash.trim/-/lodash.trim-4.5.1.tgz",
"integrity": "sha512-nJAlRl/K+eiOehWKDzoBVrSMhK0K3A3YQsUNXHQa5yIrKBAhsZgSu3KoAFoFT+mEgiyBHddZ0pRk1ITpIp90Wg=="
},
"lodash.truncate": {
"version": "4.4.2",
"resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz",
"integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=",
"dev": true
},
"lowercase-keys": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
"integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA=="
},
"lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"requires": {
"yallist": "^4.0.0"
},
"dependencies": {
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true
}
}
},
"make-dir": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
"integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
"dev": true,
"requires": {
"semver": "^6.0.0"
},
"dependencies": {
"semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"dev": true
}
}
},
"make-error": {
"version": "1.3.6",
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
"integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==",
"dev": true
},
"make-promises-safe": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/make-promises-safe/-/make-promises-safe-5.1.0.tgz",
"integrity": "sha512-AfdZ49rtyhQR/6cqVKGoH7y4ql7XkS5HJI1lZm0/5N6CQosy1eYbBJ/qbhkKHzo17UH7M918Bysf6XB9f3kS1g==",
"dev": true
},
"makeerror": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.11.tgz",
"integrity": "sha1-4BpckQnyr3lmDk6LlYd5AYT1qWw=",
"dev": true,
"requires": {
"tmpl": "1.0.x"
}
},
"map-cache": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
"integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=",
"dev": true
},
"map-visit": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
"integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
"dev": true,
"requires": {
"object-visit": "^1.0.0"
}
},
"merge-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
"integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
"dev": true
},
"micromatch": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz",
"integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==",
"dev": true,
"requires": {
"braces": "^3.0.1",
"picomatch": "^2.2.3"
}
},
"mime": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
},
"mime-db": {
"version": "1.49.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.49.0.tgz",
"integrity": "sha512-CIc8j9URtOVApSFCQIF+VBkX1RwXp/oMMOrqdyXSBXq5RWNEsRfyj1kiRnQgmNXmHxPoFIxOroKA3zcU9P+nAA==",
"dev": true
},
"mime-types": {
"version": "2.1.32",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.32.tgz",
"integrity": "sha512-hJGaVS4G4c9TSMYh2n6SQAGrC4RnfU+daP8G7cSCmaqNjiOoUY0VHCMS42pxnQmVF1GWwFhbHWn3RIxCqTmZ9A==",
"dev": true,
"requires": {
"mime-db": "1.49.0"
}
},
"mimeparse": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/mimeparse/-/mimeparse-0.1.4.tgz",
"integrity": "sha512-jiuAsJJY4c0oF97oHKic9nva2y1QF2yhYJG3LXLys//f8SNQ89eFuGZ29z62Z29CAY4endJS6zFiKUtURFErog=="
},
"mimic-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"dev": true
},
"minimatch": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"requires": {
"brace-expansion": "^1.1.7"
}
},
"minimist": {
"version": "1.2.6",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
"integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
},
"minipass": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-2.9.0.tgz",
"integrity": "sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==",
"requires": {
"safe-buffer": "^5.1.2",
"yallist": "^3.0.0"
}
},
"minizlib": {
"version": "1.3.3",
"resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.3.3.tgz",
"integrity": "sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==",
"requires": {
"minipass": "^2.9.0"
}
},
"mixin-deep": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
"integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
"dev": true,
"requires": {
"for-in": "^1.0.2",
"is-extendable": "^1.0.1"
},
"dependencies": {
"is-extendable": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
"integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
"dev": true,
"requires": {
"is-plain-object": "^2.0.4"
}
}
}
},
"mkdirp": {
"version": "0.5.5",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
"integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
"requires": {
"minimist": "^1.2.5"
}
},
"montage": {
"version": "17.2.2",
"resolved": "https://registry.npmjs.org/montage/-/montage-17.2.2.tgz",
"integrity": "sha512-YITyDIFh2hRFMU2KJDmprqKhEx0l/bfIIJEGxeLI08GLytDnf2WY+jOfSKmCOuePQF/GhCTyDE7T5wDz2i6OOA==",
"requires": {
"bluebird": "~3.5.0",
"collections": "~5.1.x",
"frb": "~4.0.x",
"htmlparser2": "~3.0.5",
"lodash.camelcase": "^4.3.0",
"lodash.kebabcase": "^4.1.1",
"lodash.snakecase": "^4.1.1",
"lodash.trim": "^4.5.1",
"mr": "^17.0.11",
"proxy-polyfill": "~0.1.7",
"q-io": "^1.13.3",
"weak-map": "^1.0.5"
}
},
"mr": {
"version": "17.0.14",
"resolved": "https://registry.npmjs.org/mr/-/mr-17.0.14.tgz",
"integrity": "sha512-ADXntqwl7wStHsrFLJnoOt69Gv42lxuoccR0V/6fy1sLJDtH4beoA+YwOWQHNQ7kCF7lOCJaW8cBACdTy3qzUA==",
"requires": {
"bluebird": "~3.5.0"
}
},
"ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
},
"mute-stream": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz",
"integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==",
"dev": true
},
"nanomatch": {
"version": "1.2.13",
"resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
"integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
"dev": true,
"requires": {
"arr-diff": "^4.0.0",
"array-unique": "^0.3.2",
"define-property": "^2.0.2",
"extend-shallow": "^3.0.2",
"fragment-cache": "^0.2.1",
"is-windows": "^1.0.2",
"kind-of": "^6.0.2",
"object.pick": "^1.3.0",
"regex-not": "^1.0.0",
"snapdragon": "^0.8.1",
"to-regex": "^3.0.1"
}
},
"native": {
"version": "0.3.3",
"resolved": "https://registry.npmjs.org/native/-/native-0.3.3.tgz",
"integrity": "sha512-NtzhjGaXTMMCE2rkJ6kSZW+6UbPqdnlY5ZQ2PK+ZDRu5W3UiGqEBJqIRhaDdYywxaX9stFMDnPEv/hTvyhnbEg==",
"requires": {
"montage": ">=16.0.4"
}
},
"natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
"integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=",
"dev": true
},
"needle": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/needle/-/needle-2.5.0.tgz",
"integrity": "sha512-o/qITSDR0JCyCKEQ1/1bnUXMmznxabbwi/Y4WwJElf+evwJNFNwIDMCCt5IigFVxgeGBJESLohGtIS9gEzo1fA==",
"requires": {
"debug": "^3.2.6",
"iconv-lite": "^0.4.4",
"sax": "^1.2.4"
}
},
"neo-async": {
"version": "2.6.2",
"resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
"integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==",
"dev": true
},
"neon-cli": {
"version": "0.9.1",
"resolved": "https://registry.npmjs.org/neon-cli/-/neon-cli-0.9.1.tgz",
"integrity": "sha512-iWwKjI1Q4o1lWVc4vkr59BXEYlBl5HggKx5ETw5kStpuvT1nVcolPtTdydisx8pLRRxN+gjER571DOGnA0Yxcg==",
"dev": true,
"requires": {
"chalk": "^4.1.0",
"command-line-args": "^5.1.1",
"command-line-commands": "^3.0.1",
"command-line-usage": "^6.1.0",
"git-config": "0.0.7",
"handlebars": "^4.7.6",
"inquirer": "^7.3.3",
"make-promises-safe": "^5.1.0",
"rimraf": "^3.0.2",
"semver": "^7.3.2",
"toml": "^3.0.0",
"ts-typed-json": "^0.3.2",
"validate-npm-package-license": "^3.0.4",
"validate-npm-package-name": "^3.0.0"
},
"dependencies": {
"rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"requires": {
"glob": "^7.1.3"
}
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"next-tick": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz",
"integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ=="
},
"nice-try": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz",
"integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==",
"dev": true
},
"node-int64": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
"integrity": "sha1-h6kGXNs1XTGC2PlM4RGIuCXGijs=",
"dev": true
},
"node-modules-regexp": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz",
"integrity": "sha1-jZ2+KJZKSsVxLpExZCEHxx6Q7EA=",
"dev": true
},
"node-notifier": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/node-notifier/-/node-notifier-8.0.2.tgz",
"integrity": "sha512-oJP/9NAdd9+x2Q+rfphB2RJCHjod70RcRLjosiPMMu5gjIfwVnOUGq2nbTjTUbmy0DJ/tFIVT30+Qe3nzl4TJg==",
"dev": true,
"optional": true,
"requires": {
"growly": "^1.3.0",
"is-wsl": "^2.2.0",
"semver": "^7.3.2",
"shellwords": "^0.1.1",
"uuid": "^8.3.0",
"which": "^2.0.2"
},
"dependencies": {
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"optional": true,
"requires": {
"lru-cache": "^6.0.0"
}
}
}
},
"node-pre-gyp": {
"version": "0.14.0",
"resolved": "https://registry.npmjs.org/node-pre-gyp/-/node-pre-gyp-0.14.0.tgz",
"integrity": "sha512-+CvDC7ZttU/sSt9rFjix/P05iS43qHCOOGzcr3Ry99bXG7VX953+vFyEuph/tfqoYu8dttBkE86JSKBO2OzcxA==",
"requires": {
"detect-libc": "^1.0.2",
"mkdirp": "^0.5.1",
"needle": "^2.2.1",
"nopt": "^4.0.1",
"npm-packlist": "^1.1.6",
"npmlog": "^4.0.2",
"rc": "^1.2.7",
"rimraf": "^2.6.1",
"semver": "^5.3.0",
"tar": "^4.4.2"
}
},
"node-releases": {
"version": "1.1.75",
"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.75.tgz",
"integrity": "sha512-Qe5OUajvqrqDSy6wrWFmMwfJ0jVgwiw4T3KqmbTcZ62qW0gQkheXYhcFM1+lOVcGUoRxcEcfyvFMAnDgaF1VWw==",
"dev": true
},
"node-status-codes": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-status-codes/-/node-status-codes-1.0.0.tgz",
"integrity": "sha512-1cBMgRxdMWE8KeWCqk2RIOrvUb0XCwYfEsY5/y2NlXyq4Y/RumnOZvTj4Nbr77+Vb2C+kyBoRTdkNOS8L3d/aQ=="
},
"noop6": {
"version": "1.0.9",
"resolved": "https://registry.npmjs.org/noop6/-/noop6-1.0.9.tgz",
"integrity": "sha512-DB3Hwyd89dPr5HqEPg3YHjzvwh/mCqizC1zZ8vyofqc+TQRyPDnT4wgXXbLGF4z9YAzwwTLi8pNLhGqcbSjgkA=="
},
"nopt": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.3.tgz",
"integrity": "sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg==",
"requires": {
"abbrev": "1",
"osenv": "^0.1.4"
}
},
"normalize-package-data": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz",
"integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==",
"requires": {
"hosted-git-info": "^2.1.4",
"resolve": "^1.10.0",
"semver": "2 || 3 || 4 || 5",
"validate-npm-package-license": "^3.0.1"
}
},
"normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
"integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
"dev": true
},
"npm-bundled": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/npm-bundled/-/npm-bundled-1.1.1.tgz",
"integrity": "sha512-gqkfgGePhTpAEgUsGEgcq1rqPXA+tv/aVBlgEzfXwA1yiUJF7xtEt3CtVwOjNYQOVknDk0F20w58Fnm3EtG0fA==",
"requires": {
"npm-normalize-package-bin": "^1.0.1"
}
},
"npm-normalize-package-bin": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz",
"integrity": "sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA=="
},
"npm-packlist": {
"version": "1.4.8",
"resolved": "https://registry.npmjs.org/npm-packlist/-/npm-packlist-1.4.8.tgz",
"integrity": "sha512-5+AZgwru5IevF5ZdnFglB5wNlHG1AOOuw28WhUq8/8emhBmLv6jX5by4WJCh7lW0uSYZYS6DXqIsyZVIXRZU9A==",
"requires": {
"ignore-walk": "^3.0.1",
"npm-bundled": "^1.0.1",
"npm-normalize-package-bin": "^1.0.1"
}
},
"npm-run-path": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz",
"integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=",
"dev": true,
"requires": {
"path-key": "^2.0.0"
},
"dependencies": {
"path-key": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
"integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=",
"dev": true
}
}
},
"npmlog": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz",
"integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==",
"requires": {
"are-we-there-yet": "~1.1.2",
"console-control-strings": "~1.1.0",
"gauge": "~2.7.3",
"set-blocking": "~2.0.0"
}
},
"number-is-nan": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz",
"integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0="
},
"nwsapi": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.0.tgz",
"integrity": "sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==",
"dev": true
},
"oargv": {
"version": "3.4.10",
"resolved": "https://registry.npmjs.org/oargv/-/oargv-3.4.10.tgz",
"integrity": "sha512-SXaMANv9sr7S/dP0vj0+Ybipa47UE1ntTWQ2rpPRhC6Bsvfl+Jg03Xif7jfL0sWKOYWK8oPjcZ5eJ82t8AP/8g==",
"requires": {
"iterate-object": "^1.1.0",
"ul": "^5.0.0"
}
},
"obj-def": {
"version": "1.0.9",
"resolved": "https://registry.npmjs.org/obj-def/-/obj-def-1.0.9.tgz",
"integrity": "sha512-bQ4ya3VYD6FAA1+s6mEhaURRHSmw4+sKaXE6UyXZ1XDYc5D+c7look25dFdydmLd18epUegh398gdDkMUZI9xg==",
"requires": {
"deffy": "^2.2.2"
}
},
"object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
"integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM="
},
"object-copy": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
"integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
"dev": true,
"requires": {
"copy-descriptor": "^0.1.0",
"define-property": "^0.2.5",
"kind-of": "^3.0.3"
},
"dependencies": {
"define-property": {
"version": "0.2.5",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
"integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
"dev": true,
"requires": {
"is-descriptor": "^0.1.0"
}
},
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"object-visit": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
"integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
"dev": true,
"requires": {
"isobject": "^3.0.0"
}
},
"object.pick": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
"integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
"dev": true,
"requires": {
"isobject": "^3.0.1"
}
},
"once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
"requires": {
"wrappy": "1"
}
},
"one-by-one": {
"version": "3.2.8",
"resolved": "https://registry.npmjs.org/one-by-one/-/one-by-one-3.2.8.tgz",
"integrity": "sha512-HR/pSzZdm46Xqj58K+Bu64kMbSTw8/u77AwWvV+rprO/OsuR++pPlkUJn+SmwqBGRgHKwSKQ974V3uls7crIeQ==",
"requires": {
"obj-def": "^1.0.0",
"sliced": "^1.0.1"
}
},
"onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
"integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
"dev": true,
"requires": {
"mimic-fn": "^2.1.0"
}
},
"optionator": {
"version": "0.9.1",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz",
"integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==",
"dev": true,
"requires": {
"deep-is": "^0.1.3",
"fast-levenshtein": "^2.0.6",
"levn": "^0.4.1",
"prelude-ls": "^1.2.1",
"type-check": "^0.4.0",
"word-wrap": "^1.2.3"
}
},
"os-homedir": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
"integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M="
},
"os-tmpdir": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
"integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ="
},
"osenv": {
"version": "0.1.5",
"resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz",
"integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==",
"requires": {
"os-homedir": "^1.0.0",
"os-tmpdir": "^1.0.0"
}
},
"p-each-series": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/p-each-series/-/p-each-series-2.2.0.tgz",
"integrity": "sha512-ycIL2+1V32th+8scbpTvyHNaHe02z0sjgh91XXjAk+ZeXoPN4Z46DVUnzdso0aX4KckKw0FNNFHdjZ2UsZvxiA==",
"dev": true
},
"p-finally": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
"integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=",
"dev": true
},
"p-limit": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
"integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
"dev": true,
"requires": {
"p-try": "^2.0.0"
}
},
"p-locate": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
"integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
"dev": true,
"requires": {
"p-limit": "^2.2.0"
}
},
"p-try": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
"integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
"dev": true
},
"package-json": {
"version": "2.4.0",
"resolved": "https://registry.npmjs.org/package-json/-/package-json-2.4.0.tgz",
"integrity": "sha512-PRg65iXMTt/uK8Rfh5zvzkUbfAPitF17YaCY+IbHsYgksiLvtzWWTUildHth3mVaZ7871OJ7gtP4LBRBlmAdXg==",
"requires": {
"got": "^5.0.0",
"registry-auth-token": "^3.0.1",
"registry-url": "^3.0.3",
"semver": "^5.1.0"
}
},
"package-json-path": {
"version": "1.0.9",
"resolved": "https://registry.npmjs.org/package-json-path/-/package-json-path-1.0.9.tgz",
"integrity": "sha512-uNu7f6Ef7tQHZRnkyVnCtzdSYVN9uBtge/sG7wzcUaawFWkPYUq67iXxRGrQSg/q0tzxIB8jSyIYUKjG2Jn//A==",
"requires": {
"abs": "^1.2.1"
}
},
"package.json": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/package.json/-/package.json-2.0.1.tgz",
"integrity": "sha512-pSxZ6XR5yEawRN2ekxx9IKgPN5uNAYco7MCPxtBEWMKO3UKWa1X2CtQMzMgloeGj2g2o6cue3Sb5iPkByIJqlw==",
"requires": {
"git-package-json": "^1.4.0",
"git-source": "^1.1.0",
"package-json": "^2.3.1"
}
},
"parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dev": true,
"requires": {
"callsites": "^3.0.0"
}
},
"parse-json": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
"integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
"dev": true,
"requires": {
"@babel/code-frame": "^7.0.0",
"error-ex": "^1.3.1",
"json-parse-even-better-errors": "^2.3.0",
"lines-and-columns": "^1.1.6"
}
},
"parse-url": {
"version": "1.3.11",
"resolved": "https://registry.npmjs.org/parse-url/-/parse-url-1.3.11.tgz",
"integrity": "sha512-1wj9nkgH/5EboDxLwaTMGJh3oH3f+Gue+aGdh631oCqoSBpokzmMmOldvOeBPtB8GJBYJbaF93KPzlkU+Y1ksg==",
"requires": {
"is-ssh": "^1.3.0",
"protocols": "^1.4.0"
},
"dependencies": {
"protocols": {
"version": "1.4.8",
"resolved": "https://registry.npmjs.org/protocols/-/protocols-1.4.8.tgz",
"integrity": "sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg=="
}
}
},
"parse5": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
"integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==",
"dev": true
},
"pascalcase": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
"integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=",
"dev": true
},
"path-exists": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
"integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
"dev": true
},
"path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
},
"path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"dev": true
},
"path-parse": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
},
"picomatch": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz",
"integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==",
"dev": true
},
"pinkie": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz",
"integrity": "sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg=="
},
"pinkie-promise": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz",
"integrity": "sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==",
"requires": {
"pinkie": "^2.0.0"
}
},
"pirates": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.1.tgz",
"integrity": "sha512-WuNqLTbMI3tmfef2TKxlQmAiLHKtFhlsCZnPIpuv2Ow0RDVO8lfy1Opf4NUzlMXLjPl+Men7AuVdX6TA+s+uGA==",
"dev": true,
"requires": {
"node-modules-regexp": "^1.0.0"
}
},
"pkg-dir": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
"integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
"dev": true,
"requires": {
"find-up": "^4.0.0"
}
},
"posix-character-classes": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
"integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=",
"dev": true
},
"prelude-ls": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
"integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
"dev": true
},
"prepend-http": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz",
"integrity": "sha512-PhmXi5XmoyKw1Un4E+opM2KcsJInDvKyuOumcjjw3waw86ZNjHwVUOOWLc4bCzLdcKNaWBH9e99sbWzDQsVaYg=="
},
"prettier": {
"version": "2.5.1",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-2.5.1.tgz",
"integrity": "sha512-vBZcPRUR5MZJwoyi3ZoyQlc1rXeEck8KgeC9AwwOn+exuxLxq5toTRDTSaVrXHxelDMHy9zlicw8u66yxoSUFg==",
"dev": true
},
"prettier-linter-helpers": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz",
"integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==",
"dev": true,
"requires": {
"fast-diff": "^1.1.2"
}
},
"pretty-format": {
"version": "26.6.2",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-26.6.2.tgz",
"integrity": "sha512-7AeGuCYNGmycyQbCqd/3PWH4eOoX/OiCa0uphp57NVTeAGdJGaAliecxwBDHYQCIvrW7aDBZCYeNTP/WX69mkg==",
"dev": true,
"requires": {
"@jest/types": "^26.6.2",
"ansi-regex": "^5.0.0",
"ansi-styles": "^4.0.0",
"react-is": "^17.0.1"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
}
}
},
"process-nextick-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
},
"progress": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
"integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==",
"dev": true
},
"prompts": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.1.tgz",
"integrity": "sha512-EQyfIuO2hPDsX1L/blblV+H7I0knhgAd82cVneCwcdND9B8AuCDuRcBH6yIcG4dFzlOUqbazQqwGjx5xmsNLuQ==",
"dev": true,
"requires": {
"kleur": "^3.0.3",
"sisteransi": "^1.0.5"
}
},
"protocols": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/protocols/-/protocols-2.0.1.tgz",
"integrity": "sha512-/XJ368cyBJ7fzLMwLKv1e4vLxOju2MNAIokcr7meSaNcVbWz/CPcW22cP04mwxOErdA5mwjA8Q6w/cdAQxVn7Q=="
},
"proxy-polyfill": {
"version": "0.1.7",
"resolved": "https://registry.npmjs.org/proxy-polyfill/-/proxy-polyfill-0.1.7.tgz",
"integrity": "sha512-WUob/kKCMt4IDJthdKv8GYi89pFsKhv14E/Ej+bTPcKJFa2xw4cmSRD9MDJFCExanfhViIARMBL7SJY7/S7aMA=="
},
"psl": {
"version": "1.8.0",
"resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz",
"integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==",
"dev": true
},
"pump": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
"integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
"dev": true,
"requires": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"punycode": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
"integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
"dev": true
},
"q": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz",
"integrity": "sha512-kV/CThkXo6xyFEZUugw/+pIOywXcDbFYgSct5cT3gqlbkBE1SJdwy6UQoZvodiWF/ckQLZyDE/Bu1M6gVu5lVw=="
},
"q-io": {
"version": "1.13.6",
"resolved": "https://registry.npmjs.org/q-io/-/q-io-1.13.6.tgz",
"integrity": "sha512-xgxlfN3iOQ4LMuyhb8+aibtv57HnqZqycCLaE3o/qkeqvBuEfbq4tU4oHjIHgY/LVN3vJs+Gy9hKh8kzlnug7g==",
"requires": {
"es6-set": "^0.1.1",
"mime": "^1.2.11",
"mimeparse": "^0.1.4",
"q": "^1.0.1",
"qs": "^1.2.1",
"url2": "^0.0.0"
}
},
"qs": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/qs/-/qs-1.2.2.tgz",
"integrity": "sha512-xEqT+49YIt+BdwQthXKTOkp7atENe6JqrGGerxBPiER6BArOIiVJtpZZYpWOpq2IOkTPVnDM8CgYvppFoJNwyQ=="
},
"querystringify": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz",
"integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==",
"dev": true
},
"r-json": {
"version": "1.2.10",
"resolved": "https://registry.npmjs.org/r-json/-/r-json-1.2.10.tgz",
"integrity": "sha512-hu9vyLjSlHXT62NAS7DjI9WazDlvjN0lgp3n431dCVnirVcLkZIpzSwA3orhZEKzdDD2jqNYI+w0yG0aFf4kpA=="
},
"r-package-json": {
"version": "1.0.9",
"resolved": "https://registry.npmjs.org/r-package-json/-/r-package-json-1.0.9.tgz",
"integrity": "sha512-G4Vpf1KImWmmPFGdtWQTU0L9zk0SjqEC4qs/jE7AQ+Ylmr5kizMzGeC4wnHp5+ijPqNN+2ZPpvyjVNdN1CDVcg==",
"requires": {
"package-json-path": "^1.0.0",
"r-json": "^1.2.1"
}
},
"rc": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
"integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
"requires": {
"deep-extend": "^0.6.0",
"ini": "~1.3.0",
"minimist": "^1.2.0",
"strip-json-comments": "~2.0.1"
}
},
"react-is": {
"version": "17.0.2",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz",
"integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==",
"dev": true
},
"read-all-stream": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/read-all-stream/-/read-all-stream-3.1.0.tgz",
"integrity": "sha512-DI1drPHbmBcUDWrJ7ull/F2Qb8HkwBncVx8/RpKYFSIACYaVRQReISYPdZz/mt1y1+qMCOrfReTopERmaxtP6w==",
"requires": {
"pinkie-promise": "^2.0.0",
"readable-stream": "^2.0.0"
}
},
"read-pkg": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz",
"integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==",
"dev": true,
"requires": {
"@types/normalize-package-data": "^2.4.0",
"normalize-package-data": "^2.5.0",
"parse-json": "^5.0.0",
"type-fest": "^0.6.0"
},
"dependencies": {
"type-fest": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz",
"integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==",
"dev": true
}
}
},
"read-pkg-up": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
"integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
"dev": true,
"requires": {
"find-up": "^4.1.0",
"read-pkg": "^5.2.0",
"type-fest": "^0.8.1"
},
"dependencies": {
"type-fest": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
"integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
"dev": true
}
}
},
"readable-stream": {
"version": "2.3.7",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
"integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
"requires": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
},
"rechoir": {
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz",
"integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=",
"dev": true,
"requires": {
"resolve": "^1.1.6"
}
},
"reduce-flatten": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/reduce-flatten/-/reduce-flatten-2.0.0.tgz",
"integrity": "sha512-EJ4UNY/U1t2P/2k6oqotuX2Cc3T6nxJwsM0N0asT7dhrtH1ltUxDn4NalSYmPE2rCkVpcf/X6R0wDwcFpzhd4w==",
"dev": true
},
"regex-not": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
"integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
"dev": true,
"requires": {
"extend-shallow": "^3.0.2",
"safe-regex": "^1.1.0"
}
},
"regexpp": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz",
"integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==",
"dev": true
},
"regextras": {
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/regextras/-/regextras-0.7.1.tgz",
"integrity": "sha512-9YXf6xtW+qzQ+hcMQXx95MOvfqXFgsKDZodX3qZB0x2n5Z94ioetIITsBtvJbiOyxa/6s9AtyweBLCdPmPko/w==",
"dev": true
},
"registry-auth-token": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.4.0.tgz",
"integrity": "sha512-4LM6Fw8eBQdwMYcES4yTnn2TqIasbXuwDx3um+QRs7S55aMKCBKBxvPXl2RiUjHwuJLTyYfxSpmfSAjQpcuP+A==",
"requires": {
"rc": "^1.1.6",
"safe-buffer": "^5.0.1"
}
},
"registry-url": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz",
"integrity": "sha512-ZbgR5aZEdf4UKZVBPYIgaglBmSF2Hi94s2PcIHhRGFjKYu+chjJdYfHn4rt3hB6eCKLJ8giVIIfgMa1ehDfZKA==",
"requires": {
"rc": "^1.0.1"
}
},
"remove-trailing-separator": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz",
"integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=",
"dev": true
},
"repeat-element": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz",
"integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==",
"dev": true
},
"repeat-string": {
"version": "1.6.1",
"resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
"integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=",
"dev": true
},
"require-directory": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
"integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=",
"dev": true
},
"require-from-string": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
"dev": true
},
"require-main-filename": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz",
"integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==",
"dev": true
},
"requires-port": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
"integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==",
"dev": true
},
"resolve": {
"version": "1.17.0",
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.17.0.tgz",
"integrity": "sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==",
"requires": {
"path-parse": "^1.0.6"
}
},
"resolve-cwd": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz",
"integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==",
"dev": true,
"requires": {
"resolve-from": "^5.0.0"
},
"dependencies": {
"resolve-from": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
"integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
"dev": true
}
}
},
"resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"dev": true
},
"resolve-url": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
"integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=",
"dev": true
},
"restore-cursor": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz",
"integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==",
"dev": true,
"requires": {
"onetime": "^5.1.0",
"signal-exit": "^3.0.2"
}
},
"ret": {
"version": "0.1.15",
"resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
"integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==",
"dev": true
},
"rimraf": {
"version": "2.7.1",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
"integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
"requires": {
"glob": "^7.1.3"
}
},
"rsvp": {
"version": "4.8.5",
"resolved": "https://registry.npmjs.org/rsvp/-/rsvp-4.8.5.tgz",
"integrity": "sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA==",
"dev": true
},
"run-async": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz",
"integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==",
"dev": true
},
"rxjs": {
"version": "6.6.7",
"resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz",
"integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==",
"dev": true,
"requires": {
"tslib": "^1.9.0"
}
},
"safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
},
"safe-regex": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
"integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
"dev": true,
"requires": {
"ret": "~0.1.10"
}
},
"safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
"sane": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/sane/-/sane-4.1.0.tgz",
"integrity": "sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA==",
"dev": true,
"requires": {
"@cnakazawa/watch": "^1.0.3",
"anymatch": "^2.0.0",
"capture-exit": "^2.0.0",
"exec-sh": "^0.3.2",
"execa": "^1.0.0",
"fb-watchman": "^2.0.0",
"micromatch": "^3.1.4",
"minimist": "^1.1.1",
"walker": "~1.0.5"
},
"dependencies": {
"anymatch": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz",
"integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==",
"dev": true,
"requires": {
"micromatch": "^3.1.4",
"normalize-path": "^2.1.1"
}
},
"braces": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
"integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
"dev": true,
"requires": {
"arr-flatten": "^1.1.0",
"array-unique": "^0.3.2",
"extend-shallow": "^2.0.1",
"fill-range": "^4.0.0",
"isobject": "^3.0.1",
"repeat-element": "^1.1.2",
"snapdragon": "^0.8.1",
"snapdragon-node": "^2.0.1",
"split-string": "^3.0.2",
"to-regex": "^3.0.1"
},
"dependencies": {
"extend-shallow": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
"dev": true,
"requires": {
"is-extendable": "^0.1.0"
}
}
}
},
"fill-range": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
"integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
"dev": true,
"requires": {
"extend-shallow": "^2.0.1",
"is-number": "^3.0.0",
"repeat-string": "^1.6.1",
"to-regex-range": "^2.1.0"
},
"dependencies": {
"extend-shallow": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
"dev": true,
"requires": {
"is-extendable": "^0.1.0"
}
}
}
},
"is-number": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
"integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
"dev": true,
"requires": {
"kind-of": "^3.0.2"
},
"dependencies": {
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"micromatch": {
"version": "3.1.10",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
"integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
"dev": true,
"requires": {
"arr-diff": "^4.0.0",
"array-unique": "^0.3.2",
"braces": "^2.3.1",
"define-property": "^2.0.2",
"extend-shallow": "^3.0.2",
"extglob": "^2.0.4",
"fragment-cache": "^0.2.1",
"kind-of": "^6.0.2",
"nanomatch": "^1.2.9",
"object.pick": "^1.3.0",
"regex-not": "^1.0.0",
"snapdragon": "^0.8.1",
"to-regex": "^3.0.2"
}
},
"normalize-path": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz",
"integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=",
"dev": true,
"requires": {
"remove-trailing-separator": "^1.0.1"
}
},
"to-regex-range": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
"integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
"dev": true,
"requires": {
"is-number": "^3.0.0",
"repeat-string": "^1.6.1"
}
}
}
},
"sax": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
"integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
},
"saxes": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/saxes/-/saxes-5.0.1.tgz",
"integrity": "sha512-5LBh1Tls8c9xgGjw3QrMwETmTMVk0oFgvrFSvWx62llR2hcEInrKNZ2GZCCuuy2lvWrdl5jhbpeqc5hRYKFOcw==",
"dev": true,
"requires": {
"xmlchars": "^2.2.0"
}
},
"semver": {
"version": "5.7.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
"integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
},
"set-blocking": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz",
"integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc="
},
"set-value": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
"integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
"dev": true,
"requires": {
"extend-shallow": "^2.0.1",
"is-extendable": "^0.1.1",
"is-plain-object": "^2.0.3",
"split-string": "^3.0.1"
},
"dependencies": {
"extend-shallow": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
"dev": true,
"requires": {
"is-extendable": "^0.1.0"
}
}
}
},
"shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dev": true,
"requires": {
"shebang-regex": "^3.0.0"
}
},
"shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"dev": true
},
"shelljs": {
"version": "0.8.5",
"resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz",
"integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==",
"dev": true,
"requires": {
"glob": "^7.0.0",
"interpret": "^1.0.0",
"rechoir": "^0.6.2"
}
},
"shellwords": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/shellwords/-/shellwords-0.1.1.tgz",
"integrity": "sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww==",
"dev": true,
"optional": true
},
"signal-exit": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz",
"integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA=="
},
"sisteransi": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
"integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==",
"dev": true
},
"slash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
"integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
"dev": true
},
"slice-ansi": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz",
"integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==",
"dev": true,
"requires": {
"ansi-styles": "^4.0.0",
"astral-regex": "^2.0.0",
"is-fullwidth-code-point": "^3.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
}
}
},
"sliced": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz",
"integrity": "sha512-VZBmZP8WU3sMOZm1bdgTadsQbcscK0UM8oKxKVBs4XAhUo2Xxzm/OFMGBkPusxw9xL3Uy8LrzEqGqJhclsr0yA=="
},
"snapdragon": {
"version": "0.8.2",
"resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
"integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
"dev": true,
"requires": {
"base": "^0.11.1",
"debug": "^2.2.0",
"define-property": "^0.2.5",
"extend-shallow": "^2.0.1",
"map-cache": "^0.2.2",
"source-map": "^0.5.6",
"source-map-resolve": "^0.5.0",
"use": "^3.1.0"
},
"dependencies": {
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dev": true,
"requires": {
"ms": "2.0.0"
}
},
"define-property": {
"version": "0.2.5",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
"integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
"dev": true,
"requires": {
"is-descriptor": "^0.1.0"
}
},
"extend-shallow": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
"integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
"dev": true,
"requires": {
"is-extendable": "^0.1.0"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
"dev": true
},
"source-map": {
"version": "0.5.7",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
"integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
"dev": true
}
}
},
"snapdragon-node": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
"integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
"dev": true,
"requires": {
"define-property": "^1.0.0",
"isobject": "^3.0.0",
"snapdragon-util": "^3.0.1"
},
"dependencies": {
"define-property": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
"integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
"dev": true,
"requires": {
"is-descriptor": "^1.0.0"
}
},
"is-accessor-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
"integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-data-descriptor": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
"integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
"dev": true,
"requires": {
"kind-of": "^6.0.0"
}
},
"is-descriptor": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
"integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
"dev": true,
"requires": {
"is-accessor-descriptor": "^1.0.0",
"is-data-descriptor": "^1.0.0",
"kind-of": "^6.0.2"
}
}
}
},
"snapdragon-util": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
"integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
"dev": true,
"requires": {
"kind-of": "^3.2.0"
},
"dependencies": {
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
"integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
"dev": true
},
"source-map-resolve": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz",
"integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==",
"dev": true,
"requires": {
"atob": "^2.1.2",
"decode-uri-component": "^0.2.0",
"resolve-url": "^0.2.1",
"source-map-url": "^0.4.0",
"urix": "^0.1.0"
}
},
"source-map-support": {
"version": "0.5.19",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz",
"integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==",
"dev": true,
"requires": {
"buffer-from": "^1.0.0",
"source-map": "^0.6.0"
}
},
"source-map-url": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz",
"integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==",
"dev": true
},
"spdx-correct": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz",
"integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==",
"requires": {
"spdx-expression-parse": "^3.0.0",
"spdx-license-ids": "^3.0.0"
}
},
"spdx-exceptions": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz",
"integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A=="
},
"spdx-expression-parse": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz",
"integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==",
"requires": {
"spdx-exceptions": "^2.1.0",
"spdx-license-ids": "^3.0.0"
}
},
"spdx-license-ids": {
"version": "3.0.5",
"resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz",
"integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q=="
},
"split-string": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
"integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
"dev": true,
"requires": {
"extend-shallow": "^3.0.0"
}
},
"sprintf-js": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
"integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
"dev": true
},
"stack-utils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.3.tgz",
"integrity": "sha512-gL//fkxfWUsIlFL2Tl42Cl6+HFALEaB1FU76I/Fy+oZjRreP7OPMXFlGbxM7NQsI0ZpUfw76sHnv0WNYuTb7Iw==",
"dev": true,
"requires": {
"escape-string-regexp": "^2.0.0"
},
"dependencies": {
"escape-string-regexp": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz",
"integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==",
"dev": true
}
}
},
"static-extend": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
"integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
"dev": true,
"requires": {
"define-property": "^0.2.5",
"object-copy": "^0.1.0"
},
"dependencies": {
"define-property": {
"version": "0.2.5",
"resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
"integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
"dev": true,
"requires": {
"is-descriptor": "^0.1.0"
}
}
}
},
"string-length": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
"integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==",
"dev": true,
"requires": {
"char-regex": "^1.0.2",
"strip-ansi": "^6.0.0"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
}
}
},
"string-width": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz",
"integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=",
"requires": {
"code-point-at": "^1.0.0",
"is-fullwidth-code-point": "^1.0.0",
"strip-ansi": "^3.0.0"
}
},
"string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"requires": {
"safe-buffer": "~5.1.0"
}
},
"strip-ansi": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
"integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
"requires": {
"ansi-regex": "^2.0.0"
}
},
"strip-bom": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz",
"integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==",
"dev": true
},
"strip-eof": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
"integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=",
"dev": true
},
"strip-final-newline": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
"integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
"dev": true
},
"strip-json-comments": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo="
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"requires": {
"has-flag": "^4.0.0"
}
},
"supports-hyperlinks": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.2.0.tgz",
"integrity": "sha512-6sXEzV5+I5j8Bmq9/vUphGRM/RJNT9SCURJLjwfOg51heRtguGWDzcaBlgAzKhQa0EVNpPEKzQuBwZ8S8WaCeQ==",
"dev": true,
"requires": {
"has-flag": "^4.0.0",
"supports-color": "^7.0.0"
}
},
"symbol-tree": {
"version": "3.2.4",
"resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz",
"integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==",
"dev": true
},
"table": {
"version": "6.7.1",
"resolved": "https://registry.npmjs.org/table/-/table-6.7.1.tgz",
"integrity": "sha512-ZGum47Yi6KOOFDE8m223td53ath2enHcYLgOCjGr5ngu8bdIARQk6mN/wRMv4yMRcHnCSnHbCEha4sobQx5yWg==",
"dev": true,
"requires": {
"ajv": "^8.0.1",
"lodash.clonedeep": "^4.5.0",
"lodash.truncate": "^4.4.2",
"slice-ansi": "^4.0.0",
"string-width": "^4.2.0",
"strip-ansi": "^6.0.0"
},
"dependencies": {
"ajv": {
"version": "8.6.2",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.2.tgz",
"integrity": "sha512-9807RlWAgT564wT+DjeyU5OFMPjmzxVobvDFmNAhY+5zD6A2ly3jDp6sgnfyDtlIQ+7H97oc/DGCzzfu9rjw9w==",
"dev": true,
"requires": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
}
},
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
},
"json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
"dev": true
},
"string-width": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
"integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.0"
}
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
}
}
},
"table-layout": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/table-layout/-/table-layout-1.0.2.tgz",
"integrity": "sha512-qd/R7n5rQTRFi+Zf2sk5XVVd9UQl6ZkduPFC3S7WEGJAmetDTjY3qPN50eSKzwuzEyQKy5TN2TiZdkIjos2L6A==",
"dev": true,
"requires": {
"array-back": "^4.0.1",
"deep-extend": "~0.6.0",
"typical": "^5.2.0",
"wordwrapjs": "^4.0.0"
},
"dependencies": {
"array-back": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/array-back/-/array-back-4.0.2.tgz",
"integrity": "sha512-NbdMezxqf94cnNfWLL7V/im0Ub+Anbb0IoZhvzie8+4HJ4nMQuzHuy49FkGYCJK2yAloZ3meiB6AVMClbrI1vg==",
"dev": true
},
"typical": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/typical/-/typical-5.2.0.tgz",
"integrity": "sha512-dvdQgNDNJo+8B2uBQoqdb11eUCE1JQXhvjC/CZtgvZseVd5TYMXnq0+vuUemXbd/Se29cTaUuPX3YIc2xgbvIg==",
"dev": true
}
}
},
"tar": {
"version": "4.4.19",
"resolved": "https://registry.npmjs.org/tar/-/tar-4.4.19.tgz",
"integrity": "sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA==",
"requires": {
"chownr": "^1.1.4",
"fs-minipass": "^1.2.7",
"minipass": "^2.9.0",
"minizlib": "^1.3.3",
"mkdirp": "^0.5.5",
"safe-buffer": "^5.2.1",
"yallist": "^3.1.1"
},
"dependencies": {
"safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
}
}
},
"terminal-link": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/terminal-link/-/terminal-link-2.1.1.tgz",
"integrity": "sha512-un0FmiRUQNr5PJqy9kP7c40F5BOfpGlYTrxonDChEZB7pzZxRNp/bt+ymiy9/npwXya9KH99nJ/GXFIiUkYGFQ==",
"dev": true,
"requires": {
"ansi-escapes": "^4.2.1",
"supports-hyperlinks": "^2.0.0"
}
},
"test-exclude": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
"integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==",
"dev": true,
"requires": {
"@istanbuljs/schema": "^0.1.2",
"glob": "^7.1.4",
"minimatch": "^3.0.4"
}
},
"text-table": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
"integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=",
"dev": true
},
"throat": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/throat/-/throat-5.0.0.tgz",
"integrity": "sha512-fcwX4mndzpLQKBS1DVYhGAcYaYt7vsHNIvQV+WXMvnow5cgjPphq5CaayLaGsjRdSCKZFNGt7/GYAuXaNOiYCA==",
"dev": true
},
"through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
"dev": true
},
"timed-out": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/timed-out/-/timed-out-3.1.3.tgz",
"integrity": "sha512-3RB4qgvPkxF/FGPnrzaWLhW1rxNK2sdH0mFjbhxkfTR6QXvcM3EtYm9L44UrhODZrZ+yhDXeMncLqi8QXn2MJg=="
},
"tmp": {
"version": "0.0.33",
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
"integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
"dev": true,
"requires": {
"os-tmpdir": "~1.0.2"
}
},
"tmpl": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
"dev": true
},
"to-fast-properties": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
"integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
"dev": true
},
"to-object-path": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
"integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
"dev": true,
"requires": {
"kind-of": "^3.0.2"
},
"dependencies": {
"kind-of": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
"integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
"dev": true,
"requires": {
"is-buffer": "^1.1.5"
}
}
}
},
"to-regex": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
"integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
"dev": true,
"requires": {
"define-property": "^2.0.2",
"extend-shallow": "^3.0.2",
"regex-not": "^1.0.2",
"safe-regex": "^1.1.0"
}
},
"to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"requires": {
"is-number": "^7.0.0"
}
},
"toml": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz",
"integrity": "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==",
"dev": true
},
"tough-cookie": {
"version": "4.1.3",
"resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz",
"integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==",
"dev": true,
"requires": {
"psl": "^1.1.33",
"punycode": "^2.1.1",
"universalify": "^0.2.0",
"url-parse": "^1.5.3"
},
"dependencies": {
"universalify": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",
"integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==",
"dev": true
}
}
},
"tr46": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-2.1.0.tgz",
"integrity": "sha512-15Ih7phfcdP5YxqiB+iDtLoaTz4Nd35+IiAv0kQ5FNKHzXgdWqPoTIqEDDJmXceQt4JZk6lVPT8lnDlPpGDppw==",
"dev": true,
"requires": {
"punycode": "^2.1.1"
}
},
"ts-jest": {
"version": "26.5.6",
"resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-26.5.6.tgz",
"integrity": "sha512-rua+rCP8DxpA8b4DQD/6X2HQS8Zy/xzViVYfEs2OQu68tkCuKLV0Md8pmX55+W24uRIyAsf/BajRfxOs+R2MKA==",
"dev": true,
"requires": {
"bs-logger": "0.x",
"buffer-from": "1.x",
"fast-json-stable-stringify": "2.x",
"jest-util": "^26.1.0",
"json5": "2.x",
"lodash": "4.x",
"make-error": "1.x",
"mkdirp": "1.x",
"semver": "7.x",
"yargs-parser": "20.x"
},
"dependencies": {
"mkdirp": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
"integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==",
"dev": true
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
},
"yargs-parser": {
"version": "20.2.9",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
"integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
"dev": true
}
}
},
"ts-typed-json": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/ts-typed-json/-/ts-typed-json-0.3.2.tgz",
"integrity": "sha512-Tdu3BWzaer7R5RvBIJcg9r8HrTZgpJmsX+1meXMJzYypbkj8NK2oJN0yvm4Dp/Iv6tzFa/L5jKRmEVTga6K3nA==",
"dev": true
},
"tslib": {
"version": "1.14.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
"integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==",
"dev": true
},
"tsutils": {
"version": "3.21.0",
"resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz",
"integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==",
"dev": true,
"requires": {
"tslib": "^1.8.1"
}
},
"type": {
"version": "2.7.2",
"resolved": "https://registry.npmjs.org/type/-/type-2.7.2.tgz",
"integrity": "sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw=="
},
"type-check": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
"integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
"dev": true,
"requires": {
"prelude-ls": "^1.2.1"
}
},
"type-detect": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
"integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
"dev": true
},
"type-fest": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
"integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
"dev": true
},
"typedarray-to-buffer": {
"version": "3.1.5",
"resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz",
"integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==",
"dev": true,
"requires": {
"is-typedarray": "^1.0.0"
}
},
"typescript": {
"version": "3.9.10",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-3.9.10.tgz",
"integrity": "sha512-w6fIxVE/H1PkLKcCPsFqKE7Kv7QUwhU8qQY2MueZXWx5cPZdwFupLgKK3vntcK98BtNHZtAF4LA/yl2a7k8R6Q==",
"dev": true
},
"typical": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/typical/-/typical-4.0.0.tgz",
"integrity": "sha512-VAH4IvQ7BDFYglMd7BPRDfLgxZZX4O4TFcRDA6EN5X7erNJJq+McIEp8np9aVtxrCJ6qx4GTYVfOWNjcqwZgRw==",
"dev": true
},
"typpy": {
"version": "2.3.13",
"resolved": "https://registry.npmjs.org/typpy/-/typpy-2.3.13.tgz",
"integrity": "sha512-vOxIcQz9sxHi+rT09SJ5aDgVgrPppQjwnnayTrMye1ODaU8gIZTDM19t9TxmEElbMihx2Nq/0/b/MtyKfayRqA==",
"requires": {
"function.name": "^1.0.3"
}
},
"uglify-js": {
"version": "3.14.5",
"resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.14.5.tgz",
"integrity": "sha512-qZukoSxOG0urUTvjc2ERMTcAy+BiFh3weWAkeurLwjrCba73poHmG3E36XEjd/JGukMzwTL7uCxZiAexj8ppvQ==",
"dev": true,
"optional": true
},
"ul": {
"version": "5.2.15",
"resolved": "https://registry.npmjs.org/ul/-/ul-5.2.15.tgz",
"integrity": "sha512-svLEUy8xSCip5IWnsRa0UOg+2zP0Wsj4qlbjTmX6GJSmvKMHADBuHOm1dpNkWqWPIGuVSqzUkV3Cris5JrlTRQ==",
"requires": {
"deffy": "^2.2.2",
"typpy": "^2.3.4"
}
},
"union-value": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
"integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
"dev": true,
"requires": {
"arr-union": "^3.1.0",
"get-value": "^2.0.6",
"is-extendable": "^0.1.1",
"set-value": "^2.0.1"
}
},
"unset-value": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
"integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
"dev": true,
"requires": {
"has-value": "^0.3.1",
"isobject": "^3.0.0"
},
"dependencies": {
"has-value": {
"version": "0.3.1",
"resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
"integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
"dev": true,
"requires": {
"get-value": "^2.0.3",
"has-values": "^0.1.4",
"isobject": "^2.0.0"
},
"dependencies": {
"isobject": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
"integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
"dev": true,
"requires": {
"isarray": "1.0.0"
}
}
}
},
"has-values": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
"integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=",
"dev": true
}
}
},
"unzip-response": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/unzip-response/-/unzip-response-1.0.2.tgz",
"integrity": "sha512-pwCcjjhEcpW45JZIySExBHYv5Y9EeL2OIGEfrSKp2dMUFGFv4CpvZkwJbVge8OvGH2BNNtJBx67DuKuJhf+N5Q=="
},
"uri-js": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"dev": true,
"requires": {
"punycode": "^2.1.0"
}
},
"urix": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
"integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=",
"dev": true
},
"url-parse": {
"version": "1.5.10",
"resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
"integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==",
"dev": true,
"requires": {
"querystringify": "^2.1.1",
"requires-port": "^1.0.0"
}
},
"url-parse-lax": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz",
"integrity": "sha512-BVA4lR5PIviy2PMseNd2jbFQ+jwSwQGdJejf5ctd1rEXt0Ypd7yanUK9+lYechVlN5VaTJGsu2U/3MDDu6KgBA==",
"requires": {
"prepend-http": "^1.0.1"
}
},
"url2": {
"version": "0.0.0",
"resolved": "https://registry.npmjs.org/url2/-/url2-0.0.0.tgz",
"integrity": "sha512-gb/XT1m2mnWOIbQwa5V9Dq2O07fkZbtu1K0WAAKuaNSX0c8psp2jovJTbbvPKCpimutdoK9jXOejDCtvQOoKOA=="
},
"use": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
"integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==",
"dev": true
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
},
"uuid": {
"version": "8.3.2",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
"integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==",
"dev": true,
"optional": true
},
"v8-compile-cache": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz",
"integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==",
"dev": true
},
"v8-to-istanbul": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-7.1.2.tgz",
"integrity": "sha512-TxNb7YEUwkLXCQYeudi6lgQ/SZrzNO4kMdlqVxaZPUIUjCv6iSSypUQX70kNBSERpQ8fk48+d61FXk+tgqcWow==",
"dev": true,
"requires": {
"@types/istanbul-lib-coverage": "^2.0.1",
"convert-source-map": "^1.6.0",
"source-map": "^0.7.3"
},
"dependencies": {
"source-map": {
"version": "0.7.3",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz",
"integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==",
"dev": true
}
}
},
"validate-npm-package-license": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz",
"integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==",
"requires": {
"spdx-correct": "^3.0.0",
"spdx-expression-parse": "^3.0.0"
}
},
"validate-npm-package-name": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz",
"integrity": "sha1-X6kS2B630MdK/BQN5zF/DKffQ34=",
"dev": true,
"requires": {
"builtins": "^1.0.3"
}
},
"w3c-hr-time": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz",
"integrity": "sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ==",
"dev": true,
"requires": {
"browser-process-hrtime": "^1.0.0"
}
},
"w3c-xmlserializer": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-2.0.0.tgz",
"integrity": "sha512-4tzD0mF8iSiMiNs30BiLO3EpfGLZUT2MSX/G+o7ZywDzliWQ3OPtTZ0PTC3B3ca1UAf4cJMHB+2Bf56EriJuRA==",
"dev": true,
"requires": {
"xml-name-validator": "^3.0.0"
}
},
"walker": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/walker/-/walker-1.0.7.tgz",
"integrity": "sha1-L3+bj9ENZ3JisYqITijRlhjgKPs=",
"dev": true,
"requires": {
"makeerror": "1.0.x"
}
},
"weak-map": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/weak-map/-/weak-map-1.0.8.tgz",
"integrity": "sha512-lNR9aAefbGPpHO7AEnY0hCFjz1eTkWCXYvkTRrTHs9qv8zJp+SkVYpzfLIFXQQiG3tVvbNFQgVg2bQS8YGgxyw=="
},
"webidl-conversions": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-6.1.0.tgz",
"integrity": "sha512-qBIvFLGiBpLjfwmYAaHPXsn+ho5xZnGvyGvsarywGNc8VyQJUMHJ8OBKGGrPER0okBeMDaan4mNBlgBROxuI8w==",
"dev": true
},
"whatwg-encoding": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz",
"integrity": "sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw==",
"dev": true,
"requires": {
"iconv-lite": "0.4.24"
}
},
"whatwg-mimetype": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz",
"integrity": "sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g==",
"dev": true
},
"whatwg-url": {
"version": "8.7.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-8.7.0.tgz",
"integrity": "sha512-gAojqb/m9Q8a5IV96E3fHJM70AzCkgt4uXYX2O7EmuyOnLrViCQlsEBmF9UQIu3/aeAIp2U17rtbpZWNntQqdg==",
"dev": true,
"requires": {
"lodash": "^4.7.0",
"tr46": "^2.1.0",
"webidl-conversions": "^6.1.0"
}
},
"which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
"requires": {
"isexe": "^2.0.0"
}
},
"which-module": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz",
"integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=",
"dev": true
},
"wide-align": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz",
"integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==",
"requires": {
"string-width": "^1.0.2 || 2"
}
},
"word-wrap": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz",
"integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==",
"dev": true
},
"wordwrap": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
"integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=",
"dev": true
},
"wordwrapjs": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-4.0.1.tgz",
"integrity": "sha512-kKlNACbvHrkpIw6oPeYDSmdCTu2hdMHoyXLTcUKala++lx5Y+wjJ/e474Jqv5abnVmwxw08DiTuHmw69lJGksA==",
"dev": true,
"requires": {
"reduce-flatten": "^2.0.0",
"typical": "^5.2.0"
},
"dependencies": {
"typical": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/typical/-/typical-5.2.0.tgz",
"integrity": "sha512-dvdQgNDNJo+8B2uBQoqdb11eUCE1JQXhvjC/CZtgvZseVd5TYMXnq0+vuUemXbd/Se29cTaUuPX3YIc2xgbvIg==",
"dev": true
}
}
},
"wrap-ansi": {
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
"integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
"dev": true,
"requires": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
},
"string-width": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
"integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.0"
}
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
}
}
},
"wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
},
"write-file-atomic": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz",
"integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==",
"dev": true,
"requires": {
"imurmurhash": "^0.1.4",
"is-typedarray": "^1.0.0",
"signal-exit": "^3.0.2",
"typedarray-to-buffer": "^3.1.5"
}
},
"ws": {
"version": "7.5.4",
"resolved": "https://registry.npmjs.org/ws/-/ws-7.5.4.tgz",
"integrity": "sha512-zP9z6GXm6zC27YtspwH99T3qTG7bBFv2VIkeHstMLrLlDJuzA7tQ5ls3OJ1hOGGCzTQPniNJoHXIAOS0Jljohg==",
"dev": true
},
"xml-name-validator": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-3.0.0.tgz",
"integrity": "sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==",
"dev": true
},
"xmlchars": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz",
"integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==",
"dev": true
},
"y18n": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz",
"integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==",
"dev": true
},
"yallist": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
"integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="
},
"yargs": {
"version": "15.4.1",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz",
"integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==",
"dev": true,
"requires": {
"cliui": "^6.0.0",
"decamelize": "^1.2.0",
"find-up": "^4.1.0",
"get-caller-file": "^2.0.1",
"require-directory": "^2.1.1",
"require-main-filename": "^2.0.0",
"set-blocking": "^2.0.0",
"string-width": "^4.2.0",
"which-module": "^2.0.0",
"y18n": "^4.0.0",
"yargs-parser": "^18.1.2"
},
"dependencies": {
"ansi-regex": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
"integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
},
"string-width": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
"integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.0"
}
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
}
}
},
"yargs-parser": {
"version": "18.1.3",
"resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz",
"integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==",
"dev": true,
"requires": {
"camelcase": "^5.0.0",
"decamelize": "^1.2.0"
}
}
}
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/Makefile | .PHONY: style check-style test
DATA_DIR = data
dir_guard=@mkdir -p $(@D)
# Format source code automatically
style:
npm run lint
# Check the source code is formatted correctly
check-style:
npm run lint-check
TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json $(DATA_DIR)/tokenizer-wiki.json $(DATA_DIR)/bert-wiki.json
# Launch the test suite
test: $(TESTS_RESOURCES)
npm run test
$(DATA_DIR)/big.txt :
$(dir_guard)
wget https://norvig.com/big.txt -O $@
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
head -100 $(DATA_DIR)/big.txt > $@
$(DATA_DIR)/roberta.json :
$(dir_guard)
wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
$(DATA_DIR)/tokenizer-wiki.json :
$(dir_guard)
wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json -O $@
$(DATA_DIR)/bert-wiki.json :
$(dir_guard)
wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json -O $@
| 0 |
hf_public_repos/tokenizers/bindings/node/examples | hf_public_repos/tokenizers/bindings/node/examples/documentation/pipeline.test.ts | /* eslint-disable */
var globRequire = require;
describe("pipelineExample", () => {
// This is a hack to let us require modules using paths similar to the ones a user of the package would use
function require(mod: string) {
if (mod.startsWith("tokenizers/")) {
let path = mod.slice("tokenizers/".length);
return globRequire("../../lib/" + path);
} else {
return globRequire(mod);
}
}
let console = {
log: (..._args: any[]) => {}
};
it("shows pipeline parts", async () => {
// START reload_tokenizer
let { Tokenizer } = require("tokenizers/bindings/tokenizer");
let tokenizer = Tokenizer.fromFile("data/tokenizer-wiki.json");
// END reload_tokenizer
// START setup_normalizer
let { sequenceNormalizer, nfdNormalizer, stripAccentsNormalizer } = require("tokenizers/bindings/normalizers");
let normalizer = sequenceNormalizer([nfdNormalizer(), stripAccentsNormalizer()]);
// END setup_normalizer
// START test_normalizer
let normalized = normalizer.normalizeString("Héllò hôw are ü?")
// "Hello how are u?"
// END test_normalizer
expect(normalized).toEqual("Hello how are u?");
// START replace_normalizer
tokenizer.setNormalizer(normalizer)
// END replace_normalizer
// START setup_pre_tokenizer
let { whitespacePreTokenizer } = require("tokenizers/bindings/pre-tokenizers");
var preTokenizer = whitespacePreTokenizer();
var preTokenized = preTokenizer.preTokenizeString("Hello! How are you? I'm fine, thank you.");
// END setup_pre_tokenizer
expect(preTokenized).toEqual([
["Hello", [0, 5]],
["!", [5, 6]],
["How", [7, 10]],
["are", [11, 14]],
["you", [15, 18]],
["?", [18, 19]],
["I", [20, 21]],
["'", [21, 22]],
['m', [22, 23]],
["fine", [24, 28]],
[",", [28, 29]],
["thank", [30, 35]],
["you", [36, 39]],
[".", [39, 40]]
]);
// START combine_pre_tokenizer
let { sequencePreTokenizer, digitsPreTokenizer } = require("tokenizers/bindings/pre-tokenizers");
var preTokenizer = sequencePreTokenizer([whitespacePreTokenizer(), digitsPreTokenizer(true)]);
var preTokenized = preTokenizer.preTokenizeString("Call 911!");
// END combine_pre_tokenizer
// START replace_pre_tokenizer
tokenizer.setPreTokenizer(preTokenizer)
// END replace_pre_tokenizer
// START setup_processor
let { templateProcessing } = require("tokenizers/bindings/post-processors");
tokenizer.setPostProcessor(templateProcessing(
"[CLS] $A [SEP]",
"[CLS] $A [SEP] $B:1 [SEP]:1",
[["[CLS]", 1], ["[SEP]", 2]]
));
// END setup_processor
// START test_decoding
let { promisify } = require('util');
let encode = promisify(tokenizer.encode.bind(tokenizer));
let decode = promisify(tokenizer.decode.bind(tokenizer));
let output = await encode("Hello, y'all! How are you 😁 ?");
console.log(output.getIds());
// [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
let decoded = await decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2], true);
// "Hello , y ' all ! How are you ?"
// END test_decoding
expect(decoded).toEqual("Hello , y ' all ! How are you ?");
});
it.skip("trains the tokenizer", async () => {
// START bert_setup_tokenizer
let { Tokenizer } = require("tokenizers/bindings/tokenizer");
let { WordPiece } = require("tokenizers/bindings/models");
let bertTokenizer = new Tokenizer(WordPiece.init({}, { unkToken: "[UNK]" }));
// END bert_setup_tokenizer
// START bert_setup_normalizer
let { sequenceNormalizer, lowercaseNormalizer, nfdNormalizer, stripAccentsNormalizer }
= require("tokenizers/bindings/normalizers");
bertTokenizer.setNormalizer(sequenceNormalizer([
nfdNormalizer(), lowercaseNormalizer(), stripAccentsNormalizer()
]))
// END bert_setup_normalizer
// START bert_setup_pre_tokenizer
let { whitespacePreTokenizer } = require("tokenizers/bindings/pre-tokenizers");
bertTokenizer.setPreTokenizer(whitespacePreTokenizer());
// END bert_setup_pre_tokenizer
// START bert_setup_processor
let { templateProcessing } = require("tokenizers/bindings/post-processors");
bertTokenizer.setPostProcessor(templateProcessing(
"[CLS] $A [SEP]",
"[CLS] $A [SEP] $B:1 [SEP]:1",
[["[CLS]", 1], ["[SEP]", 2]]
));
// END bert_setup_processor
// START bert_train_tokenizer
let { wordPieceTrainer } = require("tokenizers/bindings/trainers");
let trainer = wordPieceTrainer({
vocabSize: 30522,
specialTokens: ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
});
let files = ["test", "train", "valid"].map(split => `data/wikitext-103-raw/wiki.${split}.raw`);
bertTokenizer.train(files, trainer);
bertTokenizer.save("data/bert-wiki.json")
// END bert_train_tokenizer
});
it("shows a full bert example", async () => {
let { Tokenizer } = require("tokenizers/bindings/tokenizer");
let bertTokenizer = await Tokenizer.fromFile("data/bert-wiki.json")
// START bert_test_decoding
let { promisify } = require("util");
let encode = promisify(bertTokenizer.encode.bind(bertTokenizer));
let decode = promisify(bertTokenizer.decode.bind(bertTokenizer));
let output = await encode("Welcome to the 🤗 Tokenizers library.");
console.log(output.getTokens());
// ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]
var decoded = await decode(output.getIds(), true);
// "welcome to the tok ##eni ##zer ##s library ."
// END bert_test_decoding
expect(decoded).toEqual("welcome to the tok ##eni ##zer ##s library .");
// START bert_proper_decoding
let { wordPieceDecoder } = require("tokenizers/bindings/decoders");
bertTokenizer.setDecoder(wordPieceDecoder());
var decoded = await decode(output.getIds(), true);
// "welcome to the tokenizers library."
// END bert_proper_decoding
expect(decoded).toEqual("welcome to the tokenizers library.");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/examples | hf_public_repos/tokenizers/bindings/node/examples/documentation/quicktour.test.ts | /* eslint-disable */
var globRequire = require;
describe("quicktourExample", () => {
function require(mod: string) {
if (mod.startsWith("tokenizers/")) {
let path = mod.slice("tokenizers/".length);
return globRequire("../../lib/" + path);
} else {
return globRequire(mod);
}
}
it.skip("trains the tokenizer", async () => {
// START init_tokenizer
let { Tokenizer } = require("tokenizers/bindings/tokenizer");
let { BPE } = require("tokenizers/bindings/models");
let tokenizer = new Tokenizer(BPE.init({}, [], { unkToken: "[UNK]" }));
// END init_tokenizer
// START init_trainer
let { bpeTrainer } = require("tokenizers/bindings/trainers");
let trainer = bpeTrainer({
specialTokens: ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
});
// END init_trainer
// START init_pretok
let { whitespacePreTokenizer } = require("tokenizers/bindings/pre-tokenizers");
tokenizer.setPreTokenizer(whitespacePreTokenizer());
// END init_pretok
// START train
let files = ["test", "train", "valid"].map(split => `data/wikitext-103-raw/wiki.${split}.raw`);
tokenizer.train(files, trainer);
// END train
// START save
tokenizer.save("data/tokenizer-wiki.json");
// END save
});
it("shows a quicktour example", async () => {
let { Tokenizer } = require("tokenizers/bindings/tokenizer");
let console = {
log: (..._args: any[]) => {}
};
// START reload_tokenizer
let tokenizer = Tokenizer.fromFile("data/tokenizer-wiki.json");
// END reload_tokenizer
// START encode
let { promisify } = require('util');
let encode = promisify(tokenizer.encode.bind(tokenizer));
var output = await encode("Hello, y'all! How are you 😁 ?");
// END encode
// START print_tokens
console.log(output.getTokens());
// ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
// END print_tokens
expect(output.getTokens()).toEqual([
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
]);
// START print_ids
console.log(output.getIds());
// [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
// END print_ids
expect(output.getIds()).toEqual([27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]);
// START print_offsets
let offsets = output.getOffsets();
console.log(offsets[9]);
// (26, 27)
// END print_offsets
expect(offsets[9]).toEqual([26, 27]);
// START use_offsets
let { slice } = require("tokenizers/bindings/utils");
let sentence = "Hello, y'all! How are you 😁 ?"
let [start, end] = offsets[9];
console.log(slice(sentence, start, end));
// "😁"
// END use_offsets
expect(slice(sentence, start, end)).toEqual("😁");
// START check_sep
console.log(tokenizer.tokenToId("[SEP]"));
// 2
// END check_sep
expect(tokenizer.tokenToId("[SEP]")).toEqual(2);
// START init_template_processing
let { templateProcessing } = require("tokenizers/bindings/post-processors");
tokenizer.setPostProcessor(templateProcessing(
"[CLS] $A [SEP]",
"[CLS] $A [SEP] $B:1 [SEP]:1",
[
["[CLS]", tokenizer.tokenToId("[CLS]")],
["[SEP]", tokenizer.tokenToId("[SEP]")],
],
));
// END init_template_processing
// START print_special_tokens
var output = await encode("Hello, y'all! How are you 😁 ?");
console.log(output.getTokens());
// ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
// END print_special_tokens
expect(output.getTokens()).toEqual([
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]);
// START print_special_tokens_pair
var output = await encode("Hello, y'all!", "How are you 😁 ?");
console.log(output.getTokens());
// ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
// END print_special_tokens_pair
expect(output.getTokens()).toEqual([
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"[SEP]",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]);
// START print_type_ids
console.log(output.getTypeIds());
// [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
// END print_type_ids
expect(output.getTypeIds()).toEqual([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]);
// START encode_batch
let encodeBatch = promisify(tokenizer.encodeBatch.bind(tokenizer));
var output = await encodeBatch(["Hello, y'all!", "How are you 😁 ?"]);
// END encode_batch
// START encode_batch_pair
var output = await encodeBatch(
[["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
);
// END encode_batch_pair
// START enable_padding
tokenizer.setPadding({ padId: 3, padToken: "[PAD]" });
// END enable_padding
// START print_batch_tokens
var output = await encodeBatch(["Hello, y'all!", "How are you 😁 ?"]);
console.log(output[1].getTokens());
// ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
// END print_batch_tokens
expect(output[1].getTokens()).toEqual(["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]);
// START print_attention_mask
console.log(output[1].getAttentionMask());
// [1, 1, 1, 1, 1, 1, 1, 0]
// END print_attention_mask
expect(output[1].getAttentionMask()).toEqual([1, 1, 1, 1, 1, 1, 1, 0]);
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node | hf_public_repos/tokenizers/bindings/node/lib/index.ts | // export * from "./bindings";
export * from "./implementations/tokenizers";
export * from "./bindings/enums";
export { slice } from "./bindings/utils";
export {
AddedToken,
AddedTokenOptions,
PaddingConfiguration,
PaddingOptions,
InputSequence,
EncodeInput,
EncodeOptions,
Tokenizer,
TruncationConfiguration,
TruncationOptions,
} from "./bindings/tokenizer";
export * as models from "./bindings/models";
export * as normalizers from "./bindings/normalizers";
export * as pre_tokenizers from "./bindings/pre-tokenizers";
export * as decoders from "./bindings/decoders";
export * as post_processors from "./bindings/post-processors";
export * as trainers from "./bindings/trainers";
export { Encoding } from "./implementations/encoding";
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/implementations/encoding.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { RawEncoding } from "../bindings/raw-encoding";
import { Encoding } from "./encoding";
describe("Encoding", () => {
let encoding: Encoding;
const rawEncodingMock = jest.fn<Partial<RawEncoding>, any>();
describe("ids", () => {
const getIdsMock = jest.fn(() => [3]);
const m = rawEncodingMock.mockImplementation(() => ({
getIds: getIdsMock,
}));
encoding = new Encoding(m() as RawEncoding);
it("returns the ids from the raw encoding when not called before", () => {
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(1);
expect(ids).toEqual([3]);
});
it("returns the ids without using the raw encoding when already called before", () => {
getIdsMock.mockReset();
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(0);
expect(ids).toEqual([3]);
});
});
describe("pad", () => {
it('resets internal "cache" properties', () => {
const getIdsMock = jest.fn(() => [4]);
const m = rawEncodingMock.mockImplementation(() => ({
getIds: getIdsMock,
pad: jest.fn(),
}));
encoding = new Encoding(m() as RawEncoding);
encoding["_ids"] = [3];
encoding.pad(10);
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(1);
expect(ids).toEqual([4]);
});
});
describe("truncate", () => {
it('resets internal "cache" properties', () => {
const getIdsMock = jest.fn(() => [4]);
const m = rawEncodingMock.mockImplementation(() => ({
getIds: getIdsMock,
truncate: jest.fn(),
}));
encoding = new Encoding(m() as RawEncoding);
encoding["_ids"] = [3];
encoding.truncate(10);
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(1);
expect(ids).toEqual([4]);
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/implementations/encoding.ts | import { PaddingOptions, RawEncoding } from "../bindings/raw-encoding";
import { mergeEncodings } from "../bindings/utils";
export class Encoding {
private _attentionMask?: number[];
private _ids?: number[];
private _length?: number;
private _offsets?: [number, number][];
private _overflowing?: Encoding[];
private _specialTokensMask?: number[];
private _tokens?: string[];
private _typeIds?: number[];
private _wordIndexes?: (number | undefined)[];
private _sequenceIndexes?: (number | undefined)[];
constructor(private _rawEncoding: RawEncoding) {}
/**
* Merge a list of Encoding into one final Encoding
* @param encodings The list of encodings to merge
* @param [growingOffsets=false] Whether the offsets should accumulate while merging
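* A hedged usage sketch (assumes `first` and `second` are `Encoding` instances
* obtained elsewhere, e.g. from `BaseTokenizer.encode`):
* @example
* // Concatenate two encodings; with `growingOffsets` the offsets of the second
* // encoding continue after the first instead of restarting at 0.
* const merged = Encoding.merge([first, second], true);
* console.log(merged.tokens); // tokens of `first` followed by tokens of `second`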
*/
static merge(encodings: Encoding[], growingOffsets?: boolean): Encoding {
const mergedRaw = mergeEncodings(
encodings.map((e) => e.rawEncoding),
growingOffsets
);
return new Encoding(mergedRaw);
}
/**
* Number of sequences
*/
get nSequences(): number {
return this._rawEncoding.getNSequences();
}
setSequenceId(seqId: number): void {
return this._rawEncoding.setSequenceId(seqId);
}
/**
* Attention mask
*/
get attentionMask(): number[] {
if (this._attentionMask) {
return this._attentionMask;
}
return (this._attentionMask = this._rawEncoding.getAttentionMask());
}
/**
* Tokenized ids
*/
get ids(): number[] {
if (this._ids) {
return this._ids;
}
return (this._ids = this._rawEncoding.getIds());
}
/**
* Number of tokens
*/
get length(): number {
if (this._length !== undefined) {
return this._length;
}
return (this._length = this._rawEncoding.getLength());
}
/**
* Offsets
*/
get offsets(): [number, number][] {
if (this._offsets) {
return this._offsets;
}
return (this._offsets = this._rawEncoding.getOffsets());
}
/**
* Overflowing encodings, after truncation
*/
get overflowing(): Encoding[] {
if (this._overflowing) {
return this._overflowing;
}
return (this._overflowing = this._rawEncoding
.getOverflowing()
.map((e) => new Encoding(e)));
}
/**
* __⚠️ DANGER ZONE: do not touch unless you know what you're doing ⚠️__
* Access to the `rawEncoding` returned by the internal Rust code.
* @private
* @ignore
* @since 0.6.0
*/
get rawEncoding(): Readonly<RawEncoding> {
return this._rawEncoding;
}
/**
* Special tokens mask
*/
get specialTokensMask(): number[] {
if (this._specialTokensMask) {
return this._specialTokensMask;
}
return (this._specialTokensMask = this._rawEncoding.getSpecialTokensMask());
}
/**
* Tokenized string
*/
get tokens(): string[] {
if (this._tokens) {
return this._tokens;
}
return (this._tokens = this._rawEncoding.getTokens());
}
/**
* Type ids
*/
get typeIds(): number[] {
if (this._typeIds) {
return this._typeIds;
}
return (this._typeIds = this._rawEncoding.getTypeIds());
}
/**
* The tokenized words indexes
*/
get wordIndexes(): (number | undefined)[] {
if (this._wordIndexes) {
return this._wordIndexes;
}
return (this._wordIndexes = this._rawEncoding.getWordIds());
}
get sequenceIndexes(): (number | undefined)[] {
if (this._sequenceIndexes) {
return this._sequenceIndexes;
}
return (this._sequenceIndexes = this._rawEncoding.getSequenceIds());
}
/**
* Get the encoded tokens corresponding to the word at the given index in one of the input
* sequences, with the form [startToken, endToken+1]
* @param word The position of a word in one of the input sequences
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToTokens(word: number, seqId?: number): [number, number] | undefined {
return this._rawEncoding.wordToTokens(word, seqId);
}
/**
* Get the offsets of the word at the given index in the input sequence
* @param word The index of the word in the input sequence
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToChars(word: number, seqId?: number): [number, number] | undefined {
return this._rawEncoding.wordToChars(word, seqId);
}
/**
* Get the index of the sequence that contains the given token
* @param token The index of the token in the encoded sequence
*/
tokenToSequence(token: number): number | undefined {
return this._rawEncoding.tokenToSequence(token);
}
/**
* Get the offsets of the token at the given index
*
* The returned offsets are related to the input sequence that contains the
* token. In order to determine in which input sequence it belongs, you
* must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
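* A hedged sketch (assumes `encoding` was produced from a pair of input sequences;
* the indexes and offsets below are illustrative):
* @example
* const seq = encoding.tokenToSequence(5); // e.g. 1: token 5 belongs to the second sequence
* const span = encoding.tokenToChars(5);   // e.g. [10, 14]: char offsets within that sequence
* // `span` is only meaningful together with `seq`, because offsets restart for each sequence.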
*/
tokenToChars(token: number): [number, number] | undefined {
return this._rawEncoding.tokenToChars(token);
}
/**
* Get the word that contains the token at the given index
*
* The returned index is related to the input sequence that contains the
* token. In order to determine in which input sequence it belongs, you
* must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
*/
tokenToWord(token: number): number | undefined {
return this._rawEncoding.tokenToWord(token);
}
/**
* Find the index of the token at the position of the given char
* @param pos The position of a char in one of the input strings
* @param seqId The index of the input sequence that contains said char
* @since 0.6.0
*/
charToToken(pos: number, seqId?: number): number | undefined {
return this._rawEncoding.charToToken(pos, seqId);
}
/**
* Get the word that contains the given char
* @param pos The position of a char in the input string
* @param seqId The index of the input sequence that contains said char
* @since 0.7.0
*/
charToWord(pos: number, seqId?: number): number | undefined {
return this._rawEncoding.charToWord(pos, seqId);
}
/**
* Pad the current Encoding at the given length
*
* @param length The length at which to pad
* @param [options] Padding options
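* A minimal sketch (the target length is illustrative; default padding options are used):
* @example
* encoding.pad(128);
* console.log(encoding.length);        // 128 when the original encoding was shorter
* console.log(encoding.attentionMask); // trailing zeros mark the added padding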
*/
pad(length: number, options?: PaddingOptions): void {
this._rawEncoding.pad(length, options);
this.resetInternalProperties();
}
/**
* Truncate the current Encoding at the given max length
*
* @param length The maximum length to be kept
* @param [stride=0] The length of the previous first sequence
* to be included in the overflowing sequence
* @param [direction='right'] Truncate direction
*/
truncate(length: number, stride?: number, direction = "right"): void {
this._rawEncoding.truncate(length, stride, direction);
this.resetInternalProperties();
}
private resetInternalProperties(): void {
for (const prop of [
"_attentionMask",
"_ids",
"_length",
"_offsets",
"_overflowing",
"_specialTokensMask",
"_tokens",
"_typeIds",
"_wordIndexes",
"_sequenceIndexes",
]) {
delete this[prop as keyof this];
}
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/base.tokenizer.ts | import { promisify } from "util";
import { PostProcessor } from "../../bindings/post-processors";
import {
AddedToken,
EncodeInput,
EncodeOptions,
InputSequence,
PaddingConfiguration,
PaddingOptions,
Tokenizer,
TruncationConfiguration,
TruncationOptions,
} from "../../bindings/tokenizer";
import { Encoding } from "../encoding";
export type Token = string | AddedToken;
// eslint-disable-next-line @typescript-eslint/ban-types
export class BaseTokenizer<TConfig extends object> {
private _truncation?: TruncationConfiguration;
private _padding?: PaddingConfiguration;
constructor(
protected tokenizer: Tokenizer,
/**
* @since 0.4.0
*/
readonly configuration: Readonly<TConfig>
) {}
/**
* Instantiate a new Tokenizer from the given file
* @param path Path to a file containing a Tokenizer
*/
static fromFile = Tokenizer.fromFile;
/**
* Instantiate a new Tokenizer from the given JSON string
* @param s A JSON string representation of the Tokenizer
*/
static fromString = Tokenizer.fromString;
/**
* Truncation configuration if enabled, `null` otherwise.
*
* @see {@link BaseTokenizer#setTruncation} to change truncation configuration
* @see {@link BaseTokenizer#disableTruncation} to disable truncation
* @since 0.4.0
*/
get truncation(): Readonly<TruncationConfiguration> | null {
return this._truncation ?? null;
}
/**
* Padding configuration if enabled, `null` otherwise
*
* @see {@link BaseTokenizer#setPadding} to change padding configuration
* @see {@link BaseTokenizer#disablePadding} to disable padding
* @since 0.4.0
*/
get padding(): Readonly<PaddingConfiguration> | null {
return this._padding ?? null;
}
/**
* Add the given tokens to the vocabulary
*
* @param tokens A list of tokens to add to the vocabulary.
* Each token can either be a string, or an instance of AddedToken.
*/
addTokens(tokens: Token[]): number {
return this.tokenizer.addTokens(tokens);
}
/**
* Add the given special tokens to the vocabulary, and treat them as special tokens.
* The special tokens will never be processed by the model, and will be removed while decoding.
*
* @param tokens The list of special tokens to add.
* Each token can either be a string, or an instance of AddedToken
* @returns The number of tokens that were added to the vocabulary
*/
addSpecialTokens(tokens: Token[]): number {
return this.tokenizer.addSpecialTokens(tokens);
}
/**
* Encode the given sequence
*
* @param sequence The sequence to encode
* @param [pair] The optional pair sequence
* @param [options] Some options to customize the encoding
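* A hedged usage sketch (assumes an async context and a tokenizer built with one of
* the `fromOptions` factories exported by this package):
* @example
* const encoding = await tokenizer.encode("Hello, y'all!", "How are you?");
* console.log(encoding.tokens); // the produced tokens
* console.log(encoding.ids);    // the corresponding ids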
*/
async encode(
sequence: InputSequence,
pair?: InputSequence,
options?: EncodeOptions
): Promise<Encoding> {
const encode = promisify(this.tokenizer.encode.bind(this.tokenizer));
const rawEncoding = await encode(sequence, pair ?? null, options ?? null);
return new Encoding(rawEncoding);
}
/**
* Encode the given sequences or pair of sequences
*
* @param sequences A list of sequences or pair of sequences.
* The list can contain both at the same time.
* @param [options] Some options to customize the encoding
*/
async encodeBatch(
sequences: EncodeInput[],
options?: EncodeOptions
): Promise<Encoding[]> {
const encodeBatch = promisify(this.tokenizer.encodeBatch.bind(this.tokenizer));
const rawEncodings = await encodeBatch(sequences, options);
return rawEncodings.map((e) => new Encoding(e));
}
/**
* Decode the given list of ids to a string sequence
*
* @param ids A list of ids to be decoded
* @param [skipSpecialTokens=true] Whether to remove all the special tokens from the output string
*/
decode(ids: number[], skipSpecialTokens = true): Promise<string> {
const decode = promisify(this.tokenizer.decode.bind(this.tokenizer));
return decode(ids, skipSpecialTokens);
}
/**
* Decode the list of sequences to a list of string sequences
*
* @param ids A list of sequences of ids to be decoded
* @param [skipSpecialTokens=true] Whether to remove all the special tokens from the output strings
*/
decodeBatch(ids: number[][], skipSpecialTokens = true): Promise<string[]> {
const decodeBatch = promisify(this.tokenizer.decodeBatch.bind(this.tokenizer));
return decodeBatch(ids, skipSpecialTokens);
}
/**
* Enable/change truncation with specified options
*
* @param maxLength The maximum length at which to truncate
* @param [options] Additional truncation options
* @returns Full truncation configuration
*/
setTruncation(
maxLength: number,
options?: TruncationOptions
): Readonly<TruncationConfiguration> {
const result = this.tokenizer.setTruncation(maxLength, options);
return (this._truncation = result);
}
/**
* Disable truncation
*/
disableTruncation(): void {
this.tokenizer.disableTruncation();
delete this._truncation;
}
/**
* Enable/change padding with specified options
* @param [options] Padding options
* @returns Full padding configuration
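* A minimal sketch (the token and id values are illustrative):
* @example
* const config = tokenizer.setPadding({ padToken: "[PAD]", padId: 3 });
* // `config` is the full configuration (direction, padId, padToken, padTypeId)
* tokenizer.disablePadding(); // turn padding back off when it is no longer needed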
*/
setPadding(options?: PaddingOptions): Readonly<PaddingConfiguration> {
const result = this.tokenizer.setPadding(options);
return (this._padding = result);
}
/**
* Disable padding
*/
disablePadding(): void {
this.tokenizer.disablePadding();
delete this._padding;
}
/**
* Convert the given token id to its corresponding string
*
* @param id The token id to convert
* @returns The corresponding string if it exists
*/
idToToken(id: number): string | undefined {
return this.tokenizer.idToToken(id);
}
/**
* Convert the given token to its corresponding id
*
* @param token The token to convert
* @returns The corresponding id if it exists
*/
tokenToId(token: string): number | undefined {
return this.tokenizer.tokenToId(token);
}
/**
* Apply all the post-processing steps to the given encodings.
* The various steps are:
* 1. Truncate according to global params (@see setTruncation)
* 2. Apply the PostProcessor
* 3. Pad according to global params (@see setPadding)
* @param encoding The main Encoding to post process
* @param [pair] An optional pair Encoding
* @param [addSpecialTokens=true] Whether to add special tokens. Defaults to `true`.
* @since 0.6.0
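* A hedged sketch (assumes both encodings come from `encode` on this same tokenizer):
* @example
* const question = await tokenizer.encode("What is BPE?");
* const context = await tokenizer.encode("BPE merges frequent symbol pairs.");
* const processed = tokenizer.postProcess(question, context);
* // `processed` has been truncated, passed through the PostProcessor and padded
* // according to the tokenizer's current global settings.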
*/
postProcess(encoding: Encoding, pair?: Encoding, addSpecialTokens?: boolean): Encoding {
const rawEncoding = this.tokenizer.postProcess(
encoding.rawEncoding,
pair?.rawEncoding,
addSpecialTokens
);
return new Encoding(rawEncoding);
}
/**
* Change the post-processor to use with this Tokenizer
* @param postProcessor New post-processor to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the post-processor is already used in another Tokenizer
*/
setPostProcessor(processor: PostProcessor): void {
return this.tokenizer.setPostProcessor(processor);
}
/**
* Save the Tokenizer as JSON to the given path
* @param path Path to the JSON file to write
* @param [pretty=false] Whether the JSON string should be prettified
*/
save(path: string, pretty?: boolean): void {
return this.tokenizer.save(path, pretty);
}
/**
* Get a serialized JSON version of the Tokenizer as a string
* @param [pretty=false] Whether the JSON string should be prettified
*/
toString(pretty?: boolean): string {
return this.tokenizer.toString(pretty);
}
}
/**
* Get the string content from a token, which can be a string or AddedToken
* @param token The token from which to get the content
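* A tiny illustration:
* @example
* getTokenContent("[MASK]"); // "[MASK]"
* // For an AddedToken instance, the wrapped string is returned via `getContent()`.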
*/
export function getTokenContent(token: Token): string {
return typeof token === "string" ? token : token.getContent();
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/base.tokenizer.test.ts | import {
PaddingDirection,
TruncationDirection,
TruncationStrategy,
} from "../../bindings/enums";
import { BPE } from "../../bindings/models";
import {
PaddingConfiguration,
Tokenizer,
TruncationConfiguration,
} from "../../bindings/tokenizer";
import { BaseTokenizer } from "./base.tokenizer";
describe("BaseTokenizer", () => {
let tokenizer: BaseTokenizer<Record<string, unknown>>;
beforeEach(() => {
// Clear all instances and calls to constructor and all methods:
// TokenizerMock.mockClear();
const model = BPE.empty();
const t = new Tokenizer(model);
tokenizer = new BaseTokenizer(t, {});
});
describe("truncation", () => {
it("returns `null` if no truncation is set", () => {
expect(tokenizer.truncation).toBeNull();
});
it("returns configuration when `setTruncation` has been called", () => {
tokenizer.setTruncation(2);
const expectedConfig: TruncationConfiguration = {
maxLength: 2,
strategy: TruncationStrategy.LongestFirst,
direction: TruncationDirection.Right,
stride: 0,
};
expect(tokenizer.truncation).toEqual(expectedConfig);
});
it("returns null when `disableTruncation` has been called", () => {
tokenizer.setTruncation(2);
tokenizer.disableTruncation();
expect(tokenizer.truncation).toBeNull();
});
});
describe("padding", () => {
it("returns `null` if no padding is set", () => {
expect(tokenizer.padding).toBeNull();
});
it("returns configuration when `setPadding` has been called", () => {
tokenizer.setPadding();
const expectedConfig: PaddingConfiguration = {
direction: PaddingDirection.Right,
padId: 0,
padToken: "[PAD]",
padTypeId: 0,
};
expect(tokenizer.padding).toEqual(expectedConfig);
});
it("returns null when `disablePadding` has been called", () => {
tokenizer.setPadding();
tokenizer.disablePadding();
expect(tokenizer.padding).toBeNull();
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/bert-wordpiece.tokenizer.test.ts | import { BertWordPieceOptions, BertWordPieceTokenizer } from "./bert-wordpiece.tokenizer";
const MOCKS_DIR = __dirname + "/__mocks__";
describe("BertWordPieceTokenizer", () => {
describe("fromOptions", () => {
it("does not throw any error if no vocabFile is provided", async () => {
const tokenizer = await BertWordPieceTokenizer.fromOptions();
expect(tokenizer).toBeDefined();
});
describe("when a vocabFile is provided and `addSpecialTokens === true`", () => {
it("throws a `sepToken error` if the `sepToken` is not found in the vocabulary", async () => {
const options: BertWordPieceOptions = {
vocabFile: MOCKS_DIR + "/bert-vocab-empty.txt",
};
await expect(BertWordPieceTokenizer.fromOptions(options)).rejects.toThrow(
"sepToken not found in the vocabulary"
);
});
it("throws a `clsToken error` if the `clsToken` is not found in the vocabulary", async () => {
const options: BertWordPieceOptions = {
vocabFile: MOCKS_DIR + "/bert-vocab-without-cls.txt",
};
await expect(BertWordPieceTokenizer.fromOptions(options)).rejects.toThrow(
"clsToken not found in the vocabulary"
);
});
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/bpe.tokenizer.ts | import { promisify } from "util";
import { bpeDecoder } from "../../bindings/decoders";
import { BPE, BPEOptions, Model } from "../../bindings/models";
import {
lowercaseNormalizer,
nfkcNormalizer,
sequenceNormalizer,
} from "../../bindings/normalizers";
import { whitespaceSplitPreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { bpeTrainer } from "../../bindings/trainers";
import { BaseTokenizer, getTokenContent, Token } from "./base.tokenizer";
export interface BPETokenizerOptions {
/**
* The BPE dropout to use. Must be a float between 0 and 1
*/
dropout?: number;
/**
* @default false
*/
lowercase?: boolean;
mergesFile?: string;
/**
* @default "</w>"
*/
suffix?: string;
/**
* The unknown token to be used by the model
* @default "<unk>"
*/
unkToken?: Token;
vocabFile?: string;
}
export interface BPETokenizerTrainOptions {
/**
* @default []
*/
initialAlphabet?: string[];
/**
* @default 1000
*/
limitAlphabet?: number;
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
* @default ["<unk>"]
*/
specialTokens?: Token[];
/**
* @default "</w>"
*/
suffix?: string;
/**
* @default 30000
*/
vocabSize?: number;
}
type BPETokenizerConfig = BPETokenizerOptions &
Required<Pick<BPETokenizerOptions, "unkToken" | "suffix">>;
/**
* Original BPE Tokenizer.
* Represents the BPE algorithm, as introduced by Rico Sennrich (https://arxiv.org/abs/1508.07909)
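* A hedged end-to-end sketch (the file paths are illustrative, and the import assumes
* the package entry point re-exports this class, as done in lib/index.ts):
* @example
* import { BPETokenizer } from "tokenizers";
*
* const tokenizer = await BPETokenizer.fromOptions({
*   vocabFile: "./vocab.json",
*   mergesFile: "./merges.txt",
* });
* const encoding = await tokenizer.encode("Hello there!");
* console.log(encoding.tokens);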
*/
export class BPETokenizer extends BaseTokenizer<BPETokenizerConfig> {
private static readonly defaultBPEOptions: BPETokenizerConfig = {
suffix: "</w>",
unkToken: "<unk>",
};
private readonly defaultTrainOptions: Required<BPETokenizerTrainOptions> = {
initialAlphabet: [],
limitAlphabet: 1000,
minFrequency: 2,
showProgress: true,
specialTokens: ["<unk>"],
suffix: "</w>",
vocabSize: 30000,
};
private constructor(tokenizer: Tokenizer, configuration: BPETokenizerConfig) {
super(tokenizer, configuration);
}
/**
* Instantiates and returns a new BPE tokenizer
* @param [options] Optional tokenizer options
*/
static async fromOptions(options?: BPETokenizerOptions): Promise<BPETokenizer> {
const opts = { ...this.defaultBPEOptions, ...options };
const unkToken = getTokenContent(opts.unkToken);
let model: Model;
if (opts.vocabFile && opts.mergesFile) {
const modelOptions: BPEOptions = {
dropout: opts.dropout,
endOfWordSuffix: opts.suffix,
unkToken: unkToken,
};
const fromFile = promisify<string, string, BPEOptions, Model>(BPE.fromFile);
model = await fromFile(opts.vocabFile, opts.mergesFile, modelOptions);
} else {
model = BPE.empty();
}
const tokenizer = new Tokenizer(model);
if (tokenizer.tokenToId(unkToken) !== undefined) {
tokenizer.addSpecialTokens([opts.unkToken]);
}
if (opts.lowercase) {
tokenizer.setNormalizer(
sequenceNormalizer([nfkcNormalizer(), lowercaseNormalizer()])
);
} else {
tokenizer.setNormalizer(nfkcNormalizer());
}
tokenizer.setPreTokenizer(whitespaceSplitPreTokenizer());
const decoder = bpeDecoder(opts.suffix);
tokenizer.setDecoder(decoder);
return new BPETokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: BPETokenizerTrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = bpeTrainer(mergedOptions);
this.tokenizer.train(trainer, files);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/sentence-piece-bpe.tokenizer.ts | import { promisify } from "util";
import { metaspaceDecoder } from "../../bindings/decoders";
import { BPE, BPEOptions, Model } from "../../bindings/models";
import { nfkcNormalizer } from "../../bindings/normalizers";
import { metaspacePreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { bpeTrainer } from "../../bindings/trainers";
import { BaseTokenizer, getTokenContent, Token } from "./base.tokenizer";
export interface SentencePieceBPETokenizerOptions extends OptionsWithDefaults {
dropout?: number;
mergesFile?: string;
vocabFile?: string;
}
interface OptionsWithDefaults {
/**
* @default true
*/
addPrefixSpace?: boolean;
/**
* @default "▁"
*/
replacement?: string;
/**
* @default "<unk>"
*/
unkToken?: Token;
}
export interface SentencePieceBPETrainOptions {
/**
* @default []
*/
initialAlphabet?: string[];
/**
* @default 1000
*/
limitAlphabet?: number;
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
* @default ["<unk>"]
*/
specialTokens?: Token[];
/**
* @default 30000
*/
vocabSize?: number;
}
type SentencePieceBPETokenizerConfig = SentencePieceBPETokenizerOptions &
Required<OptionsWithDefaults>;
/**
* Represents the BPE algorithm, with the pretokenization used by SentencePiece
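* A hedged usage sketch (paths are illustrative; without vocab/merges files an empty
* BPE model is created and would need to be trained first):
* @example
* const tokenizer = await SentencePieceBPETokenizer.fromOptions({
*   vocabFile: "./spm-vocab.json",
*   mergesFile: "./spm-merges.txt",
* });
* const encoding = await tokenizer.encode("Hello world");
* console.log(encoding.tokens); // words are marked with the "▁" replacement character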
*/
export class SentencePieceBPETokenizer extends BaseTokenizer<SentencePieceBPETokenizerConfig> {
private static readonly defaultOptions: SentencePieceBPETokenizerConfig = {
addPrefixSpace: true,
replacement: "▁",
unkToken: "<unk>",
};
private readonly defaultTrainOptions: Required<SentencePieceBPETrainOptions> = {
initialAlphabet: [],
limitAlphabet: 1000,
minFrequency: 2,
showProgress: true,
specialTokens: ["<unk>"],
vocabSize: 30000,
};
private constructor(
tokenizer: Tokenizer,
configuration: SentencePieceBPETokenizerConfig
) {
super(tokenizer, configuration);
}
static async fromOptions(
options?: SentencePieceBPETokenizerOptions
): Promise<SentencePieceBPETokenizer> {
const opts = { ...this.defaultOptions, ...options };
const unkToken = getTokenContent(opts.unkToken);
let model: Model;
if (opts.vocabFile && opts.mergesFile) {
const modelOptions: BPEOptions = {
dropout: opts.dropout,
unkToken: unkToken,
};
const fromFile = promisify<string, string, BPEOptions, Model>(BPE.fromFile);
model = await fromFile(opts.vocabFile, opts.mergesFile, modelOptions);
} else {
model = BPE.empty();
}
const tokenizer = new Tokenizer(model);
if (tokenizer.tokenToId(unkToken) !== undefined) {
tokenizer.addSpecialTokens([opts.unkToken]);
}
tokenizer.setNormalizer(nfkcNormalizer());
const preTokenizer = metaspacePreTokenizer(opts.replacement, opts.addPrefixSpace);
tokenizer.setPreTokenizer(preTokenizer);
const decoder = metaspaceDecoder(opts.replacement, opts.addPrefixSpace);
tokenizer.setDecoder(decoder);
return new SentencePieceBPETokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: SentencePieceBPETrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = bpeTrainer(mergedOptions);
this.tokenizer.train(trainer, files);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/bert-wordpiece.tokenizer.ts | import { promisify } from "util";
import { wordPieceDecoder } from "../../bindings/decoders";
import { Model, WordPiece, WordPieceOptions } from "../../bindings/models";
import { bertNormalizer } from "../../bindings/normalizers";
import { bertProcessing } from "../../bindings/post-processors";
import { bertPreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { wordPieceTrainer } from "../../bindings/trainers";
import { BaseTokenizer, getTokenContent, Token } from "./base.tokenizer";
export interface BertWordPieceOptions {
/**
* @default true
*/
cleanText?: boolean;
/**
* @default "[CLS]"
*/
clsToken?: Token;
/**
* @default true
*/
handleChineseChars?: boolean;
/**
* @default true
*/
lowercase?: boolean;
/**
* @default "[MASK]"
*/
maskToken?: Token;
/**
* @default "[PAD]"
*/
padToken?: Token;
/**
* @default "[SEP]"
*/
sepToken?: Token;
/**
* @default true
*/
stripAccents?: boolean;
/**
* @default "[UNK]"
*/
unkToken?: Token;
vocabFile?: string;
/**
* The prefix to attach to subword units that don't represent the beginning of a word
* @default "##"
*/
wordpiecesPrefix?: string;
}
export interface BertWordPieceTrainOptions {
/**
* @default []
*/
initialAlphabet?: string[];
/**
* @default 1000
*/
limitAlphabet?: number;
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
* @default ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
*/
specialTokens?: Token[];
/**
* @default 30000
*/
vocabSize?: number;
/**
* The prefix to attach to subword units that don't represent the beginning of a word
* @default "##"
*/
wordpiecesPrefix?: string;
}
type BertTokenizerConfig = Required<Omit<BertWordPieceOptions, "vocabFile">> & {
vocabFile?: string;
};
/**
* Bert WordPiece Tokenizer
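* A hedged usage sketch (the vocab path is illustrative; the file must contain the
* [CLS] and [SEP] tokens for the post-processor to be wired up):
* @example
* const tokenizer = await BertWordPieceTokenizer.fromOptions({
*   vocabFile: "./bert-base-uncased-vocab.txt",
* });
* const encoding = await tokenizer.encode("Hello, y'all!", "How are you?");
* console.log(encoding.tokens); // ["[CLS]", ..., "[SEP]", ..., "[SEP]"]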
*/
export class BertWordPieceTokenizer extends BaseTokenizer<BertTokenizerConfig> {
private static readonly defaultBertOptions: BertTokenizerConfig = {
cleanText: true,
clsToken: "[CLS]",
handleChineseChars: true,
lowercase: true,
maskToken: "[MASK]",
padToken: "[PAD]",
sepToken: "[SEP]",
stripAccents: true,
unkToken: "[UNK]",
wordpiecesPrefix: "##",
};
private readonly defaultTrainOptions: Required<BertWordPieceTrainOptions> = {
initialAlphabet: [],
limitAlphabet: 1000,
minFrequency: 2,
showProgress: true,
specialTokens: ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
vocabSize: 30000,
wordpiecesPrefix: "##",
};
private constructor(tokenizer: Tokenizer, configuration: BertTokenizerConfig) {
super(tokenizer, configuration);
}
/**
* Instantiates and returns a new Bert WordPiece tokenizer
* @param [options] Optional tokenizer options
*/
static async fromOptions(
options?: BertWordPieceOptions
): Promise<BertWordPieceTokenizer> {
const opts = { ...this.defaultBertOptions, ...options };
let model: Model;
if (opts.vocabFile) {
const fromFile = promisify<string, WordPieceOptions, Model>(WordPiece.fromFile);
model = await fromFile(opts.vocabFile, {
unkToken: getTokenContent(opts.unkToken),
continuingSubwordPrefix: opts.wordpiecesPrefix,
});
} else {
model = WordPiece.empty();
}
const tokenizer = new Tokenizer(model);
for (const token of [
opts.clsToken,
opts.sepToken,
opts.unkToken,
opts.padToken,
opts.maskToken,
]) {
if (tokenizer.tokenToId(getTokenContent(token)) !== undefined) {
tokenizer.addSpecialTokens([token]);
}
}
const normalizer = bertNormalizer(opts);
tokenizer.setNormalizer(normalizer);
tokenizer.setPreTokenizer(bertPreTokenizer());
if (opts.vocabFile) {
const sepTokenId = tokenizer.tokenToId(getTokenContent(opts.sepToken));
if (sepTokenId === undefined) {
throw new Error("sepToken not found in the vocabulary");
}
const clsTokenId = tokenizer.tokenToId(getTokenContent(opts.clsToken));
if (clsTokenId === undefined) {
throw new Error("clsToken not found in the vocabulary");
}
const processor = bertProcessing(
[getTokenContent(opts.sepToken), sepTokenId],
[getTokenContent(opts.clsToken), clsTokenId]
);
tokenizer.setPostProcessor(processor);
}
const decoder = wordPieceDecoder(opts.wordpiecesPrefix);
tokenizer.setDecoder(decoder);
return new BertWordPieceTokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: BertWordPieceTrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = wordPieceTrainer(mergedOptions);
this.tokenizer.train(trainer, files);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/byte-level-bpe.tokenizer.ts | import { promisify } from "util";
import { byteLevelDecoder } from "../../bindings/decoders";
import { BPE, BPEOptions, Model } from "../../bindings/models";
import {
lowercaseNormalizer,
nfkcNormalizer,
sequenceNormalizer,
} from "../../bindings/normalizers";
import { byteLevelProcessing } from "../../bindings/post-processors";
import { byteLevelAlphabet, byteLevelPreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { bpeTrainer } from "../../bindings/trainers";
import { BaseTokenizer, Token } from "./base.tokenizer";
export interface ByteLevelBPETokenizerOptions {
/**
* @default false
*/
addPrefixSpace?: boolean;
/**
* The prefix to attach to subword units that don't represent the beginning of a word
*/
continuingSubwordPrefix?: string;
/**
* @default false
*/
lowercase?: boolean;
/**
* The BPE dropout to use. Must be a float between 0 and 1
*/
dropout?: number;
/**
* The suffix to attach to subword units that represent an end of word
*/
endOfWordSuffix?: string;
mergesFile?: string;
unicodeNormalizer?: string;
/**
* Whether to trim the whitespaces from the produced offsets
* @default false
*/
trimOffsets?: boolean;
vocabFile?: string;
}
export interface ByteLevelBPETrainOptions {
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
* @default []
*/
specialTokens?: Token[];
/**
* @default 30000
*/
vocabSize?: number;
}
type ByteLevelBPETokenizerConfig = ByteLevelBPETokenizerOptions &
Required<Pick<ByteLevelBPETokenizerOptions, "addPrefixSpace">>;
/**
* Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
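* A hedged training sketch (the corpus file names and vocabulary size are illustrative):
* @example
* const tokenizer = await ByteLevelBPETokenizer.fromOptions();
* await tokenizer.train(["./corpus-a.txt", "./corpus-b.txt"], { vocabSize: 8000 });
* tokenizer.save("./byte-level-bpe.json");
* const encoding = await tokenizer.encode("Hello world");
* console.log(encoding.tokens);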
*/
export class ByteLevelBPETokenizer extends BaseTokenizer<ByteLevelBPETokenizerConfig> {
private static readonly defaultOptions: ByteLevelBPETokenizerConfig = {
addPrefixSpace: false,
trimOffsets: false,
};
private readonly defaultTrainOptions: Required<ByteLevelBPETrainOptions> = {
minFrequency: 2,
showProgress: true,
specialTokens: ["<unk>"],
vocabSize: 30000,
};
private constructor(tokenizer: Tokenizer, configuration: ByteLevelBPETokenizerConfig) {
super(tokenizer, configuration);
}
static async fromOptions(
options?: ByteLevelBPETokenizerOptions
): Promise<ByteLevelBPETokenizer> {
const opts = { ...this.defaultOptions, ...options };
let model: Model;
if (opts.vocabFile && opts.mergesFile) {
const fromFile = promisify<string, string, BPEOptions, Model>(BPE.fromFile);
model = await fromFile(opts.vocabFile, opts.mergesFile, opts);
} else {
model = BPE.empty();
}
const tokenizer = new Tokenizer(model);
if (opts.lowercase) {
tokenizer.setNormalizer(
sequenceNormalizer([nfkcNormalizer(), lowercaseNormalizer()])
);
} else {
tokenizer.setNormalizer(nfkcNormalizer());
}
const preTokenizer = byteLevelPreTokenizer(opts.addPrefixSpace);
tokenizer.setPreTokenizer(preTokenizer);
tokenizer.setDecoder(byteLevelDecoder());
tokenizer.setPostProcessor(byteLevelProcessing(opts.trimOffsets));
return new ByteLevelBPETokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: ByteLevelBPETrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = bpeTrainer({
...mergedOptions,
initialAlphabet: byteLevelAlphabet(),
});
this.tokenizer.train(trainer, files);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/index.ts | export * from "./bert-wordpiece.tokenizer";
export * from "./bpe.tokenizer";
export * from "./byte-level-bpe.tokenizer";
export * from "./sentence-piece-bpe.tokenizer";
export { getTokenContent, BaseTokenizer, Token } from "./base.tokenizer";
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/__mocks__/bert-vocab-without-cls.txt | [SEP]
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/trainers.js | const native = require("./native");
module.exports = {
bpeTrainer: native.trainers_BPETrainer,
wordPieceTrainer: native.trainers_WordPieceTrainer,
wordLevelTrainer: native.trainers_WordLevelTrainer,
unigramTrainer: native.trainers_UnigramTrainer,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/decoders.test.ts | import {
bpeDecoder,
byteFallbackDecoder,
ctcDecoder,
fuseDecoder,
metaspaceDecoder,
replaceDecoder,
sequenceDecoder,
stripDecoder,
wordPieceDecoder,
} from "./decoders";
describe("wordPieceDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(wordPieceDecoder(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(wordPieceDecoder("test", undefined)).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(
wordPieceDecoder().decode(["Hel", "##lo", "there", "my", "fr", "##iend"])
).toEqual("Hello there my friend");
});
});
describe("byteFallbackDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(byteFallbackDecoder()).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(byteFallbackDecoder().decode(["Hel", "lo"])).toEqual("Hello");
expect(byteFallbackDecoder().decode(["<0x61>"])).toEqual("a");
expect(byteFallbackDecoder().decode(["<0x61>"])).toEqual("a");
expect(byteFallbackDecoder().decode(["My", " na", "me"])).toEqual("My name");
expect(byteFallbackDecoder().decode(["<0x61>"])).toEqual("a");
expect(byteFallbackDecoder().decode(["<0xE5>"])).toEqual("�");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>"])).toEqual("��");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>", "<0xab>"])).toEqual("叫");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>", "a"])).toEqual("��a");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>", "<0xab>", "a"])).toEqual(
"叫a"
);
});
});
describe("replaceDecoder", () => {
it("can decode arrays of strings", () => {
expect(replaceDecoder("_", " ").decode(["Hello", "_Hello"])).toEqual("Hello Hello");
});
});
describe("fuseDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(fuseDecoder()).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(fuseDecoder().decode(["Hel", "lo"])).toEqual("Hello");
});
});
describe("stripDecoder", () => {
it("can be constructed with its required parameters", () => {
expect(stripDecoder("_", 0, 0)).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(stripDecoder("_", 1, 0).decode(["_Hel", "lo", "__there"])).toEqual(
"Hello_there"
);
});
});
describe("metaspaceDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(metaspaceDecoder(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(metaspaceDecoder("t", undefined)).toBeDefined();
});
});
describe("bpeDecoder", () => {
it("accepts `undefined` as parameter", () => {
expect(bpeDecoder(undefined)).toBeDefined();
});
});
describe("ctcDecoder", () => {
it("accepts `undefined` as parameter", () => {
expect(ctcDecoder(undefined)).toBeDefined();
});
it("encodes correctly", () => {
expect(
ctcDecoder().decode(["<pad>", "h", "h", "e", "e", "l", "l", "<pad>", "l", "l", "o"])
).toEqual("hello");
});
});
describe("sequenceDecoder", () => {
it("accepts `empty list` as parameter", () => {
expect(sequenceDecoder([])).toBeDefined();
});
it("encodes correctly", () => {
expect(
sequenceDecoder([ctcDecoder(), metaspaceDecoder()]).decode([
"▁",
"▁",
"H",
"H",
"i",
"i",
"▁",
"y",
"o",
"u",
])
).toEqual("Hi you");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/raw-encoding.test.ts | import { promisify } from "util";
import { PaddingDirection } from "./enums";
import { Model, WordPiece, WordPieceOptions } from "./models";
import {
punctuationPreTokenizer,
sequencePreTokenizer,
whitespacePreTokenizer,
} from "./pre-tokenizers";
import { RawEncoding } from "./raw-encoding";
import { EncodeOptions, InputSequence, Tokenizer } from "./tokenizer";
const MOCKS_DIR = __dirname + "/__mocks__";
describe("Can modify pretokenizers on the fly", () => {
let encoding: RawEncoding;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let tokenizer: Tokenizer;
beforeAll(async () => {
const model = await promisify<string, WordPieceOptions, Model>(WordPiece.fromFile)(
`${MOCKS_DIR}/vocab.txt`,
{
continuingSubwordPrefix: "##",
}
);
tokenizer = new Tokenizer(model);
encode = promisify(tokenizer.encode.bind(tokenizer));
});
it("Can change pre tokenizer", async () => {
const input = "my name is john.!?";
tokenizer.setPreTokenizer(sequencePreTokenizer([whitespacePreTokenizer()]));
encoding = await encode(input, null);
expect(encoding.getIds()).toEqual([0, 1, 2, 3, 4, 8]);
// Change pre tokenizer
tokenizer.setPreTokenizer(
sequencePreTokenizer([whitespacePreTokenizer(), punctuationPreTokenizer()])
);
encoding = await encode(input, null);
expect(encoding.getIds()).toEqual([0, 1, 2, 3, 4, 8, 8, 8]);
});
});
describe("RawEncoding", () => {
const originalString = "my name is john";
const originalPairString = "what is yours?";
let encoding: RawEncoding;
let encodingDual: RawEncoding;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
beforeAll(async () => {
const model = await promisify<string, WordPieceOptions, Model>(WordPiece.fromFile)(
`${MOCKS_DIR}/vocab.txt`,
{
continuingSubwordPrefix: "##",
}
);
const tokenizer = new Tokenizer(model);
tokenizer.setPreTokenizer(whitespacePreTokenizer());
encode = promisify(tokenizer.encode.bind(tokenizer));
});
beforeEach(async () => {
encoding = await encode(originalString, null);
encodingDual = await encode(originalString, originalPairString);
});
it("has a list of defined methods", async () => {
expect(typeof encoding.wordToTokens).toBe("function");
expect(typeof encoding.wordToChars).toBe("function");
expect(typeof encoding.tokenToChars).toBe("function");
expect(typeof encoding.tokenToWord).toBe("function");
expect(typeof encoding.charToToken).toBe("function");
expect(typeof encoding.charToWord).toBe("function");
expect(typeof encoding.getAttentionMask).toBe("function");
expect(typeof encoding.getIds).toBe("function");
expect(typeof encoding.getLength).toBe("function");
expect(typeof encoding.getOffsets).toBe("function");
expect(typeof encoding.getOverflowing).toBe("function");
expect(typeof encoding.getSpecialTokensMask).toBe("function");
expect(typeof encoding.getTokens).toBe("function");
expect(typeof encoding.getTypeIds).toBe("function");
expect(typeof encoding.getWordIds).toBe("function");
expect(typeof encoding.getSequenceIds).toBe("function");
expect(typeof encoding.pad).toBe("function");
expect(typeof encoding.truncate).toBe("function");
});
describe("truncate", () => {
it("accepts `undefined` as second parameter", () => {
expect(encoding.truncate(10, undefined)).toBeUndefined();
});
it("should throw an Error on invalid direction", () => {
const t = () => encoding.truncate(10, 3, "not_valid");
expect(t).toThrow(`Invalid truncation direction value : not_valid`);
});
});
describe("getWordIds", () => {
it("returns the correct list of indexes", () => {
const indexes = encoding.getWordIds();
expect(indexes).toEqual([0, 1, 2, 3, 3]);
});
});
describe("getSequenceIds", () => {
it("returns the correct list of indexes", () => {
expect(encoding.getSequenceIds()).toEqual([0, 0, 0, 0, 0]);
expect(encodingDual.getSequenceIds()).toEqual([0, 0, 0, 0, 0, 1, 1, 1, 1]);
});
});
describe("wordToTokens", () => {
it("returns the correct indexes", () => {
const indexes = encoding.wordToTokens(3);
expect(indexes).toEqual([3, 5]);
});
it("returns the corrent indexes with pair sequences", () => {
expect(encodingDual.wordToTokens(3, 0)).toEqual([3, 5]);
expect(encodingDual.wordToTokens(3, 1)).toEqual([8, 9]);
});
it("returns undefined when out of range word", () => {
const index = encoding.wordToTokens(100);
expect(index).toBeUndefined();
});
});
describe("wordToChars", () => {
it("returns the correct offsets", () => {
const offsets = encoding.wordToChars(3);
expect(offsets).toEqual([11, 15]);
});
it("returns the correct offsets with pair sequences", () => {
expect(encodingDual.wordToChars(3, 0)).toEqual([11, 15]);
expect(encodingDual.wordToChars(3, 1)).toEqual([13, 14]);
});
it("returns undefined when out of range word", () => {
const offsets = encoding.wordToChars(100);
expect(offsets).toBeUndefined();
});
});
describe("tokenToSequence", () => {
it("returns the correct value", () => {
expect(encodingDual.tokenToSequence(4)).toEqual(0);
expect(encodingDual.tokenToSequence(6)).toEqual(1);
});
});
describe("tokenToChars", () => {
it("returns the correct offsets", () => {
const offsets = encoding.tokenToChars(3);
expect(offsets).toEqual([11, 13]);
});
it("returns the correct offsets with pair sequences", () => {
expect(encodingDual.tokenToChars(3)).toEqual([11, 13]);
expect(encodingDual.tokenToChars(7)).toEqual([8, 13]);
});
it("returns undefined when out of range token", () => {
const offsets = encoding.tokenToChars(100);
expect(offsets).toBeUndefined();
});
});
describe("tokenToWord", () => {
it("returns the correct index", () => {
const index = encoding.tokenToWord(3);
expect(index).toEqual(3);
});
it("returns the correct index with pair sequences", () => {
expect(encodingDual.tokenToWord(3)).toEqual(3);
expect(encodingDual.tokenToWord(7)).toEqual(2);
});
it("returns undefined when out of range token", () => {
const index = encoding.tokenToWord(100);
expect(index).toBeUndefined();
});
});
describe("charToToken", () => {
it("returns the correct index", () => {
const index = encoding.charToToken(3);
expect(index).toEqual(1);
});
it("returns the correct index with pair sequences", () => {
expect(encodingDual.charToToken(3, 0)).toEqual(1);
expect(encodingDual.charToToken(3, 1)).toEqual(5);
});
it("returns undefined when out of range char", () => {
const index = encoding.charToToken(100);
expect(index).toBeUndefined();
});
});
describe("charToWord", () => {
it("returns the correct index", () => {
const index = encoding.charToWord(3);
expect(index).toEqual(1);
});
it("returns the correct index with pair sequences", () => {
expect(encodingDual.charToWord(3, 0)).toEqual(1);
expect(encodingDual.charToWord(3, 1)).toEqual(0);
});
it("returns undefined when out of range char", () => {
const index = encoding.charToWord(100);
expect(index).toBeUndefined();
});
});
describe("pad", () => {
it("works correctly with only one parameter", () => {
encoding.pad(10);
expect(encoding.getTokens()).toHaveLength(10);
});
it("accepts `undefined` as second parameter", () => {
encoding.pad(10, undefined);
expect(encoding.getTokens()).toHaveLength(10);
});
it("accepts options as second parameter", () => {
encoding.pad(10, {
direction: PaddingDirection.Left,
padToken: "[PA]",
padTypeId: 10,
padId: 400,
});
const tokens = encoding.getTokens();
expect(tokens).toHaveLength(10);
expect(tokens[0]).toBe("[PA]");
expect(encoding.getTypeIds()[0]).toBe(10);
expect(encoding.getIds()[0]).toBe(400);
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/normalizers.test.ts | import {
prependNormalizer,
stripAccentsNormalizer,
stripNormalizer,
} from "./normalizers";
describe("stripNormalizer", () => {
it("instantiates with no parameters", () => {
const normalizer = stripNormalizer();
expect(normalizer.constructor.name).toEqual("Normalizer");
});
it("accepts `undefined` as first parameter", () => {
expect(stripNormalizer(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(stripNormalizer(false, undefined)).toBeDefined();
});
it("instantiates with one parameter", () => {
const normalizer = stripNormalizer(false);
expect(normalizer.constructor.name).toEqual("Normalizer");
});
it("instantiates with two parameters", () => {
const normalizer = stripNormalizer(false, true);
expect(normalizer.constructor.name).toEqual("Normalizer");
});
it("prepend instantiates with one parameter", () => {
const normalizer = prependNormalizer("_");
expect(normalizer.constructor.name).toEqual("Normalizer");
expect(normalizer.normalizeString("Hello")).toEqual("_Hello");
});
it("can normalize strings", () => {
const normalizer = stripNormalizer();
expect(normalizer.normalizeString(" Hello there ")).toEqual("Hello there");
});
});
describe("stripAccentsNormalizer", () => {
it("initialize", () => {
const normalizer = stripAccentsNormalizer();
expect(normalizer.constructor.name).toEqual("Normalizer");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/post-processors.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import {
bertProcessing,
byteLevelProcessing,
robertaProcessing,
sequenceProcessing,
templateProcessing,
} from "./post-processors";
describe("bertProcessing", () => {
it("instantiates correctly with only two parameters", () => {
const processor = bertProcessing(["sep", 1], ["cls", 2]);
expect(processor.constructor.name).toEqual("Processor");
});
it("throws if only one argument is provided", () => {
expect(() => (bertProcessing as any)(["sep", 1])).toThrow("Argument 1 is missing");
});
it("throws if arguments are malformed", () => {
expect(() => (bertProcessing as any)(["sep", "1"], ["cls", "2"])).toThrow(
'invalid type: string "1", expected u32'
);
expect(() => (bertProcessing as any)(["sep"], ["cls"])).toThrow(
"invalid length 1, expected a tuple of size 2"
);
});
});
describe("byteLevelProcessing", () => {
it("instantiates correctly without any parameter", () => {
const processor = byteLevelProcessing();
expect(processor.constructor.name).toEqual("Processor");
});
it("accepts `undefined` as first parameter", () => {
expect(byteLevelProcessing(undefined)).toBeDefined();
});
it("accepts `boolean` as first parameter", () => {
expect(byteLevelProcessing(true)).toBeDefined();
});
});
describe("robertaProcessing", () => {
it("instantiates correctly with only two parameters", () => {
const processor = robertaProcessing(["sep", 1], ["cls", 2]);
expect(processor.constructor.name).toEqual("Processor");
});
it("accepts `undefined` as third and fourth parameters", () => {
expect(robertaProcessing(["sep", 1], ["cls", 2], undefined, undefined)).toBeDefined();
});
it("accepts `boolean` as third and fourth parameter", () => {
expect(robertaProcessing(["sep", 1], ["cls", 2], true, true)).toBeDefined();
});
});
describe("templateProcessing", () => {
it("instantiates correctly with only a single template", () => {
const processor = templateProcessing("$A $A");
expect(processor.constructor.name).toEqual("Processor");
});
it("throws if special tokens are missing", () => {
expect(() => templateProcessing("[CLS] $A [SEP]")).toThrow(
"Missing SpecialToken(s) with id(s)"
);
});
it("instantiates correctly with both templates", () => {
const processor = templateProcessing(
"[CLS] $A [SEP]",
"[CLS] $A [SEP] $B:1 [SEP]:1",
[
["[CLS]", 1],
["[SEP]", 2],
]
);
expect(processor.constructor.name).toEqual("Processor");
});
});
describe("sequenceProcessing", () => {
it("accepts `PostProcessor[]` as first parameter", () => {
const template = templateProcessing("[CLS] $A [SEP]", "[CLS] $A [SEP] $B:1 [SEP]:1", [
["[CLS]", 1],
["[SEP]", 2],
]);
const bytelevel = byteLevelProcessing(true);
expect(sequenceProcessing([bytelevel, template])).toBeDefined();
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/normalizers.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of a
* Normalizer will return an instance of this class when instantiated.
*/
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface Normalizer {
normalizeString(s: string): string;
}
export interface BertNormalizerOptions {
/**
* Whether to clean the text, by removing any control characters
* and replacing all whitespaces by the classic one.
* @default true
*/
cleanText?: boolean;
/**
 * Whether to handle Chinese chars by putting spaces around them.
* @default true
*/
handleChineseChars?: boolean;
/**
* Whether to lowercase.
* @default true
*/
lowercase?: boolean;
/**
* Whether to strip all accents.
* @default undefined
*/
stripAccents?: boolean;
}
/**
* Instantiate a Bert Normalizer with the given options
*
* @param [options] Normalizer options
* @returns Bert Normalizer. Takes care of normalizing raw text before giving it to a Bert model.
 * This includes cleaning the text, handling accents, Chinese chars and lowercasing
*/
export function bertNormalizer(options?: BertNormalizerOptions): Normalizer;
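// Usage sketch (illustrative; assumes the runtime `bertNormalizer` export matches
// this declaration):
//
//   const normalizer = bertNormalizer({ lowercase: true });
//   normalizer.normalizeString("Hello WORLD"); // => "hello world"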
/**
* Returns a new NFC Unicode Normalizer
*/
export function nfcNormalizer(): Normalizer;
/**
* Returns a new NFD Unicode Normalizer
*/
export function nfdNormalizer(): Normalizer;
/**
* Returns a new NFKC Unicode Normalizer
*/
export function nfkcNormalizer(): Normalizer;
/**
* Returns a new NFKD Unicode Normalizer
*/
export function nfkdNormalizer(): Normalizer;
/**
* Instantiate a new Normalization Sequence using the given normalizers
* @param normalizers A list of Normalizer to be run as a sequence
*/
export function sequenceNormalizer(normalizers: Normalizer[]): Normalizer;
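// Usage sketch (illustrative; chains normalizers declared in this file):
//
//   const normalizer = sequenceNormalizer([lowercaseNormalizer(), stripNormalizer()]);
//   normalizer.normalizeString("  Hello There  "); // => "hello there"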
/**
* Returns a new Lowercase Normalizer
*/
export function lowercaseNormalizer(): Normalizer;
/**
* Returns a new Strip Normalizer
* @param [left=true] Whether or not to strip on the left (defaults to `true`)
* @param [right=true] Whether or not to strip on the right (defaults to `true`)
*/
export function stripNormalizer(left?: boolean, right?: boolean): Normalizer;
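// Usage sketch (mirrors the behavior exercised in normalizers.test.ts):
//
//   stripNormalizer().normalizeString(" Hello there "); // => "Hello there"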
/**
* Returns a new Prepend Normalizer
* @param [prepend] The string to prepend
*/
export function prependNormalizer(prepend: string): Normalizer;
/**
* Returns a new StripAccents Normalizer
*/
export function stripAccentsNormalizer(): Normalizer;
/**
* Returns a new Nmt Normalizer
*/
export function nmtNormalizer(): Normalizer;
/**
* Returns a new Precompiled Normalizer
*/
export function precompiledNormalizer(): Normalizer;
/**
* Returns a new Replace Normalizer
*/
export function replaceNormalizer(): Normalizer;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/decoders.js | const native = require("./native");
module.exports = {
byteLevelDecoder: native.decoders_ByteLevel,
replaceDecoder: native.decoders_Replace,
wordPieceDecoder: native.decoders_WordPiece,
byteFallbackDecoder: native.decoders_ByteFallback,
fuseDecoder: native.decoders_Fuse,
stripDecoder: native.decoders_Strip,
metaspaceDecoder: native.decoders_Metaspace,
bpeDecoder: native.decoders_BPEDecoder,
ctcDecoder: native.decoders_CTC,
sequenceDecoder: native.decoders_Sequence,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/tokenizer.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable @typescript-eslint/no-empty-function */
import { promisify } from "util";
import { PaddingDirection, TruncationDirection, TruncationStrategy } from "./enums";
import { BPE } from "./models";
import { RawEncoding } from "./raw-encoding";
import {
AddedToken,
EncodeInput,
EncodeOptions,
InputSequence,
PaddingConfiguration,
Tokenizer,
TruncationConfiguration,
} from "./tokenizer";
// jest.mock('../bindings/tokenizer');
// jest.mock('../bindings/models', () => ({
// __esModule: true,
// Model: jest.fn()
// }));
// Or:
// jest.mock('../bindings/models', () => {
// return require('../bindings/__mocks__/models');
// });
// const TokenizerMock = mocked(Tokenizer);
describe("AddedToken", () => {
it("instantiates with only content", () => {
const addToken = new AddedToken("test", false);
expect(addToken.constructor.name).toEqual("AddedToken");
});
it("instantiates with empty options", () => {
const addToken = new AddedToken("test", false, {});
expect(addToken.constructor.name).toEqual("AddedToken");
});
it("instantiates with options", () => {
const addToken = new AddedToken("test", false, {
leftStrip: true,
rightStrip: true,
singleWord: true,
});
expect(addToken.constructor.name).toEqual("AddedToken");
});
describe("getContent", () => {
it("returns the string content of AddedToken", () => {
const addedToken = new AddedToken("test", false);
expect(addedToken.getContent()).toEqual("test");
});
});
});
describe("Tokenizer", () => {
it("has expected methods", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
expect(typeof Tokenizer.fromFile).toBe("function");
expect(typeof Tokenizer.fromString).toBe("function");
expect(typeof Tokenizer.fromPretrained).toBe("function");
expect(typeof tokenizer.addSpecialTokens).toBe("function");
expect(typeof tokenizer.addTokens).toBe("function");
expect(typeof tokenizer.decode).toBe("function");
expect(typeof tokenizer.decodeBatch).toBe("function");
expect(typeof tokenizer.disablePadding).toBe("function");
expect(typeof tokenizer.disableTruncation).toBe("function");
expect(typeof tokenizer.encode).toBe("function");
expect(typeof tokenizer.encodeBatch).toBe("function");
expect(typeof tokenizer.getDecoder).toBe("function");
expect(typeof tokenizer.getNormalizer).toBe("function");
expect(typeof tokenizer.getPostProcessor).toBe("function");
expect(typeof tokenizer.getPreTokenizer).toBe("function");
expect(typeof tokenizer.getVocab).toBe("function");
expect(typeof tokenizer.getVocabSize).toBe("function");
expect(typeof tokenizer.idToToken).toBe("function");
expect(typeof tokenizer.runningTasks).toBe("function");
expect(typeof tokenizer.save).toBe("function");
expect(typeof tokenizer.setDecoder).toBe("function");
expect(typeof tokenizer.setModel).toBe("function");
expect(typeof tokenizer.setNormalizer).toBe("function");
expect(typeof tokenizer.setPadding).toBe("function");
expect(typeof tokenizer.setPostProcessor).toBe("function");
expect(typeof tokenizer.setPreTokenizer).toBe("function");
expect(typeof tokenizer.setTruncation).toBe("function");
expect(typeof tokenizer.tokenToId).toBe("function");
expect(typeof tokenizer.toString).toBe("function");
expect(typeof tokenizer.train).toBe("function");
});
it("can be instantiated from the hub", async () => {
let tokenizer: Tokenizer;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let output: RawEncoding;
tokenizer = Tokenizer.fromPretrained("bert-base-cased");
encode = promisify(tokenizer.encode.bind(tokenizer));
output = await encode("Hey there dear friend!", null, { addSpecialTokens: false });
expect(output.getTokens()).toEqual(["Hey", "there", "dear", "friend", "!"]);
tokenizer = Tokenizer.fromPretrained("anthony/tokenizers-test");
encode = promisify(tokenizer.encode.bind(tokenizer));
output = await encode("Hey there dear friend!", null, { addSpecialTokens: false });
expect(output.getTokens()).toEqual(["hey", "there", "dear", "friend", "!"]);
tokenizer = Tokenizer.fromPretrained("anthony/tokenizers-test", {
revision: "gpt-2",
});
encode = promisify(tokenizer.encode.bind(tokenizer));
output = await encode("Hey there dear friend!", null, { addSpecialTokens: false });
expect(output.getTokens()).toEqual(["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"]);
});
describe("addTokens", () => {
it("accepts a list of string as new tokens when initial model is empty", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const nbAdd = tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
expect(nbAdd).toBe(5);
});
it("accepts a list of AddedToken as new tokens when initial model is empty", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const addedToken = new AddedToken("test", false);
const nbAdd = tokenizer.addTokens([addedToken]);
expect(nbAdd).toBe(1);
});
});
describe("encode", () => {
let tokenizer: Tokenizer;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let encodeBatch: (
inputs: EncodeInput[],
options?: EncodeOptions | null
) => Promise<RawEncoding[]>;
beforeEach(() => {
// Clear all instances and calls to constructor and all methods:
// TokenizerMock.mockClear();
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", new AddedToken("pair", false)]);
encode = promisify(tokenizer.encode.bind(tokenizer));
encodeBatch = promisify(tokenizer.encodeBatch.bind(tokenizer));
});
it("accepts a pair of strings as parameters", async () => {
const encoding = await encode("my name is john", "pair");
expect(encoding).toBeDefined();
});
it("accepts a string with a null pair", async () => {
const encoding = await encode("my name is john", null);
expect(encoding).toBeDefined();
});
it("throws if we try to encode a pre-tokenized string without isPretokenized=true", async () => {
await expect((encode as any)(["my", "name", "is", "john"], null)).rejects.toThrow(
"encode with isPreTokenized=false expect string"
);
});
it("accepts a pre-tokenized string as parameter", async () => {
const encoding = await encode(["my", "name", "is", "john"], undefined, {
isPretokenized: true,
});
expect(encoding).toBeDefined();
});
it("throws if we try to encodeBatch pre-tokenized strings without isPretokenized=true", async () => {
await expect((encodeBatch as any)([["my", "name", "is", "john"]])).rejects.toThrow(
"encodeBatch with isPretokenized=false expects input to be `EncodeInput[]` " +
"with `EncodeInput = string | [string, string]`"
);
});
it("accepts a pre-tokenized input in encodeBatch", async () => {
const encoding = await encodeBatch([["my", "name", "is", "john"]], {
isPretokenized: true,
});
expect(encoding).toBeDefined();
});
it("Encodes correctly if called with only one argument", async () => {
const encoded = await encode("my name is john");
expect(encoded.getIds()).toEqual([0, 1, 2, 3]);
});
it("returns an Encoding", async () => {
const encoding = await encode("my name is john", "pair");
expect(encoding.getAttentionMask()).toEqual([1, 1, 1, 1, 1]);
const ids = encoding.getIds();
expect(Array.isArray(ids)).toBe(true);
expect(ids).toHaveLength(5);
for (const id of ids) {
expect(typeof id).toBe("number");
}
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[11, 15],
[0, 4],
]);
expect(encoding.getOverflowing()).toEqual([]);
expect(encoding.getSpecialTokensMask()).toEqual([0, 0, 0, 0, 0]);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john", "pair"]);
expect(encoding.getTypeIds()).toEqual([0, 0, 0, 0, 1]);
});
describe("when truncation is enabled", () => {
it("truncates with default if no truncation options provided", async () => {
tokenizer.setTruncation(2);
const singleEncoding = await encode("my name is john", null);
expect(singleEncoding.getTokens()).toEqual(["my", "name"]);
const pairEncoding = await encode("my name is john", "pair");
expect(pairEncoding.getTokens()).toEqual(["my", "pair"]);
});
it("throws an error with strategy `only_second` and no pair is encoded", async () => {
tokenizer.setTruncation(2, { strategy: TruncationStrategy.OnlySecond });
await expect(encode("my name is john", null)).rejects.toThrow();
});
});
describe("when padding is enabled", () => {
it("does not pad anything with default options", async () => {
tokenizer.setPadding();
const singleEncoding = await encode("my name", null);
expect(singleEncoding.getTokens()).toEqual(["my", "name"]);
const pairEncoding = await encode("my name", "pair");
expect(pairEncoding.getTokens()).toEqual(["my", "name", "pair"]);
});
it("pads to the right by default", async () => {
tokenizer.setPadding({ maxLength: 5 });
const singleEncoding = await encode("my name", null);
expect(singleEncoding.getTokens()).toEqual([
"my",
"name",
"[PAD]",
"[PAD]",
"[PAD]",
]);
const pairEncoding = await encode("my name", "pair");
expect(pairEncoding.getTokens()).toEqual([
"my",
"name",
"pair",
"[PAD]",
"[PAD]",
]);
});
it("pads to multiple of the given value", async () => {
tokenizer.setPadding({ padToMultipleOf: 8 });
const singleEncoding = await encode("my name", null);
expect(singleEncoding.getTokens()).toHaveLength(8);
const pairEncoding = await encode("my name", "pair");
expect(pairEncoding.getTokens()).toHaveLength(8);
});
});
});
describe("decode", () => {
let tokenizer: Tokenizer;
beforeEach(() => {
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
});
it("returns `undefined`", () => {
expect(tokenizer.decode([0, 1, 2, 3], true, () => {})).toBeUndefined();
});
it("has its callback called with the decoded string", async () => {
const decode = promisify(tokenizer.decode.bind(tokenizer));
await expect(decode([0, 1, 2, 3], true)).resolves.toEqual("my name is john");
});
});
describe("decodeBatch", () => {
let tokenizer: Tokenizer;
beforeEach(() => {
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
});
it("returns `undefined`", () => {
expect(tokenizer.decodeBatch([[0, 1, 2, 3], [4]], true, () => {})).toBeUndefined();
});
it("has its callback called with the decoded string", async () => {
const decodeBatch = promisify(tokenizer.decodeBatch.bind(tokenizer));
await expect(decodeBatch([[0, 1, 2, 3], [4]], true)).resolves.toEqual([
"my name is john",
"pair",
]);
});
});
describe("getVocab", () => {
it("accepts `undefined` as parameter", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
expect(tokenizer.getVocab(undefined)).toBeDefined();
});
it("returns the vocabulary", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john"]);
expect(tokenizer.getVocab(true)).toEqual({
my: 0,
name: 1,
is: 2,
john: 3,
});
});
});
describe("getVocabSize", () => {
it("accepts `undefined` as parameter", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
expect(tokenizer.getVocabSize(undefined)).toBeDefined();
});
});
describe("setTruncation", () => {
it("returns the full truncation configuration", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const truncation = tokenizer.setTruncation(2);
const expectedConfig: TruncationConfiguration = {
maxLength: 2,
strategy: TruncationStrategy.LongestFirst,
stride: 0,
direction: TruncationDirection.Right,
};
expect(truncation).toEqual(expectedConfig);
});
});
describe("setPadding", () => {
it("returns the full padding params", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const padding = tokenizer.setPadding();
const expectedConfig: PaddingConfiguration = {
direction: PaddingDirection.Right,
padId: 0,
padToken: "[PAD]",
padTypeId: 0,
};
expect(padding).toEqual(expectedConfig);
});
});
describe("postProcess", () => {
let tokenizer: Tokenizer;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let firstEncoding: RawEncoding;
let secondEncoding: RawEncoding;
beforeAll(() => {
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
encode = promisify(tokenizer.encode.bind(tokenizer));
});
beforeEach(async () => {
firstEncoding = await encode("my name is john", null);
secondEncoding = await encode("pair", null);
tokenizer.setTruncation(2);
tokenizer.setPadding({ maxLength: 5 });
});
it("returns correctly with a single Encoding param", () => {
const encoding = tokenizer.postProcess(firstEncoding);
expect(encoding.getTokens()).toEqual(["my", "name", "[PAD]", "[PAD]", "[PAD]"]);
});
it("returns correctly with `undefined` as second and third parameters", () => {
const encoding = tokenizer.postProcess(firstEncoding, undefined, undefined);
expect(encoding.getTokens()).toEqual(["my", "name", "[PAD]", "[PAD]", "[PAD]"]);
});
it("returns correctly with 2 encodings", () => {
const encoding = tokenizer.postProcess(firstEncoding, secondEncoding);
expect(encoding.getTokens()).toEqual(["my", "pair", "[PAD]", "[PAD]", "[PAD]"]);
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/tokenizer.js | const native = require("./native");
class Tokenizer extends native.tokenizer_Tokenizer {
static fromString = native.tokenizer_Tokenizer_from_string;
static fromFile = native.tokenizer_Tokenizer_from_file;
static fromPretrained = native.tokenizer_Tokenizer_from_pretrained;
}
module.exports = {
AddedToken: native.tokenizer_AddedToken,
Tokenizer,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/pre-tokenizers.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of a
* PreTokenizer will return an instance of this class when instantiated.
*/
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface PreTokenizer {
preTokenizeString(s: string): [string, [number, number]][];
}
/**
* Instantiate a new ByteLevel PreTokenizer
*
* @param [addPrefixSpace=true] Whether to add a space to the first word if there isn't already one.
* This lets us treat `hello` exactly like `say hello`.
* @returns ByteLevel PreTokenizer.
* This pre-tokenizer takes care of replacing all bytes of the given string
* with a corresponding representation, as well as splitting into words.
*/
export function byteLevelPreTokenizer(addPrefixSpace?: boolean): PreTokenizer;
/**
* Returns the alphabet used by the ByteLevel PreTokenizer.
* Since the ByteLevel works as its name suggests, at the byte level, it
* encodes any byte to one visible character. This means that there is a
* total of 256 different characters composing this alphabet.
*/
export function byteLevelAlphabet(): string[];
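// Usage sketch (illustrative; `bpeTrainer` comes from "./trainers", and this is the
// same pattern used by the ByteLevel BPE tokenizer implementation in this package):
//
//   const trainer = bpeTrainer({ initialAlphabet: byteLevelAlphabet() });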
/**
* Returns a Whitespace PreTokenizer
* This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
*/
export function whitespacePreTokenizer(): PreTokenizer;
/**
* Returns a WhitespaceSplit PreTokenizer
* This pre-tokenizer simply splits on whitespaces only. Works almost like the `.split(' ')`
* function, except that it accounts for multiple consecutive spaces
*/
export function whitespaceSplitPreTokenizer(): PreTokenizer;
/**
* Returns a Split PreTokenizer
* This versatile pre-tokenizer splits using the provided pattern and
* according to the provided behavior. The pattern can be inverted by
* making use of the invert flag.
*
* @param [pattern] A pattern used to split the string. Usually a string or a Regex.
* @param [behavior] The behavior to use when splitting.
* Choices: "removed", "isolated", "mergedWithPrevious", "mergedWithNext",
* "contiguous".
* @param [invert=false] Whether to invert the pattern.
*/
export function splitPreTokenizer(
pattern?: string,
behavior?: string,
invert?: boolean
): PreTokenizer;
/**
* Returns a new Bert PreTokenizer.
* This pre-tokenizer splits tokens on spaces, and also on punctuation.
* Each occurrence of a punctuation character will be treated separately.
*/
export function bertPreTokenizer(): PreTokenizer;
/**
* Returns a new Metaspace PreTokenizer.
* This pre-tokenizer replaces any whitespace by the provided replacement character.
* It then tries to split on these spaces.
*
* @param [replacement="▁"] The replacement character. Must be exactly one character.
 * By default we use the `▁` (U+2581) meta symbol (same as in SentencePiece).
* @param [addPrefixSpace] Whether to add a space to the first word if there isn't already one.
* This lets us treat `hello` exactly like `say hello`.
*/
export function metaspacePreTokenizer(
replacement?: string,
addPrefixSpace?: boolean
): PreTokenizer;
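// Usage sketch (mirrors the behavior exercised in pre-tokenizers.test.ts):
//
//   metaspacePreTokenizer().preTokenizeString("Hello there friend");
//   // => [["▁Hello", [0, 5]], ["▁there", [5, 11]], ["▁friend", [11, 18]]]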
/**
* Returns a CharDelimiterSplit PreTokenizer
* This pre-tokenizer simply splits on the provided delimiter. Works almost like the `.split(delimiter)`
* function, except that it accounts for multiple consecutive spaces
*
* @param delimiter The delimiter character on which the sequence will be split.
*/
export function charDelimiterSplitPreTokenizer(delimiter: string): PreTokenizer;
/**
* Returns a new Punctuation PreTokenizer.
* This pre-tokenizer splits tokens on punctuation according to the provided behavior.
* Each occurrence of a punctuation character is treated separately.
*
* @param [behavior="isolated"] The behavior to use when splitting.
* Choices: "removed", "isolated", "mergedWithPrevious", "mergedWithNext",
* "contiguous"
*/
export function punctuationPreTokenizer(behavior?: string): PreTokenizer;
/**
* Returns a new Sequence PreTokenizer.
 * This pre-tokenizer combines other pre-tokenizers and applies them sequentially.
*/
export function sequencePreTokenizer(pretokenizers: PreTokenizer[]): PreTokenizer;
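// Usage sketch (the same combination exercised in raw-encoding.test.ts; `tokenizer`
// stands for an existing Tokenizer instance):
//
//   tokenizer.setPreTokenizer(
//     sequencePreTokenizer([whitespacePreTokenizer(), punctuationPreTokenizer()])
//   );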
/**
* Returns a new Digits PreTokenizer.
 * This pre-tokenizer splits on numbers. Optionally it can split on individual digits.
*
* @param [individualDigits=false] Whether to split on individual digits.
*/
export function digitsPreTokenizer(individualDigits?: boolean): PreTokenizer;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/raw-encoding.d.ts | import { PaddingDirection } from "./enums";
/**
* An Encoding as returned by the Tokenizer
*/
export interface RawEncoding {
/**
* Get the encoded tokens corresponding to the word at the given index in one of the input
* sequences, with the form [startToken, endToken+1]
* @param word The position of a word in one of the input sequences
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToTokens(word: number, seqId?: number): [number, number] | undefined;
/**
* Get the offsets of the word at the given index in the input sequence
* @param word The index of the word in the input sequence
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToChars(word: number, seqId?: number): [number, number] | undefined;
/**
* Get the index of the sequence that contains the given token
* @param token The index of the token in the encoded sequence
*/
tokenToSequence(token: number): number | undefined;
/**
* Get the offsets of the token at the given index
*
* The returned offsets are related to the input sequence that contains the
* token. In order to determine in which input sequence it belongs, you
* must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
*/
tokenToChars(token: number): [number, number] | undefined;
/**
* Get the word that contains the token at the given index
*
* The returned index is related to the input sequence that contains the
* token. In order to determine in which input sequence it belongs, you
* must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
*/
tokenToWord(token: number): number | undefined;
/**
* Find the index of the token at the position of the given char
* @param pos The position of a char in one of the input strings
* @param seqId The index of the input sequence that contains said char
* @since 0.6.0
*/
charToToken(pos: number, seqId?: number): number | undefined;
/**
* Get the word that contains the given char
* @param pos The position of a char in the input string
* @param seqId The index of the input sequence that contains said char
* @since 0.7.0
*/
charToWord(pos: number, seqId?: number): number | undefined;
/**
* Returns the attention mask
*/
getAttentionMask(): number[];
/**
* Returns the number of sequences
*/
getNSequences(): number;
/**
* Set the sequence id for this encoding
*/
setSequenceId(seqId: number): undefined;
/**
* Returns the tokenized ids
*/
getIds(): number[];
/**
* Returns the number of tokens
*/
getLength(): number;
/**
* Returns the offsets
*/
getOffsets(): [number, number][];
/**
* Returns the overflowing encodings, after truncation
*/
getOverflowing(): RawEncoding[];
/**
* Returns the special tokens mask
*/
getSpecialTokensMask(): number[];
/**
* Returns the tokenized string
*/
getTokens(): string[];
/**
* Returns the type ids
*/
getTypeIds(): number[];
/**
* The tokenized words indexes
* @since 0.6.0
*/
getWordIds(): (number | undefined)[];
/**
* The sequences indices
*/
getSequenceIds(): (number | undefined)[];
/**
* Pad the current Encoding at the given length
*
* @param length The length at which to pad
* @param [options] Padding options
*/
pad(length: number, options?: PaddingOptions): void;
/**
* Truncate the current Encoding at the given max_length
*
* @param length The maximum length to be kept
* @param [stride=0] The length of the previous first sequence
* to be included in the overflowing sequence
* @param [direction='right'] Truncate direction
*/
truncate(length: number, stride?: number, direction?: string): void;
}
interface PaddingOptions {
/**
* @default "right"
*/
direction?: PaddingDirection;
/**
* The index to be used when padding
* @default 0
*/
padId?: number;
/**
* The type index to be used when padding
* @default 0
*/
padTypeId?: number;
/**
* The pad token to be used when padding
* @default "[PAD]"
*/
padToken?: string;
}
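// Usage sketch (illustrative, based on raw-encoding.test.ts; `encoding` stands for a
// RawEncoding returned by Tokenizer.encode):
//
//   encoding.pad(10, { direction: PaddingDirection.Left, padToken: "[PAD]", padId: 0 });
//   encoding.truncate(5);
//   encoding.getTokens(); // padded and/or truncated tokens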
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/models.test.ts | /* eslint-disable @typescript-eslint/no-empty-function */
/* eslint-disable @typescript-eslint/no-explicit-any */
import { BPE, Unigram, WordPiece } from "./models";
const MOCKS_DIR = __dirname + "/__mocks__";
describe("WordPiece", () => {
describe("fromFile", () => {
it("throws if called with only one argument", () => {
expect(() => (WordPiece as any).fromFile("test")).toThrow("not enough arguments");
});
it("throws if called with 2 arguments without a callback as third argument", () => {
expect(() => (WordPiece as any).fromFile("test", {})).toThrow(
"not enough arguments"
);
});
describe("when called with 2 correct arguments", () => {
it("returns `undefined` ", () => {
expect(WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, () => {})).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, (err, model) => {
expect(model).toBeDefined();
done();
});
});
});
});
describe("when called with 3 correct arguments", () => {
it("returns `undefined`", () => {
expect(
WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, {}, () => {})
).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, {}, (err, model) => {
expect(model).toBeDefined();
done();
});
});
});
});
});
});
describe("BPE", () => {
describe("fromFile", () => {
it("throws if called with only two arguments", () => {
expect(() => (BPE as any).fromFile("test", "bis")).toThrow("not enough arguments");
});
it("throws if called with 3 arguments without a callback as last argument", () => {
expect(() => (BPE as any).fromFile("test", "bis", {})).toThrow(
"not enough arguments"
);
});
});
describe("when called with 3 correct arguments", () => {
it("returns `undefined`", () => {
expect(
BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`, () => {})
).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
BPE.fromFile(
`${MOCKS_DIR}/vocab.json`,
`${MOCKS_DIR}/merges.txt`,
(err, model) => {
expect(model).toBeDefined();
done();
}
);
});
});
});
describe("when called with 4 correct arguments", () => {
it("returns `undefined`", () => {
expect(
BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`, {}, () => {})
).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
BPE.fromFile(
`${MOCKS_DIR}/vocab.json`,
`${MOCKS_DIR}/merges.txt`,
{},
(err, model) => {
expect(model).toBeDefined();
done();
}
);
});
});
});
describe("When initialized from memory", () => {
it("returns the loaded Model", () => {
const bpe = BPE.init({ a: 0, b: 1, ab: 2 }, [["a", "b"]]);
expect(bpe.constructor.name).toEqual("Model");
});
});
});
describe("Unigram", () => {
it("can be initialized from memory", () => {
const unigram = Unigram.init(
[
["<unk>", 0],
["Hello", -1],
["there", -2],
],
{
unkId: 0,
byte_fallback: false,
}
);
expect(unigram.constructor.name).toEqual("Model");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/trainers.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of a
* Trainer will return an instance of this class when instantiated.
*/
import { AddedToken } from "./tokenizer";
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface Trainer {}
export interface TrainerOptions {
/**
* A prefix to be used for every subword that is not a beginning-of-word.
*/
continuingSubwordPrefix?: string;
/**
 * A suffix to be used for every subword that is an end-of-word.
*/
endOfWordSuffix?: string;
/**
* A list of characters to include in the initial alphabet, even
* if not seen in the training dataset.
 * If a string contains more than one character, only the first one
* is kept.
* @default []
*/
initialAlphabet?: string[];
/**
 * The maximum number of different characters to keep in the alphabet.
*/
limitAlphabet?: number;
/**
* The minimum frequency a pair should have in order to be merged.
* @default 2
*/
minFrequency?: number;
/**
* Whether to show progress bars while training.
* @default true
*/
showProgress?: boolean;
/**
* A list of special tokens the model should know of.
* @default []
*/
specialTokens?: (string | AddedToken)[];
/**
* The size of the final vocabulary, including all tokens and alphabet.
* @default 30000
*/
vocabSize?: number;
}
/**
* Instantiate a new BPE Trainer
* @param [options] BPE Trainer options
*/
export function bpeTrainer(options?: TrainerOptions): Trainer;
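// Usage sketch (illustrative; the option values are assumptions and `tokenizer` stands
// for an existing Tokenizer instance):
//
//   const trainer = bpeTrainer({ vocabSize: 30000, minFrequency: 2, specialTokens: ["[UNK]"] });
//   tokenizer.train(trainer, ["path/to/corpus.txt"]);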
/**
* Instantiate a new WordPiece Trainer
* @param [options] WordPiece Trainer options
*/
export function wordPieceTrainer(options?: TrainerOptions): Trainer;
export interface WordLevelTrainerOptions {
/**
* The minimum frequency a pair should have in order to be merged.
* @default 2
*/
minFrequency?: number;
/**
* Whether to show progress bars while training.
* @default true
*/
showProgress?: boolean;
/**
* A list of special tokens the model should know of.
* @default []
*/
specialTokens?: (string | AddedToken)[];
/**
* The size of the final vocabulary, including all tokens and alphabet.
* @default 30000
*/
vocabSize?: number;
}
/**
* Instantiate a new WordLevel Trainer
* @param [options] WordLevel Trainer options
*/
export function wordLevelTrainer(options?: WordLevelTrainerOptions): Trainer;
export interface UnigramTrainerOptions {
vocabSize?: number;
nSubIterations?: number;
shrinkingFactor?: number;
specialTokens?: string[];
initialAlphabet?: string[];
unkToken?: string;
maxPieceLength?: number;
seedSize?: number;
showProgress?: boolean;
}
/**
* Instantiate a new Unigram Trainer
* @param [options] Unigram Trainer options
*/
export function unigramTrainer(options?: UnigramTrainerOptions): Trainer;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/normalizers.js | const native = require("./native");
module.exports = {
bertNormalizer: native.normalizers_BertNormalizer,
nfcNormalizer: native.normalizers_NFC,
nfdNormalizer: native.normalizers_NFD,
nfkcNormalizer: native.normalizers_NFKC,
nfkdNormalizer: native.normalizers_NFKD,
sequenceNormalizer: native.normalizers_Sequence,
lowercaseNormalizer: native.normalizers_Lowercase,
stripNormalizer: native.normalizers_Strip,
prependNormalizer: native.normalizers_Prepend,
stripAccentsNormalizer: native.normalizers_StripAccents,
nmtNormalizer: native.normalizers_Nmt,
precompiledNormalizer: native.normalizers_Precompiled,
replaceNormalizer: native.normalizers_Replace,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/utils.test.ts | import { promisify } from "util";
import { BPE } from "./models";
import { RawEncoding } from "./raw-encoding";
import { EncodeOptions, InputSequence, Tokenizer } from "./tokenizer";
import { mergeEncodings, slice } from "./utils";
describe("slice", () => {
const text = "My name is John 👋";
const sliceText = slice.bind({}, text);
it("returns the full text when no params", () => {
const sliced = sliceText();
expect(sliced).toEqual(text);
});
it("accepts `undefined` as second parameter", () => {
const original = sliceText(undefined);
expect(original).toEqual(text);
});
it("accepts `undefined` as third parameter", () => {
const original = sliceText(0, undefined);
expect(original).toEqual(text);
});
it("throws an error when `begin` is out of range", () => {
expect(() => sliceText(1000)).toThrow();
});
it("returns slice starting at the specified index", () => {
const original = sliceText(3);
expect(original).toEqual("name is John 👋");
});
it("throws an error when `end` is out of range", () => {
expect(() => sliceText(0, 1000)).toThrow();
});
it("returns the text between the two specified indexes", () => {
const original = sliceText(3, 7);
expect(original).toEqual("name");
});
describe("with only a negative `begin`", () => {
it("returns the original string counting from the end when in the range", () => {
const original = sliceText(-1);
expect(original).toEqual("👋");
});
it("throws an error when out of range", () => {
expect(() => sliceText(-1000)).toThrow();
});
});
describe("with a positive `begin` and a negative `end`", () => {
it("returns correct slice when resulting range is valid", () => {
const original = sliceText(3, -7);
expect(original).toEqual("name is");
});
it("throws an error when resulting `end` index is lower than `begin`", () => {
expect(() => sliceText(7, -12)).toThrow();
});
it("throws an error when `begin` is out of range", () => {
expect(() => sliceText(1000, -12)).toThrow();
});
it("throws an error when resulting `end` index is out of range", () => {
expect(() => sliceText(7, -1000)).toThrow();
});
});
describe("with a negative `begin` and a positive `end`", () => {
it("returns correct slice when resulting range is valid", () => {
const original = sliceText(-9, 10);
expect(original).toEqual("is");
});
it("throws an error when resulting `begin` index is upper than `end`", () => {
expect(() => sliceText(-3, 5)).toThrow();
});
it("throws an error when `end` is out of range", () => {
expect(() => sliceText(-5, 1000)).toThrow();
});
it("throws an error when resulting `begin` index is out of range", () => {
expect(() => sliceText(-1000, 10)).toThrow();
});
});
describe("with negatives `begin` and `end`", () => {
it("returns correct slice when resulting range is valid", () => {
const original = sliceText(-9, -7);
expect(original).toEqual("is");
});
it("throws an error when resulting `end` index is lower than `begin`", () => {
expect(() => sliceText(-5, -10)).toThrow();
});
it("throws an error when resulting `begin` index is out of range", () => {
expect(() => sliceText(-1000, -10)).toThrow();
});
it("throws an error when resulting `end` index is out of range", () => {
expect(() => sliceText(-10, -1000)).toThrow();
});
});
});
describe("mergeEncodings", () => {
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
beforeAll(async () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john"]);
encode = promisify(tokenizer.encode.bind(tokenizer));
});
it("accepts `undefined` as a second parameter", () => {
const encoding = mergeEncodings([], undefined);
expect(encoding.constructor.name).toEqual("Encoding");
});
it("returns correct result with `growingOffsets` not provided", async () => {
const firstEncoding = await encode("my name is", null);
const secondEncoding = await encode("john", null);
const encoding = mergeEncodings([firstEncoding, secondEncoding]);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john"]);
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[0, 4],
]);
});
it("returns correct result when `growingOffsets` is `false`", async () => {
const firstEncoding = await encode("my name is", null);
const secondEncoding = await encode("john", null);
const encoding = mergeEncodings([firstEncoding, secondEncoding], false);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john"]);
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[0, 4],
]);
});
it("returns correct result when `growingOffsets` is `true`", async () => {
const firstEncoding = await encode("my name is", null);
const secondEncoding = await encode("john", null);
const encoding = mergeEncodings([firstEncoding, secondEncoding], true);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john"]);
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[10, 14],
]);
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/post-processors.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of
* a PostProcessor will return an instance of this class when instantiated.
*/
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface PostProcessor {}
/**
* Instantiate a new BertProcessing with the given tokens
*
* @param sep A tuple with the string representation of the SEP token, and its id
* @param cls A tuple with the string representation of the CLS token, and its id
*/
export function bertProcessing(
sep: [string, number],
cls: [string, number]
): PostProcessor;
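// Usage sketch (illustrative; the token strings and ids below are placeholders):
//
//   const processor = bertProcessing(["[SEP]", 102], ["[CLS]", 101]);
//   tokenizer.setPostProcessor(processor); // assuming an existing `tokenizer`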
/**
* Instantiate a new ByteLevelProcessing.
*
 * @param [trimOffsets=true] Whether to trim the whitespaces from the produced offsets.
 * By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you
 * don't want the offsets to include these whitespaces, then this processing step must be used.
* @since 0.6.0
*/
export function byteLevelProcessing(trimOffsets?: boolean): PostProcessor;
/**
* Instantiate a new RobertaProcessing with the given tokens
*
* @param sep A tuple with the string representation of the SEP token, and its id
* @param cls A tuple with the string representation of the CLS token, and its id
* @param [trimOffsets=true] Whether to trim the whitespaces in the produced offsets
* @param [addPrefixSpace=true] Whether addPrefixSpace was ON during the pre-tokenization
*/
export function robertaProcessing(
sep: [string, number],
cls: [string, number],
trimOffsets?: boolean,
addPrefixSpace?: boolean
): PostProcessor;
/**
* Instantiate a new TemplateProcessing.
*
* @param single A string describing the template for a single sequence
* @param pair A string describing the template for a pair of sequences
* @param specialTokens An array with all the special tokens
*/
export function templateProcessing(
single: string,
pair?: string,
specialTokens?: [string, number][]
): PostProcessor;
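// Usage sketch (mirrors post-processors.test.ts):
//
//   templateProcessing("[CLS] $A [SEP]", "[CLS] $A [SEP] $B:1 [SEP]:1", [
//     ["[CLS]", 1],
//     ["[SEP]", 2],
//   ]);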
/**
* Instantiate a new SequenceProcessing.
*
 * @param processors The list of PostProcessors to use
* @since 0.13.0
*/
export function sequenceProcessing(processors: PostProcessor[]): PostProcessor;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/models.js | const native = require("./native");
module.exports = {
BPE: {
init: native.models_BPE_init,
fromFile: native.models_BPE_from_file,
empty: native.models_BPE_empty,
},
WordPiece: {
init: native.models_WordPiece_init,
fromFile: native.models_WordPiece_from_file,
empty: native.models_WordPiece_empty,
},
WordLevel: {
init: native.models_WordLevel_init,
fromFile: native.models_WordLevel_from_file,
empty: native.models_WordLevel_empty,
},
Unigram: {
init: native.models_Unigram_init,
empty: native.models_Unigram_empty,
},
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/utils.js | const native = require("./native");
module.exports = {
mergeEncodings: native.utils_mergeEncodings,
slice: native.utils_slice,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts | import {
byteLevelPreTokenizer,
metaspacePreTokenizer,
punctuationPreTokenizer,
sequencePreTokenizer,
splitPreTokenizer,
whitespaceSplitPreTokenizer,
} from "./pre-tokenizers";
describe("byteLevelPreTokenizer", () => {
it("instantiates correctly", () => {
const processor = byteLevelPreTokenizer();
expect(processor.constructor.name).toEqual("PreTokenizer");
});
});
describe("metaspacePreTokenizer", () => {
it("instantiates correctly without any parameter", () => {
const processor = metaspacePreTokenizer();
expect(processor.constructor.name).toEqual("PreTokenizer");
});
it("accepts `undefined` as first parameter", () => {
expect(metaspacePreTokenizer(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(metaspacePreTokenizer("t", undefined)).toBeDefined();
});
it("can pre-tokenize strings", () => {
const pretok = metaspacePreTokenizer();
expect(pretok.preTokenizeString("Hello there friend")).toEqual([
["▁Hello", [0, 5]],
["▁there", [5, 11]],
["▁friend", [11, 18]],
]);
});
});
describe("punctuationPreTokenizer", () => {
it("instantiates correctly without any parameter", () => {
const processor = punctuationPreTokenizer();
expect(processor.constructor.name).toEqual("PreTokenizer");
});
it("instantiates correctly with non-default split delimeter", () => {
const processor = punctuationPreTokenizer("removed");
expect(processor.constructor.name).toEqual("PreTokenizer");
});
});
describe("splitPreTokenizer", () => {
it("instantiates correctly with invert parameter", () => {
const processor = splitPreTokenizer(" ", "mergedWithPrevious", false);
expect(processor.constructor.name).toEqual("PreTokenizer");
});
});
describe("sequencePreTokenizer", () => {
it("instantiates correctly", () => {
const punctuation = punctuationPreTokenizer();
const whitespace = whitespaceSplitPreTokenizer();
const sequence2 = sequencePreTokenizer([]);
expect(sequence2.constructor.name).toEqual("PreTokenizer");
const sequence3 = sequencePreTokenizer([punctuation, whitespace]);
expect(sequence3.constructor.name).toEqual("PreTokenizer");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/native.js | const addon = require("../../native");
module.exports = addon;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/utils.d.ts | import { RawEncoding } from "./raw-encoding";
/**
* Returns a subpart of a string according to specified indexes, and respecting unicode characters
*
* @param text The text for which to return a subpart
 * @param [start] The index from which to start (can be negative).
 * @param [end] The index (excluded) at which to stop (can be negative).
 * Stops at the end of the string if not provided.
 * @returns The full string if no start/end indexes are provided,
 * otherwise the original string between `start` (included) and `end` (excluded)
* @since 0.6.0
*/
export function slice(text: string, start?: number, end?: number): string;
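// Usage sketch (mirrors utils.test.ts):
//
//   slice("My name is John 👋", 3, 7); // => "name"
//   slice("My name is John 👋", -1);   // => "👋"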
/**
* Merge the list of RawEncoding into one final RawEncoding
* @param encodings The list of encodings to merge
* @param [growingOffsets=false] Whether the offsets should accumulate while merging
*/
export function mergeEncodings(
encodings: RawEncoding[],
growingOffsets?: boolean
): RawEncoding;
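/*
 * Illustrative sketch (not part of the original bindings): how `slice` and
 * `mergeEncodings` might be used. The encodings `first` and `second` are
 * hypothetical values assumed to come from a Tokenizer's encode calls.
 *
 *   import { slice, mergeEncodings } from "./utils";
 *
 *   // Unicode-aware slicing; negative indexes count from the end.
 *   const sub = slice("Hello there", 0, -6); // "Hello"
 *
 *   // Merge two RawEncoding instances into one, accumulating offsets.
 *   // const merged = mergeEncodings([first, second], true);
 */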
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/decoders.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of
* a Decoder will return an instance of this class when instantiated.
*/
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface Decoder {
decode(tokens: string[]): string;
}
/**
* Instantiate a new ByteLevel Decoder
*/
export function byteLevelDecoder(): Decoder;
/**
* Instantiate a new Replace Decoder
 * @param pattern The pattern to replace
 * @param content The content to use as replacement
*/
export function replaceDecoder(pattern: string, content: string): Decoder;
/**
* Instantiate a new WordPiece Decoder
* @param [prefix='##'] The prefix to use for subwords that are not a beginning-of-word
* @param [cleanup=true] Whether to cleanup some tokenization artifacts.
 * Mainly spaces before punctuation, and some abbreviated English forms.
*/
export function wordPieceDecoder(prefix?: string, cleanup?: boolean): Decoder;
/**
* Instantiate a new ByteFallback Decoder
* ByteFallback is a simple trick which converts tokens looking like `<0x61>`
* to pure bytes, and attempts to make them into a string. If the tokens
 * cannot be decoded, you will get � instead for each inconvertible byte token
*/
export function byteFallbackDecoder(): Decoder;
/**
* Instantiate a new Fuse Decoder which fuses all tokens into one string
*/
export function fuseDecoder(): Decoder;
/**
* Instantiate a new Strip Decoder
* @param [content] The character to strip
* @param [left] The number of chars to remove from the left of each token
* @param [right] The number of chars to remove from the right of each token
*/
export function stripDecoder(content: string, left: number, right: number): Decoder;
/**
* Instantiate a new Metaspace
*
* @param [replacement='▁'] The replacement character.
* Must be exactly one character. By default we use the `▁` (U+2581) meta symbol (same as in SentencePiece).
* @param [addPrefixSpace=true] Whether to add a space to the first word if there isn't already one.
* This lets us treat `hello` exactly like `say hello`.
*/
export function metaspaceDecoder(replacement?: string, addPrefixSpace?: boolean): Decoder;
/**
* Instantiate a new BPE Decoder
* @param [suffix='</w>'] The suffix that was used to characterize an end-of-word.
* This suffix will be replaced by whitespaces during the decoding
*/
export function bpeDecoder(suffix?: string): Decoder;
/**
* Instantiate a new CTC Decoder
 * @param [pad_token='<pad>'] The pad token used by CTC to delimit a new token.
* @param [word_delimiter_token='|'] The word delimiter token. It will be replaced by a space
* @param [cleanup=true] Whether to cleanup some tokenization artifacts.
 * Mainly spaces before punctuation, and some abbreviated English forms.
*/
export function ctcDecoder(
pad_token?: string,
word_delimiter_token?: string,
cleanup?: boolean
): Decoder;
/**
* Instantiate a new Sequence Decoder
* @param [decoders] The decoders to chain
*/
export function sequenceDecoder(decoders: Decoder[]): Decoder;
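/*
 * Illustrative sketch (not part of the original bindings): chaining decoders
 * with `sequenceDecoder`. The token list is a made-up example of output from a
 * model that uses the Metaspace pre-tokenizer.
 *
 *   import { metaspaceDecoder, fuseDecoder, sequenceDecoder } from "./decoders";
 *
 *   const decoder = sequenceDecoder([metaspaceDecoder(), fuseDecoder()]);
 *   decoder.decode(["▁Hello", "▁there"]); // "Hello there"
 */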
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/enums.ts | export enum TruncationStrategy {
LongestFirst = "longest_first",
OnlyFirst = "only_first",
OnlySecond = "only_second",
}
export enum TruncationDirection {
Left = "left",
Right = "right",
}
export enum PaddingDirection {
Left = "left",
Right = "right",
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/tokenizer.d.ts | import { Decoder } from "./decoders";
import { PaddingDirection, TruncationDirection, TruncationStrategy } from "./enums";
import { Model } from "./models";
import { Normalizer } from "./normalizers";
import { PostProcessor } from "./post-processors";
import { PreTokenizer } from "./pre-tokenizers";
import { RawEncoding } from "./raw-encoding";
import { Trainer } from "./trainers";
export interface FromPretrainedOptions {
/**
* The revision to download
* @default "main"
*/
revision?: string;
/**
* The auth token to use to access private repositories on the Hugging Face Hub
* @default undefined
*/
authToken?: string;
}
export interface TruncationOptions {
/**
* The length of the previous sequence to be included in the overflowing sequence
* @default 0
*/
stride?: number;
/**
* Strategy to use:
 *  - `TruncationStrategy.LongestFirst` Iteratively removes tokens from the longest sequence
 *    until the total input fits under maxLength (when there is a pair of input sequences).
* - `TruncationStrategy.OnlyFirst` Only truncate the first sequence.
* - `TruncationStrategy.OnlySecond` Only truncate the second sequence.
* @default TruncationStrategy.LongestFirst
*/
strategy?: TruncationStrategy;
/**
* Which side to truncate
* @default TruncationDirection.Left
*/
direction?: TruncationDirection;
}
export interface TruncationConfiguration extends Required<TruncationOptions> {
/**
* The maximum length at which to truncate
*/
maxLength: number;
}
export type PaddingConfiguration = Required<
Omit<PaddingOptions, "maxLength" | "padToMultipleOf">
> &
Pick<PaddingOptions, "maxLength" | "padToMultipleOf">;
export interface PaddingOptions {
/**
* @default PaddingDirection.Right
*/
direction?: PaddingDirection;
/**
* Padding length. If not provided:
 *  - Will default to the longest sequence when encoding a batch.
 *  - No padding will be applied when encoding a single sequence.
*/
maxLength?: number;
/**
* If specified, the padding will snap to a multiple of the given value.
* @default undefined
*/
padToMultipleOf?: number;
/**
* The index to be used when padding
* @default 0
*/
padId?: number;
/**
* The type index to be used when padding
* @default 0
*/
padTypeId?: number;
/**
* The pad token to be used when padding
* @default "[PAD]"
*/
padToken?: string;
}
export type TextInputSequence = string;
export type PreTokenizedInputSequence = string[];
export type InputSequence = TextInputSequence | PreTokenizedInputSequence;
export type TextEncodeInput = TextInputSequence | [TextInputSequence, TextInputSequence];
export type PreTokenizedEncodeInput =
| PreTokenizedInputSequence
| [PreTokenizedInputSequence, PreTokenizedInputSequence];
export type EncodeInput = TextEncodeInput | PreTokenizedEncodeInput;
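/*
 * Illustrative sketch (not part of the original bindings): the accepted
 * EncodeInput shapes, using made-up sentences.
 *
 *   const single: EncodeInput = "Hello there";
 *   const pair: EncodeInput = ["Hello there", "General Kenobi"];
 *   const preTokenized: EncodeInput = [["Hello", "there"], ["General", "Kenobi"]];
 *   // Pre-tokenized inputs must be encoded with `{ isPretokenized: true }`.
 */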
export interface EncodeOptions {
/**
* Whether the given sequence is pre-tokenized
* @default false
*/
isPretokenized?: boolean;
/**
* Whether we should add special tokens
* @default true
*/
addSpecialTokens?: boolean;
}
/**
* A Tokenizer works as a pipeline, it processes some raw text as input and outputs
* an `Encoding`.
* The various steps of the pipeline are:
* 1. The `Normalizer`: in charge of normalizing the text. Common examples of
* normalization are the unicode normalization standards, such as NFD or NFKC.
* 2. The `PreTokenizer`: in charge of creating initial words splits in the text.
* The most common way of splitting text is simply on whitespace.
* 3. The `Model`: in charge of doing the actual tokenization. An example of a
* `Model` would be `BPE` or `WordPiece`.
* 4. The `PostProcessor`: in charge of post-processing the `Encoding` to add anything
* relevant that, for example, a language model would need, such as special tokens.
*/
export class Tokenizer {
/**
* Instantiate a new Tokenizer using the given Model
*/
constructor(model: Model);
/**
* Instantiate a new Tokenizer from the given file
* @param path Path to a file containing a Tokenizer
*/
static fromFile(path: string): Tokenizer;
/**
* Instantiate a new Tokenizer from the given JSON string
* @param s A JSON string representation of the Tokenizer
*/
static fromString(s: string): Tokenizer;
/**
* Instantiate a new Tokenizer from an existing file on the
* Hugging Face Hub. Any model repo containing a `tokenizer.json`
* can be used here.
* @param identifier A model identifier on the Hub
* @param options Additional options
*/
  static fromPretrained(identifier: string, options?: FromPretrainedOptions): Tokenizer;
/**
* Add the given tokens to the vocabulary
*
* @param tokens A list of tokens to add to the vocabulary.
* Each token can either be a string, or an instance of {@link AddedToken}.
* @returns The number of tokens that were added to the vocabulary
*/
addTokens(tokens: (string | AddedToken)[]): number;
/**
* Add the given special tokens to the vocabulary, and treat them as special tokens.
* The special tokens will never be processed by the model, and will be removed while decoding.
*
* @param tokens The list of special tokens to add.
* Each token can either be a string or an instance of {@link AddedToken}.
* @returns The number of tokens that were added to the vocabulary
*/
addSpecialTokens(tokens: (string | AddedToken)[]): number;
/**
* Encode the given sequence
*
* @param sequence The sequence to encode
* @param pair The optional pair sequence
 * @param options Optional encoding options (e.g. whether to add the special tokens)
* @param __callback Callback called when encoding is complete
*/
encode(
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null, // |(err: Error, encoding: RawEncoding) => void,
__callback?: (err: Error, encoding: RawEncoding) => void
): void;
/**
* Encode the given sequences or pair of sequences
*
 * @param inputs A list of sequences or pairs of sequences. The list can contain both at the same time.
 * @param options Optional encoding options (e.g. whether to add the special tokens)
* @param __callback Callback called when encoding is complete
*/
encodeBatch(
inputs: EncodeInput[],
options?: EncodeOptions | null, // (err: Error, encodings: RawEncoding[]) => void,
__callback?: (err: Error, encodings: RawEncoding[]) => void
): void;
/**
* Decode the given list of ids to a string sequence
*
* @param ids A list of ids to be decoded
* @param skipSpecialTokens Whether to remove all the special tokens from the output string
* @param __callback Callback called with decoded string
*/
decode(
ids: number[],
skipSpecialTokens: boolean,
__callback: (err: Error, encodings: string) => void
): void;
/**
* Decode the list of sequences to a list of string sequences
*
* @param sequences A list of sequence of ids to be decoded
* @param skipSpecialTokens Whether to remove all the special tokens from the output strings
* @param __callback Callback called with decoded strings
*/
decodeBatch(
sequences: number[][],
skipSpecialTokens: boolean,
__callback: (err: Error, encodings: string[]) => void
  ): void;
/**
* Convert the given token id to its corresponding string
*
* @param id The token id to convert
* @returns The corresponding string if it exists
*/
idToToken(id: number): string | undefined;
/**
* Convert the given token to its corresponding id
*
* @param token The token to convert
* @returns The corresponding id if it exists
*/
tokenToId(token: string): number | undefined;
/**
* Enable/change padding with specified options
* @param [options] Padding options
*/
setPadding(options?: PaddingOptions): PaddingConfiguration;
/**
* Disable padding
*/
disablePadding(): void;
/**
* Enable/change truncation with specified options
*
* @param maxLength The maximum length at which to truncate
* @param [options] Additional truncation options
*/
setTruncation(maxLength: number, options?: TruncationOptions): TruncationConfiguration;
/**
* Disable truncation
*/
disableTruncation(): void;
/**
* Train the model using the given files
*
* @param trainer Trainer to use
* @param files List of files to use
*/
train(trainer: Trainer, files: string[]): void;
/**
* Returns the vocabulary
*
* @param [withAddedTokens=true] Whether to include the added tokens in the vocabulary
*/
getVocab(withAddedTokens?: boolean): { [token: string]: number };
/**
* Returns the size of the vocabulary
*
* @param [withAddedTokens=true] Whether to include the added tokens in the vocabulary's size
*/
getVocabSize(withAddedTokens?: boolean): number;
/**
* Returns the number of encoding tasks running currently
*/
runningTasks(): number;
/**
* Returns the model in use
*/
getModel(): Model;
/**
* Change the model to use with this Tokenizer
* @param model New model to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the model is already used in another Tokenizer
*/
setModel(model: Model): void;
/**
* Returns the normalizer in use
*/
getNormalizer(): Normalizer | undefined;
/**
* Change the normalizer to use with this Tokenizer
* @param normalizer New normalizer to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the normalizer is already used in another Tokenizer
*/
setNormalizer(normalizer: Normalizer): void;
/**
* Returns the pre-tokenizer in use
*/
getPreTokenizer(): PreTokenizer | undefined;
/**
* Change the pre-tokenizer to use with this Tokenizer
* @param preTokenizer New pre-tokenizer to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the pre-tokenizer is already used in another Tokenizer
*/
setPreTokenizer(preTokenizer: PreTokenizer): void;
/**
* Returns the post-processor in use
*/
getPostProcessor(): PostProcessor | undefined;
/**
* Change the post-processor to use with this Tokenizer
* @param postProcessor New post-processor to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the post-processor is already used in another Tokenizer
*/
setPostProcessor(processor: PostProcessor): void;
/**
* Returns the decoder in use
*/
getDecoder(): Decoder | undefined;
/**
* Change the decoder to use with this Tokenizer
* @param decoder New decoder to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the decoder is already used in another Tokenizer
*/
setDecoder(decoder: Decoder): void;
/**
* Apply all the post-processing steps to the given encodings.
* The various steps are:
* 1. Truncate according to global params (@see setTruncation)
* 2. Apply the PostProcessor
* 3. Pad according to global params (@see setPadding)
* @param encoding The main Encoding to post process
* @param [pair] An optional pair Encoding
 * @param [addSpecialTokens=true] Whether to add special tokens. Defaults to `true`.
* @since 0.6.0
*/
postProcess(
encoding: RawEncoding,
pair?: RawEncoding,
addSpecialTokens?: boolean
): RawEncoding;
/**
* Save the Tokenizer as JSON to the given path
* @param path Path to the JSON file to write
* @param [pretty=false] Whether the JSON string should be prettified
*/
save(path: string, pretty?: boolean): void;
/**
* Get a serialized JSON version of the Tokenizer as a string
* @param [pretty=false] Whether the JSON string should be prettified
*/
toString(pretty?: boolean): string;
}
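/*
 * Illustrative sketch (not part of the original bindings): a minimal use of the
 * raw Tokenizer binding. The file name "tokenizer.json" is a hypothetical
 * placeholder for any serialized tokenizer.
 *
 *   const tokenizer = Tokenizer.fromFile("tokenizer.json");
 *   tokenizer.setTruncation(128, { strategy: TruncationStrategy.LongestFirst });
 *   tokenizer.setPadding({ maxLength: 128, padToken: "[PAD]" });
 *
 *   // The raw binding is callback based; higher-level wrappers typically promisify it.
 *   tokenizer.encode("Hello there", null, { addSpecialTokens: true }, (err, encoding) => {
 *     if (err) throw err;
 *     console.log(encoding.getIds());
 *   });
 */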
/**
* Options used to construct an AddedToken
* @since 0.6.0
*/
export interface AddedTokenOptions {
/**
* Whether this token should strip all potential whitespaces on the left side.
 * If true, this token will greedily match any whitespace on the left and then strip
 * it out.
 * @default false
*/
leftStrip?: boolean;
/**
* Whether this token should strip all potential whitespaces on the right side.
 * If true, this token will greedily match any whitespace on the right and then strip
 * it out.
 * @default false
*/
rightStrip?: boolean;
/**
 * Whether this token should only match single words.
 * If true, this token will never match inside of a word.
 * @default false
*/
singleWord?: boolean;
/**
* Whether this token should match on the normalized version of the text. For example
* with the added token `yesterday` and a normalizer in charge of lowercasing the text,
* the input `I saw a lion Yesterday` would match the token.
 * This is false by default for special tokens, and true otherwise
 * @default true
*/
normalized?: boolean;
}
/**
* AddedToken represents a token to be added to a Tokenizer.
* An AddedToken can have special options defining the way it should behave.
*
* @since 0.6.0
*/
export class AddedToken {
/**
* Instantiate a new AddedToken
* @param content The content of the token
* @param special Whether this is a special token
* @param [options] Options for the token
*/
constructor(content: string, special: boolean, options?: AddedTokenOptions);
/**
* Get the content of the AddedToken
*/
getContent(): string;
}
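/*
 * Illustrative sketch (not part of the original bindings): registering tokens
 * with AddedToken options. `tokenizer` is assumed to be an existing Tokenizer
 * instance, and the token contents are made-up examples.
 *
 *   const mask = new AddedToken("[MASK]", true, { leftStrip: true, rightStrip: true });
 *   tokenizer.addSpecialTokens([mask, "[PAD]"]);
 *   tokenizer.addTokens([new AddedToken("new_word", false, { singleWord: true })]);
 */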
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/pre-tokenizers.js | const native = require("./native");
module.exports = {
byteLevelPreTokenizer: native.pre_tokenizers_ByteLevel,
byteLevelAlphabet: native.pre_tokenizers_ByteLevel_Alphabet,
whitespacePreTokenizer: native.pre_tokenizers_Whitespace,
whitespaceSplitPreTokenizer: native.pre_tokenizers_WhitespaceSplit,
bertPreTokenizer: native.pre_tokenizers_BertPreTokenizer,
metaspacePreTokenizer: native.pre_tokenizers_Metaspace,
charDelimiterSplitPreTokenizer: native.pre_tokenizers_CharDelimiterSplit,
punctuationPreTokenizer: native.pre_tokenizers_Punctuation,
sequencePreTokenizer: native.pre_tokenizers_Sequence,
digitsPreTokenizer: native.pre_tokenizers_Digits,
splitPreTokenizer: native.pre_tokenizers_Split,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/native.prod.js | const native = require("../bin-package");
module.exports = native;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/post-processors.js | const native = require("./native");
module.exports = {
bertProcessing: native.processors_BertProcessing,
byteLevelProcessing: native.processors_ByteLevel,
robertaProcessing: native.processors_RobertaProcessing,
templateProcessing: native.processors_TemplateProcessing,
sequenceProcessing: native.processors_Sequence,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/models.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of
 * a Model will return an instance of this class when instantiated.
*/
interface Model {
/**
* Save the current model in the given folder, using the given name
* for the various files that will get created.
* Any file with the same name that already exist in this folder will be overwritten.
*
* @param folder Name of the destination folder
* @param name Prefix to use in the name of created files
*/
save(folder: string, name?: string): string[];
}
type ModelCallback = (err: Error, model: Model) => void;
export interface BPEOptions {
/**
 * The number of words that the BPE cache can contain. The cache speeds up
 * the process by keeping the result of the merge operations
 * for a number of words.
* @default 10_000
*/
cacheCapacity?: number;
/**
 * The BPE dropout to use. Must be a float between 0 and 1
*/
dropout?: number;
/**
* The unknown token to be used by the model
*/
unkToken?: string;
/**
* The prefix to attach to subword units that don't represent a beginning of word
*/
continuingSubwordPrefix?: string;
/**
* The suffix to attach to subword units that represent an end of word
*/
endOfWordSuffix?: string;
}
export namespace BPE {
/**
* Instantiate a BPE model from the given vocab and merges
*
* @param vocab A dict mapping strings to number, representing the vocab
* @param merges An array of tuples of strings, representing two tokens to be merged
* @param options BPE model options
*/
export function init(
vocab: { [token: string]: number },
merges: [string, string][],
options?: BPEOptions
): Model;
/**
* Instantiate a BPE model from the given vocab and merges files
*
* @param vocab Path to a vocabulary JSON file
* @param merges Path to a merge file
* @param options BPE model options
* @param __callback Callback called when model is loaded
*/
export function fromFile(
vocab: string,
merges: string,
optionsOrCallback?: BPEOptions | ModelCallback,
__callback?: ModelCallback
): void;
/**
* Instantiate an empty BPE Model
*/
export function empty(): Model;
}
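/*
 * Illustrative sketch (not part of the original bindings): building a tiny BPE
 * model in memory. The vocab and merges are made-up toy values.
 *
 *   const model = BPE.init(
 *     { "[UNK]": 0, "a": 1, "b": 2, "ab": 3 },
 *     [["a", "b"]],
 *     { unkToken: "[UNK]" }
 *   );
 *
 *   // From files, the binding is callback based (hypothetical paths):
 *   // BPE.fromFile("vocab.json", "merges.txt", (err, model) => { ... });
 */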
export interface WordPieceOptions {
/**
* The prefix to attach to subword units that don't represent a beginning of word
* @default "##"
*/
continuingSubwordPrefix?: string;
/**
 * The maximum number of characters allowed in a single word.
* @default 100
*/
maxInputCharsPerWord?: number;
/**
* The unknown token to be used by the model.
* @default "[UNK]"
*/
unkToken?: string;
}
export namespace WordPiece {
/**
* Instantiate a WordPiece model from the given vocab
*
* @param vocab A dict mapping strings to numbers, representing the vocab
* @param options WordPiece model options
*/
export function init(
vocab: { [token: string]: number },
options?: WordPieceOptions
): Model;
/**
* Instantiate a WordPiece model from the given vocab file
*
* @param vocab Path to a vocabulary file
* @param options WordPiece model options
* @param __callback Callback called when model is loaded
*/
export function fromFile(
vocab: string,
optionsOrCallback?: WordPieceOptions | ModelCallback,
__callback?: ModelCallback
): void;
/**
* Instantiate an empty WordPiece model
*/
export function empty(): Model;
}
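/*
 * Illustrative sketch (not part of the original bindings): loading a WordPiece
 * model from a vocab file. "vocab.txt" is a hypothetical path; the callback
 * follows the ModelCallback type above.
 *
 *   WordPiece.fromFile("vocab.txt", { unkToken: "[UNK]" }, (err, model) => {
 *     if (err) throw err;
 *     // e.g. pass `model` to `new Tokenizer(model)`
 *   });
 */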
export interface WordLevelOptions {
/**
* The unknown token to be used by the model.
* @default "[UNK]"
*/
unkToken?: string;
}
export namespace WordLevel {
/**
* Instantiate a WordLevel model from the given vocab
*
* @param vocab A dict mapping strings to numbers, representing the vocab
* @param options WordLevel model options
*/
export function init(
vocab: { [token: string]: number },
options?: WordLevelOptions
): Model;
/**
* Instantiate a WordLevel model from the given vocab file
*
* @param vocab Path to a vocabulary file
* @param options WordLevel model options
* @param __callback Callback called when model is loaded
*/
export function fromFile(
vocab: string,
optionsOrCallback?: WordLevelOptions | ModelCallback,
__callback?: ModelCallback
): void;
/**
* Instantiate an empty WordLevel model
*/
export function empty(): Model;
}
export interface UnigramOptions {
/**
* The unknown token id to be used by the model.
* @default undefined
*/
unkId?: number;
/**
 * Whether byte-fallback support should be enabled.
* @default false
*/
byte_fallback?: boolean;
}
export namespace Unigram {
/**
* Instantiate a Unigram model from the given vocab
*
 * @param vocab An array of [token, score] tuples
 * @param options Unigram model options
*/
export function init(vocab: [string, number][], options?: UnigramOptions): Model;
/**
* Instantiate an empty Unigram model
*/
export function empty(): Model;
}
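/*
 * Illustrative sketch (not part of the original bindings): the Unigram vocab is
 * a list of [token, score] tuples (scores are log probabilities), not a
 * token-to-id map. The values below are made-up toy numbers.
 *
 *   const model = Unigram.init(
 *     [["<unk>", 0], ["▁hello", -2.5], ["▁there", -3.1]],
 *     { unkId: 0 }
 *   );
 */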
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/bindings | hf_public_repos/tokenizers/bindings/node/lib/bindings/__mocks__/vocab.txt | my
name
is
jo
##hn
what
yours
pair
[UNK]
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/bindings | hf_public_repos/tokenizers/bindings/node/lib/bindings/__mocks__/vocab.json | {} | 0 |
hf_public_repos/tokenizers/bindings/node | hf_public_repos/tokenizers/bindings/node/native/Cargo.toml | [package]
name = "node"
version = "0.13.3"
authors = ["Anthony MOI <[email protected]>"]
license = "Apache-2.0"
build = "build.rs"
exclude = ["artifacts.json", "index.node"]
[lib]
name = "node"
crate-type = ["cdylib"]
[build-dependencies]
neon-build = "0.3.3"
[dependencies]
neon = "0.3"
neon-runtime = "0.3"
neon-serde = "0.3"
serde = { version = "1.0", features = [ "rc", "derive" ] }
tokenizers = { path = "../../../tokenizers" }
serde_json = "1.0"
| 0 |
hf_public_repos/tokenizers/bindings/node | hf_public_repos/tokenizers/bindings/node/native/build.rs | extern crate neon_build;
fn main() {
neon_build::setup(); // must be called in build.rs
// add project-specific build logic here...
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/decoders.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use std::sync::Arc;
use tk::decoders::DecoderWrapper;
/// Decoder
#[derive(Clone, Serialize, Deserialize)]
pub struct Decoder {
#[serde(flatten)]
pub decoder: Option<Arc<DecoderWrapper>>,
}
impl tk::Decoder for Decoder {
fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> {
self.decoder
.as_ref()
.ok_or("Uninitialized Decoder")?
.decode_chain(tokens)
}
}
declare_types! {
pub class JsDecoder for Decoder {
init(_) {
// This should not be called from JS
Ok(Decoder { decoder: None })
}
method decode(mut cx) {
use tk::Decoder;
let tokens = cx.extract_vec::<String>(0)?;
let this = cx.this();
let guard = cx.lock();
let output = this.borrow(&guard)
.decoder.as_ref().unwrap()
.decode(tokens)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.string(output).upcast())
}
}
}
/// byte_level()
fn byte_level(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::byte_level::ByteLevel::default().into(),
));
Ok(decoder)
}
/// replace(pattern: String, content: String)
fn replace(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let pattern: String = cx.extract::<String>(0)?;
let content: String = cx.extract::<String>(1)?;
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::normalizers::replace::Replace::new(pattern, content)
.map_err(|e| Error(e.to_string()))?
.into(),
));
Ok(decoder)
}
/// wordpiece(prefix: String = "##", cleanup: bool)
fn wordpiece(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let prefix = cx
.extract_opt::<String>(0)?
.unwrap_or_else(|| String::from("##"));
let cleanup = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::wordpiece::WordPiece::new(prefix, cleanup).into(),
));
Ok(decoder)
}
/// byte_fallback()
fn byte_fallback(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::byte_fallback::ByteFallback::new().into(),
));
Ok(decoder)
}
/// fuse()
fn fuse(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(tk::decoders::fuse::Fuse::new().into()));
Ok(decoder)
}
/// strip(content: char, left: usize, right: usize)
fn strip(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let content: char = cx.extract(0)?;
let left: usize = cx.extract(1)?;
let right: usize = cx.extract(2)?;
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::strip::Strip::new(content, left, right).into(),
));
Ok(decoder)
}
/// metaspace(replacement: String = "_", add_prefix_space: bool = true)
fn metaspace(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let replacement = cx.extract_opt::<char>(0)?.unwrap_or('▁');
let add_prefix_space = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::metaspace::Metaspace::new(replacement, add_prefix_space).into(),
));
Ok(decoder)
}
/// bpe_decoder(suffix: String = "</w>")
fn bpe_decoder(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let suffix = cx
.extract_opt::<String>(0)?
.unwrap_or_else(|| String::from("</w>"));
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder =
Some(Arc::new(tk::decoders::bpe::BPEDecoder::new(suffix).into()));
Ok(decoder)
}
/// ctc_decoder(pad_token: String = "<pad>", word_delimiter_token: String = "|", cleanup = true)
fn ctc_decoder(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let pad_token = cx
.extract_opt::<String>(0)?
.unwrap_or_else(|| String::from("<pad>"));
let word_delimiter_token = cx
.extract_opt::<String>(1)?
.unwrap_or_else(|| String::from("|"));
let cleanup = cx.extract_opt::<bool>(2)?.unwrap_or(true);
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::ctc::CTC::new(pad_token, word_delimiter_token, cleanup).into(),
));
Ok(decoder)
}
/// sequence(decoders: Decoder[])
fn sequence(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let decoders = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(decoders.len());
decoders.into_iter().try_for_each(|decoder| {
match decoder.downcast::<JsDecoder>().or_throw(&mut cx) {
Ok(decoder) => {
let guard = cx.lock();
if let Some(decoder_arc) = &decoder.borrow(&guard).decoder {
let decoder: DecoderWrapper = (**decoder_arc).clone();
sequence.push(decoder);
}
Ok(())
}
Err(e) => Err(e),
}
})?;
let mut pretok = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).decoder = Some(Arc::new(tk::DecoderWrapper::Sequence(
tk::decoders::sequence::Sequence::new(sequence),
)));
Ok(pretok)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_ByteLevel", prefix), byte_level)?;
m.export_function(&format!("{}_Replace", prefix), replace)?;
m.export_function(&format!("{}_WordPiece", prefix), wordpiece)?;
m.export_function(&format!("{}_ByteFallback", prefix), byte_fallback)?;
m.export_function(&format!("{}_Fuse", prefix), fuse)?;
m.export_function(&format!("{}_Strip", prefix), strip)?;
m.export_function(&format!("{}_Metaspace", prefix), metaspace)?;
m.export_function(&format!("{}_BPEDecoder", prefix), bpe_decoder)?;
m.export_function(&format!("{}_CTC", prefix), ctc_decoder)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/processors.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use std::sync::Arc;
use tk::processors::PostProcessorWrapper;
use tk::Encoding;
/// Processor
#[derive(Clone, Serialize, Deserialize)]
pub struct Processor {
#[serde(flatten)]
pub processor: Option<Arc<PostProcessorWrapper>>,
}
impl tk::PostProcessor for Processor {
fn added_tokens(&self, is_pair: bool) -> usize {
self.processor
.as_ref()
.expect("Uninitialized PostProcessor")
.added_tokens(is_pair)
}
fn process_encodings(
&self,
encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> tk::Result<Vec<Encoding>> {
self.processor
.as_ref()
.ok_or("Uninitialized PostProcessor")?
.process_encodings(encodings, add_special_tokens)
}
}
declare_types! {
pub class JsPostProcessor for Processor {
init(_) {
// This should not be called from JS
Ok(Processor { processor: None })
}
}
}
/// bert_processing(sep: [String, number], cls: [String, number])
fn bert_processing(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let sep = cx.extract::<(String, u32)>(0)?;
let cls = cx.extract::<(String, u32)>(1)?;
let mut processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
processor.borrow_mut(&guard).processor = Some(Arc::new(
tk::processors::bert::BertProcessing::new(sep, cls).into(),
));
Ok(processor)
}
/// roberta_processing(
/// sep: [String, number],
/// cls: [String, number],
/// trimOffsets: boolean = true,
/// addPrefixSpace: boolean = true
/// )
fn roberta_processing(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let sep = cx.extract::<(String, u32)>(0)?;
let cls = cx.extract::<(String, u32)>(1)?;
let mut processor = tk::processors::roberta::RobertaProcessing::new(sep, cls);
if let Some(trim_offsets) = cx.extract_opt::<bool>(2)? {
processor = processor.trim_offsets(trim_offsets);
}
if let Some(add_prefix_space) = cx.extract_opt::<bool>(3)? {
processor = processor.add_prefix_space(add_prefix_space);
}
let mut js_processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_processor.borrow_mut(&guard).processor = Some(Arc::new(processor.into()));
Ok(js_processor)
}
/// bytelevel(trimOffsets?: boolean)
fn bytelevel(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let mut byte_level = tk::processors::byte_level::ByteLevel::default();
if let Some(trim_offsets) = cx.extract_opt::<bool>(0)? {
byte_level = byte_level.trim_offsets(trim_offsets);
}
let mut processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
processor.borrow_mut(&guard).processor = Some(Arc::new(byte_level.into()));
Ok(processor)
}
/// template_processing(
/// single: String,
/// pair?: String,
/// special_tokens?: [String, number][] = [],
/// )
fn template_processing(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let mut i = 1;
let special_tokens = loop {
if let Ok(Some(spe)) = cx.extract_opt::<Vec<(String, u32)>>(i) {
break spe;
}
i += 1;
if i == 3 {
break vec![];
}
};
let single = cx.extract::<String>(0)?;
let pair = cx.extract_opt::<String>(1)?;
let mut builder = tk::processors::template::TemplateProcessing::builder();
builder.try_single(single).map_err(Error)?;
builder.special_tokens(special_tokens);
if let Some(pair) = pair {
builder.try_pair(pair).map_err(Error)?;
}
let processor = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_processor.borrow_mut(&guard).processor = Some(Arc::new(processor.into()));
Ok(js_processor)
}
/// sequence(processors: Processor[])
fn sequence(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let processors = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(processors.len());
processors.into_iter().try_for_each(|processor| {
match processor.downcast::<JsPostProcessor>().or_throw(&mut cx) {
Ok(processor) => {
let guard = cx.lock();
if let Some(processor_arc) = &processor.borrow(&guard).processor {
let processor: PostProcessorWrapper = (**processor_arc).clone();
sequence.push(processor);
}
Ok(())
}
Err(e) => Err(e),
}
})?;
let mut pretok = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).processor = Some(Arc::new(PostProcessorWrapper::Sequence(
tk::processors::sequence::Sequence::new(sequence),
)));
Ok(pretok)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BertProcessing", prefix), bert_processing)?;
m.export_function(&format!("{}_RobertaProcessing", prefix), roberta_processing)?;
m.export_function(&format!("{}_ByteLevel", prefix), bytelevel)?;
m.export_function(
&format!("{}_TemplateProcessing", prefix),
template_processing,
)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/encoding.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use crate::tokenizer::PaddingParams;
use neon::prelude::*;
use tk::utils::truncation::TruncationDirection;
/// Encoding
pub struct Encoding {
pub encoding: Option<tk::tokenizer::Encoding>,
}
declare_types! {
pub class JsEncoding for Encoding {
init(_) {
// This should never be called from JavaScript
Ok(Encoding { encoding: None })
}
method getLength(mut cx) {
let this = cx.this();
let guard = cx.lock();
let length = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_ids()
.len();
Ok(cx.number(length as f64).upcast())
}
method getNSequences(mut cx) {
let this = cx.this();
let guard = cx.lock();
let n = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.n_sequences();
Ok(cx.number(n as f64).upcast())
}
method setSequenceId(mut cx) {
let seq_id = cx.extract::<usize>(0)?;
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.encoding.as_mut().expect("Uninitialized Encoding")
.set_sequence_id(seq_id);
Ok(cx.undefined().upcast())
}
method getIds(mut cx) {
// getIds(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_ids()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getTypeIds(mut cx) {
// getTypeIds(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_type_ids()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getAttentionMask(mut cx) {
// getAttentionMask(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_attention_mask()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getSpecialTokensMask(mut cx) {
// getSpecialTokensMask(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_special_tokens_mask()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getTokens(mut cx) {
// getTokens(): string[]
let this = cx.this();
let guard = cx.lock();
let tokens = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_tokens()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &tokens)?)
}
method getWordIds(mut cx) {
// getWordIds(): (number | undefined)[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_word_ids()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getSequenceIds(mut cx) {
// getSequenceIds(): (number | undefined)[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_sequence_ids();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getOffsets(mut cx) {
// getOffsets(): [number, number][]
let this = cx.this();
let guard = cx.lock();
let offsets = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_offsets()
.to_vec();
let js_offsets = neon_serde::to_value(&mut cx, &offsets)?;
Ok(js_offsets)
}
method getOverflowing(mut cx) {
// getOverflowing(): Encoding[]
let this = cx.this();
let guard = cx.lock();
let overflowings = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_overflowing()
.clone();
let js_overflowings = JsArray::new(&mut cx, overflowings.len() as u32);
for (index, overflowing) in overflowings.iter().enumerate() {
let mut js_overflowing = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
// Set the content
let guard = cx.lock();
js_overflowing.borrow_mut(&guard).encoding = Some(overflowing.clone());
js_overflowings.set(&mut cx, index as u32, js_overflowing)?;
}
Ok(js_overflowings.upcast())
}
method wordToTokens(mut cx) {
// wordToTokens(word: number, seqId: number = 0): [number, number] | undefined
let word = cx.extract::<u32>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.word_to_tokens(word, seq_id);
if let Some(tokens) = res {
Ok(neon_serde::to_value(&mut cx, &tokens)?)
} else {
Ok(cx.undefined().upcast())
}
}
method wordToChars(mut cx) {
// wordToChars(word: number, seqId: number = 0): [number, number] | undefined
let word = cx.extract::<u32>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.word_to_chars(word, seq_id);
if let Some(offsets) = res {
Ok(neon_serde::to_value(&mut cx, &offsets)?)
} else {
Ok(cx.undefined().upcast())
}
}
method tokenToSequence(mut cx) {
// tokenToSequence(token: number): number | undefined
let token = cx.extract::<usize>(0)?;
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.token_to_sequence(token);
if let Some(seq) = res {
Ok(neon_serde::to_value(&mut cx, &seq)?)
} else {
Ok(cx.undefined().upcast())
}
}
method tokenToChars(mut cx) {
// tokenToChars(token: number): [number, number] | undefined
let token = cx.extract::<usize>(0)?;
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.token_to_chars(token);
if let Some((_, offsets)) = res {
Ok(neon_serde::to_value(&mut cx, &offsets)?)
} else {
Ok(cx.undefined().upcast())
}
}
method tokenToWord(mut cx) {
// tokenToWord(token: number): number | undefined
let token = cx.argument::<JsNumber>(0)?.value() as usize;
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.token_to_word(token);
if let Some((_, index)) = res {
Ok(cx.number(index as f64).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method charToToken(mut cx) {
// charToToken(pos: number, seqId: number = 0): number | undefined
let pos = cx.extract::<usize>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let index = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.char_to_token(pos, seq_id);
if let Some(index) = index {
Ok(cx.number(index as f64).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method charToWord(mut cx) {
// charToWord(pos: number, seqId: number = 0): number | undefined
let pos = cx.extract::<usize>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let index = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.char_to_word(pos, seq_id);
if let Some(index) = index {
Ok(cx.number(index as f64).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method pad(mut cx) {
// pad(length: number, options?: {
// direction?: 'left' | 'right' = 'right',
// padId?: number = 0,
// padTypeId?: number = 0,
// padToken?: string = "[PAD]"
// })
let length = cx.extract::<usize>(0)?;
let params = cx.extract_opt::<PaddingParams>(1)?
.map_or_else(tk::PaddingParams::default, |p| p.0);
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.encoding.as_mut().expect("Uninitialized Encoding")
.pad(
length,
params.pad_id,
params.pad_type_id,
&params.pad_token,
params.direction
);
Ok(cx.undefined().upcast())
}
method truncate(mut cx) {
// truncate(length: number, stride: number = 0, direction: string = 'right')
let length = cx.extract::<usize>(0)?;
let stride = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let direction = cx.extract_opt::<String>(2)?.unwrap_or_else(|| String::from("right"));
let tdir = match direction.as_str() {
"left" => Ok(TruncationDirection::Left),
"right" => Ok(TruncationDirection::Right),
_ => cx.throw_error(format!("Invalid truncation direction value : {}", direction)),
}?;
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.encoding.as_mut().expect("Uninitialized Encoding")
.truncate(length, stride, tdir);
Ok(cx.undefined().upcast())
}
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/pre_tokenizers.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use std::sync::Arc;
use serde::{ser::SerializeStruct, Serialize, Serializer};
use tk::normalizer::SplitDelimiterBehavior;
use tk::pre_tokenizers::PreTokenizerWrapper;
use tk::PreTokenizedString;
#[derive(Clone)]
struct JsSplitDelimiterBehavior(SplitDelimiterBehavior);
impl FromJsValue for JsSplitDelimiterBehavior {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, _cx: &mut C) -> LibResult<Self> {
let s = from.downcast::<JsString>()?.value();
Ok(Self(match s.as_ref() {
"removed" => Ok(SplitDelimiterBehavior::Removed),
"isolated" => Ok(SplitDelimiterBehavior::Isolated),
"mergedWithPrevious" => Ok(SplitDelimiterBehavior::MergedWithPrevious),
"mergedWithNext" => Ok(SplitDelimiterBehavior::MergedWithNext),
"contiguous" => Ok(SplitDelimiterBehavior::Contiguous),
_ => Err(Error(
"Wrong value for SplitDelimiterBehavior, expected one of: \
`removed, isolated, mergedWithPrevious, mergedWithNext, contiguous`"
.into(),
)),
}?))
}
}
impl From<JsSplitDelimiterBehavior> for SplitDelimiterBehavior {
fn from(v: JsSplitDelimiterBehavior) -> Self {
v.0
}
}
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum JsPreTokenizerWrapper {
Sequence(Vec<Arc<PreTokenizerWrapper>>),
Wrapped(Arc<PreTokenizerWrapper>),
}
impl Serialize for JsPreTokenizerWrapper {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where
S: Serializer,
{
match self {
JsPreTokenizerWrapper::Sequence(seq) => {
let mut ser = serializer.serialize_struct("Sequence", 2)?;
ser.serialize_field("type", "Sequence")?;
ser.serialize_field("pretokenizers", seq)?;
ser.end()
}
JsPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer),
}
}
}
impl<I> From<I> for JsPreTokenizerWrapper
where
I: Into<PreTokenizerWrapper>,
{
fn from(norm: I) -> Self {
JsPreTokenizerWrapper::Wrapped(Arc::new(norm.into()))
}
}
/// PreTokenizers
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct PreTokenizer {
#[serde(flatten)]
pub pretok: Option<JsPreTokenizerWrapper>,
}
impl tk::PreTokenizer for PreTokenizer {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> tk::Result<()> {
match self.pretok.as_ref().ok_or("Uninitialized PreTokenizer")? {
JsPreTokenizerWrapper::Sequence(seq) => {
for pretokenizer in seq {
pretokenizer.pre_tokenize(pretokenized)?;
}
}
JsPreTokenizerWrapper::Wrapped(pretokenizer) => {
pretokenizer.pre_tokenize(pretokenized)?
}
};
Ok(())
}
}
declare_types! {
pub class JsPreTokenizer for PreTokenizer {
init(_) {
// This should not be called from JS
Ok(PreTokenizer { pretok: None })
}
method preTokenizeString(mut cx) {
use tk::PreTokenizer;
let sequence = cx.extract::<String>(0)?;
let mut pretokenized = PreTokenizedString::from(sequence);
let this = cx.this();
let guard = cx.lock();
this.borrow(&guard)
.pre_tokenize(&mut pretokenized)
.map_err(|e| Error(format!("{}", e)))?;
let splits = pretokenized
.get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char)
.into_iter()
.map(|(s, o, _)| (s.to_owned(), o))
.collect::<Vec<_>>();
Ok(neon_serde::to_value(&mut cx, &splits)?.upcast())
}
}
}
/// byte_level(addPrefixSpace: bool = true, useRegex: bool = true)
fn byte_level(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut byte_level = tk::pre_tokenizers::byte_level::ByteLevel::default();
if let Some(add_prefix_space) = cx.extract_opt::<bool>(0)? {
byte_level = byte_level.add_prefix_space(add_prefix_space);
}
if let Some(use_regex) = cx.extract_opt::<bool>(1)? {
byte_level = byte_level.use_regex(use_regex);
}
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(byte_level.into());
Ok(pretok)
}
/// byte_level_alphabet()
fn byte_level_alphabet(mut cx: FunctionContext) -> JsResult<JsValue> {
let chars = tk::pre_tokenizers::byte_level::ByteLevel::alphabet()
.into_iter()
.map(|c| c.to_string())
.collect::<Vec<_>>();
Ok(neon_serde::to_value(&mut cx, &chars)?)
}
/// whitespace()
fn whitespace(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(tk::pre_tokenizers::whitespace::Whitespace {}.into());
Ok(pretok)
}
/// whitespace_split()
fn whitespace_split(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(tk::pre_tokenizers::whitespace::WhitespaceSplit.into());
Ok(pretok)
}
/// bert_pre_tokenizer()
fn bert_pre_tokenizer(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(tk::pre_tokenizers::bert::BertPreTokenizer.into());
Ok(pretok)
}
/// metaspace(replacement: string = '▁', addPrefixSpace: bool = true)
fn metaspace(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let replacement = cx.extract_opt::<char>(0)?.unwrap_or('▁');
let add_prefix_space = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::metaspace::Metaspace::new(replacement, add_prefix_space).into());
Ok(pretok)
}
/// split(pattern: string, behavior: string, invert: bool = false)
fn split(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let pattern: String = cx.extract::<String>(0)?;
let behavior: JsSplitDelimiterBehavior = cx.extract::<JsSplitDelimiterBehavior>(1)?;
let invert: bool = cx.extract_opt::<bool>(2)?.unwrap_or(false);
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(
tk::pre_tokenizers::split::Split::new(pattern, behavior.into(), invert)
.map_err(|e| Error(e.to_string()))?
.into(),
);
Ok(pretok)
}
/// punctuation(behavior: string = 'isolated')
fn punctuation(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let behavior: JsSplitDelimiterBehavior = cx
.extract_opt::<JsSplitDelimiterBehavior>(0)?
.unwrap_or(JsSplitDelimiterBehavior(SplitDelimiterBehavior::Isolated));
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::punctuation::Punctuation::new(behavior.into()).into());
Ok(pretok)
}
/// sequence(pretokenizers: PreTokenizer[])
fn sequence(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let pretokenizers = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(pretokenizers.len());
pretokenizers.into_iter().try_for_each(|pretokenizer| {
match pretokenizer.downcast::<JsPreTokenizer>().or_throw(&mut cx) {
Ok(pretokenizer) => {
let guard = cx.lock();
let pretok = pretokenizer.borrow(&guard).pretok.clone();
if let Some(pretokenizer) = pretok {
match pretokenizer {
JsPreTokenizerWrapper::Sequence(seq) => sequence.extend(seq),
JsPreTokenizerWrapper::Wrapped(inner) => sequence.push(inner),
}
Ok(())
} else {
cx.throw_error("Uninitialized PreTokenizer")
}
}
Err(e) => Err(e),
}
})?;
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(JsPreTokenizerWrapper::Sequence(sequence));
Ok(pretok)
}
/// char_delimiter_split(delimiter: string)
fn char_delimiter_split(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let delimiter = cx.extract::<char>(0)?;
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::delimiter::CharDelimiterSplit::new(delimiter).into());
Ok(pretok)
}
/// digits(individualDigits: bool)
fn digits(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let individual_digits = cx.extract_opt::<bool>(0)?.unwrap_or(false);
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::digits::Digits::new(individual_digits).into());
Ok(pretok)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_ByteLevel", prefix), byte_level)?;
m.export_function(
&format!("{}_ByteLevel_Alphabet", prefix),
byte_level_alphabet,
)?;
m.export_function(&format!("{}_Whitespace", prefix), whitespace)?;
m.export_function(&format!("{}_WhitespaceSplit", prefix), whitespace_split)?;
m.export_function(&format!("{}_BertPreTokenizer", prefix), bert_pre_tokenizer)?;
m.export_function(&format!("{}_Metaspace", prefix), metaspace)?;
m.export_function(&format!("{}_Split", prefix), split)?;
m.export_function(
&format!("{}_CharDelimiterSplit", prefix),
char_delimiter_split,
)?;
m.export_function(&format!("{}_Punctuation", prefix), punctuation)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
m.export_function(&format!("{}_Digits", prefix), digits)?;
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use tk::pre_tokenizers::sequence::Sequence;
use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use tk::pre_tokenizers::PreTokenizerWrapper;
#[test]
fn serialize() {
let js_wrapped: JsPreTokenizerWrapper = Whitespace {}.into();
let js_ser = serde_json::to_string(&js_wrapped).unwrap();
let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {});
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_ser, rs_ser);
let js_pretok: PreTokenizer = serde_json::from_str(&rs_ser).unwrap();
match js_pretok.pretok.unwrap() {
JsPreTokenizerWrapper::Wrapped(pretok) => match pretok.as_ref() {
PreTokenizerWrapper::Whitespace(_) => {}
_ => panic!("Expected Whitespace"),
},
_ => panic!("Expected wrapped, not sequence."),
}
let js_seq: JsPreTokenizerWrapper =
Sequence::new(vec![WhitespaceSplit.into(), Whitespace {}.into()]).into();
let js_wrapper_ser = serde_json::to_string(&js_seq).unwrap();
let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![
WhitespaceSplit.into(),
Whitespace {}.into(),
]));
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
let js_seq = PreTokenizer {
pretok: Some(js_seq),
};
let js_ser = serde_json::to_string(&js_seq).unwrap();
assert_eq!(js_wrapper_ser, js_ser);
let rs_seq = Sequence::new(vec![WhitespaceSplit.into(), Whitespace {}.into()]);
let rs_ser = serde_json::to_string(&rs_seq).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/trainers.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use crate::models::Model;
use crate::tokenizer::AddedToken;
use neon::prelude::*;
use std::sync::{Arc, RwLock};
use tk::models::{
bpe::BpeTrainer, unigram::UnigramTrainer, wordlevel::WordLevelTrainer,
wordpiece::WordPieceTrainer, TrainerWrapper,
};
/// Trainer
#[derive(Clone)]
pub struct Trainer {
pub trainer: Option<Arc<RwLock<TrainerWrapper>>>,
}
impl From<TrainerWrapper> for Trainer {
fn from(trainer: TrainerWrapper) -> Self {
Self {
trainer: Some(Arc::new(RwLock::new(trainer))),
}
}
}
impl tk::Trainer for Trainer {
type Model = Model;
fn should_show_progress(&self) -> bool {
self.trainer
.as_ref()
.expect("Uninitialized Trainer")
.read()
.unwrap()
.should_show_progress()
}
fn train(&self, model: &mut Self::Model) -> tk::Result<Vec<tk::AddedToken>> {
let special_tokens = self
.trainer
.as_ref()
.ok_or("Uninitialized Trainer")?
.read()
.unwrap()
.train(
&mut model
.model
.as_ref()
.ok_or("Uninitialized Model")?
.write()
.unwrap(),
)?;
Ok(special_tokens)
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> tk::Result<Vec<String>> + Sync,
{
self.trainer
.as_ref()
.ok_or("Uninitialized Trainer")?
.write()
.unwrap()
.feed(iterator, process)
}
}
declare_types! {
pub class JsTrainer for Trainer {
init(_) {
// This should not be called from JS
Ok(Trainer { trainer: None })
}
}
}
// BPE
struct BpeTrainerOptions(BpeTrainer);
impl From<BpeTrainerOptions> for BpeTrainer {
fn from(v: BpeTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for BpeTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = BpeTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder = builder.vocab_size(size);
}
}
if let Ok(freq) = options.get(cx, "minFrequency") {
if let Some(freq) = Option::from_value(freq, cx)? {
builder = builder.min_frequency(freq);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder = builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(limit) = options.get(cx, "limitAlphabet") {
if let Some(limit) = Option::from_value(limit, cx)? {
builder = builder.limit_alphabet(limit);
}
}
if let Ok(alphabet) = options.get(cx, "initialAlphabet") {
if let Some(alphabet) = Option::from_value(alphabet, cx)? {
builder = builder.initial_alphabet(alphabet);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder = builder.show_progress(show);
}
}
if let Ok(prefix) = options.get(cx, "continuingSubwordPrefix") {
if let Some(prefix) = Option::from_value(prefix, cx)? {
builder = builder.continuing_subword_prefix(prefix);
}
}
if let Ok(suffix) = options.get(cx, "endOfWordSuffix") {
if let Some(suffix) = Option::from_value(suffix, cx)? {
builder = builder.end_of_word_suffix(suffix);
}
}
Ok(Self(builder.build()))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// bpe_trainer(options?: {
/// vocabSize?: number = 30000,
/// minFrequency?: number = 2,
/// specialTokens?: (string | AddedToken)[] = [],
/// limitAlphabet?: number = undefined,
/// initialAlphabet?: string[] = [],
/// showProgress?: bool = true,
/// continuingSubwordPrefix?: string = undefined,
/// endOfWordSuffix?: string = undefined,
/// })
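///
/// Rough JS usage sketch for the raw binding (loading the compiled addon as
/// `native` is an assumption; the exported name comes from `register` below):
///
///  const trainer = native.trainers_BPETrainer({
///    vocabSize: 30000,
///    minFrequency: 2,
///    specialTokens: ["[UNK]", "[CLS]", "[SEP]"],
///  });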
fn bpe_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx
.extract_opt::<BpeTrainerOptions>(0)?
.map_or_else(|| BpeTrainer::builder().build(), |o| o.into());
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
// WordPiece
struct WordPieceTrainerOptions(WordPieceTrainer);
impl From<WordPieceTrainerOptions> for WordPieceTrainer {
fn from(v: WordPieceTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for WordPieceTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = WordPieceTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder = builder.vocab_size(size);
}
}
if let Ok(freq) = options.get(cx, "minFrequency") {
if let Some(freq) = Option::from_value(freq, cx)? {
builder = builder.min_frequency(freq);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder = builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(limit) = options.get(cx, "limitAlphabet") {
if let Some(limit) = Option::from_value(limit, cx)? {
builder = builder.limit_alphabet(limit);
}
}
if let Ok(alphabet) = options.get(cx, "initialAlphabet") {
if let Some(alphabet) = Option::from_value(alphabet, cx)? {
builder = builder.initial_alphabet(alphabet);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder = builder.show_progress(show);
}
}
if let Ok(prefix) = options.get(cx, "continuingSubwordPrefix") {
if let Some(prefix) = Option::from_value(prefix, cx)? {
builder = builder.continuing_subword_prefix(prefix);
}
}
if let Ok(suffix) = options.get(cx, "endOfWordSuffix") {
if let Some(suffix) = Option::from_value(suffix, cx)? {
builder = builder.end_of_word_suffix(suffix);
}
}
Ok(Self(builder.build()))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// wordpiece_trainer(options?: {
/// vocabSize?: number = 30000,
/// minFrequency?: number = 2,
/// specialTokens?: string[] = [],
/// limitAlphabet?: number = undefined,
/// initialAlphabet?: string[] = [],
/// showProgress?: bool = true,
/// continuingSubwordPrefix?: string = undefined,
/// endOfWordSuffix?: string = undefined,
/// })
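///
/// Raw-binding sketch (same assumptions as for `bpe_trainer` above):
///
///  const trainer = native.trainers_WordPieceTrainer({
///    vocabSize: 30000,
///    continuingSubwordPrefix: "##",
///  });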
fn wordpiece_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx
.extract_opt::<WordPieceTrainerOptions>(0)?
.map_or_else(|| WordPieceTrainer::builder().build(), |o| o.into());
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
// WordLevel
struct WordLevelTrainerOptions(WordLevelTrainer);
impl From<WordLevelTrainerOptions> for WordLevelTrainer {
fn from(v: WordLevelTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for WordLevelTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = WordLevelTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder.vocab_size(size);
}
}
if let Ok(freq) = options.get(cx, "minFrequency") {
if let Some(freq) = Option::from_value(freq, cx)? {
builder.min_frequency(freq);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder.show_progress(show);
}
}
Ok(Self(
builder
.build()
.expect("WordLevelTrainerBuilder cannot fail"),
))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// wordlevel_trainer(options?: {
/// vocabSize?: number = 30000,
/// minFrequency?: number = 0,
/// specialTokens?: string[] = [],
/// showProgress?: bool = true,
/// })
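///
/// Raw-binding sketch (same assumptions as for `bpe_trainer` above):
///
///  const trainer = native.trainers_WordLevelTrainer({ vocabSize: 10000, minFrequency: 1 });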
fn wordlevel_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx.extract_opt::<WordLevelTrainerOptions>(0)?.map_or_else(
|| WordLevelTrainer::builder().build().unwrap(),
|o| o.into(),
);
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
// Unigram
struct UnigramTrainerOptions(UnigramTrainer);
impl From<UnigramTrainerOptions> for UnigramTrainer {
fn from(v: UnigramTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for UnigramTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = UnigramTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder.vocab_size(size);
}
}
if let Ok(nsub) = options.get(cx, "nSubIterations") {
if let Some(nsub) = Option::from_value(nsub, cx)? {
builder.n_sub_iterations(nsub);
}
}
if let Ok(factor) = options.get(cx, "shrinkingFactor") {
if let Some(factor) = Option::from_value(factor, cx)? {
builder.shrinking_factor(factor);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(alphabet) = options.get(cx, "initialAlphabet") {
if let Some(alphabet) = Option::from_value(alphabet, cx)? {
builder.initial_alphabet(alphabet);
}
}
if let Ok(unk) = options.get(cx, "unkToken") {
let unk = Option::from_value(unk, cx)?;
builder.unk_token(unk);
}
if let Ok(max) = options.get(cx, "maxPieceLength") {
if let Some(max) = Option::from_value(max, cx)? {
builder.max_piece_length(max);
}
}
if let Ok(size) = options.get(cx, "seedSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder.seed_size(size);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder.show_progress(show);
}
}
Ok(Self(builder.build()?))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// unigram_trainer(options?: {
/// vocabSize?: number = 8000,
/// nSubIterations?: number = 2,
/// shrinkingFactor?: number = 0.75,
/// specialTokens?: string[] = [],
/// initialAlphabet?: string[] = [],
/// unkToken?: string = undefined,
/// maxPieceLength?: number = 16,
/// seedSize?: number = 1000000,
/// showProgress?: boolean = true,
/// })
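///
/// Raw-binding sketch (same assumptions as for `bpe_trainer` above):
///
///  const trainer = native.trainers_UnigramTrainer({
///    vocabSize: 8000,
///    unkToken: "<unk>",
///  });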
fn unigram_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx
.extract_opt::<UnigramTrainerOptions>(0)?
.map_or_else(|| UnigramTrainer::builder().build().unwrap(), |o| o.into());
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BPETrainer", prefix), bpe_trainer)?;
m.export_function(&format!("{}_WordPieceTrainer", prefix), wordpiece_trainer)?;
m.export_function(&format!("{}_WordLevelTrainer", prefix), wordlevel_trainer)?;
m.export_function(&format!("{}_UnigramTrainer", prefix), unigram_trainer)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/utils.rs | extern crate tokenizers as tk;
use crate::encoding::JsEncoding;
use crate::extraction::*;
use crate::tokenizer::Encoding;
use neon::prelude::*;
/// slice(s: string, start?: number, end?: number)
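///
/// Negative indices count from the end of the string (the length is measured in
/// chars, see below). Raw-binding sketch, assuming the compiled addon is loaded
/// as `native`:
///
///  native.utils_slice("Hello world", 0, 5); // "Hello"
///  native.utils_slice("Hello world", -5);   // "world"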
fn slice(mut cx: FunctionContext) -> JsResult<JsString> {
let s = cx.extract::<String>(0)?;
let len = s.chars().count();
let get_index = |x: i32| -> usize {
if x >= 0 {
x as usize
} else {
(len as i32 + x) as usize
}
};
let begin_index = get_index(cx.extract_opt::<i32>(1)?.unwrap_or(0));
let end_index = get_index(cx.extract_opt::<i32>(2)?.unwrap_or(len as i32));
if let Some(slice) = tk::tokenizer::normalizer::get_range_of(&s, begin_index..end_index) {
Ok(cx.string(slice))
} else {
cx.throw_error("Error in offsets")
}
}
/// merge_encodings(encodings: Encoding[], growing_offsets: boolean = false): Encoding
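///
/// Raw-binding sketch (assuming the addon is loaded as `native` and `a`, `b`
/// are Encoding instances previously obtained from a tokenizer):
///
///  const merged = native.utils_mergeEncodings([a, b], true);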
fn merge_encodings(mut cx: FunctionContext) -> JsResult<JsEncoding> {
let encodings: Vec<tk::Encoding> = cx
.extract_vec::<Encoding>(0)?
.into_iter()
.map(|e| e.into())
.collect();
let growing_offsets = cx.extract_opt::<bool>(1)?.unwrap_or(false);
let new_encoding = tk::tokenizer::Encoding::merge(encodings, growing_offsets);
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(new_encoding);
Ok(js_encoding)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_slice", prefix), slice)?;
m.export_function(&format!("{}_mergeEncodings", prefix), merge_encodings)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/extraction.rs | use neon::prelude::*;
use serde::de::DeserializeOwned;
/// Common Error that can be converted to a neon::result::Throw and put
/// the js engine in a throwing state. Makes it way easier to manage errors
pub struct Error(pub String);
impl<T> From<T> for Error
where
T: std::fmt::Display,
{
fn from(e: T) -> Self {
Self(format!("{}", e))
}
}
impl From<Error> for neon::result::Throw {
fn from(err: Error) -> Self {
let msg = err.0;
unsafe {
neon_runtime::error::throw_error_from_utf8(msg.as_ptr(), msg.len() as i32);
neon::result::Throw
}
}
}
pub type LibResult<T> = std::result::Result<T, Error>;
/// This trait is to be implemented for any type that we want to extract from
/// a JsValue.
pub trait FromJsValue: Sized {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self>;
}
/// Any type that implements DeserializeOwned from serde can easily be converted
impl<T> FromJsValue for T
where
T: DeserializeOwned,
{
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
let val: T = neon_serde::from_value(cx, from)?;
Ok(val)
}
}
/// This trait provides some extraction helpers, and we implement it for CallContext
/// so that we can easily extract any type that implements FromJsValue from the arguments.
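///
/// Typical usage inside a binding function (illustrative only):
///
///  let text = cx.extract::<String>(0)?;    // required argument
///  let pretty = cx.extract_opt::<bool>(1)?; // optional: None on null/undefined
///  let ids = cx.extract_vec::<u32>(2)?;     // required array argument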
pub trait Extract {
fn extract<T: FromJsValue>(&mut self, pos: i32) -> LibResult<T>;
fn extract_opt<T: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<T>>;
fn extract_vec<T: FromJsValue>(&mut self, pos: i32) -> LibResult<Vec<T>>;
fn extract_vec_opt<T: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<Vec<T>>>;
}
impl<'c, T: neon::object::This> Extract for CallContext<'c, T> {
fn extract<E: FromJsValue>(&mut self, pos: i32) -> LibResult<E> {
let val = self
.argument_opt(pos)
.ok_or_else(|| Error(format!("Argument {} is missing", pos)))?;
let ext = E::from_value(val, self)?;
Ok(ext)
}
fn extract_opt<E: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<E>> {
let val = self.argument_opt(pos);
match val {
None => Ok(None),
Some(v) => {
// For any optional value, we accept both `undefined` and `null`
if v.downcast::<JsNull>().is_ok() || v.downcast::<JsUndefined>().is_ok() {
Ok(None)
} else if v.downcast::<JsFunction>().is_ok() {
// Could be parsed as an empty object, so we don't accept JsFunction here
Err(Error("Cannot extract from JsFunction".into()))
} else {
Ok(Some(E::from_value(v, self)?))
}
}
}
}
fn extract_vec<E: FromJsValue>(&mut self, pos: i32) -> LibResult<Vec<E>> {
let vec = self
.argument_opt(pos)
.ok_or_else(|| Error(format!("Argument {} is missing", pos)))?
.downcast::<JsArray>()?
.to_vec(self)?;
vec.into_iter().map(|v| E::from_value(v, self)).collect()
}
fn extract_vec_opt<E: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<Vec<E>>> {
self.argument_opt(pos)
.map(|v| {
let vec = v.downcast::<JsArray>()?.to_vec(self)?;
vec.into_iter()
.map(|v| E::from_value(v, self))
.collect::<LibResult<Vec<_>>>()
})
.map_or(Ok(None), |v| v.map(Some))
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/tokenizer.rs | extern crate tokenizers as tk;
use crate::decoders::{Decoder, JsDecoder};
use crate::encoding::JsEncoding;
use crate::extraction::*;
use crate::models::{JsModel, Model};
use crate::normalizers::{JsNormalizer, Normalizer};
use crate::pre_tokenizers::{JsPreTokenizer, PreTokenizer};
use crate::processors::{JsPostProcessor, Processor};
use crate::tasks::tokenizer::{DecodeTask, EncodeTask};
use crate::trainers::JsTrainer;
use neon::prelude::*;
use std::sync::{Arc, RwLock};
use tk::Model as ModelTrait;
use tk::TokenizerImpl;
// AddedToken
#[derive(Clone)]
pub struct AddedToken {
pub token: tk::AddedToken,
}
impl From<AddedToken> for tk::AddedToken {
fn from(v: AddedToken) -> Self {
v.token
}
}
#[allow(non_snake_case)]
#[derive(Debug, Default, Serialize, Deserialize)]
struct AddedTokenOptions {
singleWord: Option<bool>,
leftStrip: Option<bool>,
rightStrip: Option<bool>,
normalized: Option<bool>,
}
impl AddedTokenOptions {
fn into_added_token(self, content: String, special: bool) -> tk::AddedToken {
let mut token = tk::AddedToken::from(content, special);
if let Some(sw) = self.singleWord {
token = token.single_word(sw);
}
if let Some(ls) = self.leftStrip {
token = token.lstrip(ls);
}
if let Some(rs) = self.rightStrip {
token = token.rstrip(rs);
}
if let Some(n) = self.normalized {
token = token.normalized(n);
}
token
}
}
declare_types! {
pub class JsAddedToken for AddedToken {
init(mut cx) {
// init(
// content: string,
// special: boolean,
// options?: {
// singleWord?: boolean = false,
// leftStrip?: boolean = false,
// rightStrip?: boolean = false,
// normalized?: boolean = true,
// }
// )
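// e.g. from JS (raw binding, assuming the addon is loaded as `native`):
//   const mask = new native.tokenizer_AddedToken("[MASK]", true, { leftStrip: true });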
let content = cx.extract::<String>(0)?;
let special = cx.extract::<bool>(1)?;
let token = cx.extract_opt::<AddedTokenOptions>(2)?
.unwrap_or_default()
.into_added_token(content, special);
Ok(AddedToken { token })
}
method getContent(mut cx) {
// getContent()
let this = cx.this();
let content = {
let guard = cx.lock();
let token = this.borrow(&guard);
token.token.content.clone()
};
Ok(cx.string(content).upcast())
}
}
}
impl FromJsValue for AddedToken {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(token) = from.downcast::<JsString>() {
Ok(AddedToken {
token: tk::AddedToken::from(token.value(), false),
})
} else if let Ok(token) = from.downcast::<JsAddedToken>() {
let guard = cx.lock();
let token = token.borrow(&guard);
Ok(token.clone())
} else {
Err(Error("Expected `string | AddedToken`".into()))
}
}
}
struct SpecialToken(tk::AddedToken);
impl FromJsValue for SpecialToken {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(token) = from.downcast::<JsString>() {
Ok(SpecialToken(tk::AddedToken::from(token.value(), true)))
} else if let Ok(token) = from.downcast::<JsAddedToken>() {
let guard = cx.lock();
let token = token.borrow(&guard);
Ok(SpecialToken(token.token.clone()))
} else {
Err(Error("Expected `string | AddedToken`".into()))
}
}
}
// encode & encodeBatch types
struct TextInputSequence<'s>(tk::InputSequence<'s>);
struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>);
impl FromJsValue for PreTokenizedInputSequence<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
let sequence = from
.downcast::<JsArray>()?
.to_vec(cx)?
.into_iter()
.map(|v| Ok(v.downcast::<JsString>()?.value()))
.collect::<LibResult<Vec<_>>>()?;
Ok(Self(sequence.into()))
}
}
impl<'s> From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> {
fn from(v: PreTokenizedInputSequence<'s>) -> Self {
v.0
}
}
impl FromJsValue for TextInputSequence<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, _cx: &mut C) -> LibResult<Self> {
Ok(Self(from.downcast::<JsString>()?.value().into()))
}
}
impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> {
fn from(v: TextInputSequence<'s>) -> Self {
v.0
}
}
struct TextEncodeInput<'s>(tk::EncodeInput<'s>);
struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>);
impl FromJsValue for PreTokenizedEncodeInput<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
// If array is of size 2, and the first element is also an array, we'll parse a pair
let array = from.downcast::<JsArray>()?;
let is_pair = array.len() == 2
&& array
.get(cx, 0)
.map_or(false, |a| a.downcast::<JsArray>().is_ok());
if is_pair {
let first_seq: tk::InputSequence =
PreTokenizedInputSequence::from_value(array.get(cx, 0)?, cx)?.into();
let pair_seq: tk::InputSequence =
PreTokenizedInputSequence::from_value(array.get(cx, 1)?, cx)?.into();
Ok(Self((first_seq, pair_seq).into()))
} else {
Ok(Self(
PreTokenizedInputSequence::from_value(from, cx)?.into(),
))
}
}
}
impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::EncodeInput<'s> {
fn from(v: PreTokenizedEncodeInput<'s>) -> Self {
v.0
}
}
impl FromJsValue for TextEncodeInput<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
// If we get an array, it's a pair of sequences
if let Ok(array) = from.downcast::<JsArray>() {
if array.len() != 2 {
return Err(Error(
"TextEncodeInput should be \
`TextInputSequence | [TextInputSequence, TextInputSequence]`"
.into(),
));
}
let first_seq: tk::InputSequence =
TextInputSequence::from_value(array.get(cx, 0)?, cx)?.into();
let pair_seq: tk::InputSequence =
TextInputSequence::from_value(array.get(cx, 1)?, cx)?.into();
Ok(Self((first_seq, pair_seq).into()))
} else {
Ok(Self(TextInputSequence::from_value(from, cx)?.into()))
}
}
}
impl<'s> From<TextEncodeInput<'s>> for tk::EncodeInput<'s> {
fn from(v: TextEncodeInput<'s>) -> Self {
v.0
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EncodeOptions {
#[serde(default)]
is_pretokenized: bool,
#[serde(default)]
add_special_tokens: bool,
}
impl Default for EncodeOptions {
fn default() -> Self {
Self {
is_pretokenized: false,
add_special_tokens: true,
}
}
}
// Encoding
#[repr(transparent)]
pub struct Encoding(tk::Encoding);
impl FromJsValue for Encoding {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
from.downcast::<JsEncoding>()
.map(|e| {
let guard = cx.lock();
let enc = e.borrow(&guard).encoding.clone();
Self(enc.expect("Uninitialized Encoding"))
})
.map_err(|_| Error("Expected Encoding".into()))
}
}
impl From<Encoding> for tk::Encoding {
fn from(v: Encoding) -> Self {
v.0
}
}
// Truncation
#[derive(Serialize, Deserialize)]
#[serde(remote = "tk::TruncationStrategy", rename_all = "snake_case")]
pub enum TruncationStrategyDef {
LongestFirst,
OnlyFirst,
OnlySecond,
}
#[derive(Serialize, Deserialize)]
#[serde(remote = "tk::TruncationDirection", rename_all = "camelCase")]
pub enum TruncationDirectionDef {
Left,
Right,
}
#[derive(Serialize, Deserialize)]
#[serde(
remote = "tk::TruncationParams",
rename_all = "camelCase",
default = "tk::TruncationParams::default"
)]
pub struct TruncationParamsDef {
max_length: usize,
#[serde(with = "TruncationStrategyDef")]
strategy: tk::TruncationStrategy,
#[serde(with = "TruncationDirectionDef")]
direction: tk::TruncationDirection,
stride: usize,
}
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
pub struct TruncationParams(#[serde(with = "TruncationParamsDef")] pub tk::TruncationParams);
// Padding
#[derive(Serialize, Deserialize)]
#[serde(remote = "tk::PaddingDirection", rename_all = "camelCase")]
pub enum PaddingDirectionDef {
Left,
Right,
}
// Here we define a custom method of serializing and deserializing a PaddingStrategy because
// we want it to actually be very different from the classic representation.
// In Rust, we use an enum to define the strategy, but in JS, we just want to have an optional
// length number => if it is defined we use the Fixed(n) strategy, and otherwise BatchLongest.
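// For example, `{ "maxLength": 128, ... }` deserializes to `PaddingStrategy::Fixed(128)`,
// while omitting `maxLength` yields `PaddingStrategy::BatchLongest` (and serialization goes
// the other way: `BatchLongest` simply skips the field).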
pub mod padding_strategy_serde {
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Strategy {
#[serde(skip_serializing_if = "Option::is_none")]
max_length: Option<usize>,
}
pub fn serialize<S>(value: &tk::PaddingStrategy, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = Strategy {
max_length: match value {
tk::PaddingStrategy::BatchLongest => None,
tk::PaddingStrategy::Fixed(s) => Some(*s),
},
};
s.serialize(serializer)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<tk::PaddingStrategy, D::Error>
where
D: Deserializer<'de>,
{
let v = Strategy::deserialize(deserializer)?;
if let Some(length) = v.max_length {
Ok(tk::PaddingStrategy::Fixed(length))
} else {
Ok(tk::PaddingStrategy::BatchLongest)
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(
remote = "tk::PaddingParams",
rename_all = "camelCase",
default = "tk::PaddingParams::default"
)]
pub struct PaddingParamsDef {
#[serde(flatten, with = "padding_strategy_serde")]
strategy: tk::PaddingStrategy,
#[serde(with = "PaddingDirectionDef")]
direction: tk::PaddingDirection,
#[serde(skip_serializing_if = "Option::is_none")]
pad_to_multiple_of: Option<usize>,
pad_id: u32,
pad_type_id: u32,
pad_token: String,
}
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
pub struct PaddingParams(#[serde(with = "PaddingParamsDef")] pub tk::PaddingParams);
type RsTokenizer = TokenizerImpl<Model, Normalizer, PreTokenizer, Processor, Decoder>;
/// Tokenizer
#[derive(Clone)]
pub struct Tokenizer {
pub(crate) tokenizer: Arc<RwLock<RsTokenizer>>,
}
declare_types! {
pub class JsTokenizer for Tokenizer {
init(mut cx) {
// init(model: JsModel)
let model = cx.argument::<JsModel>(0)?;
let guard = cx.lock();
let model = model.borrow(&guard).clone();
Ok(Tokenizer {
tokenizer: Arc::new(RwLock::new(TokenizerImpl::new(model)))
})
}
method toString(mut cx) {
// toString(pretty?: bool): string
let pretty = cx.extract_opt::<bool>(0)?.unwrap_or(false);
let this = cx.this();
let guard = cx.lock();
let s = this.borrow(&guard)
.tokenizer.read().unwrap()
.to_string(pretty)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.string(s).upcast())
}
method save(mut cx) {
// save(path: string, pretty?: bool): undefined
let path = cx.extract::<String>(0)?;
let pretty = cx.extract_opt::<bool>(1)?.unwrap_or(false);
let this = cx.this();
let guard = cx.lock();
this.borrow(&guard)
.tokenizer.read().unwrap()
.save(&path, pretty)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.undefined().upcast())
}
method runningTasks(mut cx) {
// runningTasks(): number
let this = cx.this();
let guard = cx.lock();
let count = std::sync::Arc::strong_count(&this.borrow(&guard).tokenizer);
let running = if count > 0 { count - 1 } else { 0 };
Ok(cx.number(running as f64).upcast())
}
method getVocab(mut cx) {
// getVocab(withAddedTokens: bool = true)
let with_added_tokens = cx.extract_opt::<bool>(0)?.unwrap_or(true);
let this = cx.this();
let guard = cx.lock();
let vocab = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_vocab(with_added_tokens);
let js_vocab = JsObject::new(&mut cx);
for (token, id) in vocab {
let js_token = cx.string(token);
let js_id = cx.number(id as f64);
js_vocab.set(&mut cx, js_token, js_id)?;
}
Ok(js_vocab.upcast())
}
method getVocabSize(mut cx) {
// getVocabSize(withAddedTokens: bool = true)
let with_added_tokens = cx.extract_opt::<bool>(0)?.unwrap_or(true);
let this = cx.this();
let guard = cx.lock();
let size = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_vocab_size(with_added_tokens);
Ok(cx.number(size as f64).upcast())
}
method encode(mut cx) {
// type InputSequence = string | string[];
// encode(
// sentence: InputSequence,
// pair?: InputSequence,
// options?: {
// addSpecialTokens?: boolean,
// isPretokenized?: boolean,
// } | (err, encoding) -> void,
// __callback: (err, encoding) -> void
// )
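// Raw-binding call sketches, given `tokenizer`, an instance of the exported
// `tokenizer_Tokenizer` class (the public JS wrapper built on top of this is not shown here):
//   tokenizer.encode("Hello there!", (err, encoding) => { ... });
//   tokenizer.encode("Question?", "Context.", { addSpecialTokens: false }, (err, encoding) => { ... });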
// Start by extracting the options if they exist (they can be in slot 1 or 2)
let mut i = 1;
let (options, option_index) = loop {
if let Ok(Some(opts)) = cx.extract_opt::<EncodeOptions>(i){
break (opts, Some(i));
}
i += 1;
if i == 3{
break (EncodeOptions::default(), None)
}
};
// Then we extract the first input sentence
let sentence: tk::InputSequence = if options.is_pretokenized {
cx.extract::<PreTokenizedInputSequence>(0)
.map_err(|_| Error("encode with isPretokenized=true expect string[]".into()))?
.into()
} else {
cx.extract::<TextInputSequence>(0)
.map_err(|_| Error("encode with isPreTokenized=false expect string".into()))?
.into()
};
let (pair, has_pair_arg): (Option<tk::InputSequence>, bool) = if options.is_pretokenized {
if let Ok(second) = cx.extract_opt::<PreTokenizedInputSequence>(1){
(second.map(|v| v.into()), true)
}else{
(None, false)
}
} else if let Ok(second) = cx.extract_opt::<TextInputSequence>(1){
(second.map(|v| v.into()), true)
}else{
(None, false)
};
// Find the callback index.
let last_index = if let Some(option_index) = option_index{
option_index + 1
}else if has_pair_arg{
2
}else{
1
};
let callback = cx.argument::<JsFunction>(last_index)?;
let input: tk::EncodeInput = match pair {
Some(pair) => (sentence, pair).into(),
None => sentence.into()
};
let this = cx.this();
let guard = cx.lock();
let task = EncodeTask::Single(
this.borrow(&guard).clone(), Some(input), options.add_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method encodeBatch(mut cx) {
// type InputSequence = string | string[];
// type EncodeInput = (InputSequence | [InputSequence, InputSequence])[]
// encode_batch(
// inputs: EncodeInput[],
// options?: {
// addSpecialTokens?: boolean,
// isPretokenized?: boolean,
// } | (err, encodings) -> void,
// __callback: (err, encodings) -> void
// )
// Start by extracting options and callback
let (options, callback) = match cx.extract_opt::<EncodeOptions>(1) {
// Options were there, and extracted
Ok(Some(options)) => {
(options, cx.argument::<JsFunction>(2)?)
},
// Options were undefined or null
Ok(None) => {
(EncodeOptions::default(), cx.argument::<JsFunction>(2)?)
}
// Options not specified, callback instead
Err(_) => {
(EncodeOptions::default(), cx.argument::<JsFunction>(1)?)
}
};
let inputs: Vec<tk::EncodeInput> = if options.is_pretokenized {
cx.extract_vec::<PreTokenizedEncodeInput>(0)
.map_err(|_| Error(
"encodeBatch with isPretokenized=true expects input to be `EncodeInput[]` \
with `EncodeInput = string[] | [string[], string[]]`".into()))?
.into_iter().map(|v| v.into()).collect()
} else {
cx.extract_vec::<TextEncodeInput>(0)
.map_err(|_| Error(
"encodeBatch with isPretokenized=false expects input to be `EncodeInput[]` \
with `EncodeInput = string | [string, string]`".into()))?
.into_iter().map(|v| v.into()).collect()
};
let this = cx.this();
let guard = cx.lock();
let task = EncodeTask::Batch(
this.borrow(&guard).clone(), Some(inputs), options.add_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method decode(mut cx) {
// decode(ids: number[], skipSpecialTokens: bool, callback)
let ids = cx.extract_vec::<u32>(0)?;
let (skip_special_tokens, callback_index) = if let Ok(skip_special_tokens) = cx.extract::<bool>(1){
(skip_special_tokens, 2)
}else{
(false, 1)
};
let callback = cx.argument::<JsFunction>(callback_index)?;
let this = cx.this();
let guard = cx.lock();
let task = DecodeTask::Single(
this.borrow(&guard).clone(), ids, skip_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method decodeBatch(mut cx) {
// decodeBatch(sequences: number[][], skipSpecialTokens: bool, callback)
let sentences = cx.extract_vec::<Vec<u32>>(0)?;
let (skip_special_tokens, callback_index) = if let Ok(skip_special_tokens) = cx.extract::<bool>(1){
(skip_special_tokens, 2)
}else{
(false, 1)
};
let callback = cx.argument::<JsFunction>(callback_index)?;
let this = cx.this();
let guard = cx.lock();
let task = DecodeTask::Batch(
this.borrow(&guard).clone(), sentences, skip_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method tokenToId(mut cx) {
// tokenToId(token: string): number | undefined
let token = cx.extract::<String>(0)?;
let this = cx.this();
let guard = cx.lock();
let id = this.borrow(&guard)
.tokenizer.read().unwrap()
.token_to_id(&token);
if let Some(id) = id {
Ok(cx.number(id).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method idToToken(mut cx) {
// idToToken(id: number): string | undefined
let id = cx.extract::<u32>(0)?;
let this = cx.this();
let guard = cx.lock();
let token = this.borrow(&guard)
.tokenizer.read().unwrap()
.id_to_token(id);
if let Some(token) = token {
Ok(cx.string(token).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method addTokens(mut cx) {
// addTokens(tokens: (string | AddedToken)[]): number
let tokens = cx.extract_vec::<AddedToken>(0)?
.into_iter()
.map(|token| token.into())
.collect::<Vec<_>>();
let mut this = cx.this();
let guard = cx.lock();
let added = this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.add_tokens(&tokens);
Ok(cx.number(added as f64).upcast())
}
method addSpecialTokens(mut cx) {
// addSpecialTokens(tokens: (string | AddedToken)[]): number
let tokens = cx.extract_vec::<SpecialToken>(0)?
.into_iter()
.map(|token| token.0)
.collect::<Vec<_>>();
let mut this = cx.this();
let guard = cx.lock();
let added = this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.add_special_tokens(&tokens);
Ok(cx.number(added as f64).upcast())
}
method setTruncation(mut cx) {
// setTruncation(
// maxLength: number,
// options?: { stride?: number; strategy?: string }
// )
let max_length = cx.extract::<usize>(0)?;
let mut options = cx.extract_opt::<TruncationParams>(1)?
.map_or_else(tk::TruncationParams::default, |p| p.0);
options.max_length = max_length;
let params_obj = neon_serde::to_value(&mut cx, &TruncationParams(options.clone()))?;
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_truncation(Some(options));
Ok(params_obj)
}
method disableTruncation(mut cx) {
// disableTruncation()
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_truncation(None);
Ok(cx.undefined().upcast())
}
method setPadding(mut cx) {
// setPadding(options?: {
// direction?: "left" | "right",
// padId?: number,
// padTypeId?: number,
// padToken?: string,
// maxLength?: number
// })
let options = cx.extract_opt::<PaddingParams>(0)?
.map_or_else(tk::PaddingParams::default, |p| p.0);
let params_obj = neon_serde::to_value(&mut cx, &PaddingParams(options.clone()))?;
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_padding(Some(options));
Ok(params_obj)
}
method disablePadding(mut cx) {
// disablePadding()
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_padding(None);
Ok(cx.undefined().upcast())
}
method train(mut cx) {
// train(files: string[], trainer?: Trainer)
let files = cx.extract::<Vec<String>>(0)?;
let mut trainer = if let Some(val) = cx.argument_opt(1) {
let js_trainer = val.downcast::<JsTrainer>().or_throw(&mut cx)?;
let guard = cx.lock();
let trainer = js_trainer.borrow(&guard).clone();
trainer
} else {
let this = cx.this();
let guard = cx.lock();
let trainer = this.borrow(&guard).tokenizer.read().unwrap().get_model().get_trainer();
trainer
};
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.train_from_files(&mut trainer, files)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.undefined().upcast())
}
method postProcess(mut cx) {
// postProcess(
// encoding: Encoding,
// pair?: Encoding,
// addSpecialTokens: boolean = true
// ): Encoding
let encoding = cx.extract::<Encoding>(0)?;
let pair = cx.extract_opt::<Encoding>(1)?;
let add_special_tokens = cx.extract_opt::<bool>(2)?.unwrap_or(true);
let this = cx.this();
let guard = cx.lock();
let encoding = this.borrow(&guard)
.tokenizer.read().unwrap()
.post_process(encoding.into(), pair.map(|p| p.into()), add_special_tokens)
.map_err(|e| Error(format!("{}", e)))?;
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(encoding);
Ok(js_encoding.upcast())
}
method getModel(mut cx) {
// getModel(): Model
let this = cx.this();
let guard = cx.lock();
let model = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_model()
.model
.clone();
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = model;
Ok(js_model.upcast())
}
method setModel(mut cx) {
// setModel(model: JsModel)
let model = cx.argument::<JsModel>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let model = model.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_model(model);
Ok(cx.undefined().upcast())
}
method getNormalizer(mut cx) {
// getNormalizer(): Normalizer | undefined
let this = cx.this();
let guard = cx.lock();
let normalizer = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_normalizer().cloned();
if let Some(normalizer) = normalizer {
let mut js_normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_normalizer.borrow_mut(&guard).normalizer = normalizer.normalizer;
Ok(js_normalizer.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setNormalizer(mut cx) {
// setNormalizer(normalizer: Normalizer)
let normalizer = cx.argument::<JsNormalizer>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let normalizer = normalizer.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_normalizer(normalizer);
Ok(cx.undefined().upcast())
}
method getPreTokenizer(mut cx) {
// getPreTokenizer(): PreTokenizer | undefined
let this = cx.this();
let guard = cx.lock();
let pretok = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_pre_tokenizer().cloned();
if let Some(pretok) = pretok {
let mut js_pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_pretok.borrow_mut(&guard).pretok = pretok.pretok;
Ok(js_pretok.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setPreTokenizer(mut cx) {
// setPreTokenizer(pretokenizer: PreTokenizer)
let pretok = cx.argument::<JsPreTokenizer>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let pretok = pretok.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_pre_tokenizer(pretok);
Ok(cx.undefined().upcast())
}
method getPostProcessor(mut cx) {
// getPostProcessor(): PostProcessor | undefined
let this = cx.this();
let guard = cx.lock();
let processor = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_post_processor().cloned();
if let Some(processor) = processor {
let mut js_processor =
JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_processor.borrow_mut(&guard).processor = processor.processor;
Ok(js_processor.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setPostProcessor(mut cx) {
// setPostProcessor(processor: PostProcessor)
let processor = cx.argument::<JsPostProcessor>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let processor = processor.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_post_processor(processor);
Ok(cx.undefined().upcast())
}
method getDecoder(mut cx) {
// getDecoder(): Decoder | undefined
let this = cx.this();
let guard = cx.lock();
let decoder = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_decoder().cloned();
if let Some(decoder) = decoder {
let mut js_decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_decoder.borrow_mut(&guard).decoder = decoder.decoder;
Ok(js_decoder.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setDecoder(mut cx) {
// setDecoder(decoder: Decoder)
let decoder = cx.argument::<JsDecoder>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let decoder = decoder.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_decoder(decoder);
Ok(cx.undefined().upcast())
}
}
}
pub fn tokenizer_from_string(mut cx: FunctionContext) -> JsResult<JsTokenizer> {
let s = cx.extract::<String>(0)?;
let tokenizer: tk::tokenizer::TokenizerImpl<
Model,
Normalizer,
PreTokenizer,
Processor,
Decoder,
> = s.parse().map_err(|e| Error(format!("{}", e)))?;
let js_model: Handle<JsModel> = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let mut js_tokenizer = JsTokenizer::new(&mut cx, vec![js_model])?;
let guard = cx.lock();
js_tokenizer.borrow_mut(&guard).tokenizer = Arc::new(RwLock::new(tokenizer));
Ok(js_tokenizer)
}
pub fn tokenizer_from_file(mut cx: FunctionContext) -> JsResult<JsTokenizer> {
let s = cx.extract::<String>(0)?;
let tokenizer = tk::tokenizer::TokenizerImpl::from_file(s)
.map_err(|e| Error(format!("Error loading from file {}", e)))?;
let js_model: Handle<JsModel> = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let mut js_tokenizer = JsTokenizer::new(&mut cx, vec![js_model])?;
let guard = cx.lock();
js_tokenizer.borrow_mut(&guard).tokenizer = Arc::new(RwLock::new(tokenizer));
Ok(js_tokenizer)
}
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct FromPretrainedParametersJs {
#[serde(default)]
revision: Option<String>,
#[serde(default)]
auth_token: Option<String>,
}
impl From<FromPretrainedParametersJs> for tk::FromPretrainedParameters {
fn from(o: FromPretrainedParametersJs) -> Self {
let mut params = Self::default();
if let Some(revision) = o.revision {
params.revision = revision;
}
if let Some(auth_token) = o.auth_token {
params.auth_token = Some(auth_token);
}
params
}
}
pub fn tokenizer_from_pretrained(mut cx: FunctionContext) -> JsResult<JsTokenizer> {
let s = cx.extract::<String>(0)?;
let mut p: tk::FromPretrainedParameters = cx
.extract_opt::<FromPretrainedParametersJs>(1)?
.unwrap_or_default()
.into();
p.user_agent = [("bindings", "Node.js"), ("version", crate::VERSION)]
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
let tokenizer = tk::tokenizer::TokenizerImpl::from_pretrained(s, Some(p))
.map_err(|e| Error(format!("Error loading from pretrained {}", e)))?;
let js_model: Handle<JsModel> = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let mut js_tokenizer = JsTokenizer::new(&mut cx, vec![js_model])?;
let guard = cx.lock();
js_tokenizer.borrow_mut(&guard).tokenizer = Arc::new(RwLock::new(tokenizer));
Ok(js_tokenizer)
}
pub fn register(m: &mut ModuleContext, prefix: &str) -> Result<(), neon::result::Throw> {
m.export_class::<JsAddedToken>(&format!("{}_AddedToken", prefix))?;
m.export_class::<JsTokenizer>(&format!("{}_Tokenizer", prefix))?;
m.export_function(
&format!("{}_Tokenizer_from_string", prefix),
tokenizer_from_string,
)?;
m.export_function(
&format!("{}_Tokenizer_from_file", prefix),
tokenizer_from_file,
)?;
m.export_function(
&format!("{}_Tokenizer_from_pretrained", prefix),
tokenizer_from_pretrained,
)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/lib.rs | #![warn(clippy::all)]
// We need to allow these to use !declare_types
#![allow(clippy::unnecessary_wraps)]
#![allow(clippy::upper_case_acronyms)]
extern crate neon;
extern crate neon_serde;
#[macro_use]
extern crate serde;
extern crate tokenizers as tk;
mod decoders;
mod encoding;
mod extraction;
mod models;
mod normalizers;
mod pre_tokenizers;
mod processors;
mod tasks;
mod tokenizer;
mod trainers;
mod utils;
use neon::prelude::*;
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
register_module!(mut m, {
// Tokenizer
tokenizer::register(&mut m, "tokenizer")?;
// Models
models::register(&mut m, "models")?;
// Decoders
decoders::register(&mut m, "decoders")?;
// Processors
processors::register(&mut m, "processors")?;
// Normalizers
normalizers::register(&mut m, "normalizers")?;
// PreTokenizers
pre_tokenizers::register(&mut m, "pre_tokenizers")?;
// Trainers
trainers::register(&mut m, "trainers")?;
// Utils
utils::register(&mut m, "utils")?;
Ok(())
});
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/models.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use crate::tasks::models::{BPEFromFilesTask, WordLevelFromFilesTask, WordPieceFromFilesTask};
use crate::trainers::Trainer;
use neon::prelude::*;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use tk::models::{
bpe::{BpeBuilder, Merges, Vocab},
wordlevel::WordLevelBuilder,
wordpiece::WordPieceBuilder,
ModelWrapper,
};
use tk::Model as ModelTrait;
use tk::Token;
/// Model
#[derive(Clone, Serialize, Deserialize)]
pub struct Model {
#[serde(flatten)]
pub model: Option<Arc<RwLock<ModelWrapper>>>,
}
impl<M> From<M> for Model
where
M: Into<ModelWrapper>,
{
fn from(wrapper: M) -> Self {
Self {
model: Some(Arc::new(RwLock::new(wrapper.into()))),
}
}
}
impl tk::Model for Model {
type Trainer = Trainer;
fn tokenize(&self, sequence: &str) -> tk::Result<Vec<Token>> {
self.model
.as_ref()
.ok_or("Uninitialized Model")?
.read()
.unwrap()
.tokenize(sequence)
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.model.as_ref()?.read().unwrap().token_to_id(token)
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.model.as_ref()?.read().unwrap().id_to_token(id)
}
fn get_vocab(&self) -> HashMap<String, u32> {
self.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_vocab()
}
fn get_vocab_size(&self) -> usize {
self.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_vocab_size()
}
fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> {
self.model
.as_ref()
.ok_or("Uninitialized Model")?
.read()
.unwrap()
.save(folder, name)
}
fn get_trainer(&self) -> Self::Trainer {
self.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_trainer()
.into()
}
}
declare_types! {
pub class JsModel for Model {
init(_) {
// This should not be called from JS
Ok(Model { model: None })
}
method save(mut cx) {
// save(folder: string, name?: string)
let folder = cx.extract::<String>(0)?;
let name = cx.extract_opt::<String>(1)?;
let this = cx.this();
let guard = cx.lock();
let files = this.borrow(&guard)
.model.as_ref().expect("Uninitialized Model")
.read().unwrap()
.save(
Path::new(&folder),
name.as_deref()
)
.map_err(|e| Error(format!("{}", e)))?;
Ok(neon_serde::to_value(&mut cx, &files)?)
}
}
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct BpeOptions {
cache_capacity: Option<usize>,
dropout: Option<f32>,
unk_token: Option<String>,
continuing_subword_prefix: Option<String>,
end_of_word_suffix: Option<String>,
fuse_unk: Option<bool>,
byte_fallback: Option<bool>,
}
impl BpeOptions {
fn apply_to_bpe_builder(self, mut builder: BpeBuilder) -> BpeBuilder {
if let Some(cache_capacity) = self.cache_capacity {
builder = builder.cache_capacity(cache_capacity);
}
if let Some(dropout) = self.dropout {
builder = builder.dropout(dropout);
}
if let Some(unk_token) = self.unk_token {
builder = builder.unk_token(unk_token);
}
if let Some(continuing_subword_prefix) = self.continuing_subword_prefix {
builder = builder.continuing_subword_prefix(continuing_subword_prefix);
}
if let Some(end_of_word_suffix) = self.end_of_word_suffix {
builder = builder.end_of_word_suffix(end_of_word_suffix);
}
if let Some(fuse_unk) = self.fuse_unk {
builder = builder.fuse_unk(fuse_unk);
}
if let Some(byte_fallback) = self.byte_fallback {
builder = builder.byte_fallback(byte_fallback);
}
builder
}
}
/// bpe_init(vocab: {[token: string]: number}, merges: [string, string][], options: {
/// cacheCapacity?: number,
/// dropout?: number,
/// unkToken?: string,
/// continuingSubwordPrefix?: string,
/// endOfWordSuffix?: string,
/// fuseUnk?: bool,
/// byteFallback?: bool
/// })
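///
/// Raw-binding sketch (assuming the addon is loaded as `native`; the toy vocab
/// and merges below are purely illustrative):
///
///  const model = native.models_BPE_init(
///    { "[UNK]": 0, "he": 1, "llo": 2, "hello": 3 },
///    [["he", "llo"]],
///    { unkToken: "[UNK]" }
///  );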
fn bpe_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<Vocab>(0)?;
let merges = cx.extract::<Merges>(1)?;
let options = cx.extract_opt::<BpeOptions>(2)?.unwrap_or_default();
let mut builder = tk::models::bpe::BPE::builder().vocab_and_merges(vocab, merges);
builder = options.apply_to_bpe_builder(builder);
let model = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(model.into())));
Ok(js_model)
}
/// bpe_from_file(vocab: string, merges: string, options: {
/// cacheCapacity?: number,
/// dropout?: number,
/// unkToken?: string,
/// continuingSubwordPrefix?: string,
/// endOfWordSuffix?: string,
/// fuseUnk?: bool,
/// byteFallback?: bool
/// }, callback)
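///
/// Raw-binding sketch (assuming the addon is loaded as `native` and that
/// `vocab.json` / `merges.txt` exist on disk):
///
///  native.models_BPE_from_file("vocab.json", "merges.txt", { unkToken: "[UNK]" }, (err, model) => { ... });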
fn bpe_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<BpeOptions>(2) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(3)?),
// Options were undefined or null
Ok(None) => (BpeOptions::default(), cx.argument::<JsFunction>(3)?),
// Options not specified, callback instead
Err(_) => (BpeOptions::default(), cx.argument::<JsFunction>(2)?),
};
let vocab = cx.extract::<String>(0)?;
let merges = cx.extract::<String>(1)?;
let mut builder = tk::models::bpe::BPE::from_file(&vocab, &merges);
builder = options.apply_to_bpe_builder(builder);
let task = BPEFromFilesTask::new(builder);
task.schedule(callback);
Ok(cx.undefined())
}
/// bpe_empty()
fn bpe_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let bpe = tk::models::bpe::BPE::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(bpe.into())));
Ok(model)
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct WordPieceOptions {
unk_token: Option<String>,
continuing_subword_prefix: Option<String>,
max_input_chars_per_word: Option<usize>,
}
impl WordPieceOptions {
fn apply_to_wordpiece_builder(self, mut builder: WordPieceBuilder) -> WordPieceBuilder {
if let Some(token) = self.unk_token {
builder = builder.unk_token(token);
}
if let Some(prefix) = self.continuing_subword_prefix {
builder = builder.continuing_subword_prefix(prefix);
}
if let Some(max) = self.max_input_chars_per_word {
builder = builder.max_input_chars_per_word(max);
}
builder
}
}
/// wordpiece_init(vocab: {[token: string]: number}, options: {
/// unkToken?: string = "[UNK]",
/// maxInputCharsPerWord?: number = 100,
/// continuingSubwordPrefix?: "##",
/// })
fn wordpiece_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<HashMap<String, u32>>(0)?;
let options = cx.extract_opt::<WordPieceOptions>(1)?.unwrap_or_default();
let mut builder = tk::models::wordpiece::WordPiece::builder().vocab(vocab);
builder = options.apply_to_wordpiece_builder(builder);
let model = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(model.into())));
Ok(js_model)
}
/// wordpiece_from_file(vocab: string, options: {
/// unkToken?: string = "[UNK]",
/// maxInputCharsPerWord?: number = 100,
/// continuingSubwordPrefix?: "##",
/// }, callback)
fn wordpiece_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<WordPieceOptions>(1) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(2)?),
// Options were undefined or null
Ok(None) => (WordPieceOptions::default(), cx.argument::<JsFunction>(2)?),
// Options not specified, callback instead
Err(_) => (WordPieceOptions::default(), cx.argument::<JsFunction>(1)?),
};
let vocab = cx.extract::<String>(0)?;
let mut builder = tk::models::wordpiece::WordPiece::from_file(&vocab);
builder = options.apply_to_wordpiece_builder(builder);
let task = WordPieceFromFilesTask::new(builder);
task.schedule(callback);
Ok(cx.undefined())
}
/// wordpiece_empty()
fn wordpiece_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let wordpiece = tk::models::wordpiece::WordPiece::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordpiece.into())));
Ok(model)
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct WordLevelOptions {
unk_token: Option<String>,
}
impl WordLevelOptions {
fn apply_to_wordlevel_builder(self, mut builder: WordLevelBuilder) -> WordLevelBuilder {
if let Some(token) = self.unk_token {
builder = builder.unk_token(token);
}
builder
}
}
/// wordlevel_init(vocab: {[token: string]: number}, options: {
/// unkToken?: String,
/// })
fn wordlevel_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<HashMap<String, u32>>(0)?;
let options = cx.extract_opt::<WordLevelOptions>(1)?.unwrap_or_default();
let mut builder = tk::models::wordlevel::WordLevel::builder().vocab(vocab);
builder = options.apply_to_wordlevel_builder(builder);
let model = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(model.into())));
Ok(js_model)
}
/// wordlevel_from_file(vocab: string, options: {
/// unkToken?: String,
/// }, callback)
fn wordlevel_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<WordLevelOptions>(1) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(2)?),
// Options were undefined or null
Ok(None) => (WordLevelOptions::default(), cx.argument::<JsFunction>(2)?),
// Options not specified, callback instead
Err(_) => (WordLevelOptions::default(), cx.argument::<JsFunction>(1)?),
};
let vocab = cx.extract::<String>(0)?;
let mut builder = tk::models::wordlevel::WordLevel::builder().files(vocab);
builder = options.apply_to_wordlevel_builder(builder);
let task = WordLevelFromFilesTask::new(builder);
task.schedule(callback);
Ok(cx.undefined())
}
/// wordlevel_empty()
fn wordlevel_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let wordlevel = tk::models::wordlevel::WordLevel::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordlevel.into())));
Ok(model)
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct UnigramOptions {
unk_id: Option<usize>,
byte_fallback: Option<bool>,
}
/// unigram_init(vocab: [string, number][], options?: {
/// unkId?: number,
/// byteFallback?: bool
/// })
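///
/// Raw-binding sketch (assuming the addon is loaded as `native`; the vocab is a
/// list of [piece, score] pairs and the scores shown are illustrative):
///
///  const model = native.models_Unigram_init([["<unk>", 0], ["hello", -2.5]], { unkId: 0 });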
fn unigram_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<Vec<(String, f64)>>(0)?;
let options = cx.extract_opt::<UnigramOptions>(1)?.unwrap_or_default();
let byte_fallback = options.byte_fallback.unwrap_or(false);
let unigram = tk::models::unigram::Unigram::from(vocab, options.unk_id, byte_fallback)
.map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(unigram.into())));
Ok(js_model)
}
/// unigram_empty()
fn unigram_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let unigram = tk::models::unigram::Unigram::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(unigram.into())));
Ok(model)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BPE_init", prefix), bpe_init)?;
m.export_function(&format!("{}_BPE_from_file", prefix), bpe_from_file)?;
m.export_function(&format!("{}_BPE_empty", prefix), bpe_empty)?;
m.export_function(&format!("{}_WordPiece_init", prefix), wordpiece_init)?;
m.export_function(
&format!("{}_WordPiece_from_file", prefix),
wordpiece_from_file,
)?;
m.export_function(&format!("{}_WordPiece_empty", prefix), wordpiece_empty)?;
m.export_function(&format!("{}_WordLevel_init", prefix), wordlevel_init)?;
m.export_function(
&format!("{}_WordLevel_from_file", prefix),
wordlevel_from_file,
)?;
m.export_function(&format!("{}_WordLevel_empty", prefix), wordlevel_empty)?;
m.export_function(&format!("{}_Unigram_init", prefix), unigram_init)?;
m.export_function(&format!("{}_Unigram_empty", prefix), unigram_empty)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/normalizers.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use serde::{ser::SerializeStruct, Serialize, Serializer};
use std::sync::Arc;
use tk::normalizers::NormalizerWrapper;
use tk::NormalizedString;
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum JsNormalizerWrapper {
Sequence(Vec<Arc<NormalizerWrapper>>),
Wrapped(Arc<NormalizerWrapper>),
}
impl Serialize for JsNormalizerWrapper {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where
S: Serializer,
{
match self {
JsNormalizerWrapper::Sequence(seq) => {
let mut ser = serializer.serialize_struct("Sequence", 2)?;
ser.serialize_field("type", "Sequence")?;
ser.serialize_field("normalizers", seq)?;
ser.end()
}
JsNormalizerWrapper::Wrapped(inner) => inner.serialize(serializer),
}
}
}
impl<I> From<I> for JsNormalizerWrapper
where
I: Into<NormalizerWrapper>,
{
fn from(norm: I) -> Self {
JsNormalizerWrapper::Wrapped(Arc::new(norm.into()))
}
}
/// Normalizer
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Normalizer {
#[serde(flatten)]
pub normalizer: Option<JsNormalizerWrapper>,
}
impl tk::Normalizer for Normalizer {
fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> {
match self.normalizer.as_ref().ok_or("Uninitialized Normalizer")? {
JsNormalizerWrapper::Sequence(seq) => {
for norm in seq {
norm.normalize(normalized)?;
}
}
JsNormalizerWrapper::Wrapped(norm) => norm.normalize(normalized)?,
};
Ok(())
}
}
declare_types! {
pub class JsNormalizer for Normalizer {
init(_) {
// This should not be called from JS
Ok(Normalizer { normalizer: None })
}
method normalizeString(mut cx) {
use tk::Normalizer;
let sequence = cx.extract::<String>(0)?;
let mut normalized = NormalizedString::from(sequence);
let this = cx.this();
let guard = cx.lock();
this.borrow(&guard)
.normalize(&mut normalized)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.string(normalized.get()).upcast())
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct BertNormalizerOptions {
clean_text: bool,
handle_chinese_chars: bool,
strip_accents: Option<bool>,
lowercase: bool,
}
impl Default for BertNormalizerOptions {
fn default() -> Self {
Self {
clean_text: true,
handle_chinese_chars: true,
strip_accents: None,
lowercase: true,
}
}
}
/// bert_normalizer(options?: {
/// cleanText?: bool = true,
/// handleChineseChars?: bool = true,
/// stripAccents?: bool = true,
/// lowercase?: bool = true
/// })
fn bert_normalizer(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let options = cx
.extract_opt::<BertNormalizerOptions>(0)?
.unwrap_or_default();
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(
tk::normalizers::bert::BertNormalizer::new(
options.clean_text,
options.handle_chinese_chars,
options.strip_accents,
options.lowercase,
)
.into(),
);
Ok(normalizer)
}
/// nfd()
fn nfd(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFD.into());
Ok(normalizer)
}
/// nfkd()
fn nfkd(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFKD.into());
Ok(normalizer)
}
/// nfc()
fn nfc(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFC.into());
Ok(normalizer)
}
/// nfkc()
fn nfkc(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFKC.into());
Ok(normalizer)
}
/// strip(left?: boolean, right?: boolean)
fn strip(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let left = cx.extract_opt::<bool>(0)?.unwrap_or(true);
let right = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer =
Some(tk::normalizers::strip::Strip::new(left, right).into());
Ok(normalizer)
}
/// prepend(prepend: string)
fn prepend(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let prepend: String = cx.extract::<String>(0)?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer =
Some(tk::normalizers::prepend::Prepend::new(prepend).into());
Ok(normalizer)
}
/// strip_accents()
fn strip_accents(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::strip::StripAccents.into());
Ok(normalizer)
}
/// sequence(normalizers: Normalizer[])
fn sequence(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let normalizers = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(normalizers.len());
normalizers.into_iter().try_for_each(|normalizer| {
match normalizer.downcast::<JsNormalizer>().or_throw(&mut cx) {
Ok(normalizer) => {
let guard = cx.lock();
let normalizer = normalizer.borrow(&guard).normalizer.clone();
if let Some(normalizer) = normalizer {
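// Flatten any nested Sequence here, so the resulting Sequence only holds plain wrapped normalizers.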
match normalizer {
JsNormalizerWrapper::Sequence(seq) => sequence.extend(seq),
JsNormalizerWrapper::Wrapped(inner) => sequence.push(inner),
}
Ok(())
} else {
cx.throw_error("Uninitialized Normalizer")
}
}
Err(e) => Err(e),
}
})?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(JsNormalizerWrapper::Sequence(sequence));
Ok(normalizer)
}
/// lowercase()
fn lowercase(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::utils::Lowercase.into());
Ok(normalizer)
}
/// replace()
fn replace(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let pattern: String = cx.extract::<String>(0)?;
let content: String = cx.extract::<String>(1)?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(
tk::normalizers::replace::Replace::new(pattern, content)
.map_err(|e| Error(e.to_string()))?
.into(),
);
Ok(normalizer)
}
/// nmt()
fn nmt(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::Nmt.into());
Ok(normalizer)
}
/// precompiled()
fn precompiled(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let bytes = cx.extract::<Vec<u8>>(0)?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(
tk::normalizers::precompiled::Precompiled::from(&bytes)
.map_err(|e| Error(e.to_string()))?
.into(),
);
Ok(normalizer)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BertNormalizer", prefix), bert_normalizer)?;
m.export_function(&format!("{}_NFD", prefix), nfd)?;
m.export_function(&format!("{}_NFKD", prefix), nfkd)?;
m.export_function(&format!("{}_NFC", prefix), nfc)?;
m.export_function(&format!("{}_NFKC", prefix), nfkc)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
m.export_function(&format!("{}_Lowercase", prefix), lowercase)?;
m.export_function(&format!("{}_Strip", prefix), strip)?;
m.export_function(&format!("{}_Prepend", prefix), prepend)?;
m.export_function(&format!("{}_StripAccents", prefix), strip_accents)?;
m.export_function(&format!("{}_Nmt", prefix), nmt)?;
m.export_function(&format!("{}_Precompiled", prefix), precompiled)?;
m.export_function(&format!("{}_Replace", prefix), replace)?;
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use tk::normalizers::unicode::{NFC, NFKC};
use tk::normalizers::utils::Sequence;
use tk::normalizers::NormalizerWrapper;
#[test]
fn serialize() {
let js_wrapped: JsNormalizerWrapper = NFKC.into();
let js_ser = serde_json::to_string(&js_wrapped).unwrap();
let rs_wrapped = NormalizerWrapper::NFKC(NFKC);
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_ser, rs_ser);
let js_norm: Normalizer = serde_json::from_str(&rs_ser).unwrap();
match js_norm.normalizer.unwrap() {
JsNormalizerWrapper::Wrapped(nfc) => match nfc.as_ref() {
NormalizerWrapper::NFKC(_) => {}
_ => panic!("Expected NFKC"),
},
_ => panic!("Expected wrapped, not sequence."),
}
let js_seq: JsNormalizerWrapper = Sequence::new(vec![NFC.into(), NFKC.into()]).into();
let js_wrapper_ser = serde_json::to_string(&js_seq).unwrap();
let rs_wrapped = NormalizerWrapper::Sequence(Sequence::new(vec![NFC.into(), NFKC.into()]));
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
let js_seq = Normalizer {
normalizer: Some(js_seq),
};
let js_ser = serde_json::to_string(&js_seq).unwrap();
assert_eq!(js_wrapper_ser, js_ser);
let rs_seq = Sequence::new(vec![NFC.into(), NFKC.into()]);
let rs_ser = serde_json::to_string(&rs_seq).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native/src | hf_public_repos/tokenizers/bindings/node/native/src/tasks/mod.rs | pub mod models;
pub mod tokenizer;
| 0 |
hf_public_repos/tokenizers/bindings/node/native/src | hf_public_repos/tokenizers/bindings/node/native/src/tasks/tokenizer.rs | extern crate tokenizers as tk;
use crate::encoding::*;
use crate::tokenizer::Tokenizer;
use neon::prelude::*;
use tk::tokenizer::{EncodeInput, Encoding};
pub enum EncodeTask<'s> {
Single(Tokenizer, Option<EncodeInput<'s>>, bool),
Batch(Tokenizer, Option<Vec<EncodeInput<'s>>>, bool),
}
pub enum EncodeOutput {
Single(Box<Encoding>),
Batch(Vec<Encoding>),
}
impl Task for EncodeTask<'static> {
type Output = EncodeOutput;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
match self {
EncodeTask::Single(worker, input, add_special_tokens) => {
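// `perform` only receives `&self`, so the input is moved out by swapping `None` into its place; this assumes `perform` is called at most once per task.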
let mut input: Option<EncodeInput> =
unsafe { std::ptr::replace(input as *const _ as *mut _, None) };
worker
.tokenizer
.read()
.unwrap()
.encode_char_offsets(
input.take().ok_or("No provided input")?,
*add_special_tokens,
)
.map_err(|e| format!("{}", e))
.map(|item| EncodeOutput::Single(Box::new(item)))
}
EncodeTask::Batch(worker, input, add_special_tokens) => {
let mut input: Option<Vec<EncodeInput>> =
unsafe { std::ptr::replace(input as *const _ as *mut _, None) };
worker
.tokenizer
.read()
.unwrap()
.encode_batch_char_offsets(
input.take().ok_or("No provided input")?,
*add_special_tokens,
)
.map_err(|e| format!("{}", e))
.map(EncodeOutput::Batch)
}
}
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
match result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())? {
EncodeOutput::Single(encoding) => {
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
// Set the actual encoding
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(*encoding);
Ok(js_encoding.upcast())
}
EncodeOutput::Batch(encodings) => {
let result = JsArray::new(&mut cx, encodings.len() as u32);
for (i, encoding) in encodings.into_iter().enumerate() {
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
// Set the actual encoding
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(encoding);
result.set(&mut cx, i as u32, js_encoding)?;
}
Ok(result.upcast())
}
}
}
}
pub enum DecodeTask {
Single(Tokenizer, Vec<u32>, bool),
Batch(Tokenizer, Vec<Vec<u32>>, bool),
}
pub enum DecodeOutput {
Single(String),
Batch(Vec<String>),
}
impl Task for DecodeTask {
type Output = DecodeOutput;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
match self {
DecodeTask::Single(worker, ids, skip_special_tokens) => worker
.tokenizer
.read()
.unwrap()
.decode(ids.as_slice(), *skip_special_tokens)
.map_err(|e| format!("{}", e))
.map(DecodeOutput::Single),
DecodeTask::Batch(worker, ids, skip_special_tokens) => worker
.tokenizer
.read()
.unwrap()
.decode_batch(
&ids.iter().map(|v| v.as_slice()).collect::<Vec<&[u32]>>(),
*skip_special_tokens,
)
.map_err(|e| format!("{}", e))
.map(DecodeOutput::Batch),
}
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
match result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())? {
DecodeOutput::Single(string) => Ok(cx.string(string).upcast()),
DecodeOutput::Batch(strings) => {
let result = JsArray::new(&mut cx, strings.len() as u32);
for (i, string) in strings.into_iter().enumerate() {
let js_string = cx.string(string);
result.set(&mut cx, i as u32, js_string)?;
}
Ok(result.upcast())
}
}
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native/src | hf_public_repos/tokenizers/bindings/node/native/src/tasks/models.rs | extern crate tokenizers as tk;
use crate::models::*;
use neon::prelude::*;
use std::sync::{Arc, RwLock};
use tk::models::bpe::{BpeBuilder, BPE};
use tk::models::wordlevel::{WordLevel, WordLevelBuilder};
use tk::models::wordpiece::{WordPiece, WordPieceBuilder};
pub struct WordPieceFromFilesTask(Option<WordPieceBuilder>);
impl WordPieceFromFilesTask {
pub fn new(builder: WordPieceBuilder) -> Self {
Self(Some(builder))
}
}
impl Task for WordPieceFromFilesTask {
type Output = WordPiece;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
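// `perform` only receives `&self`, so the builder is taken out by swapping `None` into its place; this assumes `perform` runs at most once.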
let builder: Option<WordPieceBuilder> =
unsafe { std::ptr::replace(&self.0 as *const _ as *mut _, None) };
builder.unwrap().build().map_err(|e| format!("{}", e))
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
let wordpiece = result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordpiece.into())));
Ok(js_model.upcast())
}
}
pub struct WordLevelFromFilesTask(Option<WordLevelBuilder>);
impl WordLevelFromFilesTask {
pub fn new(builder: WordLevelBuilder) -> Self {
Self(Some(builder))
}
}
impl Task for WordLevelFromFilesTask {
type Output = WordLevel;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
let builder: Option<WordLevelBuilder> =
unsafe { std::ptr::replace(&self.0 as *const _ as *mut _, None) };
builder.unwrap().build().map_err(|e| format!("{}", e))
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
let wordlevel = result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordlevel.into())));
Ok(js_model.upcast())
}
}
pub struct BPEFromFilesTask(Option<BpeBuilder>);
impl BPEFromFilesTask {
pub fn new(builder: BpeBuilder) -> Self {
Self(Some(builder))
}
}
impl Task for BPEFromFilesTask {
type Output = BPE;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
let builder: Option<BpeBuilder> =
unsafe { std::ptr::replace(&self.0 as *const _ as *mut _, None) };
builder.unwrap().build().map_err(|e| format!("{}", e))
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
let bpe = result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(bpe.into())));
Ok(js_model.upcast())
}
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.13.2]
- [#1096] Python 3.11 support
## [0.13.1]
- [#1072] Fixing Roberta type ids.
## [0.13.0]
- [#956] PyO3 version upgrade
- [#1055] M1 automated builds
- [#1008] `Decoder` is now a composable trait, while remaining backward compatible
- [#1047, #1051, #1052] `Processor` is now a composable trait, while remaining backward compatible
Both trait changes warrant a "major" number since, despite best efforts to not break backward
compatibility, the code is different enough that we cannot be exactly sure.
## [0.12.1]
- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520
## [0.12.0] YANKED
Bump minor version because of a breaking change.
- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking.
- [#962] Fix tests for python 3.10
- [#961] Added link for Ruby port of `tokenizers`
## [0.11.6]
- [#919] Fixing single_word AddedToken. (regression from 0.11.2)
- [#916] Deserializing faster `added_tokens` by loading them in batch.
## [0.11.5]
- [#895] Build `python 3.10` wheels.
## [0.11.4]
- [#884] Fixing bad deserialization following inclusion of a default for Punctuation
## [0.11.3]
- [#882] Fixing Punctuation deserialize without argument.
- [#868] Fixing missing direction in TruncationParams
- [#860] Adding TruncationSide to TruncationParams
## [0.11.0]
### Fixed
- [#585] Conda version should now work on old CentOS
- [#844] Fixing interaction between `is_pretokenized` and `trim_offsets`.
- [#851] Doc links
### Added
- [#657]: Add SplitDelimiterBehavior customization to Punctuation constructor
- [#845]: Documentation for `Decoders`.
### Changed
- [#850]: Added a feature gate to enable disabling `http` features
- [#718]: Fix `WordLevel` tokenizer determinism during training
- [#762]: Add a way to specify the unknown token in `SentencePieceUnigramTokenizer`
- [#770]: Improved documentation for `UnigramTrainer`
- [#780]: Add `Tokenizer.from_pretrained` to load tokenizers from the Hugging Face Hub
- [#793]: Saving a pretty JSON file by default when saving a tokenizer
## [0.10.3]
### Fixed
- [#686]: Fix SPM conversion process for whitespace deduplication
- [#707]: Fix stripping strings containing Unicode characters
### Added
- [#693]: Add a CTC Decoder for Wave2Vec models
### Removed
- [#714]: Removed support for Python 3.5
## [0.10.2]
### Fixed
- [#652]: Fix offsets for `Precompiled` corner case
- [#656]: Fix BPE `continuing_subword_prefix`
- [#674]: Fix `Metaspace` serialization problems
## [0.10.1]
### Fixed
- [#616]: Fix SentencePiece tokenizers conversion
- [#617]: Fix offsets produced by Precompiled Normalizer (used by tokenizers converted from SPM)
- [#618]: Fix Normalizer.normalize with `PyNormalizedStringRefMut`
- [#620]: Fix serialization/deserialization for overlapping models
- [#621]: Fix `ByteLevel` instantiation from a previously saved state (using `__getstate__()`)
## [0.10.0]
### Added
- [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work
- [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model
- [#533]: Add support for conda builds
- [#542]: Add Split pre-tokenizer to easily split using a pattern
- [#544]: Ability to train from memory. This also improves the integration with `datasets`
- [#590]: Add getters/setters for components on BaseTokenizer
- [#574]: Add `fuse_unk` option to SentencePieceBPETokenizer
### Changed
- [#509]: Automatically stubbing the `.pyi` files
- [#519]: Each `Model` can return its associated `Trainer` with `get_trainer()`
- [#530]: The various attributes on each component can be get/set (ie.
`tokenizer.model.dropout = 0.1`)
- [#538]: The API Reference has been improved and is now up-to-date.
### Fixed
- [#519]: During training, the `Model` is now trained in-place. This fixes several bugs that were
forcing a reload of the `Model` after training.
- [#539]: Fix `BaseTokenizer` enable_truncation docstring
## [0.9.4]
### Fixed
- [#492]: Fix `from_file` on `BertWordPieceTokenizer`
- [#498]: Fix the link to download `sentencepiece_model_pb2.py`
- [#500]: Fix a typo in the docs quicktour
### Changed
- [#506]: Improve Encoding mappings for pairs of sequence
## [0.9.3]
### Fixed
- [#470]: Fix hanging error when training with custom component
- [#476]: TemplateProcessing serialization is now deterministic
- [#481]: Fix SentencePieceBPETokenizer.from_files
### Added
- [#477]: UnicodeScripts PreTokenizer to avoid merges between various scripts
- [#480]: Unigram now accepts an `initial_alphabet` and handles `special_tokens` correctly
## [0.9.2]
### Fixed
- [#464]: Fix a problem with RobertaProcessing being deserialized as BertProcessing
## [0.9.1]
### Fixed
- [#459]: Fix a problem with deserialization
## [0.9.0]
### Fixed
- [#362]: Fix training deadlock with Python components.
- [#363]: Fix a crash when calling `.train` with some non-existent files
- [#355]: Remove a lot of possible crashes
- [#389]: Improve truncation (crash and consistency)
### Added
- [#379]: Add the ability to call `encode`/`encode_batch` with numpy arrays
- [#292]: Support for the Unigram algorithm
- [#378], [#394], [#416], [#417]: Many new Normalizer and PreTokenizer
- [#403]: Add `TemplateProcessing` `PostProcessor`.
- [#420]: Ability to fuse the "unk" token in BPE.
### Changed
- [#360]: Lots of improvements related to words/alignment tracking
- [#426]: Improvements on error messages thanks to PyO3 0.12
## [0.8.1]
### Fixed
- [#333]: Fix deserialization of `AddedToken`, where the content was not restored properly
### Changed
- [#329]: Improved warning and behavior when we detect a fork
- [#330]: BertNormalizer now keeps the same behavior as the original implementation when
`strip_accents` is not specified.
## [0.8.0]
### Highlights of this release
- We can now encode both pre-tokenized inputs and raw strings. This is especially useful when
processing datasets that are already pre-tokenized, like for NER (Named Entity Recognition), and helps
while applying labels to each word.
- Full tokenizer serialization. It is now easy to save a tokenizer to a single JSON file, to later
load it back with just one line of code. That's what sharing a Tokenizer means now: 1 line of code.
- With the serialization comes the compatibility with `Pickle`! The Tokenizer, all of its components,
Encodings, everything can be pickled!
- Training a tokenizer is now even faster (up to 5-10x) than before!
- Compatibility with `multiprocessing`, even when using the `fork` start method. Since this library
makes heavy use of the multithreading capabilities of our computers to allow very fast tokenization,
this led to problems (deadlocks) when used with `multiprocessing`. This version now allows disabling
the parallelism, and will warn you if this is necessary.
- And a lot of other improvements, and fixes.
### Fixed
- [#286]: Fix various crash when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens
### Added
- [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...).
This adds some methods to easily save/load an entire tokenizer (`from_str`, `from_file`).
- [#273]: `Tokenizer` and its parts are now picklable
- [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure
activation of the Tensor Cores, while ensuring padding to a multiple of 8. Use with
`enable_padding(pad_to_multiple_of=8)` for example.
- [#298]: Ability to get the currently set truncation/padding params
- [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment
variable. This is especially useful when using `multiprocessing` capabilities, with the `fork`
start method, which happens to be the default on Linux systems. Without disabling the parallelism,
the process deadlocks while encoding. (Cf [#187] for more information)
### Changed
- Improved errors generated during truncation: cases where the provided max length is too low are
now handled properly.
- [#249] `encode` and `encode_batch` now accept pre-tokenized inputs. When the input is pre-tokenized,
the argument `is_pretokenized=True` must be specified.
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the
processing of each file
- [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original
implementation from GPT-2
- [#309]: Improved the management of the additional vocabulary. This introduces an option
`normalized`, controlling whether a token should be extracted from the normalized version of the
input text.
## [0.7.0]
### Changed
- Only one progress bar while reading files during training. This is better for use-cases with
a high number of files as it avoids having too many progress bars on screen. Also avoids reading the
size of each file before starting to actually read these files, as this process could take a really
long time.
- [#193]: `encode` and `encode_batch` now take a new optional argument, specifying whether we
should add the special tokens. This is activated by default.
- [#197]: `original_str` and `normalized_str` have been removed from the `Encoding` returned by
`encode` and `encode_batch`. This brings a reduction of 70% of the memory footprint.
- [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the
normalized one anymore.
- The added token given to `add_special_tokens` or `add_tokens` on a `Tokenizer`, or while using
`train(special_tokens=...)` can now be instances of `AddedToken` to provide more control over these
tokens.
- [#136]: Updated Pyo3 version
- [#136]: Static methods `Model.from_files` and `Model.empty` are removed in favor of using
constructors.
- [#239]: `CharBPETokenizer` now corresponds to OpenAI GPT BPE implementation by default.
### Added
- [#188]: `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated.
This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these
whitespaces are part of the actual token.
It has been added to `ByteLevelBPETokenizer` but it is off by default (`trim_offsets=False`).
- [#236]: `RobertaProcessing` also handles trimming the offsets.
- [#234]: New alignment mappings on the `Encoding`. Provide methods to easily convert between `char`
or `word` (input space) and `token` (output space).
- `post_process` can be called on the `Tokenizer`
- [#208]: Ability to retrieve the vocabulary from the `Tokenizer` with
`get_vocab(with_added_tokens: bool)`
- [#136] Models can now be instantiated through object constructors.
### Fixed
- [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE:
- when `add_prefix_space=True`
- [#156]: when a Unicode character gets split-up in multiple byte-level characters
- Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded.
- [#175]: Fix a bug that prevented the addition of more than a certain number of tokens (even if
not advised, but that's not the question).
- [#205]: Trim the decoded string in `BPEDecoder` used by `CharBPETokenizer`
### How to migrate
- Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. If you are
using `ByteLevelBPETokenizer`, this option is disabled by default (`trim_offsets=False`).
- `BertWordPieceTokenizer` option to `add_special_tokens` must now be given to `encode` or
`encode_batch`
- Access to the `original_str` on the `Encoding` has been removed. The original string is the input
of `encode` so it didn't make sense to keep it here.
- No need to call `original_str.offsets(offsets[N])` to convert offsets to the original string. They
are now relative to the original string by default.
- Access to the `normalized_str` on the `Encoding` has been removed. Can be retrieved by calling
`normalize(sequence)` on the `Tokenizer`
- Change `Model.from_files` and `Model.empty` to use constructors. The model constructor should take
the same arguments as the old methods (i.e. `BPE(vocab, merges)` or `BPE()`).
- If you were using the `CharBPETokenizer` and want to keep the same behavior as before, set
`bert_normalizer=False` and `split_on_whitespace_only=True`.
## [0.6.0]
### Changed
- [#165]: Big improvements in speed for BPE (Both training and tokenization)
### Fixed
- [#160]: Some default tokens were missing from `BertWordPieceTokenizer`
- [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got
split up in multiple bytes.
- [#174]: The `longest_first` truncation strategy had a bug
## [0.5.2]
- [#163]: Do not open all files directly while training
### Fixed
- We introduced a bug related to the saving of the WordPiece model in 0.5.1: The `vocab.txt` file
was named `vocab.json`. This is now fixed.
- The `WordLevel` model was also saving its vocabulary to the wrong format.
## [0.5.1]
### Changed
- `name` argument is now optional when saving a `Model`'s vocabulary. When the name is not
specified, the files get more generic names, like `vocab.json` or `merges.txt`.
## [0.5.0]
### Changed
- [#145]: `BertWordPieceTokenizer` now cleans up some tokenization artifacts while decoding
- [#149]: `ByteLevelBPETokenizer` now has `dropout`.
- `do_lowercase` has been changed to `lowercase` for consistency between the different tokenizers.
(Especially `ByteLevelBPETokenizer` and `CharBPETokenizer`)
- [#139]: Expose `__len__` on `Encoding`
- Improved padding performance.
### Added
- Added a new `Strip` normalizer
### Fixed
- [#145]: Decoding was buggy on `BertWordPieceTokenizer`.
- [#152]: Some documentation and examples were still using the old `BPETokenizer`
### How to migrate
- Use `lowercase` when initializing `ByteLevelBPETokenizer` or `CharBPETokenizer` instead of
`do_lowercase`.
## [0.4.2]
### Fixed
- [#137]: Fix a bug in the class `WordPieceTrainer` that prevented `BertWordPieceTokenizer` from
being trained.
## [0.4.1]
### Fixed
- [#134]: Fix a bug related to the punctuation in BertWordPieceTokenizer
## [0.4.0]
### Changed
- [#131]: Replaced all .new() class methods by a proper __new__ implementation
- Improved typings
### How to migrate
- Remove all `.new` on all class instantiations
## [0.3.0]
### Changed
- BPETokenizer has been renamed to CharBPETokenizer for clarity.
- Improve truncation/padding and the handling of overflowing tokens. Now when a sequence gets
truncated, we provide a list of overflowing `Encoding` that are ready to be processed by a language
model, just as the main `Encoding`.
- Provide mapping to the original string offsets using:
```
output = tokenizer.encode(...)
print(output.original_str.offsets(output.offsets[3]))
```
- [#99]: Exposed the vocabulary size on all tokenizers
### Added
- Added `CharDelimiterSplit`: a new `PreTokenizer` that allows splitting sequences on the given
delimiter (Works like `.split(delimiter)`)
- Added `WordLevel`: a new model that simply maps `tokens` to their `ids`.
### Fixed
- Fix a bug with IndexableString
- Fix a bug with truncation
### How to migrate
- Rename `BPETokenizer` to `CharBPETokenizer`
- `Encoding.overflowing` is now a List instead of a `Optional[Encoding]`
## [0.2.1]
### Fixed
- Fix a bug with the IDs associated with added tokens.
- Fix a bug that was causing crashes in Python 3.5
[#1096]: https://github.com/huggingface/tokenizers/pull/1096
[#1072]: https://github.com/huggingface/tokenizers/pull/1072
[#956]: https://github.com/huggingface/tokenizers/pull/956
[#1008]: https://github.com/huggingface/tokenizers/pull/1008
[#1009]: https://github.com/huggingface/tokenizers/pull/1009
[#1047]: https://github.com/huggingface/tokenizers/pull/1047
[#1055]: https://github.com/huggingface/tokenizers/pull/1055
[#1051]: https://github.com/huggingface/tokenizers/pull/1051
[#1052]: https://github.com/huggingface/tokenizers/pull/1052
[#938]: https://github.com/huggingface/tokenizers/pull/938
[#939]: https://github.com/huggingface/tokenizers/pull/939
[#952]: https://github.com/huggingface/tokenizers/pull/952
[#954]: https://github.com/huggingface/tokenizers/pull/954
[#962]: https://github.com/huggingface/tokenizers/pull/962
[#961]: https://github.com/huggingface/tokenizers/pull/961
[#960]: https://github.com/huggingface/tokenizers/pull/960
[#919]: https://github.com/huggingface/tokenizers/pull/919
[#916]: https://github.com/huggingface/tokenizers/pull/916
[#895]: https://github.com/huggingface/tokenizers/pull/895
[#884]: https://github.com/huggingface/tokenizers/pull/884
[#882]: https://github.com/huggingface/tokenizers/pull/882
[#868]: https://github.com/huggingface/tokenizers/pull/868
[#860]: https://github.com/huggingface/tokenizers/pull/860
[#850]: https://github.com/huggingface/tokenizers/pull/850
[#844]: https://github.com/huggingface/tokenizers/pull/844
[#845]: https://github.com/huggingface/tokenizers/pull/845
[#851]: https://github.com/huggingface/tokenizers/pull/851
[#585]: https://github.com/huggingface/tokenizers/pull/585
[#793]: https://github.com/huggingface/tokenizers/pull/793
[#780]: https://github.com/huggingface/tokenizers/pull/780
[#770]: https://github.com/huggingface/tokenizers/pull/770
[#762]: https://github.com/huggingface/tokenizers/pull/762
[#718]: https://github.com/huggingface/tokenizers/pull/718
[#714]: https://github.com/huggingface/tokenizers/pull/714
[#707]: https://github.com/huggingface/tokenizers/pull/707
[#693]: https://github.com/huggingface/tokenizers/pull/693
[#686]: https://github.com/huggingface/tokenizers/pull/686
[#674]: https://github.com/huggingface/tokenizers/pull/674
[#657]: https://github.com/huggingface/tokenizers/pull/657
[#656]: https://github.com/huggingface/tokenizers/pull/656
[#652]: https://github.com/huggingface/tokenizers/pull/652
[#621]: https://github.com/huggingface/tokenizers/pull/621
[#620]: https://github.com/huggingface/tokenizers/pull/620
[#618]: https://github.com/huggingface/tokenizers/pull/618
[#617]: https://github.com/huggingface/tokenizers/pull/617
[#616]: https://github.com/huggingface/tokenizers/pull/616
[#590]: https://github.com/huggingface/tokenizers/pull/590
[#574]: https://github.com/huggingface/tokenizers/pull/574
[#544]: https://github.com/huggingface/tokenizers/pull/544
[#542]: https://github.com/huggingface/tokenizers/pull/542
[#539]: https://github.com/huggingface/tokenizers/pull/539
[#538]: https://github.com/huggingface/tokenizers/pull/538
[#533]: https://github.com/huggingface/tokenizers/pull/533
[#530]: https://github.com/huggingface/tokenizers/pull/530
[#519]: https://github.com/huggingface/tokenizers/pull/519
[#509]: https://github.com/huggingface/tokenizers/pull/509
[#508]: https://github.com/huggingface/tokenizers/pull/508
[#506]: https://github.com/huggingface/tokenizers/pull/506
[#500]: https://github.com/huggingface/tokenizers/pull/500
[#498]: https://github.com/huggingface/tokenizers/pull/498
[#492]: https://github.com/huggingface/tokenizers/pull/492
[#481]: https://github.com/huggingface/tokenizers/pull/481
[#480]: https://github.com/huggingface/tokenizers/pull/480
[#477]: https://github.com/huggingface/tokenizers/pull/477
[#476]: https://github.com/huggingface/tokenizers/pull/476
[#470]: https://github.com/huggingface/tokenizers/pull/470
[#464]: https://github.com/huggingface/tokenizers/pull/464
[#459]: https://github.com/huggingface/tokenizers/pull/459
[#420]: https://github.com/huggingface/tokenizers/pull/420
[#417]: https://github.com/huggingface/tokenizers/pull/417
[#416]: https://github.com/huggingface/tokenizers/pull/416
[#403]: https://github.com/huggingface/tokenizers/pull/403
[#394]: https://github.com/huggingface/tokenizers/pull/394
[#389]: https://github.com/huggingface/tokenizers/pull/389
[#379]: https://github.com/huggingface/tokenizers/pull/379
[#378]: https://github.com/huggingface/tokenizers/pull/378
[#363]: https://github.com/huggingface/tokenizers/pull/363
[#362]: https://github.com/huggingface/tokenizers/pull/362
[#360]: https://github.com/huggingface/tokenizers/pull/360
[#355]: https://github.com/huggingface/tokenizers/pull/355
[#333]: https://github.com/huggingface/tokenizers/pull/333
[#330]: https://github.com/huggingface/tokenizers/pull/330
[#329]: https://github.com/huggingface/tokenizers/pull/329
[#311]: https://github.com/huggingface/tokenizers/pull/311
[#309]: https://github.com/huggingface/tokenizers/pull/309
[#292]: https://github.com/huggingface/tokenizers/pull/292
[#289]: https://github.com/huggingface/tokenizers/pull/289
[#286]: https://github.com/huggingface/tokenizers/pull/286
[#280]: https://github.com/huggingface/tokenizers/pull/280
[#276]: https://github.com/huggingface/tokenizers/pull/276
[#273]: https://github.com/huggingface/tokenizers/pull/273
[#272]: https://github.com/huggingface/tokenizers/pull/272
[#249]: https://github.com/huggingface/tokenizers/pull/249
[#239]: https://github.com/huggingface/tokenizers/pull/239
[#236]: https://github.com/huggingface/tokenizers/pull/236
[#234]: https://github.com/huggingface/tokenizers/pull/234
[#208]: https://github.com/huggingface/tokenizers/pull/208
[#205]: https://github.com/huggingface/tokenizers/issues/205
[#197]: https://github.com/huggingface/tokenizers/pull/197
[#193]: https://github.com/huggingface/tokenizers/pull/193
[#190]: https://github.com/huggingface/tokenizers/pull/190
[#188]: https://github.com/huggingface/tokenizers/pull/188
[#187]: https://github.com/huggingface/tokenizers/issues/187
[#175]: https://github.com/huggingface/tokenizers/issues/175
[#174]: https://github.com/huggingface/tokenizers/issues/174
[#165]: https://github.com/huggingface/tokenizers/pull/165
[#163]: https://github.com/huggingface/tokenizers/issues/163
[#160]: https://github.com/huggingface/tokenizers/issues/160
[#156]: https://github.com/huggingface/tokenizers/pull/156
[#152]: https://github.com/huggingface/tokenizers/issues/152
[#149]: https://github.com/huggingface/tokenizers/issues/149
[#145]: https://github.com/huggingface/tokenizers/issues/145
[#139]: https://github.com/huggingface/tokenizers/issues/139
[#137]: https://github.com/huggingface/tokenizers/issues/137
[#134]: https://github.com/huggingface/tokenizers/issues/134
[#131]: https://github.com/huggingface/tokenizers/issues/131
[#99]: https://github.com/huggingface/tokenizers/pull/99
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<a href="https://badge.fury.io/py/tokenizers">
<img alt="Build" src="https://badge.fury.io/py/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
# Tokenizers
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the High-level design, you can go check it there.
Otherwise, let's dive in!
## Main features:
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignments tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
### Installation
#### With pip:
```bash
pip install tokenizers
```
#### From sources:
To use this method, you need to have Rust installed:
```bash
# Install with:
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
```
Once Rust is installed, you can compile by doing the following:
```bash
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python
# Create a virtual env (you can use yours as well)
python -m venv .env
source .env/bin/activate
# Install `tokenizers` in the current virtual env
pip install setuptools_rust
python setup.py install
```
### Load a pretrained tokenizer from the Hub
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
```
### Using the provided Tokenizers
We provide some pre-built tokenizers to cover the most common cases. You can easily load one of
these using some `vocab.json` and `merges.txt` files:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
vocab = "./path/to/vocab.json"
merges = "./path/to/merges.txt"
tokenizer = CharBPETokenizer(vocab, merges)
# And then encode:
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.ids)
print(encoded.tokens)
```
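Each `Encoding` returned by `encode` also keeps track of offsets, so every token can be mapped back to the span of the original sentence it comes from. A minimal sketch building on the example above:
```python
# Offsets map each token back to a (start, end) span in the original input
sentence = "I can feel the magic, can you?"
for token, (start, end) in zip(encoded.tokens, encoded.offsets):
    print(token, "->", sentence[start:end])
```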
And you can train them just as simply:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
tokenizer = CharBPETokenizer()
# Then train it!
tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ])
# Now, let's use it:
encoded = tokenizer.encode("I can feel the magic, can you?")
# And finally save it somewhere
tokenizer.save("./path/to/directory/my-bpe.tokenizer.json")
```
#### Provided Tokenizers
- `CharBPETokenizer`: The original BPE
- `ByteLevelBPETokenizer`: The byte level version of the BPE
- `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
- `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece
All of these can be used and trained as explained above!
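For instance, a minimal sketch using `BertWordPieceTokenizer` (the vocabulary path below is a placeholder, and `lowercase=True` is simply the common default):
```python
from tokenizers import BertWordPieceTokenizer
# Placeholder path: any BERT-style vocab.txt works here
tokenizer = BertWordPieceTokenizer("./path/to/vocab.txt", lowercase=True)
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.tokens)  # includes the [CLS]/[SEP] special tokens added by default
```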
### Build your own
Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer,
by putting all the different parts you need together.
You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
#### Building a byte-level BPE
Here is an example showing how to build your own byte-level BPE by putting all the different pieces
together, and then saving it to a single file:
```python
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
# Initialize a tokenizer
tokenizer = Tokenizer(models.BPE())
# Customize pre-tokenization and decoding
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
# And then train
trainer = trainers.BpeTrainer(
vocab_size=20000,
min_frequency=2,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
)
tokenizer.train([
"./path/to/dataset/1.txt",
"./path/to/dataset/2.txt",
"./path/to/dataset/3.txt"
], trainer=trainer)
# And Save it
tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True)
```
Now, when you want to use this tokenizer, this is as simple as:
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json")
encoded = tokenizer.encode("I can feel the magic, can you?")
```
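The same `Tokenizer` object also takes care of the truncation and padding mentioned in the feature list. A short sketch (the length and padding values below are arbitrary examples, adjust them to your model):
```python
tokenizer.enable_truncation(max_length=128)
tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", length=128)
batch = tokenizer.encode_batch(["I can feel the magic, can you?", "A much shorter sentence"])
print([len(e.ids) for e in batch])  # every encoding is padded/truncated to 128
```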
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/rust-toolchain | stable
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/tokenizers_manual_m1_build.sh | #! /bin/bash
for VARIABLE in "3.7.12" "3.8.12" "3.9.10" "3.10.2"
do
MACOSX_DEPLOYMENT_TARGET=10.11 SDKROOT="/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk" CFLAGS="-I/usr/include/openssl -I/usr/local/opt/readline/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include" CPPFLAGS="-I/usr/local/opt/zlib/include" LDFLAGS="-L/usr/lib -L/usr/local/opt/readline/lib" pyenv install $VARIABLE
~/.pyenv/versions/$VARIABLE/bin/pip install setuptools wheel setuptools-rust==0.11.3 --ignore-installed --force-reinstall
MACOSX_DEPLOYMENT_TARGET=10.11 ~/.pyenv/versions/$VARIABLE/bin/python setup.py bdist_wheel
done
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/setup.py | from setuptools import setup
from setuptools_rust import Binding, RustExtension
extras = {}
extras["testing"] = ["pytest", "requests", "numpy", "datasets", "black==22.3"]
extras["docs"] = ["sphinx", "sphinx_rtd_theme", "setuptools_rust"]
extras["dev"] = extras["testing"]
setup(
name="tokenizers",
version="0.13.3",
description="Fast and Customizable Tokenizers",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP tokenizer BPE transformer deep learning",
author="Anthony MOI",
author_email="[email protected]",
url="https://github.com/huggingface/tokenizers",
license="Apache License 2.0",
rust_extensions=[RustExtension("tokenizers.tokenizers", binding=Binding.PyO3, debug=False)],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
package_dir={"": "py_src"},
packages=[
"tokenizers",
"tokenizers.models",
"tokenizers.decoders",
"tokenizers.normalizers",
"tokenizers.pre_tokenizers",
"tokenizers.processors",
"tokenizers.trainers",
"tokenizers.implementations",
"tokenizers.tools",
],
package_data={
"tokenizers": ["py.typed", "__init__.pyi"],
"tokenizers.models": ["py.typed", "__init__.pyi"],
"tokenizers.decoders": ["py.typed", "__init__.pyi"],
"tokenizers.normalizers": ["py.typed", "__init__.pyi"],
"tokenizers.pre_tokenizers": ["py.typed", "__init__.pyi"],
"tokenizers.processors": ["py.typed", "__init__.pyi"],
"tokenizers.trainers": ["py.typed", "__init__.pyi"],
"tokenizers.implementations": ["py.typed"],
"tokenizers.tools": ["py.typed", "visualizer-styles.css"],
},
zip_safe=False,
)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/pyproject.toml | [build-system]
requires = ["setuptools", "wheel", "setuptools-rust"]
build-backend = "setuptools.build_meta"
[tool.black]
target-version = ['py35']
line-length = 119
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/conftest.py | import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/setup.cfg | [isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = transformers
known_third_party =
absl
conllu
datasets
elasticsearch
fairseq
faiss-cpu
fastprogress
fire
fugashi
git
h5py
matplotlib
nltk
numpy
packaging
pandas
PIL
psutil
pytest
pytorch_lightning
rouge_score
sacrebleu
seqeval
sklearn
streamlit
tensorboardX
tensorflow
tensorflow_datasets
timeout_decorator
torch
torchaudio
torchtext
torchvision
torch_xla
tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119
[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/stub.py | import argparse
import inspect
import os
from pathlib import Path
import black
INDENT = " " * 4
GENERATED_COMMENT = "# Generated content DO NOT EDIT\n"
def do_indent(text: str, indent: str):
return text.replace("\n", f"\n{indent}")
def function(obj, indent, text_signature=None):
if text_signature is None:
text_signature = obj.__text_signature__
string = ""
string += f"{indent}def {obj.__name__}{text_signature}:\n"
indent += INDENT
string += f'{indent}"""\n'
string += f"{indent}{do_indent(obj.__doc__, indent)}\n"
string += f'{indent}"""\n'
string += f"{indent}pass\n"
string += "\n"
string += "\n"
return string
def member_sort(member):
if inspect.isclass(member):
value = 10 + len(inspect.getmro(member))
else:
value = 1
return value
def fn_predicate(obj):
value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
if value:
return obj.__doc__ and obj.__text_signature__ and not obj.__name__.startswith("_")
if inspect.isgetsetdescriptor(obj):
return obj.__doc__ and not obj.__name__.startswith("_")
return False
def get_module_members(module):
members = [
member
for name, member in inspect.getmembers(module)
if not name.startswith("_") and not inspect.ismodule(member)
]
members.sort(key=member_sort)
return members
def pyi_file(obj, indent=""):
string = ""
if inspect.ismodule(obj):
string += GENERATED_COMMENT
members = get_module_members(obj)
for member in members:
string += pyi_file(member, indent)
elif inspect.isclass(obj):
indent += INDENT
mro = inspect.getmro(obj)
if len(mro) > 2:
inherit = f"({mro[1].__name__})"
else:
inherit = ""
string += f"class {obj.__name__}{inherit}:\n"
body = ""
if obj.__doc__:
body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
fns = inspect.getmembers(obj, fn_predicate)
# Init
if obj.__text_signature__:
body += f"{indent}def __init__{obj.__text_signature__}:\n"
body += f"{indent+INDENT}pass\n"
body += "\n"
for (name, fn) in fns:
body += pyi_file(fn, indent=indent)
if not body:
body += f"{indent}pass\n"
string += body
string += "\n\n"
elif inspect.isbuiltin(obj):
string += f"{indent}@staticmethod\n"
string += function(obj, indent)
elif inspect.ismethoddescriptor(obj):
string += function(obj, indent)
elif inspect.isgetsetdescriptor(obj):
# TODO it would be interesting to add the setter maybe?
string += f"{indent}@property\n"
string += function(obj, indent, text_signature="(self)")
else:
raise Exception(f"Object {obj} is not supported")
return string
def py_file(module, origin):
members = get_module_members(module)
string = GENERATED_COMMENT
string += f"from .. import {origin}\n"
string += "\n"
for member in members:
name = member.__name__
string += f"{name} = {origin}.{name}\n"
return string
def do_black(content, is_pyi):
mode = black.Mode(
target_versions={black.TargetVersion.PY35},
line_length=119,
is_pyi=is_pyi,
string_normalization=True,
experimental_string_processing=False,
)
try:
return black.format_file_contents(content, fast=True, mode=mode)
except black.NothingChanged:
return content
def write(module, directory, origin, check=False):
submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)]
filename = os.path.join(directory, "__init__.pyi")
pyi_content = pyi_file(module)
pyi_content = do_black(pyi_content, is_pyi=True)
os.makedirs(directory, exist_ok=True)
if check:
with open(filename, "r") as f:
data = f.read()
assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(pyi_content)
filename = os.path.join(directory, "__init__.py")
py_content = py_file(module, origin)
py_content = do_black(py_content, is_pyi=False)
os.makedirs(directory, exist_ok=True)
is_auto = False
if not os.path.exists(filename):
is_auto = True
else:
with open(filename, "r") as f:
line = f.readline()
if line == GENERATED_COMMENT:
is_auto = True
if is_auto:
if check:
with open(filename, "r") as f:
data = f.read()
assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(py_content)
for name, submodule in submodules:
write(submodule, os.path.join(directory, name), f"{name}", check=check)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
import tokenizers
write(tokenizers.tokenizers, "py_src/tokenizers/", "tokenizers", check=args.check)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Cargo.toml | [package]
name = "tokenizers-python"
version = "0.13.3"
authors = ["Anthony MOI <[email protected]>"]
edition = "2021"
[lib]
name = "tokenizers"
crate-type = ["cdylib"]
[dependencies]
rayon = "1.3"
serde = { version = "1.0", features = [ "rc", "derive" ]}
serde_json = "1.0"
libc = "0.2"
env_logger = "0.7.1"
pyo3 = "0.18.1"
numpy = "0.18.0"
ndarray = "0.13"
onig = { version = "6.0", default-features = false }
itertools = "0.9"
[dependencies.tokenizers]
version = "*"
path = "../../tokenizers"
[dev-dependencies]
tempfile = "3.1"
pyo3 = { version = "0.18.1", features = ["auto-initialize"] }
[features]
default = ["pyo3/extension-module"]
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/test.txt | <DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/MANIFEST.in | include Cargo.toml
include pyproject.toml
include rust-toolchain
include ../../LICENSE
recursive-include src *
recursive-include tokenizers-lib *
recursive-exclude tokenizers-lib/target *
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/build-sdist.sh | #!/bin/bash
set -ex
# Create a symlink for tokenizers-lib
ln -sf ../../tokenizers tokenizers-lib
# Modify cargo.toml to include this symlink
sed -i 's/\.\.\/\.\.\/tokenizers/\.\/tokenizers-lib/' Cargo.toml
# Build the source distribution
python setup.py sdist
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/build-wheels.sh | #!/bin/bash
set -ex
if ! command -v cargo &> /dev/null
then
curl https://sh.rustup.rs -sSf | sh -s -- -y
fi
export PATH="$HOME/.cargo/bin:$PATH"
# https://users.rust-lang.org/t/cargo-uses-too-much-memory-being-run-in-qemu/76531
echo -e "[net]\ngit-fetch-with-cli = true" > "$HOME/.cargo/config"
for PYBIN in /opt/python/cp{37,38,39,310,311}*/bin; do
export PYTHON_SYS_EXECUTABLE="$PYBIN/python"
"${PYBIN}/pip" install -U setuptools-rust setuptools wheel
"${PYBIN}/python" setup.py bdist_wheel
rm -rf build/*
done
for whl in ./dist/*.whl; do
auditwheel repair "$whl" -w dist/
done
# Keep only manylinux wheels
rm ./dist/*-linux_*
# Upload wheels
/opt/python/cp37-cp37m/bin/pip install -U awscli
/opt/python/cp37-cp37m/bin/python -m awscli s3 sync --exact-timestamps ./dist "s3://tokenizers-releases/python/$DIST_DIR"
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Makefile | .PHONY: style check-style test
DATA_DIR = data
dir_guard=@mkdir -p $(@D)
check_dirs := examples py_src/tokenizers tests
# Format source code automatically
style:
python stub.py
black --line-length 119 --target-version py35 $(check_dirs)
# Check the source code is formatted correctly
check-style:
python stub.py --check
black --check --line-length 119 --target-version py35 examples py_src/tokenizers tests
TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json
# Launch the test suite
test: $(TESTS_RESOURCES)
pip install pytest requests setuptools_rust numpy pyarrow datasets
python -m pytest -s -v tests
cargo test --no-default-features
$(DATA_DIR)/big.txt :
$(dir_guard)
wget https://norvig.com/big.txt -O $@
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
head -100 $(DATA_DIR)/big.txt > $@
$(DATA_DIR)/roberta.json :
$(dir_guard)
wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/train_bert_wordpiece.py | import argparse
import glob
from tokenizers import BertWordPieceTokenizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
default=None,
metavar="path",
type=str,
required=True,
help="The files to use for training; accepts '**/*.txt'-style patterns \
if enclosed in quotes",
)
parser.add_argument(
"--out",
default="./",
type=str,
help="Path to the output directory, where the files will be saved",
)
parser.add_argument("--name", default="bert-wordpiece", type=str, help="The name of the output vocab files")
args = parser.parse_args()
files = glob.glob(args.files)
if not files:
print(f"No file found matching the pattern: {args.files}")
exit(1)
# Initialize an empty tokenizer
tokenizer = BertWordPieceTokenizer(
clean_text=True,
handle_chinese_chars=True,
strip_accents=True,
lowercase=True,
)
# And then train
tokenizer.train(
files,
vocab_size=10000,
min_frequency=2,
show_progress=True,
special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
limit_alphabet=1000,
wordpieces_prefix="##",
)
# Save the files
tokenizer.save_model(args.out, args.name)
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/using_the_visualizer.ipynb | from tokenizers import BertWordPieceTokenizer
from tokenizers.tools import EncodingVisualizer
EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa")
text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it."""
tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True)
visualizer = EncodingVisualizer(tokenizer=tokenizer)
visualizer(text)
from tokenizers.tools import Annotation
anno1 = Annotation(start=0, end=2, label="foo")
anno2 = Annotation(start=2, end=4, label="bar")
anno3 = Annotation(start=6, end=8, label="poo")
anno4 = Annotation(start=9, end=12, label="shoe")
annotations=[
anno1,
anno2,
anno3,
anno4,
Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"),
Annotation(start=63, end=70, label="foo"),
Annotation(start=80, end=95, label="bar"),
Annotation(start=120, end=128, label="bar"),
Annotation(start=152, end=155, label="poo"),
]
visualizer(text, annotations=annotations)
funnyAnnotations = [dict(startPlace=i, endPlace=i + 3, theTag=str(i)) for i in range(0, 20, 4)]
funnyAnnotations
converter = lambda funny: Annotation(start=funny['startPlace'], end=funny['endPlace'], label=funny['theTag'])
visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter)
visualizer(text, annotations=funnyAnnotations)
from tokenizers import ByteLevelBPETokenizer
roberta_tokenizer = ByteLevelBPETokenizer.from_file('/tmp/roberta-base-vocab.json', '/tmp/roberta-base-merges.txt')
roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True)
roberta_visualizer(text, annotations=annotations) | 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/custom_components.py | from typing import List
import jieba
from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import BPE
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
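# jieba is a Chinese word-segmentation library; below we wrap it as a custom PreTokenizer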
class JiebaPreTokenizer:
def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
splits = []
# we need to call `str(normalized_string)` because jieba expects a str,
# not a NormalizedString
for token, start, stop in jieba.tokenize(str(normalized_string)):
splits.append(normalized_string[start:stop])
return splits
# We can also easily do it in one line:
# return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))]
def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
# Just an odd example...
splits = []
last = 0
for i, char in enumerate(str(normalized_string)):
if char.isnumeric() and int(char) % 2 == 1:
splits.append(normalized_string[last:i])
last = i
# Don't forget the last one
splits.append(normalized_string[last:])
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
# Let's call split on the PreTokenizedString to split using `self.jieba_split`
pretok.split(self.jieba_split)
# Here we can call `pretok.split` multiple times if we want to apply
# different algorithms, but we generally just need to call it once.
pretok.split(self.odd_number_split)
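# A custom Decoder only needs a `decode` method that receives the list of tokens and returns the final string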
class CustomDecoder:
def decode(self, tokens: List[str]) -> str:
return "".join(tokens)
class CustomNormalizer:
def normalize(self, normalized: NormalizedString):
# Most of these can be replaced by a `Sequence` combining some provided Normalizers,
# (i.e. Sequence([NFKC(), Replace(Regex(r"\s+"), " "), Lowercase()]))
# and that should be the preferred way. That being said, here is an example of the kind
# of things that can be done here:
normalized.nfkc()
normalized.filter(lambda char: not char.isnumeric())
normalized.replace(Regex(r"\s+"), " ")
normalized.lowercase()
# This section shows how to attach these custom components to the Tokenizer
tok = Tokenizer(BPE())
tok.normalizer = Normalizer.custom(CustomNormalizer())
tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer())
tok.decoder = Decoder.custom(CustomDecoder())
input = "永和服装饰品有限公司"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('永和', (0, 2)), ('服装', (2, 4)), ('饰品', (4, 6)), ('有限公司', (6, 10))]
input = "112233"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))]
input = "1234 ℌ𝔢𝔩𝔩𝔬 𝔱𝔥𝔢𝔯𝔢 𝓂𝓎 𝒹ℯ𝒶𝓇 𝕕𝕖𝕒𝕣 𝕗𝕣𝕚𝕖𝕟𝕕!"
print("Normalize:", input)
print(tok.normalizer.normalize_str(input))
# " hello there my dear dear friend!"
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/example.py | import argparse
import logging
import time
from tqdm import tqdm
logging.getLogger("transformers").disabled = True
logging.getLogger("transformers.tokenization_utils").disabled = True
from tokenizers import Tokenizer, decoders, pre_tokenizers
from tokenizers.models import BPE, WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.processors import BertProcessing
from transformers import BertTokenizer, GPT2Tokenizer
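# Benchmark script: compares the Rust-backed tokenizers library with the Python tokenizers
# from transformers, both for speed and for parity of the produced ids/decodings.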
parser = argparse.ArgumentParser()
parser.add_argument("--type", default="gpt2", type=str, help="The type of tokenizer (bert|gpt2)")
parser.add_argument("--file", default=None, type=str, help="The file to encode")
parser.add_argument("--vocab", default=None, type=str, required=True, help="The vocab file")
parser.add_argument("--merges", default=None, type=str, help="The merges.txt file")
parser.add_argument("--debug", action="store_true", help="Verbose output")
args = parser.parse_args()
if args.type == "gpt2" and args.merges is None:
raise Exception("Expected merges.txt file")
if args.file is not None:
with open(args.file, "r") as fp:
text = [line.strip() for line in fp]
else:
text = """
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
""".split(
"\n"
)
if args.type == "gpt2":
print("Running GPT-2 tokenizer")
tok_p = GPT2Tokenizer.from_pretrained("gpt2")
# Create a Tokenizer using BPE
tok_r = Tokenizer(BPE(args.vocab, args.merges))
# Use ByteLevel PreTokenizer
tok_r.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
# Use ByteLevel Decoder
tok_r.decoder = decoders.ByteLevel()
elif args.type == "bert":
print("Running Bert tokenizer")
tok_p = BertTokenizer.from_pretrained(args.vocab)
tok_r = Tokenizer(WordPiece(args.vocab, unk_token="[UNK]", max_input_chars_per_word=100))
tok_r.normalizer = BertNormalizer(
clean_text=True,
handle_chinese_chars=True,
strip_accents=True,
lowercase=True,
)
# tok_r.pre_tokenizer = pre_tokenizers.Whitespace()
tok_r.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tok_r.decoder = decoders.WordPiece()
tok_r.post_processor = BertProcessing(
("[SEP]", tok_r.token_to_id("[SEP]")),
("[CLS]", tok_r.token_to_id("[CLS]")),
)
else:
raise Exception(f"Unknown type {args.type}")
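# Encode the whole corpus with each tokenizer so we can compare speed and outputs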
def tokenize_r():
return tok_r.encode_batch(text)
def tokenize_p():
return [tok_p.encode(sentence, add_special_tokens=True) for sentence in tqdm(text)]
print(f"Tokenizing {len(text)} lines")
# Rust version
start = time.time()
encoded_r = tokenize_r()
end = time.time()
time_r = end - start
print(f"Rust tokenizer took: {time_r} sec")
# Python version
start = time.time()
encoded_p = tokenize_p()
end = time.time()
time_p = end - start
print(f"Transformer tokenizer took: {time_p} sec")
print(f"SpeedUp Ratio: {time_p / time_r}")
ids_r = [sentence.ids for sentence in encoded_r]
diff_ids = 0
for i in range(0, len(encoded_r)):
if encoded_r[i].ids != encoded_p[i]:
diff_ids += 1
if args.debug:
print(encoded_r[i].ids)
print(encoded_p[i])
print(encoded_r[i].tokens)
print(tok_p.tokenize(text[i]))
print(text[i])
print("")
print(f"Ids differences: {diff_ids}")
decoded_r = tok_r.decode_batch([sentence.ids for sentence in encoded_r], False)
decoded_p = [tok_p.decode(en) for en in encoded_p]
diff_decoded = 0
for i in range(0, len(text)):
if decoded_r[i] != decoded_p[i]:
diff_decoded += 1
if args.debug:
print(f"Original: {text[i]}")
print(f"Rust: {decoded_r[i]}")
print(f"Python: {decoded_p[i]}")
print("")
print(f"Decoding differences: {diff_decoded}")
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/train_with_datasets.py | import datasets
from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, trainers
# Build a tokenizer
bpe_tokenizer = Tokenizer(models.BPE())
bpe_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
bpe_tokenizer.normalizer = normalizers.Lowercase()
# Initialize a dataset
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train")
# Build an iterator over this dataset
def batch_iterator():
batch_size = 1000
for batch in dataset.iter(batch_size=batch_size):
yield batch["text"]
# And finally train
bpe_tokenizer.train_from_iterator(batch_iterator(), length=len(dataset))
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/train_bytelevel_bpe.py | import argparse
import glob
from os.path import join
from tokenizers import ByteLevelBPETokenizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
default=None,
metavar="path",
type=str,
required=True,
help="The files to use for training; accepts '**/*.txt'-style patterns \
if enclosed in quotes",
)
parser.add_argument(
"--out",
default="./",
type=str,
help="Path to the output directory, where the files will be saved",
)
parser.add_argument("--name", default="bpe-bytelevel", type=str, help="The name of the output vocab files")
args = parser.parse_args()
files = glob.glob(args.files)
if not files:
print(f"No file found matching the pattern: {args.files}")
exit(1)
# Initialize an empty tokenizer
tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
# And then train
tokenizer.train(
files,
vocab_size=10000,
min_frequency=2,
show_progress=True,
special_tokens=["<s>", "<pad>", "</s>"],
)
# Save the files
tokenizer.save_model(args.out, args.name)
# Restoring model from learned vocab/merges
tokenizer = ByteLevelBPETokenizer(
join(args.out, "{}-vocab.json".format(args.name)),
join(args.out, "{}-merges.txt".format(args.name)),
add_prefix_space=True,
)
# Test encoding
print(tokenizer.encode("Training ByteLevel BPE is very easy").tokens)
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/tests/test_serialization.py | import json
import os
import unittest
import tqdm
from huggingface_hub import HfApi, cached_download, hf_hub_url
from tokenizers import Tokenizer
from .utils import albert_base, data_dir
class TestSerialization:
def test_full_serialization_albert(self, albert_base):
# Check we can read this file.
# This used to fail because the BufReader would fail when the
# file exceeds the buffer capacity
tokenizer = Tokenizer.from_file(albert_base)
def check(tokenizer_file) -> bool:
with open(tokenizer_file, "r") as f:
data = json.load(f)
if "pre_tokenizer" not in data:
return True
if "type" not in data["pre_tokenizer"]:
return False
if data["pre_tokenizer"]["type"] == "Sequence":
for pre_tok in data["pre_tokenizer"]["pretokenizers"]:
if "type" not in pre_tok:
return False
return True
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to 1 to run them.
"""
if os.getenv("RUN_SLOW") != "1":
return unittest.skip("use `RUN_SLOW=1` to run")(test_case)
else:
return test_case
@slow
class TestFullDeserialization(unittest.TestCase):
def test_full_deserialization_hub(self):
# Check we can read this file.
# This used to fail because the BufReader would fail when the
# file exceeds the buffer capacity
api = HfApi()
not_loadable = []
invalid_pre_tokenizer = []
# models = api.list_models(filter="transformers")
# for model in tqdm.tqdm(models):
# model_id = model.modelId
# for model_file in model.siblings:
# filename = model_file.rfilename
# if filename == "tokenizer.json":
# all_models.append((model_id, filename))
all_models = [("HueyNemud/das22-10-camembert_pretrained", "tokenizer.json")]
for model_id, filename in tqdm.tqdm(all_models):
tokenizer_file = cached_download(hf_hub_url(model_id, filename=filename))
is_ok = check(tokenizer_file)
if not is_ok:
print(f"{model_id} is affected by no type")
invalid_pre_tokenizer.append(model_id)
try:
Tokenizer.from_file(tokenizer_file)
except Exception as e:
print(f"{model_id} is not loadable: {e}")
not_loadable.append(model_id)
except:
print(f"{model_id} is not loadable: Rust error")
not_loadable.append(model_id)
self.assertEqual(invalid_pre_tokenizer, [])
self.assertEqual(not_loadable, [])
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/tests/utils.py | import multiprocessing as mp
import os
import pytest
import requests
DATA_PATH = os.path.join("tests", "data")
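# Download `url` into the tests data directory (unless it is already cached) and return the local path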
def download(url, with_filename=None):
filename = with_filename if with_filename is not None else url.rsplit("/")[-1]
filepath = os.path.join(DATA_PATH, filename)
if not os.path.exists(filepath):
with open(filepath, "wb") as f:
response = requests.get(url, stream=True)
response.raise_for_status()
for chunk in response.iter_content(1024):
f.write(chunk)
return filepath
@pytest.fixture(scope="session")
def data_dir():
assert os.getcwd().endswith("python")
exist = os.path.exists(DATA_PATH) and os.path.isdir(DATA_PATH)
if not exist:
os.mkdir(DATA_PATH)
@pytest.fixture(scope="session")
def roberta_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json"),
"merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt"),
}
@pytest.fixture(scope="session")
def bert_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"),
}
@pytest.fixture(scope="session")
def openai_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json"),
"merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt"),
}
@pytest.fixture(scope="session")
def train_files(data_dir):
big = download("https://norvig.com/big.txt")
small = os.path.join(DATA_PATH, "small.txt")
with open(small, "w") as f:
with open(big, "r") as g:
for i, line in enumerate(g):
f.write(line)
if i > 100:
break
return {
"small": small,
"big": big,
}
@pytest.fixture(scope="session")
def albert_base(data_dir):
return download("https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json")
@pytest.fixture(scope="session")
def doc_wiki_tokenizer(data_dir):
return download(
"https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json",
"tokenizer-wiki.json",
)
@pytest.fixture(scope="session")
def doc_pipeline_bert_tokenizer(data_dir):
return download(
"https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json",
"bert-wiki.json",
)
# On macOS, Python 3.8+ changed the default start method to `spawn`; we need `fork` in these tests.
mp.set_start_method("fork")
def multiprocessing_with_parallelism(tokenizer, enabled: bool):
"""
This helper can be used to test that disabling parallelism avoids dead locks when the
same tokenizer is used after forking.
"""
# It's essential to this test that we call 'encode' or 'encode_batch'
# before the fork. This causes the main process to "lock" some resources
# provided by the Rust "rayon" crate that are needed for parallel processing.
tokenizer.encode("Hi")
tokenizer.encode_batch(["hi", "there"])
def encode(tokenizer):
tokenizer.encode("Hi")
tokenizer.encode_batch(["hi", "there"])
# Make sure this environment variable is set before the fork happens
os.environ["TOKENIZERS_PARALLELISM"] = str(enabled)
p = mp.Process(target=encode, args=(tokenizer,))
p.start()
p.join(timeout=1)
# At this point the process should have successfully exited, depending on whether parallelism
# was activated or not. So we check the status and kill it if needed
alive = p.is_alive()
if alive:
p.terminate()
assert (alive and mp.get_start_method() == "fork") == enabled
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_char_bpe.py | import pytest
from tokenizers import CharBPETokenizer
from ..utils import data_dir, multiprocessing_with_parallelism, openai_files
class TestCharBPETokenizer:
def test_basic_encode(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
output = tokenizer.encode("My name is John", "pair")
assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
assert output.tokens == [
"<unk>",
"y</w>",
"name</w>",
"is</w>",
"<unk>",
"o",
"hn</w>",
"pair</w>",
]
assert output.offsets == [
(0, 1),
(1, 2),
(3, 7),
(8, 10),
(11, 12),
(12, 13),
(13, 15),
(0, 4),
]
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]
def test_lowercase(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
assert output.ids == [547, 1362, 544, 2476, 2688]
assert output.tokens == ["my</w>", "name</w>", "is</w>", "john</w>", "pair</w>"]
assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
assert output.type_ids == [0, 0, 0, 0, 1]
def test_decoding(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
assert decoded == "my name is john"
def test_multiprocessing_with_parallelism(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = CharBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["A</w>", "sentence</w>"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py | import pytest
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors
from tokenizers.implementations import BaseTokenizer
class TestBaseTokenizer:
def test_get_set_components(self):
toki = Tokenizer(models.BPE())
toki.normalizer = normalizers.NFC()
toki.pre_tokenizer = pre_tokenizers.ByteLevel()
toki.post_processor = processors.BertProcessing(("A", 0), ("B", 1))
toki.decoder = decoders.ByteLevel()
tokenizer = BaseTokenizer(toki)
assert isinstance(tokenizer.model, models.BPE)
assert isinstance(tokenizer.normalizer, normalizers.NFC)
assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.ByteLevel)
assert isinstance(tokenizer.post_processor, processors.BertProcessing)
assert isinstance(tokenizer.decoder, decoders.ByteLevel)
tokenizer.model = models.Unigram()
assert isinstance(tokenizer.model, models.Unigram)
tokenizer.normalizer = normalizers.NFD()
assert isinstance(tokenizer.normalizer, normalizers.NFD)
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.Whitespace)
tokenizer.post_processor = processors.ByteLevel()
assert isinstance(tokenizer.post_processor, processors.ByteLevel)
tokenizer.decoder = decoders.WordPiece()
assert isinstance(tokenizer.decoder, decoders.WordPiece)
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_sentencepiece.py | import os
import pytest
from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer
class TestSentencePieceBPE:
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["▁A", "▁sentence"]
class TestSentencePieceUnigram:
def test_train(self, tmpdir):
p = tmpdir.mkdir("tmpdir").join("file.txt")
p.write("A first sentence\nAnother sentence\nAnd a last one")
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(files=str(p), show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]
with pytest.raises(Exception) as excinfo:
_ = tokenizer.encode("A sentence 🤗")
assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"
def test_train_with_unk_token(self, tmpdir):
p = tmpdir.mkdir("tmpdir").join("file.txt")
p.write("A first sentence\nAnother sentence\nAnd a last one")
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(files=str(p), show_progress=False, special_tokens=["<unk>"], unk_token="<unk>")
output = tokenizer.encode("A sentence 🤗")
assert output.ids[-1] == 0
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]
with pytest.raises(Exception) as excinfo:
_ = tokenizer.encode("A sentence 🤗")
assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"
def test_train_from_iterator_with_unk_token(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(
text, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>"
)
output = tokenizer.encode("A sentence 🤗")
assert output.ids[-1] == 0
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py | import pytest
from tokenizers import ByteLevelBPETokenizer
from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files
class TestByteLevelBPE:
def test_basic_encode(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"The",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
assert output.offsets == [
(0, 3),
(3, 9),
(9, 15),
(15, 19),
(19, 25),
(25, 30),
(30, 34),
(34, 39),
(39, 43),
]
def test_add_prefix_space(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
)
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
assert output.ids == [20, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"ĠThe",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
assert output.offsets == [
(0, 3),
(3, 9),
(9, 15),
(15, 19),
(19, 25),
(25, 30),
(30, 34),
(34, 39),
(39, 43),
]
def test_lowercase(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"],
roberta_files["merges"],
add_prefix_space=True,
lowercase=True,
)
output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
assert output.ids == [5, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"Ġthe",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
def test_multiprocessing_with_parallelism(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["A", "Ġsentence"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_bert_wordpiece.py | import pytest
from tokenizers import BertWordPieceTokenizer
from ..utils import bert_files, data_dir, multiprocessing_with_parallelism
class TestBertWordPieceTokenizer:
def test_basic_encode(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
# Encode with special tokens by default
output = tokenizer.encode("My name is John", "pair")
assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
assert output.tokens == [
"[CLS]",
"my",
"name",
"is",
"john",
"[SEP]",
"pair",
"[SEP]",
]
assert output.offsets == [
(0, 0),
(0, 2),
(3, 7),
(8, 10),
(11, 15),
(0, 0),
(0, 4),
(0, 0),
]
assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
# Can encode without the special tokens
output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
assert output.ids == [2026, 2171, 2003, 2198, 3940]
assert output.tokens == ["my", "name", "is", "john", "pair"]
assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
assert output.type_ids == [0, 0, 0, 0, 1]
def test_multiprocessing_with_parallelism(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = BertWordPieceTokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["a", "sentence"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py | import gzip
import os
import datasets
import pytest
from ..utils import data_dir, train_files
class TestTrainFromIterators:
@staticmethod
def get_tokenizer_trainer():
# START init_tokenizer_trainer
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers
tokenizer = Tokenizer(models.Unigram())
tokenizer.normalizer = normalizers.NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()
trainer = trainers.UnigramTrainer(
vocab_size=20000,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
special_tokens=["<PAD>", "<BOS>", "<EOS>"],
)
# END init_tokenizer_trainer
trainer.show_progress = False
return tokenizer, trainer
@staticmethod
def load_dummy_dataset():
# START load_dataset
import datasets
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation")
# END load_dataset
@pytest.fixture(scope="class")
def setup_gzip_files(self, train_files):
with open(train_files["small"], "rt") as small:
for n in range(3):
path = f"data/my-file.{n}.gz"
with gzip.open(path, "wt") as f:
f.write(small.read())
def test_train_basic(self):
tokenizer, trainer = self.get_tokenizer_trainer()
# START train_basic
# First few lines of the "Zen of Python" https://www.python.org/dev/peps/pep-0020/
data = [
"Beautiful is better than ugly."
"Explicit is better than implicit."
"Simple is better than complex."
"Complex is better than complicated."
"Flat is better than nested."
"Sparse is better than dense."
"Readability counts."
]
tokenizer.train_from_iterator(data, trainer=trainer)
# END train_basic
def test_datasets(self):
tokenizer, trainer = self.get_tokenizer_trainer()
# In order to keep tests fast, we only use the first 100 examples
os.environ["TOKENIZERS_PARALLELISM"] = "true"
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train[0:100]")
# START def_batch_iterator
def batch_iterator(batch_size=1000):
for i in range(0, len(dataset), batch_size):
yield dataset[i : i + batch_size]["text"]
# END def_batch_iterator
# START train_datasets
tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))
# END train_datasets
def test_gzip(self, setup_gzip_files):
tokenizer, trainer = self.get_tokenizer_trainer()
# START single_gzip
import gzip
with gzip.open("data/my-file.0.gz", "rt") as f:
tokenizer.train_from_iterator(f, trainer=trainer)
# END single_gzip
# START multi_gzip
files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"]
def gzip_iterator():
for path in files:
with gzip.open(path, "rt") as f:
for line in f:
yield line
tokenizer.train_from_iterator(gzip_iterator(), trainer=trainer)
# END multi_gzip
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/documentation/test_quicktour.py | from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer
from ..utils import data_dir, doc_wiki_tokenizer
disable_printing = True
original_print = print
def print(*args, **kwargs):
if not disable_printing:
original_print(*args, **kwargs)
class TestQuicktour:
# This method contains everything we don't want to run
@staticmethod
def slow_train():
tokenizer, trainer = TestQuicktour.get_tokenizer_trainer()
# START train
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
# END train
# START save
tokenizer.save("data/tokenizer-wiki.json")
# END save
@staticmethod
def get_tokenizer_trainer():
# START init_tokenizer
from tokenizers import Tokenizer
from tokenizers.models import BPE
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
# END init_tokenizer
# START init_trainer
from tokenizers.trainers import BpeTrainer
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
# END init_trainer
# START init_pretok
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
# END init_pretok
return tokenizer, trainer
def test_quicktour(self, doc_wiki_tokenizer):
def print(*args, **kwargs):
pass
try:
# START reload_tokenizer
tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
# END reload_tokenizer
except Exception:
tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)
# START encode
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
# END encode
# START print_tokens
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
# END print_tokens
assert output.tokens == [
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
]
# START print_ids
print(output.ids)
# [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# END print_ids
assert output.ids == [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# START print_offsets
print(output.offsets[9])
# (26, 27)
# END print_offsets
assert output.offsets[9] == (26, 27)
# START use_offsets
sentence = "Hello, y'all! How are you 😁 ?"
sentence[26:27]
# "😁"
# END use_offsets
assert sentence[26:27] == "😁"
# START check_sep
tokenizer.token_to_id("[SEP]")
# 2
# END check_sep
assert tokenizer.token_to_id("[SEP]") == 2
# START init_template_processing
from tokenizers.processors import TemplateProcessing
tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[
("[CLS]", tokenizer.token_to_id("[CLS]")),
("[SEP]", tokenizer.token_to_id("[SEP]")),
],
)
# END init_template_processing
# START print_special_tokens
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_special_tokens_pair
output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens_pair
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"[SEP]",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_type_ids
print(output.type_ids)
# [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# END print_type_ids
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# START encode_batch
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
# END encode_batch
# START encode_batch_pair
output = tokenizer.encode_batch(
[["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
)
# END encode_batch_pair
# START enable_padding
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
# END enable_padding
# START print_batch_tokens
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print(output[1].tokens)
# ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# END print_batch_tokens
assert output[1].tokens == ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# START print_attention_mask
print(output[1].attention_mask)
# [1, 1, 1, 1, 1, 1, 1, 0]
# END print_attention_mask
assert output[1].attention_mask == [1, 1, 1, 1, 1, 1, 1, 0]
if __name__ == "__main__":
import os
from urllib import request
from zipfile import ZipFile
disable_printing = False
if not os.path.isdir("data/wikitext-103-raw"):
print("Downloading wikitext-103...")
wiki_text, _ = request.urlretrieve(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
)
with ZipFile(wiki_text, "r") as z:
print("Unzipping in data...")
z.extractall("data")
print("Now training...")
TestQuicktour.slow_train()
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/documentation/test_pipeline.py | from tokenizers import Tokenizer
from ..utils import data_dir, doc_pipeline_bert_tokenizer, doc_wiki_tokenizer
disable_printing = True
original_print = print
def print(*args, **kwargs):
if not disable_printing:
original_print(*args, **kwargs)
class TestPipeline:
def test_pipeline(self, doc_wiki_tokenizer):
try:
# START reload_tokenizer
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
# END reload_tokenizer
except Exception:
tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)
# START setup_normalizer
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents
normalizer = normalizers.Sequence([NFD(), StripAccents()])
# END setup_normalizer
# START test_normalizer
normalizer.normalize_str("Héllò hôw are ü?")
# "Hello how are u?"
# END test_normalizer
assert normalizer.normalize_str("Héllò hôw are ü?") == "Hello how are u?"
# START replace_normalizer
tokenizer.normalizer = normalizer
# END replace_normalizer
# START setup_pre_tokenizer
from tokenizers.pre_tokenizers import Whitespace
pre_tokenizer = Whitespace()
pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.")
# [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)),
# ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ('m', (22, 23)), ("fine", (24, 28)),
# (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40))]
# END setup_pre_tokenizer
assert pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") == [
("Hello", (0, 5)),
("!", (5, 6)),
("How", (7, 10)),
("are", (11, 14)),
("you", (15, 18)),
("?", (18, 19)),
("I", (20, 21)),
("'", (21, 22)),
("m", (22, 23)),
("fine", (24, 28)),
(",", (28, 29)),
("thank", (30, 35)),
("you", (36, 39)),
(".", (39, 40)),
]
# START combine_pre_tokenizer
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Digits
pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
pre_tokenizer.pre_tokenize_str("Call 911!")
# [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))]
# END combine_pre_tokenizer
assert pre_tokenizer.pre_tokenize_str("Call 911!") == [
("Call", (0, 4)),
("9", (5, 6)),
("1", (6, 7)),
("1", (7, 8)),
("!", (8, 9)),
]
# START replace_pre_tokenizer
tokenizer.pre_tokenizer = pre_tokenizer
# END replace_pre_tokenizer
# START setup_processor
from tokenizers.processors import TemplateProcessing
tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
# END setup_processor
# START test_decoding
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.ids)
# [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2])
# "Hello , y ' all ! How are you ?"
# END test_decoding
assert output.ids == [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
assert (
tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2])
== "Hello , y ' all ! How are you ?"
)
@staticmethod
def slow_train():
# START bert_setup_tokenizer
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
# END bert_setup_tokenizer
# START bert_setup_normalizer
from tokenizers import normalizers
from tokenizers.normalizers import NFD, Lowercase, StripAccents
bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
# END bert_setup_normalizer
# START bert_setup_pre_tokenizer
from tokenizers.pre_tokenizers import Whitespace
bert_tokenizer.pre_tokenizer = Whitespace()
# END bert_setup_pre_tokenizer
# START bert_setup_processor
from tokenizers.processors import TemplateProcessing
bert_tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[
("[CLS]", 1),
("[SEP]", 2),
],
)
# END bert_setup_processor
# START bert_train_tokenizer
from tokenizers.trainers import WordPieceTrainer
trainer = WordPieceTrainer(vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
bert_tokenizer.train(files, trainer)
bert_tokenizer.save("data/bert-wiki.json")
# END bert_train_tokenizer
def test_bert_example(self, doc_pipeline_bert_tokenizer):
try:
bert_tokenizer = Tokenizer.from_file("data/bert-wiki.json")
except Exception:
bert_tokenizer = Tokenizer.from_file(doc_pipeline_bert_tokenizer)
# START bert_test_decoding
output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.")
print(output.tokens)
# ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]
bert_tokenizer.decode(output.ids)
# "welcome to the tok ##eni ##zer ##s library ."
# END bert_test_decoding
assert bert_tokenizer.decode(output.ids) == "welcome to the tok ##eni ##zer ##s library ."
# START bert_proper_decoding
from tokenizers import decoders
bert_tokenizer.decoder = decoders.WordPiece()
bert_tokenizer.decode(output.ids)
# "welcome to the tokenizers library."
# END bert_proper_decoding
assert bert_tokenizer.decode(output.ids) == "welcome to the tokenizers library."
if __name__ == "__main__":
import os
from urllib import request
from zipfile import ZipFile
disable_printing = False
if not os.path.isdir("data/wikitext-103-raw"):
print("Downloading wikitext-103...")
wiki_text, _ = request.urlretrieve(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
)
with ZipFile(wiki_text, "r") as z:
print("Unzipping in data...")
z.extractall("data")
print("Now training...")
TestPipeline.slow_train()
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_trainers.py | import copy
import os
import pickle
import pytest
from tokenizers import (
AddedToken,
SentencePieceUnigramTokenizer,
Tokenizer,
models,
normalizers,
pre_tokenizers,
trainers,
)
from ..utils import data_dir, train_files
class TestBpeTrainer:
def test_can_modify(self):
trainer = trainers.BpeTrainer(
vocab_size=12345,
min_frequency=12,
show_progress=False,
special_tokens=["1", "2"],
limit_alphabet=13,
initial_alphabet=["a", "b", "c"],
continuing_subword_prefix="pref",
end_of_word_suffix="suf",
)
assert trainer.vocab_size == 12345
assert trainer.min_frequency == 12
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1"),
AddedToken("2"),
]
assert trainer.limit_alphabet == 13
assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
assert trainer.continuing_subword_prefix == "pref"
assert trainer.end_of_word_suffix == "suf"
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.min_frequency = 1
assert trainer.min_frequency == 1
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
trainer.limit_alphabet = None
assert trainer.limit_alphabet == None
trainer.initial_alphabet = ["d", "z"]
assert sorted(trainer.initial_alphabet) == ["d", "z"]
trainer.continuing_subword_prefix = None
assert trainer.continuing_subword_prefix == None
trainer.end_of_word_suffix = None
assert trainer.end_of_word_suffix == None
def test_can_pickle(self):
assert (
trainers.BpeTrainer(min_frequency=12).__getstate__()
== b"""{"BpeTrainer":{"min_frequency":12,"vocab_size":30000,"show_progress":true,"special_tokens":[],"limit_alphabet":null,"initial_alphabet":[],"continuing_subword_prefix":null,"end_of_word_suffix":null,"max_token_length":null,"words":{}}}"""
)
assert isinstance(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12))), trainers.BpeTrainer)
assert isinstance(copy.deepcopy(trainers.BpeTrainer(min_frequency=12)), trainers.BpeTrainer)
# Make sure everything is correct
assert pickle.dumps(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12)))) == pickle.dumps(
trainers.BpeTrainer(min_frequency=12)
)
class TestWordPieceTrainer:
def test_can_modify(self):
trainer = trainers.WordPieceTrainer(
vocab_size=12345,
min_frequency=12,
show_progress=False,
special_tokens=["1", "2"],
limit_alphabet=13,
initial_alphabet=["a", "b", "c"],
continuing_subword_prefix="pref",
end_of_word_suffix="suf",
)
assert trainer.vocab_size == 12345
assert trainer.min_frequency == 12
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1"),
AddedToken("2"),
]
assert trainer.limit_alphabet == 13
assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
assert trainer.continuing_subword_prefix == "pref"
assert trainer.end_of_word_suffix == "suf"
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.min_frequency = 1
assert trainer.min_frequency == 1
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
trainer.limit_alphabet = None
assert trainer.limit_alphabet == None
trainer.initial_alphabet = ["d", "z"]
assert sorted(trainer.initial_alphabet) == ["d", "z"]
trainer.continuing_subword_prefix = None
assert trainer.continuing_subword_prefix == None
trainer.end_of_word_suffix = None
assert trainer.end_of_word_suffix == None
def test_can_pickle(self):
assert isinstance(pickle.loads(pickle.dumps(trainers.WordPieceTrainer())), trainers.WordPieceTrainer)
class TestWordLevelTrainer:
def test_can_modify(self):
trainer = trainers.WordLevelTrainer(
vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"]
)
assert trainer.vocab_size == 12345
assert trainer.min_frequency == 12
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1"),
AddedToken("2"),
]
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.min_frequency = 1
assert trainer.min_frequency == 1
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
def test_can_pickle(self):
assert isinstance(pickle.loads(pickle.dumps(trainers.WordLevelTrainer())), trainers.WordLevelTrainer)
class TestUnigram:
def test_train(self, train_files):
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(train_files["small"], show_progress=False)
filename = "tests/data/unigram_trained.json"
tokenizer.save(filename)
os.remove(filename)
def test_train_parallelism_with_custom_pretokenizer(self, train_files):
class GoodCustomPretok:
def split(self, n, normalized):
# Here we just test that we can return a List[NormalizedString]; otherwise it
# would not really make sense to return the same string twice
return [normalized, normalized]
def pre_tokenize(self, pretok):
pretok.split(self.split)
custom = pre_tokenizers.PreTokenizer.custom(GoodCustomPretok())
bpe_tokenizer = Tokenizer(models.BPE())
bpe_tokenizer.normalizer = normalizers.Lowercase()
bpe_tokenizer.pre_tokenizer = custom
if "TOKENIZERS_PARALLELISM" in os.environ:
del os.environ["TOKENIZERS_PARALLELISM"]
trainer = trainers.BpeTrainer(special_tokens=["<unk>"], show_progress=False)
bpe_tokenizer.train([train_files["small"]], trainer=trainer)
def test_can_pickle(self):
assert isinstance(pickle.loads(pickle.dumps(trainers.UnigramTrainer())), trainers.UnigramTrainer)
def test_train_with_special_tokens(self):
filename = "tests/data/dummy-unigram-special_tokens-train.txt"
with open(filename, "w") as f:
f.write(
"""
[CLS] The Zen of Python, by Tim Peters [SEP]
[CLS] Beautiful is better than ugly. [SEP]
[CLS] Explicit is better than implicit. [SEP]
[CLS] Simple is better than complex. [SEP]
[CLS] Complex is better than complicated. [SEP]
[CLS] Flat is better than nested. [SEP]
[CLS] Sparse is better than dense. [SEP]
[CLS] Readability counts. [SEP]
[CLS] Special cases aren't special enough to break the rules. [SEP]
[CLS] Although practicality beats purity. [SEP]
[CLS] Errors should never pass silently. [SEP]
[CLS] Unless explicitly silenced. [SEP]
[CLS] In the face of ambiguity, refuse the temptation to guess. [SEP]
[CLS] There should be one-- and preferably only one --obvious way to do it. [SEP]
[CLS] Although that way may not be obvious at first unless you're Dutch. [SEP]
[CLS] Now is better than never. [SEP]
[CLS] Although never is often better than *right* now. [SEP]
[CLS] If the implementation is hard to explain, it's a bad idea. [SEP]
[CLS] If the implementation is easy to explain, it may be a good idea. [SEP]
[CLS] Namespaces are one honking great idea -- let's do more of those! [SEP]
"""
)
tokenizer = Tokenizer(models.Unigram())
trainer = trainers.UnigramTrainer(
show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]"
)
tokenizer.train([filename], trainer=trainer)
assert tokenizer.encode("[CLS] This is a test [SEP]").tokens == [
"[CLS]",
" T",
"h",
"i",
"s",
" is ",
"a",
" ",
"te",
"s",
"t ",
"[SEP]",
]
tokenizer = Tokenizer(models.Unigram())
trainer = trainers.UnigramTrainer(
show_progress=False,
special_tokens=["[PAD]", "[SEP]", "[CLS]"],
unk_token="[UNK]",
vocab_size=100,
)
tokenizer.train([filename], trainer=trainer)
assert tokenizer.get_vocab_size() == 100
tokenizer = Tokenizer(models.Unigram())
trainer = trainers.UnigramTrainer(
show_progress=False,
special_tokens=["[PAD]", "[SEP]", "[CLS]", "[UNK]"],
unk_token="[UNK]",
vocab_size=100,
)
tokenizer.train([filename], trainer=trainer)
assert tokenizer.get_vocab_size() == 100
def test_cannot_train_different_model(self):
tokenizer = Tokenizer(models.BPE())
trainer = trainers.UnigramTrainer(show_progress=False)
with pytest.raises(Exception, match="UnigramTrainer can only train a Unigram"):
tokenizer.train([], trainer)
def test_can_modify(self):
trainer = trainers.UnigramTrainer(
vocab_size=12345,
show_progress=False,
special_tokens=["1", AddedToken("2", lstrip=True)],
initial_alphabet=["a", "b", "c"],
)
assert trainer.vocab_size == 12345
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1", normalized=False),
AddedToken("2", lstrip=True, normalized=False),
]
assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
trainer.initial_alphabet = ["d", "z"]
assert sorted(trainer.initial_alphabet) == ["d", "z"]
def test_continuing_prefix_trainer_mismatch(self):
UNK = "[UNK]"
special_tokens = [UNK]
tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##"))
trainer = trainers.BpeTrainer(special_tokens=special_tokens)
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)]
)
tokenizer.train(files=["data/big.txt"], trainer=trainer)
tokenizer.save("data/tokenizer.json")
tokenizer.from_file("data/tokenizer.json")
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_tokenizer.py | import pickle
import numpy as np
import pytest
from tokenizers import AddedToken, Encoding, Tokenizer
from tokenizers.implementations import BertWordPieceTokenizer
from tokenizers.models import BPE, Model, WordPiece, Unigram
from tokenizers.normalizers import Lowercase
from tokenizers.pre_tokenizers import ByteLevel
from tokenizers.processors import BertProcessing, RobertaProcessing
from ..utils import bert_files, data_dir, multiprocessing_with_parallelism, roberta_files
class TestAddedToken:
def test_instantiate_with_content_only(self):
added_token = AddedToken("<mask>")
assert type(added_token) == AddedToken
assert str(added_token) == "<mask>"
assert (
repr(added_token) == 'AddedToken("<mask>", rstrip=False, lstrip=False, single_word=False, normalized=True)'
)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == True
assert isinstance(pickle.loads(pickle.dumps(added_token)), AddedToken)
def test_can_set_rstrip(self):
added_token = AddedToken("<mask>", rstrip=True)
assert added_token.rstrip == True
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == True
def test_can_set_lstrip(self):
added_token = AddedToken("<mask>", lstrip=True)
assert added_token.rstrip == False
assert added_token.lstrip == True
assert added_token.single_word == False
assert added_token.normalized == True
def test_can_set_single_word(self):
added_token = AddedToken("<mask>", single_word=True)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == True
assert added_token.normalized == True
def test_can_set_normalized(self):
added_token = AddedToken("<mask>", normalized=False)
assert added_token.rstrip == False
assert added_token.lstrip == False
assert added_token.single_word == False
assert added_token.normalized == False
class TestTokenizer:
def test_has_expected_type_and_methods(self):
tokenizer = Tokenizer(BPE())
assert type(tokenizer) == Tokenizer
assert callable(tokenizer.num_special_tokens_to_add)
assert callable(tokenizer.get_vocab)
assert callable(tokenizer.get_vocab_size)
assert callable(tokenizer.enable_truncation)
assert callable(tokenizer.no_truncation)
assert callable(tokenizer.enable_padding)
assert callable(tokenizer.no_padding)
assert callable(tokenizer.encode)
assert callable(tokenizer.encode_batch)
assert callable(tokenizer.decode)
assert callable(tokenizer.decode_batch)
assert callable(tokenizer.token_to_id)
assert callable(tokenizer.id_to_token)
assert callable(tokenizer.add_tokens)
assert callable(tokenizer.add_special_tokens)
assert callable(tokenizer.train)
assert callable(tokenizer.post_process)
assert isinstance(tokenizer.model, Model)
assert tokenizer.normalizer is None
assert tokenizer.pre_tokenizer is None
assert tokenizer.post_processor is None
assert tokenizer.decoder is None
assert isinstance(pickle.loads(pickle.dumps(Tokenizer(BPE()))), Tokenizer)
def test_add_tokens(self):
tokenizer = Tokenizer(BPE())
added = tokenizer.add_tokens(["my", "name", "is", "john"])
assert added == 4
tokens = [AddedToken("the"), AddedToken("quick", normalized=False), AddedToken()]
assert tokens[0].normalized == True
added = tokenizer.add_tokens(tokens)
assert added == 2
assert tokens[0].normalized == True
assert tokens[1].normalized == False
def test_add_special_tokens(self):
tokenizer = Tokenizer(BPE())
# Can add special tokens as `str`
added = tokenizer.add_special_tokens(["my", "name", "is", "john"])
assert added == 4
# Can add special tokens as `AddedToken`
tokens = [AddedToken("the"), AddedToken("quick", normalized=True), AddedToken()]
assert tokens[0].normalized == True
added = tokenizer.add_special_tokens(tokens)
assert added == 2
assert tokens[0].normalized == False
assert tokens[1].normalized == True
def test_encode(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can encode single sequence
output = tokenizer.encode("my name is john")
assert output.tokens == ["my", "name", "is", "john"]
assert type(output.ids) == list
assert type(output.type_ids) == list
assert type(output.offsets) == list
with pytest.warns(DeprecationWarning):
assert type(output.words) == list
assert type(output.word_ids) == list
assert type(output.special_tokens_mask) == list
assert type(output.attention_mask) == list
assert type(output.overflowing) == list
# Can encode a pair of sequences
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["my", "name", "is", "john", "pair"]
assert isinstance(pickle.loads(pickle.dumps(output)), Encoding)
# Can encode a single pre-tokenized sequence
output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True)
assert output.tokens == ["my", "name", "is", "john"]
# Can encode a batch with both a single sequence and a pair of sequences
output = tokenizer.encode_batch(["my name is john", ("my name is john", "pair")])
assert len(output) == 2
def test_encode_formats(self, bert_files):
with pytest.deprecated_call():
tokenizer = BertWordPieceTokenizer(bert_files["vocab"])
# Encode
output = tokenizer.encode("my name is john")
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"]
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True)
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"]
output = tokenizer.encode(["my", "name", "is", "john"], ["pair"], is_pretokenized=True)
assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"]
# Encode batch
result_single = [
["[CLS]", "my", "name", "is", "john", "[SEP]"],
["[CLS]", "my", "name", "is", "georges", "[SEP]"],
]
result_pair = [
["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"],
["[CLS]", "my", "name", "is", "georges", "[SEP]", "pair", "[SEP]"],
]
def format(encodings):
return [e.tokens for e in encodings]
def test_single(input, is_pretokenized=False):
output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized)
assert format(output) == result_single
def test_pair(input, is_pretokenized=False):
output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized)
assert format(output) == result_pair
# Classic inputs
# Lists
test_single(["My name is John", "My name is Georges"])
test_pair([("my name is john", "pair"), ("my name is georges", "pair")])
test_pair([["my name is john", "pair"], ["my name is georges", "pair"]])
# Tuples
test_single(("My name is John", "My name is Georges"))
test_pair((("My name is John", "pair"), ("My name is Georges", "pair")))
# Numpy
test_single(np.array(["My name is John", "My name is Georges"]))
test_pair(np.array([("My name is John", "pair"), ("My name is Georges", "pair")]))
test_pair(np.array([["My name is John", "pair"], ["My name is Georges", "pair"]]))
# PreTokenized inputs
# Lists
test_single([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]], True)
test_pair(
[
(["My", "name", "is", "John"], ["pair"]),
(["My", "name", "is", "Georges"], ["pair"]),
],
True,
)
test_pair(
[
[["My", "name", "is", "John"], ["pair"]],
[["My", "name", "is", "Georges"], ["pair"]],
],
True,
)
# Tuples
test_single((("My", "name", "is", "John"), ("My", "name", "is", "Georges")), True)
test_pair(
(
(("My", "name", "is", "John"), ("pair",)),
(("My", "name", "is", "Georges"), ("pair",)),
),
True,
)
test_pair(
(
(["My", "name", "is", "John"], ["pair"]),
(["My", "name", "is", "Georges"], ["pair"]),
),
True,
)
# Numpy
test_single(
np.array([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]]),
True,
)
test_single(
np.array((("My", "name", "is", "John"), ("My", "name", "is", "Georges"))),
True,
)
test_pair(
np.array(
[
[["My", "name", "is", "John"], ["pair"]],
[["My", "name", "is", "Georges"], ["pair"]],
],
dtype=object,
),
True,
)
test_pair(
np.array(
(
(("My", "name", "is", "John"), ("pair",)),
(("My", "name", "is", "Georges"), ("pair",)),
),
dtype=object,
),
True,
)
        # Malformed inputs
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode([["my", "name"]])
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode("My name is john", [["pair"]])
with pytest.raises(TypeError, match="TextInputSequence must be str"):
tokenizer.encode("my name is john", ["pair"])
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode("My name is john", is_pretokenized=True)
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode("My name is john", ["pair"], is_pretokenized=True)
with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"):
tokenizer.encode(["My", "name", "is", "John"], "pair", is_pretokenized=True)
def test_encode_add_special_tokens(self, roberta_files):
with pytest.deprecated_call():
tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"]))
tokenizer.add_special_tokens(["<s>", "</s>"])
tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True)
tokenizer.post_processor = RobertaProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),
)
# Can encode with special tokens
output_with_specials = tokenizer.encode("My name is John", add_special_tokens=True)
assert output_with_specials.tokens == ["<s>", "ĠMy", "Ġname", "Ġis", "ĠJohn", "</s>"]
# Can encode without special tokens
output_without_specials = tokenizer.encode("My name is John", add_special_tokens=False)
assert output_without_specials.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
def test_truncation(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.enable_truncation(2)
# Can truncate single sequences
output = tokenizer.encode("my name is john")
assert output.tokens == ["my", "name"]
# Can truncate pair sequences as well
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["my", "pair"]
# Can get the params and give them to enable_truncation
trunc = tokenizer.truncation
tokenizer.enable_truncation(**trunc)
# Left truncation direction
tokenizer.enable_truncation(2, direction="left")
output = tokenizer.encode("my name is john")
assert output.tokens == ["is", "john"]
output = tokenizer.encode("my name is john", "pair")
assert output.tokens == ["john", "pair"]
def test_padding(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
        # By default it does nothing when encoding a single sequence
tokenizer.enable_padding()
output = tokenizer.encode("my name")
assert output.tokens == ["my", "name"]
# Can pad to the longest in a batch
output = tokenizer.encode_batch(["my name", "my name is john"])
assert all([len(encoding) == 4 for encoding in output])
# Can pad to the specified length otherwise
tokenizer.enable_padding(length=4)
output = tokenizer.encode("my name")
assert output.tokens == ["my", "name", "[PAD]", "[PAD]"]
output = tokenizer.encode("my name", "pair")
assert output.tokens == ["my", "name", "pair", "[PAD]"]
# Can get the params and give them to enable_padding
padding = tokenizer.padding
tokenizer.enable_padding(**padding)
def test_decode(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can decode single sequences
output = tokenizer.decode([0, 1, 2, 3])
assert output == "my name is john"
# Can decode batch
output = tokenizer.decode_batch([[0, 1, 2, 3], [4]])
assert output == ["my name is john", "pair"]
def test_get_vocab(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can retrieve vocab with added tokens
vocab = tokenizer.get_vocab(with_added_tokens=True)
assert vocab == {"is": 2, "john": 3, "my": 0, "name": 1, "pair": 4}
# Can retrieve vocab without added tokens
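        # (the BPE model itself was created empty, so all known tokens are added tokens)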
vocab = tokenizer.get_vocab(with_added_tokens=False)
assert vocab == {}
def test_get_vocab_size(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
# Can retrieve vocab's size with added tokens
size = tokenizer.get_vocab_size(with_added_tokens=True)
assert size == 5
# Can retrieve vocab's size without added tokens
size = tokenizer.get_vocab_size(with_added_tokens=False)
assert size == 0
def test_post_process(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.enable_truncation(2)
tokenizer.enable_padding(length=4)
encoding = tokenizer.encode("my name is john")
pair_encoding = tokenizer.encode("pair")
# Can post process a single encoding
output = tokenizer.post_process(encoding)
assert output.tokens == ["my", "name", "[PAD]", "[PAD]"]
# Can post process a pair of encodings
output = tokenizer.post_process(encoding, pair_encoding)
assert output.tokens == ["my", "pair", "[PAD]", "[PAD]"]
def test_multiprocessing_with_parallelism(self):
tokenizer = Tokenizer(BPE())
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_from_pretrained(self):
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["Hey", "there", "dear", "friend", "!"]
def test_from_pretrained_revision(self):
tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["hey", "there", "dear", "friend", "!"]
tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test", revision="gpt-2")
output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False)
assert output.tokens == ["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"]
def test_unigram_byte_fallback(self):
vocab = [
("<unk>", 0.0),
("A", -0.01),
("sen", -0.02),
("te", -0.03),
("n", -0.04),
("ce", -0.05),
("<0xF0>", -0.06),
("<0x9F>", -0.06),
("<0xA4>", -0.06),
("<0x97>", -0.06),
(" ", -0.4),
]
        tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False))
output = tokenizer.encode("A sentence 🤗")
assert output.ids == [1, 10, 2, 3, 4, 5, 10, 0]
assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "🤗"]
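        # With byte_fallback enabled, the out-of-vocab emoji decomposes into its UTF-8 byte tokens instead of <unk>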
tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True))
output = tokenizer.encode("A sentence 🤗")
assert output.ids == [1, 10, 2, 3, 4, 5, 10, 6, 7, 8, 9]
assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "<0xF0>", "<0x9F>", "<0xA4>", "<0x97>"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_normalizers.py | import pickle
import pytest
from tokenizers import NormalizedString, Tokenizer
from tokenizers.models import BPE
from tokenizers.normalizers import BertNormalizer, Lowercase, Normalizer, Sequence, Strip, Prepend
class TestBertNormalizer:
def test_instantiate(self):
assert isinstance(BertNormalizer(), Normalizer)
assert isinstance(BertNormalizer(), BertNormalizer)
assert isinstance(pickle.loads(pickle.dumps(BertNormalizer())), BertNormalizer)
def test_strip_accents(self):
normalizer = BertNormalizer(strip_accents=True, lowercase=False, handle_chinese_chars=False, clean_text=False)
output = normalizer.normalize_str("Héllò")
assert output == "Hello"
def test_handle_chinese_chars(self):
normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=True, clean_text=False)
output = normalizer.normalize_str("你好")
assert output == " 你 好 "
def test_clean_text(self):
normalizer = BertNormalizer(strip_accents=False, lowercase=False, handle_chinese_chars=False, clean_text=True)
output = normalizer.normalize_str("\ufeffHello")
assert output == "Hello"
def test_lowercase(self):
normalizer = BertNormalizer(strip_accents=False, lowercase=True, handle_chinese_chars=False, clean_text=False)
output = normalizer.normalize_str("Héllò")
assert output == "héllò"
def test_can_modify(self):
normalizer = BertNormalizer(clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True)
assert normalizer.clean_text == True
assert normalizer.handle_chinese_chars == True
assert normalizer.strip_accents == True
assert normalizer.lowercase == True
# Modify these
normalizer.clean_text = False
assert normalizer.clean_text == False
normalizer.handle_chinese_chars = False
assert normalizer.handle_chinese_chars == False
normalizer.strip_accents = None
assert normalizer.strip_accents == None
normalizer.lowercase = False
assert normalizer.lowercase == False
class TestSequence:
def test_instantiate(self):
assert isinstance(Sequence([]), Normalizer)
assert isinstance(Sequence([]), Sequence)
assert isinstance(pickle.loads(pickle.dumps(Sequence([]))), Sequence)
def test_can_make_sequences(self):
normalizer = Sequence([Lowercase(), Strip()])
output = normalizer.normalize_str(" HELLO ")
assert output == "hello"
class TestLowercase:
def test_instantiate(self):
assert isinstance(Lowercase(), Normalizer)
assert isinstance(Lowercase(), Lowercase)
assert isinstance(pickle.loads(pickle.dumps(Lowercase())), Lowercase)
def test_lowercase(self):
normalizer = Lowercase()
output = normalizer.normalize_str("HELLO")
assert output == "hello"
class TestStrip:
def test_instantiate(self):
assert isinstance(Strip(), Normalizer)
assert isinstance(Strip(), Strip)
assert isinstance(pickle.loads(pickle.dumps(Strip())), Strip)
def test_left_strip(self):
normalizer = Strip(left=True, right=False)
output = normalizer.normalize_str(" hello ")
assert output == "hello "
def test_right_strip(self):
normalizer = Strip(left=False, right=True)
output = normalizer.normalize_str(" hello ")
assert output == " hello"
def test_full_strip(self):
normalizer = Strip(left=True, right=True)
output = normalizer.normalize_str(" hello ")
assert output == "hello"
def test_can_modify(self):
normalizer = Strip(left=True, right=True)
assert normalizer.left == True
assert normalizer.right == True
# Modify these
normalizer.left = False
assert normalizer.left == False
normalizer.right = False
assert normalizer.right == False
class TestPrepend:
def test_instantiate(self):
assert isinstance(Prepend("▁"), Normalizer)
assert isinstance(Prepend("▁"), Prepend)
assert isinstance(pickle.loads(pickle.dumps(Prepend("▁"))), Prepend)
def test_prepend(self):
normalizer = Prepend(prepend="▁")
output = normalizer.normalize_str("hello")
assert output == "▁hello"
def test_can_modify(self):
normalizer = Prepend("▁")
assert normalizer.prepend == "▁"
# Modify these
normalizer.prepend = "-"
assert normalizer.prepend == "-"
class TestCustomNormalizer:
class BadCustomNormalizer:
def normalize(self, normalized, wrong):
pass
class GoodCustomNormalizer:
def normalize(self, normalized):
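            # Keep a reference to verify later that it cannot be used once `normalize` has returned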
self.kept_normalized = normalized
normalized.replace("there", "you")
def use_after_normalize(self):
self.kept_normalized.replace("something", "else")
def test_instantiate(self):
bad = Normalizer.custom(TestCustomNormalizer.BadCustomNormalizer())
good_custom = TestCustomNormalizer.GoodCustomNormalizer()
good = Normalizer.custom(good_custom)
assert isinstance(bad, Normalizer)
assert isinstance(good, Normalizer)
with pytest.raises(Exception, match="TypeError:.*normalize()"):
bad.normalize_str("Hey there!")
assert good.normalize_str("Hey there!") == "Hey you!"
with pytest.raises(Exception, match="Cannot use a NormalizedStringRefMut outside `normalize`"):
good_custom.use_after_normalize()
def test_normalizer_interface(self):
normalizer = Normalizer.custom(TestCustomNormalizer.GoodCustomNormalizer())
normalized = NormalizedString("Hey there!")
normalizer.normalize(normalized)
assert repr(normalized) == 'NormalizedString(original="Hey there!", normalized="Hey you!")'
assert str(normalized) == "Hey you!"
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_pre_tokenizers.py | import json
import pickle
import pytest
from tokenizers.pre_tokenizers import (
BertPreTokenizer,
ByteLevel,
CharDelimiterSplit,
Digits,
Metaspace,
PreTokenizer,
Punctuation,
Sequence,
Split,
UnicodeScripts,
Whitespace,
WhitespaceSplit,
)
class TestByteLevel:
def test_instantiate(self):
assert ByteLevel() is not None
assert ByteLevel(add_prefix_space=True) is not None
assert ByteLevel(add_prefix_space=False) is not None
assert isinstance(ByteLevel(), PreTokenizer)
assert isinstance(ByteLevel(), ByteLevel)
assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)
def test_has_alphabet(self):
assert isinstance(ByteLevel.alphabet(), list)
assert len(ByteLevel.alphabet()) == 256
def test_can_modify(self):
pretok = ByteLevel(add_prefix_space=False)
assert pretok.add_prefix_space == False
# Modify these
pretok.add_prefix_space = True
assert pretok.add_prefix_space == True
def test_manual_reload(self):
byte_level = ByteLevel()
state = json.loads(byte_level.__getstate__())
reloaded = ByteLevel(**state)
assert isinstance(reloaded, ByteLevel)
class TestSplit:
def test_instantiate(self):
pre_tokenizer = Split(pattern=" ", behavior="removed")
assert pre_tokenizer is not None
assert isinstance(pre_tokenizer, PreTokenizer)
assert isinstance(pre_tokenizer, Split)
assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed"))), Split)
# test with invert=True
pre_tokenizer_with_invert = Split(pattern=" ", behavior="isolated", invert=True)
assert pre_tokenizer_with_invert is not None
assert isinstance(pre_tokenizer_with_invert, PreTokenizer)
assert isinstance(pre_tokenizer_with_invert, Split)
assert isinstance(pickle.loads(pickle.dumps(Split(" ", "removed", True))), Split)
class TestWhitespace:
def test_instantiate(self):
assert Whitespace() is not None
assert isinstance(Whitespace(), PreTokenizer)
assert isinstance(Whitespace(), Whitespace)
assert isinstance(pickle.loads(pickle.dumps(Whitespace())), Whitespace)
class TestWhitespaceSplit:
def test_instantiate(self):
assert WhitespaceSplit() is not None
assert isinstance(WhitespaceSplit(), PreTokenizer)
assert isinstance(WhitespaceSplit(), WhitespaceSplit)
assert isinstance(pickle.loads(pickle.dumps(WhitespaceSplit())), WhitespaceSplit)
class TestBertPreTokenizer:
def test_instantiate(self):
assert BertPreTokenizer() is not None
assert isinstance(BertPreTokenizer(), PreTokenizer)
assert isinstance(BertPreTokenizer(), BertPreTokenizer)
assert isinstance(pickle.loads(pickle.dumps(BertPreTokenizer())), BertPreTokenizer)
class TestMetaspace:
def test_instantiate(self):
assert Metaspace() is not None
assert Metaspace(replacement="-") is not None
with pytest.raises(ValueError, match="expected a string of length 1"):
Metaspace(replacement="")
assert Metaspace(add_prefix_space=True) is not None
assert isinstance(Metaspace(), PreTokenizer)
assert isinstance(Metaspace(), Metaspace)
assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace)
def test_can_modify(self):
pretok = Metaspace(replacement="$", add_prefix_space=False)
assert pretok.replacement == "$"
assert pretok.add_prefix_space == False
# Modify these
pretok.replacement = "%"
assert pretok.replacement == "%"
pretok.add_prefix_space = True
assert pretok.add_prefix_space == True
class TestCharDelimiterSplit:
def test_instantiate(self):
assert CharDelimiterSplit("-") is not None
with pytest.raises(ValueError, match="expected a string of length 1"):
CharDelimiterSplit("")
assert isinstance(CharDelimiterSplit(" "), PreTokenizer)
assert isinstance(CharDelimiterSplit(" "), CharDelimiterSplit)
assert isinstance(pickle.loads(pickle.dumps(CharDelimiterSplit("-"))), CharDelimiterSplit)
def test_can_modify(self):
pretok = CharDelimiterSplit("@")
assert pretok.delimiter == "@"
# Modify these
pretok.delimiter = "!"
assert pretok.delimiter == "!"
class TestPunctuation:
def test_instantiate(self):
assert Punctuation() is not None
assert Punctuation("removed") is not None
assert isinstance(Punctuation(), PreTokenizer)
assert isinstance(Punctuation(), Punctuation)
assert isinstance(pickle.loads(pickle.dumps(Punctuation())), Punctuation)
class TestSequence:
def test_instantiate(self):
assert Sequence([]) is not None
assert isinstance(Sequence([]), PreTokenizer)
assert isinstance(Sequence([]), Sequence)
dumped = pickle.dumps(Sequence([]))
assert isinstance(pickle.loads(dumped), Sequence)
def test_bert_like(self):
pre_tokenizer = Sequence([WhitespaceSplit(), Punctuation()])
assert isinstance(Sequence([]), PreTokenizer)
assert isinstance(Sequence([]), Sequence)
assert isinstance(pickle.loads(pickle.dumps(pre_tokenizer)), Sequence)
result = pre_tokenizer.pre_tokenize_str("Hey friend! How are you?!?")
assert result == [
("Hey", (0, 3)),
("friend", (4, 10)),
("!", (10, 11)),
("How", (16, 19)),
("are", (20, 23)),
("you", (24, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
class TestDigits:
def test_instantiate(self):
assert Digits() is not None
assert isinstance(Digits(), PreTokenizer)
assert isinstance(Digits(), Digits)
assert isinstance(Digits(True), Digits)
assert isinstance(Digits(False), Digits)
assert isinstance(pickle.loads(pickle.dumps(Digits())), Digits)
def test_can_modify(self):
pretok = Digits(individual_digits=False)
assert pretok.individual_digits == False
# Modify these
pretok.individual_digits = True
assert pretok.individual_digits == True
class TestUnicodeScripts:
def test_instantiate(self):
assert UnicodeScripts() is not None
assert isinstance(UnicodeScripts(), PreTokenizer)
assert isinstance(UnicodeScripts(), UnicodeScripts)
assert isinstance(pickle.loads(pickle.dumps(UnicodeScripts())), UnicodeScripts)
class TestCustomPreTokenizer:
class BadCustomPretok:
def pre_tokenize(self, pretok, wrong):
            # This method does not have the right signature: it takes one argument too many
pass
class GoodCustomPretok:
def split(self, n, normalized):
            # Here we just test that we can return a List[NormalizedString]; returning
            # the same string twice would not make sense otherwise
return [normalized, normalized]
def pre_tokenize(self, pretok):
pretok.split(self.split)
def test_instantiate(self):
bad = PreTokenizer.custom(TestCustomPreTokenizer.BadCustomPretok())
good = PreTokenizer.custom(TestCustomPreTokenizer.GoodCustomPretok())
assert isinstance(bad, PreTokenizer)
assert isinstance(good, PreTokenizer)
with pytest.raises(Exception, match="TypeError:.*pre_tokenize()"):
bad.pre_tokenize_str("Hey there!")
assert good.pre_tokenize_str("Hey there!") == [
("Hey there!", (0, 10)),
("Hey there!", (0, 10)),
]
def test_camel_case(self):
class CamelCasePretok:
def get_state(self, c):
if c.islower():
return "lower"
elif c.isupper():
return "upper"
elif c.isdigit():
return "digit"
else:
return "rest"
def split(self, n, normalized):
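                # Cut whenever the character class changes (and between punctuation chars), except on an upper->lower transition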
i = 0
# states = {"any", "lower", "upper", "digit", "rest"}
state = "any"
pieces = []
for j, c in enumerate(normalized.normalized):
c_state = self.get_state(c)
if state == "any":
state = c_state
if state != "rest" and state == c_state:
pass
elif state == "upper" and c_state == "lower":
pass
else:
pieces.append(normalized[i:j])
i = j
state = c_state
pieces.append(normalized[i:])
return pieces
def pre_tokenize(self, pretok):
pretok.split(self.split)
camel = PreTokenizer.custom(CamelCasePretok())
assert camel.pre_tokenize_str("HeyThere!?-ThisIsLife") == [
("Hey", (0, 3)),
("There", (3, 8)),
("!", (8, 9)),
("?", (9, 10)),
("-", (10, 11)),
("This", (11, 15)),
("Is", (15, 17)),
("Life", (17, 21)),
]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_processors.py | import json
import pickle
import pytest
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer
from tokenizers.processors import (
BertProcessing,
ByteLevel,
PostProcessor,
RobertaProcessing,
Sequence,
TemplateProcessing,
)
from ..utils import data_dir, roberta_files
class TestBertProcessing:
def test_instantiate(self):
processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))
assert processor is not None
assert isinstance(processor, PostProcessor)
assert isinstance(processor, BertProcessing)
assert isinstance(
pickle.loads(pickle.dumps(BertProcessing(("[SEP]", 0), ("[CLS]", 1)))),
BertProcessing,
)
def test_processing(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))
output = tokenizer.encode("my name", "pair")
assert output.tokens == ["[CLS]", "my", "name", "[SEP]", "pair", "[SEP]"]
assert output.ids == [1, 2, 3, 0, 6, 0]
class TestRobertaProcessing:
def test_instantiate(self):
processor = RobertaProcessing(("</s>", 1), ("<s>", 0))
assert processor is not None
assert isinstance(processor, PostProcessor)
assert isinstance(processor, RobertaProcessing)
assert isinstance(
pickle.loads(pickle.dumps(RobertaProcessing(("</s>", 1), ("<s>", 0)))),
RobertaProcessing,
)
def test_processing(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["<s>", "</s>"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0))
output = tokenizer.encode("my name", "pair")
assert output.tokens == ["<s>", "my", "name", "</s>", "</s>", "pair", "</s>"]
assert output.ids == [0, 2, 3, 1, 1, 6, 1]
class TestByteLevelProcessing:
def test_instantiate(self):
assert ByteLevel() is not None
assert ByteLevel(trim_offsets=True) is not None
assert isinstance(ByteLevel(), PostProcessor)
assert isinstance(ByteLevel(), ByteLevel)
assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)
def test_processing(self, roberta_files):
# Deprecated in 0.9
with pytest.deprecated_call():
tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"]))
tokenizer.pre_tokenizer = ByteLevelPreTokenizer(add_prefix_space=True)
# Keeps original offsets
output = tokenizer.encode("My name is John")
assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
assert output.offsets == [(0, 2), (2, 7), (7, 10), (10, 15)]
# Trims offsets when activated
tokenizer.post_processor = ByteLevel(trim_offsets=True)
output = tokenizer.encode("My name is John")
assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15)]
def test_manual_reload(self):
byte_level = ByteLevel()
state = json.loads(byte_level.__getstate__())
reloaded = ByteLevel(**state)
assert isinstance(reloaded, ByteLevel)
class TestTemplateProcessing:
def get_bert(self):
return TemplateProcessing(
single=["[CLS]", "$0", "[SEP]"],
pair=["[CLS]", "$A", "[SEP]", "$B:1", "[SEP]:1"],
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
def get_roberta(self):
return TemplateProcessing(
single="<s> $0 </s>",
pair="<s> $A </s> </s> $B </s>",
special_tokens=[("<s>", 0), ("</s>", 1)],
)
def get_t5_squad(self):
# >>> from transformers import AutoTokenizer
# >>> tok = AutoTokenizer.from_pretrained("t5-small")
# >>> tok.tokenize("question: ")
# ['▁question', ':']
# >>> tok.tokenize("context: ")
# ['▁context', ':']
# >>> tok.encode("context: ")
# [2625, 10]
# >>> tok.encode("question: ")
# [822, 10]
return TemplateProcessing(
single=["$0"],
pair=["Q", "$A", "C", "$B"],
special_tokens=[
{
"id": "Q",
"ids": [2625, 10],
"tokens": ["_question", ":"],
},
{
"id": "C",
"ids": [822, 10],
"tokens": ["_context", ":"],
},
],
)
def test_instantiate(self):
bert = self.get_bert()
assert bert is not None
assert isinstance(bert, PostProcessor)
assert isinstance(bert, TemplateProcessing)
assert isinstance(pickle.loads(pickle.dumps(bert)), TemplateProcessing)
# It is absolutely legal to have tokens with spaces in the name:
processor = TemplateProcessing(
single=["[ C L S ]", "Token with space"],
special_tokens=[("[ C L S ]", 0), ("Token with space", 1)],
)
# Sequence identifiers must be well formed:
with pytest.raises(Exception, match="Cannot build Piece"):
processor = TemplateProcessing(single="[CLS] $$ [SEP]")
with pytest.raises(Exception, match="Cannot build Piece"):
processor = TemplateProcessing(single="[CLS] $A: [SEP]")
# Special tokens must be provided when used in template:
with pytest.raises(Exception, match="Missing SpecialToken\\(s\\) with id\\(s\\)"):
processor = TemplateProcessing(single=["[CLS]"])
def test_bert_parity(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))
original = tokenizer.encode("my name", "pair")
tokenizer.post_processor = self.get_bert()
template = tokenizer.encode("my name", "pair")
assert original.ids == template.ids
def test_roberta_parity(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["<s>", "</s>"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0))
original = tokenizer.encode("my name is john", "pair")
tokenizer.post_processor = self.get_roberta()
template = tokenizer.encode("my name is john", "pair")
assert original.ids == template.ids
class TestSequenceProcessing:
def test_sequence_processing(self):
assert Sequence([]) is not None
assert Sequence([ByteLevel()]) is not None
assert isinstance(Sequence([]), PostProcessor)
assert isinstance(Sequence([]), Sequence)
serialized = pickle.dumps(Sequence([]))
assert isinstance(pickle.loads(serialized), Sequence)
def test_post_process(self):
byte_level = ByteLevel(trim_offsets=True)
template = TemplateProcessing(
single=["[CLS]", "$0", "[SEP]"],
pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"],
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
tokenizer.add_tokens(["my", "name", "is", "Ġjohn", "pair"])
tokenizer.post_processor = template
# Before the sequence
original = tokenizer.encode("my name is Ġjohn")
assert original.ids == [1, 2, 3, 4, 5, 0]
assert original.type_ids == [0, 0, 0, 0, 0, 0]
assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0)]
pair = tokenizer.encode("my name is Ġjohn", "pair")
# assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0]
assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0), (0, 4), (0, 0)]
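        # Now chain ByteLevel offset trimming with the template in a single Sequence post-processor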
processor = Sequence([byte_level, template])
tokenizer.post_processor = processor
original = tokenizer.encode("my name is Ġjohn")
assert original.ids == [1, 2, 3, 4, 5, 0]
assert original.type_ids == [0, 0, 0, 0, 0, 0]
# Offsets ARE trimmed
assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0)]
pair = tokenizer.encode("my name is Ġjohn", "pair")
# assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0]
assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0), (0, 4), (0, 0)]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_encoding.py | import pytest
from tokenizers import BertWordPieceTokenizer
from ..utils import bert_files, data_dir
class TestEncoding:
@pytest.fixture(scope="class")
def encodings(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
single_encoding = tokenizer.encode("I love HuggingFace")
pair_encoding = tokenizer.encode("I love HuggingFace", "Do you?")
return single_encoding, pair_encoding
def test_sequence_ids(self, encodings):
single, pair = encodings
assert single.sequence_ids == [None, 0, 0, 0, 0, None]
assert pair.sequence_ids == [None, 0, 0, 0, 0, None, 1, 1, 1, None]
def test_n_sequences(self, encodings):
single, pair = encodings
assert single.n_sequences == 1
assert pair.n_sequences == 2
def test_word_to_tokens(self, encodings):
single, pair = encodings
assert single.tokens == ["[CLS]", "i", "love", "hugging", "##face", "[SEP]"]
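        # Word 0 ("i") corresponds to the single token at index 1, hence the half-open range (1, 2)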
assert single.word_to_tokens(0) == (1, 2)
assert pair.tokens == [
"[CLS]",
"i",
"love",
"hugging",
"##face",
"[SEP]",
"do",
"you",
"?",
"[SEP]",
]
assert pair.word_to_tokens(0) == (1, 2)
assert pair.word_to_tokens(0, 0) == (1, 2)
assert pair.word_to_tokens(6, 0) == None
assert pair.word_to_tokens(0, 1) == (6, 7)
def test_word_to_chars(self, encodings):
single, pair = encodings
assert single.word_to_chars(2) == (7, 18)
assert pair.word_to_chars(2) == (7, 18)
assert pair.word_to_chars(2, 0) == (7, 18)
assert pair.word_to_chars(2, 1) == (6, 7)
def test_token_to_sequence(self, encodings):
single, pair = encodings
assert single.token_to_sequence(2) == 0
assert pair.token_to_sequence(2) == 0
assert pair.token_to_sequence(0) == None
assert pair.token_to_sequence(5) == None
assert pair.token_to_sequence(6) == 1
assert pair.token_to_sequence(8) == 1
assert pair.token_to_sequence(9) == None
assert pair.token_to_sequence(1200) == None
def test_token_to_chars(self, encodings):
single, pair = encodings
assert single.token_to_chars(0) == None
assert single.token_to_chars(2) == (2, 6)
assert pair.token_to_chars(2) == (2, 6)
assert pair.token_to_chars(5) == None
assert pair.token_to_chars(6) == (0, 2)
def test_token_to_word(self, encodings):
single, pair = encodings
assert single.token_to_word(0) == None
assert single.token_to_word(1) == 0
assert single.token_to_word(4) == 2
assert pair.token_to_word(1) == 0
assert pair.token_to_word(4) == 2
assert pair.token_to_word(5) == None
assert pair.token_to_word(6) == 0
assert pair.token_to_word(7) == 1
def test_char_to_token(self, encodings):
single, pair = encodings
assert single.char_to_token(0) == 1
assert pair.char_to_token(0) == 1
assert pair.char_to_token(0, 0) == 1
assert pair.char_to_token(1, 0) == None
assert pair.char_to_token(0, 1) == 6
assert pair.char_to_token(2, 1) == None
def test_char_to_word(self, encodings):
single, pair = encodings
assert single.char_to_word(0) == 0
assert single.char_to_word(1) == None
assert pair.char_to_word(2) == 1
assert pair.char_to_word(2, 0) == 1
assert pair.char_to_word(2, 1) == None
assert pair.char_to_word(3, 1) == 1
def test_truncation(self, encodings):
single, _ = encodings
single.truncate(2, 1, "right")
assert single.tokens == ["[CLS]", "i"]
assert single.overflowing[0].tokens == ["i", "love"]
def test_invalid_truncate_direction(self, encodings):
single, _ = encodings
with pytest.raises(ValueError) as excinfo:
single.truncate(2, 1, "not_a_direction")
assert "Invalid truncation direction value : not_a_direction" == str(excinfo.value)
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_models.py | import pickle
import pytest
from tokenizers.models import BPE, Model, WordLevel, WordPiece
from ..utils import bert_files, data_dir, roberta_files
class TestBPE:
def test_instantiate(self, roberta_files):
assert isinstance(BPE(), Model)
assert isinstance(BPE(), BPE)
vocab = {"a": 0, "b": 1, "ab": 2}
merges = [("a", "b")]
assert isinstance(BPE(vocab, merges), Model)
assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE)
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(vocab=vocab)
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(merges=merges)
assert isinstance(
pickle.loads(pickle.dumps(BPE(vocab, merges))),
BPE,
)
# Deprecated calls in 0.9
with pytest.deprecated_call():
assert isinstance(BPE(roberta_files["vocab"], roberta_files["merges"]), Model)
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(vocab=roberta_files["vocab"])
with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"):
BPE(merges=roberta_files["merges"])
with pytest.deprecated_call():
assert isinstance(
pickle.loads(pickle.dumps(BPE(roberta_files["vocab"], roberta_files["merges"]))),
BPE,
)
def test_can_modify(self):
model = BPE(
dropout=0.5,
unk_token="[UNK]",
continuing_subword_prefix="__prefix__",
end_of_word_suffix="__suffix__",
fuse_unk=False,
)
assert model.dropout == 0.5
assert model.unk_token == "[UNK]"
assert model.continuing_subword_prefix == "__prefix__"
assert model.end_of_word_suffix == "__suffix__"
assert model.fuse_unk == False
assert model.byte_fallback == False
# Modify these
model.dropout = 0.1
assert pytest.approx(model.dropout) == 0.1
model.unk_token = "<unk>"
assert model.unk_token == "<unk>"
model.continuing_subword_prefix = None
assert model.continuing_subword_prefix == None
model.end_of_word_suffix = "suff"
assert model.end_of_word_suffix == "suff"
model.fuse_unk = True
assert model.fuse_unk == True
model.byte_fallback = True
assert model.byte_fallback == True
class TestWordPiece:
def test_instantiate(self, bert_files):
assert isinstance(WordPiece(), Model)
assert isinstance(WordPiece(), WordPiece)
vocab = {"a": 0, "b": 1, "ab": 2}
assert isinstance(WordPiece(vocab), Model)
assert isinstance(WordPiece(vocab), WordPiece)
assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece)
assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece)
# Deprecated calls in 0.9
with pytest.deprecated_call():
assert isinstance(WordPiece(bert_files["vocab"]), Model)
with pytest.deprecated_call():
assert isinstance(pickle.loads(pickle.dumps(WordPiece(bert_files["vocab"]))), WordPiece)
def test_can_modify(self):
model = WordPiece(
unk_token="<oov>",
continuing_subword_prefix="__prefix__",
max_input_chars_per_word=200,
)
assert model.unk_token == "<oov>"
assert model.continuing_subword_prefix == "__prefix__"
assert model.max_input_chars_per_word == 200
# Modify these
model.unk_token = "<unk>"
assert model.unk_token == "<unk>"
model.continuing_subword_prefix = "$$$"
assert model.continuing_subword_prefix == "$$$"
model.max_input_chars_per_word = 10
assert model.max_input_chars_per_word == 10
class TestWordLevel:
def test_instantiate(self, roberta_files):
assert isinstance(WordLevel(), Model)
assert isinstance(WordLevel(), WordLevel)
vocab = {"a": 0, "b": 1, "ab": 2}
assert isinstance(WordLevel(vocab), Model)
assert isinstance(WordLevel(vocab), WordLevel)
assert isinstance(WordLevel.from_file(roberta_files["vocab"]), WordLevel)
        # The WordLevel model expects a vocab.json in the same format as the roberta one,
        # so we can just try to load that file
with pytest.deprecated_call():
assert isinstance(WordLevel(roberta_files["vocab"]), Model)
with pytest.deprecated_call():
assert isinstance(WordLevel(roberta_files["vocab"]), WordLevel)
def test_can_modify(self):
model = WordLevel(unk_token="<oov>")
assert model.unk_token == "<oov>"
# Modify these
model.unk_token = "<unk>"
assert model.unk_token == "<unk>"
| 0 |